1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include "opt_bhyve_snapshot.h" 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/smp.h> 37 #include <sys/kernel.h> 38 #include <sys/malloc.h> 39 #include <sys/pcpu.h> 40 #include <sys/proc.h> 41 #include <sys/reg.h> 42 #include <sys/smr.h> 43 #include <sys/sysctl.h> 44 45 #include <vm/vm.h> 46 #include <vm/vm_extern.h> 47 #include <vm/pmap.h> 48 49 #include <machine/cpufunc.h> 50 #include <machine/psl.h> 51 #include <machine/md_var.h> 52 #include <machine/specialreg.h> 53 #include <machine/smp.h> 54 #include <machine/vmm.h> 55 #include <machine/vmm_dev.h> 56 #include <machine/vmm_instruction_emul.h> 57 #include <machine/vmm_snapshot.h> 58 59 #include "vmm_lapic.h" 60 #include "vmm_stat.h" 61 #include "vmm_ktr.h" 62 #include "vmm_ioport.h" 63 #include "vatpic.h" 64 #include "vlapic.h" 65 #include "vlapic_priv.h" 66 67 #include "x86.h" 68 #include "vmcb.h" 69 #include "svm.h" 70 #include "svm_softc.h" 71 #include "svm_msr.h" 72 #include "npt.h" 73 74 SYSCTL_DECL(_hw_vmm); 75 SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 76 NULL); 77 78 /* 79 * SVM CPUID function 0x8000_000A, edx bit decoding. 80 */ 81 #define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ 82 #define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ 83 #define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ 84 #define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ 85 #define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ 86 #define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ 87 #define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ 88 #define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ 89 #define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. 
*/ 90 #define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ 91 #define AMD_CPUID_SVM_AVIC BIT(13) /* AVIC present */ 92 93 #define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ 94 VMCB_CACHE_IOPM | \ 95 VMCB_CACHE_I | \ 96 VMCB_CACHE_TPR | \ 97 VMCB_CACHE_CR2 | \ 98 VMCB_CACHE_CR | \ 99 VMCB_CACHE_DR | \ 100 VMCB_CACHE_DT | \ 101 VMCB_CACHE_SEG | \ 102 VMCB_CACHE_NP) 103 104 static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT; 105 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean, 106 0, NULL); 107 108 static MALLOC_DEFINE(M_SVM, "svm", "svm"); 109 static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); 110 111 static uint32_t svm_feature = ~0U; /* AMD SVM features. */ 112 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0, 113 "SVM features advertised by CPUID.8000000AH:EDX"); 114 115 static int disable_npf_assist; 116 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN, 117 &disable_npf_assist, 0, NULL); 118 119 /* Maximum ASIDs supported by the processor */ 120 static uint32_t nasid; 121 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0, 122 "Number of ASIDs supported by this processor"); 123 124 /* Current ASID generation for each host cpu */ 125 static struct asid asid[MAXCPU]; 126 127 /* SVM host state saved area of size 4KB for each physical core. */ 128 static uint8_t *hsave; 129 130 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery"); 131 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry"); 132 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window"); 133 134 static int svm_getdesc(void *vcpui, int reg, struct seg_desc *desc); 135 static int svm_setreg(void *vcpui, int ident, uint64_t val); 136 137 static __inline int 138 flush_by_asid(void) 139 { 140 141 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); 142 } 143 144 static __inline int 145 decode_assist(void) 146 { 147 148 return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST); 149 } 150 151 static void 152 svm_disable(void *arg __unused) 153 { 154 uint64_t efer; 155 156 efer = rdmsr(MSR_EFER); 157 efer &= ~EFER_SVM; 158 wrmsr(MSR_EFER, efer); 159 } 160 161 /* 162 * Disable SVM on all CPUs. 163 */ 164 static int 165 svm_modcleanup(void) 166 { 167 168 smp_rendezvous(NULL, svm_disable, NULL, NULL); 169 kmem_free(hsave, (mp_maxid + 1) * PAGE_SIZE); 170 return (0); 171 } 172 173 /* 174 * Verify that all the features required by bhyve are available. 175 */ 176 static int 177 check_svm_features(void) 178 { 179 u_int regs[4]; 180 181 /* CPUID Fn8000_000A is for SVM */ 182 do_cpuid(0x8000000A, regs); 183 svm_feature &= regs[3]; 184 185 /* 186 * The number of ASIDs can be configured to be less than what is 187 * supported by the hardware but not more. 
188 */ 189 if (nasid == 0 || nasid > regs[1]) 190 nasid = regs[1]; 191 KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid)); 192 193 /* bhyve requires the Nested Paging feature */ 194 if (!(svm_feature & AMD_CPUID_SVM_NP)) { 195 printf("SVM: Nested Paging feature not available.\n"); 196 return (ENXIO); 197 } 198 199 /* bhyve requires the NRIP Save feature */ 200 if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) { 201 printf("SVM: NRIP Save feature not available.\n"); 202 return (ENXIO); 203 } 204 205 return (0); 206 } 207 208 static void 209 svm_enable(void *arg __unused) 210 { 211 uint64_t efer; 212 213 efer = rdmsr(MSR_EFER); 214 efer |= EFER_SVM; 215 wrmsr(MSR_EFER, efer); 216 217 wrmsr(MSR_VM_HSAVE_PA, vtophys(&hsave[curcpu * PAGE_SIZE])); 218 } 219 220 /* 221 * Return 1 if SVM is enabled on this processor and 0 otherwise. 222 */ 223 static int 224 svm_available(void) 225 { 226 uint64_t msr; 227 228 /* Section 15.4 Enabling SVM from APM2. */ 229 if ((amd_feature2 & AMDID2_SVM) == 0) { 230 printf("SVM: not available.\n"); 231 return (0); 232 } 233 234 msr = rdmsr(MSR_VM_CR); 235 if ((msr & VM_CR_SVMDIS) != 0) { 236 printf("SVM: disabled by BIOS.\n"); 237 return (0); 238 } 239 240 return (1); 241 } 242 243 static int 244 svm_modinit(int ipinum) 245 { 246 int error, cpu; 247 248 if (!svm_available()) 249 return (ENXIO); 250 251 error = check_svm_features(); 252 if (error) 253 return (error); 254 255 vmcb_clean &= VMCB_CACHE_DEFAULT; 256 257 for (cpu = 0; cpu < MAXCPU; cpu++) { 258 /* 259 * Initialize the host ASIDs to their "highest" valid values. 260 * 261 * The next ASID allocation will rollover both 'gen' and 'num' 262 * and start off the sequence at {1,1}. 263 */ 264 asid[cpu].gen = ~0UL; 265 asid[cpu].num = nasid - 1; 266 } 267 268 svm_msr_init(); 269 svm_npt_init(ipinum); 270 271 /* Enable SVM on all CPUs */ 272 hsave = kmem_malloc((mp_maxid + 1) * PAGE_SIZE, M_WAITOK | M_ZERO); 273 smp_rendezvous(NULL, svm_enable, NULL, NULL); 274 275 return (0); 276 } 277 278 static void 279 svm_modresume(void) 280 { 281 282 svm_enable(NULL); 283 } 284 285 #ifdef BHYVE_SNAPSHOT 286 void 287 svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset) 288 { 289 struct vmcb_ctrl *ctrl; 290 291 ctrl = svm_get_vmcb_ctrl(vcpu); 292 ctrl->tsc_offset = offset; 293 294 svm_set_dirty(vcpu, VMCB_CACHE_I); 295 SVM_CTR1(vcpu, "tsc offset changed to %#lx", offset); 296 297 vm_set_tsc_offset(vcpu->vcpu, offset); 298 } 299 #endif 300 301 /* Pentium compatible MSRs */ 302 #define MSR_PENTIUM_START 0 303 #define MSR_PENTIUM_END 0x1FFF 304 /* AMD 6th generation and Intel compatible MSRs */ 305 #define MSR_AMD6TH_START 0xC0000000UL 306 #define MSR_AMD6TH_END 0xC0001FFFUL 307 /* AMD 7th and 8th generation compatible MSRs */ 308 #define MSR_AMD7TH_START 0xC0010000UL 309 #define MSR_AMD7TH_END 0xC0011FFFUL 310 311 /* 312 * Get the index and bit position for a MSR in permission bitmap. 313 * Two bits are used for each MSR: lower bit for read and higher bit for write. 
314 */ 315 static int 316 svm_msr_index(uint64_t msr, int *index, int *bit) 317 { 318 uint32_t base, off; 319 320 *index = -1; 321 *bit = (msr % 4) * 2; 322 base = 0; 323 324 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { 325 *index = msr / 4; 326 return (0); 327 } 328 329 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 330 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { 331 off = (msr - MSR_AMD6TH_START); 332 *index = (off + base) / 4; 333 return (0); 334 } 335 336 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); 337 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { 338 off = (msr - MSR_AMD7TH_START); 339 *index = (off + base) / 4; 340 return (0); 341 } 342 343 return (EINVAL); 344 } 345 346 /* 347 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor. 348 */ 349 static void 350 svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) 351 { 352 int index, bit, error __diagused; 353 354 error = svm_msr_index(msr, &index, &bit); 355 KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr)); 356 KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE, 357 ("%s: invalid index %d for msr %#lx", __func__, index, msr)); 358 KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d " 359 "msr %#lx", __func__, bit, msr)); 360 361 if (read) 362 perm_bitmap[index] &= ~(1UL << bit); 363 364 if (write) 365 perm_bitmap[index] &= ~(2UL << bit); 366 } 367 368 static void 369 svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) 370 { 371 372 svm_msr_perm(perm_bitmap, msr, true, true); 373 } 374 375 static void 376 svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) 377 { 378 379 svm_msr_perm(perm_bitmap, msr, true, false); 380 } 381 382 static __inline int 383 svm_get_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask) 384 { 385 struct vmcb_ctrl *ctrl; 386 387 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 388 389 ctrl = svm_get_vmcb_ctrl(vcpu); 390 return (ctrl->intercept[idx] & bitmask ? 
1 : 0); 391 } 392 393 static __inline void 394 svm_set_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask, int enabled) 395 { 396 struct vmcb_ctrl *ctrl; 397 uint32_t oldval; 398 399 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 400 401 ctrl = svm_get_vmcb_ctrl(vcpu); 402 oldval = ctrl->intercept[idx]; 403 404 if (enabled) 405 ctrl->intercept[idx] |= bitmask; 406 else 407 ctrl->intercept[idx] &= ~bitmask; 408 409 if (ctrl->intercept[idx] != oldval) { 410 svm_set_dirty(vcpu, VMCB_CACHE_I); 411 SVM_CTR3(vcpu, "intercept[%d] modified from %#x to %#x", idx, 412 oldval, ctrl->intercept[idx]); 413 } 414 } 415 416 static __inline void 417 svm_disable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask) 418 { 419 420 svm_set_intercept(vcpu, off, bitmask, 0); 421 } 422 423 static __inline void 424 svm_enable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask) 425 { 426 427 svm_set_intercept(vcpu, off, bitmask, 1); 428 } 429 430 static void 431 vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa, 432 uint64_t msrpm_base_pa, uint64_t np_pml4) 433 { 434 struct vmcb_ctrl *ctrl; 435 struct vmcb_state *state; 436 uint32_t mask; 437 int n; 438 439 ctrl = svm_get_vmcb_ctrl(vcpu); 440 state = svm_get_vmcb_state(vcpu); 441 442 ctrl->iopm_base_pa = iopm_base_pa; 443 ctrl->msrpm_base_pa = msrpm_base_pa; 444 445 /* Enable nested paging */ 446 ctrl->np_enable = 1; 447 ctrl->n_cr3 = np_pml4; 448 449 /* 450 * Intercept accesses to the control registers that are not shadowed 451 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. 452 */ 453 for (n = 0; n < 16; n++) { 454 mask = (BIT(n) << 16) | BIT(n); 455 if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8) 456 svm_disable_intercept(vcpu, VMCB_CR_INTCPT, mask); 457 else 458 svm_enable_intercept(vcpu, VMCB_CR_INTCPT, mask); 459 } 460 461 /* 462 * Intercept everything when tracing guest exceptions otherwise 463 * just intercept machine check exception. 464 */ 465 if (vcpu_trace_exceptions(vcpu->vcpu)) { 466 for (n = 0; n < 32; n++) { 467 /* 468 * Skip unimplemented vectors in the exception bitmap. 469 */ 470 if (n == 2 || n == 9) { 471 continue; 472 } 473 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(n)); 474 } 475 } else { 476 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); 477 } 478 479 /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ 480 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); 481 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); 482 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); 483 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); 484 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); 485 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); 486 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI); 487 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN); 488 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_FERR_FREEZE); 489 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD); 490 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA); 491 492 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR); 493 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT); 494 495 /* 496 * Intercept SVM instructions since AMD enables them in guests otherwise. 497 * Non-intercepted VMMCALL causes #UD, skip it. 
 */
	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
	if (vcpu_trap_wbinvd(vcpu->vcpu)) {
		svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT,
		    VMCB_INTCPT_WBINVD);
	}

	/*
	 * From section "Canonicalization and Consistency Checks" in APMv2
	 * the VMRUN intercept bit must be set to pass the consistency check.
	 */
	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);

	/*
	 * The ASID will be set to a non-zero value just before VMRUN.
	 */
	ctrl->asid = 0;

	/*
	 * Section 15.21.1, Interrupt Masking in EFLAGS
	 * Section 15.21.2, Virtualizing APIC.TPR
	 *
	 * This must be set for %rflags and %cr8 isolation of guest and host.
	 */
	ctrl->v_intr_masking = 1;

	/* Enable Last Branch Record aka LBR for debugging */
	ctrl->lbr_virt_en = 1;
	state->dbgctl = BIT(0);

	/* EFER_SVM must always be set when the guest is executing */
	state->efer = EFER_SVM;

	/* Set up the PAT to power-on state */
	state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) |
	    PAT_VALUE(1, PAT_WRITE_THROUGH) |
	    PAT_VALUE(2, PAT_UNCACHED) |
	    PAT_VALUE(3, PAT_UNCACHEABLE) |
	    PAT_VALUE(4, PAT_WRITE_BACK) |
	    PAT_VALUE(5, PAT_WRITE_THROUGH) |
	    PAT_VALUE(6, PAT_UNCACHED) |
	    PAT_VALUE(7, PAT_UNCACHEABLE);

	/* Set up DR6/7 to power-on state */
	state->dr6 = DBREG_DR6_RESERVED1;
	state->dr7 = DBREG_DR7_RESERVED1;
}

/*
 * Initialize a virtual machine.
 */
static void *
svm_init(struct vm *vm, pmap_t pmap)
{
	struct svm_softc *svm_sc;

	svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO);

	svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->msr_bitmap == NULL)
		panic("contigmalloc of SVM MSR bitmap failed");
	svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->iopm_bitmap == NULL)
		panic("contigmalloc of SVM IO bitmap failed");

	svm_sc->vm = vm;
	svm_sc->nptp = vtophys(pmap->pm_pmltop);

	/*
	 * Intercept read and write accesses to all MSRs.
	 */
	memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE);

	/*
	 * Access to the following MSRs is redirected to the VMCB when the
	 * guest is executing. Therefore it is safe to allow the guest to
	 * read/write these MSRs directly without hypervisor involvement.
582 */ 583 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); 584 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); 585 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); 586 587 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); 588 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); 589 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); 590 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); 591 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); 592 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); 593 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); 594 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); 595 596 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); 597 598 /* 599 * Intercept writes to make sure that the EFER_SVM bit is not cleared. 600 */ 601 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); 602 603 /* Intercept access to all I/O ports. */ 604 memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE); 605 606 return (svm_sc); 607 } 608 609 static void * 610 svm_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid) 611 { 612 struct svm_softc *sc = vmi; 613 struct svm_vcpu *vcpu; 614 615 vcpu = malloc(sizeof(*vcpu), M_SVM, M_WAITOK | M_ZERO); 616 vcpu->sc = sc; 617 vcpu->vcpu = vcpu1; 618 vcpu->vcpuid = vcpuid; 619 vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE, M_SVM, 620 M_WAITOK | M_ZERO); 621 vcpu->nextrip = ~0; 622 vcpu->lastcpu = NOCPU; 623 vcpu->vmcb_pa = vtophys(vcpu->vmcb); 624 vmcb_init(sc, vcpu, vtophys(sc->iopm_bitmap), vtophys(sc->msr_bitmap), 625 sc->nptp); 626 svm_msr_guest_init(sc, vcpu); 627 return (vcpu); 628 } 629 630 /* 631 * Collateral for a generic SVM VM-exit. 632 */ 633 static void 634 vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) 635 { 636 637 vme->exitcode = VM_EXITCODE_SVM; 638 vme->u.svm.exitcode = code; 639 vme->u.svm.exitinfo1 = info1; 640 vme->u.svm.exitinfo2 = info2; 641 } 642 643 static int 644 svm_cpl(struct vmcb_state *state) 645 { 646 647 /* 648 * From APMv2: 649 * "Retrieve the CPL from the CPL field in the VMCB, not 650 * from any segment DPL" 651 */ 652 return (state->cpl); 653 } 654 655 static enum vm_cpu_mode 656 svm_vcpu_mode(struct vmcb *vmcb) 657 { 658 struct vmcb_segment seg; 659 struct vmcb_state *state; 660 int error __diagused; 661 662 state = &vmcb->state; 663 664 if (state->efer & EFER_LMA) { 665 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 666 KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__, 667 error)); 668 669 /* 670 * Section 4.8.1 for APM2, check if Code Segment has 671 * Long attribute set in descriptor. 672 */ 673 if (seg.attrib & VMCB_CS_ATTRIB_L) 674 return (CPU_MODE_64BIT); 675 else 676 return (CPU_MODE_COMPATIBILITY); 677 } else if (state->cr0 & CR0_PE) { 678 return (CPU_MODE_PROTECTED); 679 } else { 680 return (CPU_MODE_REAL); 681 } 682 } 683 684 static enum vm_paging_mode 685 svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) 686 { 687 688 if ((cr0 & CR0_PG) == 0) 689 return (PAGING_MODE_FLAT); 690 if ((cr4 & CR4_PAE) == 0) 691 return (PAGING_MODE_32); 692 if (efer & EFER_LME) 693 return (PAGING_MODE_64); 694 else 695 return (PAGING_MODE_PAE); 696 } 697 698 /* 699 * ins/outs utility routines 700 */ 701 static uint64_t 702 svm_inout_str_index(struct svm_regctx *regs, int in) 703 { 704 uint64_t val; 705 706 val = in ? regs->sctx_rdi : regs->sctx_rsi; 707 708 return (val); 709 } 710 711 static uint64_t 712 svm_inout_str_count(struct svm_regctx *regs, int rep) 713 { 714 uint64_t val; 715 716 val = rep ? 
regs->sctx_rcx : 1; 717 718 return (val); 719 } 720 721 static void 722 svm_inout_str_seginfo(struct svm_vcpu *vcpu, int64_t info1, int in, 723 struct vm_inout_str *vis) 724 { 725 int error __diagused, s; 726 727 if (in) { 728 vis->seg_name = VM_REG_GUEST_ES; 729 } else { 730 /* The segment field has standard encoding */ 731 s = (info1 >> 10) & 0x7; 732 vis->seg_name = vm_segment_name(s); 733 } 734 735 error = svm_getdesc(vcpu, vis->seg_name, &vis->seg_desc); 736 KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); 737 } 738 739 static int 740 svm_inout_str_addrsize(uint64_t info1) 741 { 742 uint32_t size; 743 744 size = (info1 >> 7) & 0x7; 745 switch (size) { 746 case 1: 747 return (2); /* 16 bit */ 748 case 2: 749 return (4); /* 32 bit */ 750 case 4: 751 return (8); /* 64 bit */ 752 default: 753 panic("%s: invalid size encoding %d", __func__, size); 754 } 755 } 756 757 static void 758 svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) 759 { 760 struct vmcb_state *state; 761 762 state = &vmcb->state; 763 paging->cr3 = state->cr3; 764 paging->cpl = svm_cpl(state); 765 paging->cpu_mode = svm_vcpu_mode(vmcb); 766 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, 767 state->efer); 768 } 769 770 #define UNHANDLED 0 771 772 /* 773 * Handle guest I/O intercept. 774 */ 775 static int 776 svm_handle_io(struct svm_vcpu *vcpu, struct vm_exit *vmexit) 777 { 778 struct vmcb_ctrl *ctrl; 779 struct vmcb_state *state; 780 struct svm_regctx *regs; 781 struct vm_inout_str *vis; 782 uint64_t info1; 783 int inout_string; 784 785 state = svm_get_vmcb_state(vcpu); 786 ctrl = svm_get_vmcb_ctrl(vcpu); 787 regs = svm_get_guest_regctx(vcpu); 788 789 info1 = ctrl->exitinfo1; 790 inout_string = info1 & BIT(2) ? 1 : 0; 791 792 /* 793 * The effective segment number in EXITINFO1[12:10] is populated 794 * only if the processor has the DecodeAssist capability. 795 * 796 * XXX this is not specified explicitly in APMv2 but can be verified 797 * empirically. 798 */ 799 if (inout_string && !decode_assist()) 800 return (UNHANDLED); 801 802 vmexit->exitcode = VM_EXITCODE_INOUT; 803 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; 804 vmexit->u.inout.string = inout_string; 805 vmexit->u.inout.rep = (info1 & BIT(3)) ? 
1 : 0; 806 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; 807 vmexit->u.inout.port = (uint16_t)(info1 >> 16); 808 vmexit->u.inout.eax = (uint32_t)(state->rax); 809 810 if (inout_string) { 811 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 812 vis = &vmexit->u.inout_str; 813 svm_paging_info(svm_get_vmcb(vcpu), &vis->paging); 814 vis->rflags = state->rflags; 815 vis->cr0 = state->cr0; 816 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); 817 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); 818 vis->addrsize = svm_inout_str_addrsize(info1); 819 svm_inout_str_seginfo(vcpu, info1, vmexit->u.inout.in, vis); 820 } 821 822 return (UNHANDLED); 823 } 824 825 static int 826 npf_fault_type(uint64_t exitinfo1) 827 { 828 829 if (exitinfo1 & VMCB_NPF_INFO1_W) 830 return (VM_PROT_WRITE); 831 else if (exitinfo1 & VMCB_NPF_INFO1_ID) 832 return (VM_PROT_EXECUTE); 833 else 834 return (VM_PROT_READ); 835 } 836 837 static bool 838 svm_npf_emul_fault(uint64_t exitinfo1) 839 { 840 841 if (exitinfo1 & VMCB_NPF_INFO1_ID) { 842 return (false); 843 } 844 845 if (exitinfo1 & VMCB_NPF_INFO1_GPT) { 846 return (false); 847 } 848 849 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { 850 return (false); 851 } 852 853 return (true); 854 } 855 856 static void 857 svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) 858 { 859 struct vm_guest_paging *paging; 860 struct vmcb_segment seg; 861 struct vmcb_ctrl *ctrl; 862 char *inst_bytes; 863 int error __diagused, inst_len; 864 865 ctrl = &vmcb->ctrl; 866 paging = &vmexit->u.inst_emul.paging; 867 868 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 869 vmexit->u.inst_emul.gpa = gpa; 870 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; 871 svm_paging_info(vmcb, paging); 872 873 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 874 KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error)); 875 876 switch(paging->cpu_mode) { 877 case CPU_MODE_REAL: 878 vmexit->u.inst_emul.cs_base = seg.base; 879 vmexit->u.inst_emul.cs_d = 0; 880 break; 881 case CPU_MODE_PROTECTED: 882 case CPU_MODE_COMPATIBILITY: 883 vmexit->u.inst_emul.cs_base = seg.base; 884 885 /* 886 * Section 4.8.1 of APM2, Default Operand Size or D bit. 887 */ 888 vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ? 889 1 : 0; 890 break; 891 default: 892 vmexit->u.inst_emul.cs_base = 0; 893 vmexit->u.inst_emul.cs_d = 0; 894 break; 895 } 896 897 /* 898 * Copy the instruction bytes into 'vie' if available. 899 */ 900 if (decode_assist() && !disable_npf_assist) { 901 inst_len = ctrl->inst_len; 902 inst_bytes = ctrl->inst_bytes; 903 } else { 904 inst_len = 0; 905 inst_bytes = NULL; 906 } 907 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); 908 } 909 910 #ifdef KTR 911 static const char * 912 intrtype_to_str(int intr_type) 913 { 914 switch (intr_type) { 915 case VMCB_EVENTINJ_TYPE_INTR: 916 return ("hwintr"); 917 case VMCB_EVENTINJ_TYPE_NMI: 918 return ("nmi"); 919 case VMCB_EVENTINJ_TYPE_INTn: 920 return ("swintr"); 921 case VMCB_EVENTINJ_TYPE_EXCEPTION: 922 return ("exception"); 923 default: 924 panic("%s: unknown intr_type %d", __func__, intr_type); 925 } 926 } 927 #endif 928 929 /* 930 * Inject an event to vcpu as described in section 15.20, "Event injection". 
931 */ 932 static void 933 svm_eventinject(struct svm_vcpu *vcpu, int intr_type, int vector, 934 uint32_t error, bool ec_valid) 935 { 936 struct vmcb_ctrl *ctrl; 937 938 ctrl = svm_get_vmcb_ctrl(vcpu); 939 940 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, 941 ("%s: event already pending %#lx", __func__, ctrl->eventinj)); 942 943 KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d", 944 __func__, vector)); 945 946 switch (intr_type) { 947 case VMCB_EVENTINJ_TYPE_INTR: 948 case VMCB_EVENTINJ_TYPE_NMI: 949 case VMCB_EVENTINJ_TYPE_INTn: 950 break; 951 case VMCB_EVENTINJ_TYPE_EXCEPTION: 952 if (vector >= 0 && vector <= 31 && vector != 2) 953 break; 954 /* FALLTHROUGH */ 955 default: 956 panic("%s: invalid intr_type/vector: %d/%d", __func__, 957 intr_type, vector); 958 } 959 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; 960 if (ec_valid) { 961 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; 962 ctrl->eventinj |= (uint64_t)error << 32; 963 SVM_CTR3(vcpu, "Injecting %s at vector %d errcode %#x", 964 intrtype_to_str(intr_type), vector, error); 965 } else { 966 SVM_CTR2(vcpu, "Injecting %s at vector %d", 967 intrtype_to_str(intr_type), vector); 968 } 969 } 970 971 static void 972 svm_update_virqinfo(struct svm_vcpu *vcpu) 973 { 974 struct vlapic *vlapic; 975 struct vmcb_ctrl *ctrl; 976 977 vlapic = vm_lapic(vcpu->vcpu); 978 ctrl = svm_get_vmcb_ctrl(vcpu); 979 980 /* Update %cr8 in the emulated vlapic */ 981 vlapic_set_cr8(vlapic, ctrl->v_tpr); 982 983 /* Virtual interrupt injection is not used. */ 984 KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid " 985 "v_intr_vector %d", __func__, ctrl->v_intr_vector)); 986 } 987 988 static void 989 svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu) 990 { 991 struct vmcb_ctrl *ctrl; 992 uint64_t intinfo; 993 994 ctrl = svm_get_vmcb_ctrl(vcpu); 995 intinfo = ctrl->exitintinfo; 996 if (!VMCB_EXITINTINFO_VALID(intinfo)) 997 return; 998 999 /* 1000 * From APMv2, Section "Intercepts during IDT interrupt delivery" 1001 * 1002 * If a #VMEXIT happened during event delivery then record the event 1003 * that was being delivered. 
	 */
	SVM_CTR2(vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", intinfo,
	    VMCB_EXITINTINFO_VECTOR(intinfo));
	vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1);
	vm_exit_intinfo(vcpu->vcpu, intinfo);
}

#ifdef INVARIANTS
static __inline int
vintr_intercept_enabled(struct svm_vcpu *vcpu)
{

	return (svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR));
}
#endif

static __inline void
enable_intr_window_exiting(struct svm_vcpu *vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(vcpu);

	if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
		KASSERT(vintr_intercept_enabled(vcpu),
		    ("%s: vintr intercept should be enabled", __func__));
		return;
	}

	SVM_CTR0(vcpu, "Enable intr window exiting");
	ctrl->v_irq = 1;
	ctrl->v_ign_tpr = 1;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(vcpu, VMCB_CACHE_TPR);
	svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static __inline void
disable_intr_window_exiting(struct svm_vcpu *vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(vcpu);

	if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(!vintr_intercept_enabled(vcpu),
		    ("%s: vintr intercept should be disabled", __func__));
		return;
	}

	SVM_CTR0(vcpu, "Disable intr window exiting");
	ctrl->v_irq = 0;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(vcpu, VMCB_CACHE_TPR);
	svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static int
svm_modify_intr_shadow(struct svm_vcpu *vcpu, uint64_t val)
{
	struct vmcb_ctrl *ctrl;
	int oldval, newval;

	ctrl = svm_get_vmcb_ctrl(vcpu);
	oldval = ctrl->intr_shadow;
	newval = val ? 1 : 0;
	if (newval != oldval) {
		ctrl->intr_shadow = newval;
		SVM_CTR1(vcpu, "Setting intr_shadow to %d", newval);
	}
	return (0);
}

static int
svm_get_intr_shadow(struct svm_vcpu *vcpu, uint64_t *val)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(vcpu);
	*val = ctrl->intr_shadow;
	return (0);
}

/*
 * Once an NMI is injected, it blocks delivery of further NMIs until the
 * handler executes an IRET. The IRET intercept is enabled when an NMI is
 * injected to track when the vcpu is done handling the NMI.
 */
static int
nmi_blocked(struct svm_vcpu *vcpu)
{
	int blocked;

	blocked = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
	return (blocked);
}

static void
enable_nmi_blocking(struct svm_vcpu *vcpu)
{

	KASSERT(!nmi_blocked(vcpu), ("vNMI already blocked"));
	SVM_CTR0(vcpu, "vNMI blocking enabled");
	svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
}

static void
clear_nmi_blocking(struct svm_vcpu *vcpu)
{
	int error __diagused;

	KASSERT(nmi_blocked(vcpu), ("vNMI already unblocked"));
	SVM_CTR0(vcpu, "vNMI blocking cleared");
	/*
	 * When the IRET intercept is cleared the vcpu will attempt to execute
	 * the "iret" when it runs next. However, it is possible to inject
	 * another NMI into the vcpu before the "iret" has actually executed.
	 *
	 * For example,
if the "iret" encounters a #NPF when accessing the stack 1124 * it will trap back into the hypervisor. If an NMI is pending for 1125 * the vcpu it will be injected into the guest. 1126 * 1127 * XXX this needs to be fixed 1128 */ 1129 svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1130 1131 /* 1132 * Set 'intr_shadow' to prevent an NMI from being injected on the 1133 * immediate VMRUN. 1134 */ 1135 error = svm_modify_intr_shadow(vcpu, 1); 1136 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); 1137 } 1138 1139 #define EFER_MBZ_BITS 0xFFFFFFFFFFFF0200UL 1140 1141 static int 1142 svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval, 1143 bool *retu) 1144 { 1145 struct vm_exit *vme; 1146 struct vmcb_state *state; 1147 uint64_t changed, lma, oldval; 1148 int error __diagused; 1149 1150 state = svm_get_vmcb_state(vcpu); 1151 1152 oldval = state->efer; 1153 SVM_CTR2(vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval); 1154 1155 newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */ 1156 changed = oldval ^ newval; 1157 1158 if (newval & EFER_MBZ_BITS) 1159 goto gpf; 1160 1161 /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */ 1162 if (changed & EFER_LME) { 1163 if (state->cr0 & CR0_PG) 1164 goto gpf; 1165 } 1166 1167 /* EFER.LMA = EFER.LME & CR0.PG */ 1168 if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) 1169 lma = EFER_LMA; 1170 else 1171 lma = 0; 1172 1173 if ((newval & EFER_LMA) != lma) 1174 goto gpf; 1175 1176 if (newval & EFER_NXE) { 1177 if (!vm_cpuid_capability(vcpu->vcpu, VCC_NO_EXECUTE)) 1178 goto gpf; 1179 } 1180 1181 /* 1182 * XXX bhyve does not enforce segment limits in 64-bit mode. Until 1183 * this is fixed flag guest attempt to set EFER_LMSLE as an error. 1184 */ 1185 if (newval & EFER_LMSLE) { 1186 vme = vm_exitinfo(vcpu->vcpu); 1187 vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0); 1188 *retu = true; 1189 return (0); 1190 } 1191 1192 if (newval & EFER_FFXSR) { 1193 if (!vm_cpuid_capability(vcpu->vcpu, VCC_FFXSR)) 1194 goto gpf; 1195 } 1196 1197 if (newval & EFER_TCE) { 1198 if (!vm_cpuid_capability(vcpu->vcpu, VCC_TCE)) 1199 goto gpf; 1200 } 1201 1202 error = svm_setreg(vcpu, VM_REG_GUEST_EFER, newval); 1203 KASSERT(error == 0, ("%s: error %d updating efer", __func__, error)); 1204 return (0); 1205 gpf: 1206 vm_inject_gp(vcpu->vcpu); 1207 return (0); 1208 } 1209 1210 static int 1211 emulate_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num, 1212 uint64_t val, bool *retu) 1213 { 1214 int error; 1215 1216 if (lapic_msr(num)) 1217 error = lapic_wrmsr(vcpu->vcpu, num, val, retu); 1218 else if (num == MSR_EFER) 1219 error = svm_write_efer(sc, vcpu, val, retu); 1220 else 1221 error = svm_wrmsr(vcpu, num, val, retu); 1222 1223 return (error); 1224 } 1225 1226 static int 1227 emulate_rdmsr(struct svm_vcpu *vcpu, u_int num, bool *retu) 1228 { 1229 struct vmcb_state *state; 1230 struct svm_regctx *ctx; 1231 uint64_t result; 1232 int error; 1233 1234 if (lapic_msr(num)) 1235 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu); 1236 else 1237 error = svm_rdmsr(vcpu, num, &result, retu); 1238 1239 if (error == 0) { 1240 state = svm_get_vmcb_state(vcpu); 1241 ctx = svm_get_guest_regctx(vcpu); 1242 state->rax = result & 0xffffffff; 1243 ctx->sctx_rdx = result >> 32; 1244 } 1245 1246 return (error); 1247 } 1248 1249 #ifdef KTR 1250 static const char * 1251 exit_reason_to_str(uint64_t reason) 1252 { 1253 int i; 1254 static char reasonbuf[32]; 1255 static const struct { 1256 int reason; 1257 const char *str; 1258 } reasons[] 
= { 1259 { .reason = VMCB_EXIT_INVALID, .str = "invalvmcb" }, 1260 { .reason = VMCB_EXIT_SHUTDOWN, .str = "shutdown" }, 1261 { .reason = VMCB_EXIT_NPF, .str = "nptfault" }, 1262 { .reason = VMCB_EXIT_PAUSE, .str = "pause" }, 1263 { .reason = VMCB_EXIT_HLT, .str = "hlt" }, 1264 { .reason = VMCB_EXIT_CPUID, .str = "cpuid" }, 1265 { .reason = VMCB_EXIT_IO, .str = "inout" }, 1266 { .reason = VMCB_EXIT_MC, .str = "mchk" }, 1267 { .reason = VMCB_EXIT_INTR, .str = "extintr" }, 1268 { .reason = VMCB_EXIT_NMI, .str = "nmi" }, 1269 { .reason = VMCB_EXIT_VINTR, .str = "vintr" }, 1270 { .reason = VMCB_EXIT_MSR, .str = "msr" }, 1271 { .reason = VMCB_EXIT_IRET, .str = "iret" }, 1272 { .reason = VMCB_EXIT_MONITOR, .str = "monitor" }, 1273 { .reason = VMCB_EXIT_MWAIT, .str = "mwait" }, 1274 { .reason = VMCB_EXIT_VMRUN, .str = "vmrun" }, 1275 { .reason = VMCB_EXIT_VMMCALL, .str = "vmmcall" }, 1276 { .reason = VMCB_EXIT_VMLOAD, .str = "vmload" }, 1277 { .reason = VMCB_EXIT_VMSAVE, .str = "vmsave" }, 1278 { .reason = VMCB_EXIT_STGI, .str = "stgi" }, 1279 { .reason = VMCB_EXIT_CLGI, .str = "clgi" }, 1280 { .reason = VMCB_EXIT_SKINIT, .str = "skinit" }, 1281 { .reason = VMCB_EXIT_ICEBP, .str = "icebp" }, 1282 { .reason = VMCB_EXIT_INVD, .str = "invd" }, 1283 { .reason = VMCB_EXIT_INVLPGA, .str = "invlpga" }, 1284 }; 1285 1286 for (i = 0; i < nitems(reasons); i++) { 1287 if (reasons[i].reason == reason) 1288 return (reasons[i].str); 1289 } 1290 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); 1291 return (reasonbuf); 1292 } 1293 #endif /* KTR */ 1294 1295 /* 1296 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs 1297 * that are due to instruction intercepts as well as MSR and IOIO intercepts 1298 * and exceptions caused by INT3, INTO and BOUND instructions. 1299 * 1300 * Return 1 if the nRIP is valid and 0 otherwise. 1301 */ 1302 static int 1303 nrip_valid(uint64_t exitcode) 1304 { 1305 switch (exitcode) { 1306 case 0x00 ... 0x0F: /* read of CR0 through CR15 */ 1307 case 0x10 ... 0x1F: /* write of CR0 through CR15 */ 1308 case 0x20 ... 0x2F: /* read of DR0 through DR15 */ 1309 case 0x30 ... 0x3F: /* write of DR0 through DR15 */ 1310 case 0x43: /* INT3 */ 1311 case 0x44: /* INTO */ 1312 case 0x45: /* BOUND */ 1313 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */ 1314 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */ 1315 return (1); 1316 default: 1317 return (0); 1318 } 1319 } 1320 1321 static int 1322 svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu, 1323 struct vm_exit *vmexit) 1324 { 1325 struct vmcb *vmcb; 1326 struct vmcb_state *state; 1327 struct vmcb_ctrl *ctrl; 1328 struct svm_regctx *ctx; 1329 uint64_t code, info1, info2, val; 1330 uint32_t eax, ecx, edx; 1331 int error __diagused, errcode_valid, handled, idtvec, reflect; 1332 bool retu; 1333 1334 ctx = svm_get_guest_regctx(vcpu); 1335 vmcb = svm_get_vmcb(vcpu); 1336 state = &vmcb->state; 1337 ctrl = &vmcb->ctrl; 1338 1339 handled = 0; 1340 code = ctrl->exitcode; 1341 info1 = ctrl->exitinfo1; 1342 info2 = ctrl->exitinfo2; 1343 1344 vmexit->exitcode = VM_EXITCODE_BOGUS; 1345 vmexit->rip = state->rip; 1346 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0; 1347 1348 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1); 1349 1350 /* 1351 * #VMEXIT(INVALID) needs to be handled early because the VMCB is 1352 * in an inconsistent state and can trigger assertions that would 1353 * never happen otherwise. 
1354 */ 1355 if (code == VMCB_EXIT_INVALID) { 1356 vm_exit_svm(vmexit, code, info1, info2); 1357 return (0); 1358 } 1359 1360 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 1361 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); 1362 1363 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, 1364 ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)", 1365 vmexit->inst_length, code, info1, info2)); 1366 1367 svm_update_virqinfo(vcpu); 1368 svm_save_intinfo(svm_sc, vcpu); 1369 1370 switch (code) { 1371 case VMCB_EXIT_IRET: 1372 /* 1373 * Restart execution at "iret" but with the intercept cleared. 1374 */ 1375 vmexit->inst_length = 0; 1376 clear_nmi_blocking(vcpu); 1377 handled = 1; 1378 break; 1379 case VMCB_EXIT_VINTR: /* interrupt window exiting */ 1380 vmm_stat_incr(vcpu->vcpu, VMEXIT_VINTR, 1); 1381 handled = 1; 1382 break; 1383 case VMCB_EXIT_INTR: /* external interrupt */ 1384 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1); 1385 handled = 1; 1386 break; 1387 case VMCB_EXIT_NMI: /* external NMI */ 1388 handled = 1; 1389 break; 1390 case 0x40 ... 0x5F: 1391 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1); 1392 reflect = 1; 1393 idtvec = code - 0x40; 1394 switch (idtvec) { 1395 case IDT_MC: 1396 /* 1397 * Call the machine check handler by hand. Also don't 1398 * reflect the machine check back into the guest. 1399 */ 1400 reflect = 0; 1401 SVM_CTR0(vcpu, "Vectoring to MCE handler"); 1402 __asm __volatile("int $18"); 1403 break; 1404 case IDT_PF: 1405 error = svm_setreg(vcpu, VM_REG_GUEST_CR2, info2); 1406 KASSERT(error == 0, ("%s: error %d updating cr2", 1407 __func__, error)); 1408 /* fallthru */ 1409 case IDT_NP: 1410 case IDT_SS: 1411 case IDT_GP: 1412 case IDT_AC: 1413 case IDT_TS: 1414 errcode_valid = 1; 1415 break; 1416 1417 case IDT_DF: 1418 errcode_valid = 1; 1419 info1 = 0; 1420 break; 1421 1422 case IDT_BP: 1423 case IDT_OF: 1424 case IDT_BR: 1425 /* 1426 * The 'nrip' field is populated for INT3, INTO and 1427 * BOUND exceptions and this also implies that 1428 * 'inst_length' is non-zero. 1429 * 1430 * Reset 'inst_length' to zero so the guest %rip at 1431 * event injection is identical to what it was when 1432 * the exception originally happened. 1433 */ 1434 SVM_CTR2(vcpu, "Reset inst_length from %d " 1435 "to zero before injecting exception %d", 1436 vmexit->inst_length, idtvec); 1437 vmexit->inst_length = 0; 1438 /* fallthru */ 1439 default: 1440 errcode_valid = 0; 1441 info1 = 0; 1442 break; 1443 } 1444 KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) " 1445 "when reflecting exception %d into guest", 1446 vmexit->inst_length, idtvec)); 1447 1448 if (reflect) { 1449 /* Reflect the exception back into the guest */ 1450 SVM_CTR2(vcpu, "Reflecting exception " 1451 "%d/%#x into the guest", idtvec, (int)info1); 1452 error = vm_inject_exception(vcpu->vcpu, idtvec, 1453 errcode_valid, info1, 0); 1454 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 1455 __func__, error)); 1456 } 1457 handled = 1; 1458 break; 1459 case VMCB_EXIT_MSR: /* MSR access. 
*/ 1460 eax = state->rax; 1461 ecx = ctx->sctx_rcx; 1462 edx = ctx->sctx_rdx; 1463 retu = false; 1464 1465 if (info1) { 1466 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1); 1467 val = (uint64_t)edx << 32 | eax; 1468 SVM_CTR2(vcpu, "wrmsr %#x val %#lx", ecx, val); 1469 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) { 1470 vmexit->exitcode = VM_EXITCODE_WRMSR; 1471 vmexit->u.msr.code = ecx; 1472 vmexit->u.msr.wval = val; 1473 } else if (!retu) { 1474 handled = 1; 1475 } else { 1476 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1477 ("emulate_wrmsr retu with bogus exitcode")); 1478 } 1479 } else { 1480 SVM_CTR1(vcpu, "rdmsr %#x", ecx); 1481 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1); 1482 if (emulate_rdmsr(vcpu, ecx, &retu)) { 1483 vmexit->exitcode = VM_EXITCODE_RDMSR; 1484 vmexit->u.msr.code = ecx; 1485 } else if (!retu) { 1486 handled = 1; 1487 } else { 1488 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1489 ("emulate_rdmsr retu with bogus exitcode")); 1490 } 1491 } 1492 break; 1493 case VMCB_EXIT_IO: 1494 handled = svm_handle_io(vcpu, vmexit); 1495 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1); 1496 break; 1497 case VMCB_EXIT_CPUID: 1498 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1); 1499 handled = x86_emulate_cpuid(vcpu->vcpu, 1500 &state->rax, &ctx->sctx_rbx, &ctx->sctx_rcx, 1501 &ctx->sctx_rdx); 1502 break; 1503 case VMCB_EXIT_HLT: 1504 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1); 1505 vmexit->exitcode = VM_EXITCODE_HLT; 1506 vmexit->u.hlt.rflags = state->rflags; 1507 break; 1508 case VMCB_EXIT_PAUSE: 1509 vmexit->exitcode = VM_EXITCODE_PAUSE; 1510 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1); 1511 break; 1512 case VMCB_EXIT_NPF: 1513 /* EXITINFO2 contains the faulting guest physical address */ 1514 if (info1 & VMCB_NPF_INFO1_RSV) { 1515 SVM_CTR2(vcpu, "nested page fault with " 1516 "reserved bits set: info1(%#lx) info2(%#lx)", 1517 info1, info2); 1518 } else if (vm_mem_allocated(vcpu->vcpu, info2)) { 1519 vmexit->exitcode = VM_EXITCODE_PAGING; 1520 vmexit->u.paging.gpa = info2; 1521 vmexit->u.paging.fault_type = npf_fault_type(info1); 1522 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1); 1523 SVM_CTR3(vcpu, "nested page fault " 1524 "on gpa %#lx/%#lx at rip %#lx", 1525 info2, info1, state->rip); 1526 } else if (svm_npf_emul_fault(info1)) { 1527 svm_handle_inst_emul(vmcb, info2, vmexit); 1528 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1); 1529 SVM_CTR3(vcpu, "inst_emul fault " 1530 "for gpa %#lx/%#lx at rip %#lx", 1531 info2, info1, state->rip); 1532 } 1533 break; 1534 case VMCB_EXIT_MONITOR: 1535 vmexit->exitcode = VM_EXITCODE_MONITOR; 1536 break; 1537 case VMCB_EXIT_MWAIT: 1538 vmexit->exitcode = VM_EXITCODE_MWAIT; 1539 break; 1540 case VMCB_EXIT_SHUTDOWN: 1541 case VMCB_EXIT_VMRUN: 1542 case VMCB_EXIT_VMMCALL: 1543 case VMCB_EXIT_VMLOAD: 1544 case VMCB_EXIT_VMSAVE: 1545 case VMCB_EXIT_STGI: 1546 case VMCB_EXIT_CLGI: 1547 case VMCB_EXIT_SKINIT: 1548 case VMCB_EXIT_ICEBP: 1549 case VMCB_EXIT_INVLPGA: 1550 vm_inject_ud(vcpu->vcpu); 1551 handled = 1; 1552 break; 1553 case VMCB_EXIT_INVD: 1554 case VMCB_EXIT_WBINVD: 1555 /* ignore exit */ 1556 handled = 1; 1557 break; 1558 default: 1559 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1); 1560 break; 1561 } 1562 1563 SVM_CTR4(vcpu, "%s %s vmexit at %#lx/%d", 1564 handled ? 
"handled" : "unhandled", exit_reason_to_str(code), 1565 vmexit->rip, vmexit->inst_length); 1566 1567 if (handled) { 1568 vmexit->rip += vmexit->inst_length; 1569 vmexit->inst_length = 0; 1570 state->rip = vmexit->rip; 1571 } else { 1572 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 1573 /* 1574 * If this VM exit was not claimed by anybody then 1575 * treat it as a generic SVM exit. 1576 */ 1577 vm_exit_svm(vmexit, code, info1, info2); 1578 } else { 1579 /* 1580 * The exitcode and collateral have been populated. 1581 * The VM exit will be processed further in userland. 1582 */ 1583 } 1584 } 1585 return (handled); 1586 } 1587 1588 static void 1589 svm_inj_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu) 1590 { 1591 uint64_t intinfo; 1592 1593 if (!vm_entry_intinfo(vcpu->vcpu, &intinfo)) 1594 return; 1595 1596 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1597 "valid: %#lx", __func__, intinfo)); 1598 1599 svm_eventinject(vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1600 VMCB_EXITINTINFO_VECTOR(intinfo), 1601 VMCB_EXITINTINFO_EC(intinfo), 1602 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1603 vmm_stat_incr(vcpu->vcpu, VCPU_INTINFO_INJECTED, 1); 1604 SVM_CTR1(vcpu, "Injected entry intinfo: %#lx", intinfo); 1605 } 1606 1607 /* 1608 * Inject event to virtual cpu. 1609 */ 1610 static void 1611 svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu, 1612 struct vlapic *vlapic) 1613 { 1614 struct vmcb_ctrl *ctrl; 1615 struct vmcb_state *state; 1616 uint8_t v_tpr; 1617 int vector, need_intr_window; 1618 int extint_pending; 1619 1620 state = svm_get_vmcb_state(vcpu); 1621 ctrl = svm_get_vmcb_ctrl(vcpu); 1622 1623 need_intr_window = 0; 1624 1625 if (vcpu->nextrip != state->rip) { 1626 ctrl->intr_shadow = 0; 1627 SVM_CTR2(vcpu, "Guest interrupt blocking " 1628 "cleared due to rip change: %#lx/%#lx", 1629 vcpu->nextrip, state->rip); 1630 } 1631 1632 /* 1633 * Inject pending events or exceptions for this vcpu. 1634 * 1635 * An event might be pending because the previous #VMEXIT happened 1636 * during event delivery (i.e. ctrl->exitintinfo). 1637 * 1638 * An event might also be pending because an exception was injected 1639 * by the hypervisor (e.g. #PF during instruction emulation). 1640 */ 1641 svm_inj_intinfo(sc, vcpu); 1642 1643 /* NMI event has priority over interrupts. */ 1644 if (vm_nmi_pending(vcpu->vcpu)) { 1645 if (nmi_blocked(vcpu)) { 1646 /* 1647 * Can't inject another NMI if the guest has not 1648 * yet executed an "iret" after the last NMI. 1649 */ 1650 SVM_CTR0(vcpu, "Cannot inject NMI due " 1651 "to NMI-blocking"); 1652 } else if (ctrl->intr_shadow) { 1653 /* 1654 * Can't inject an NMI if the vcpu is in an intr_shadow. 1655 */ 1656 SVM_CTR0(vcpu, "Cannot inject NMI due to " 1657 "interrupt shadow"); 1658 need_intr_window = 1; 1659 goto done; 1660 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1661 /* 1662 * If there is already an exception/interrupt pending 1663 * then defer the NMI until after that. 1664 */ 1665 SVM_CTR1(vcpu, "Cannot inject NMI due to " 1666 "eventinj %#lx", ctrl->eventinj); 1667 1668 /* 1669 * Use self-IPI to trigger a VM-exit as soon as 1670 * possible after the event injection is completed. 1671 * 1672 * This works only if the external interrupt exiting 1673 * is at a lower priority than the event injection. 1674 * 1675 * Although not explicitly specified in APMv2 the 1676 * relative priorities were verified empirically. 1677 */ 1678 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? 
*/ 1679 } else { 1680 vm_nmi_clear(vcpu->vcpu); 1681 1682 /* Inject NMI, vector number is not used */ 1683 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_NMI, 1684 IDT_NMI, 0, false); 1685 1686 /* virtual NMI blocking is now in effect */ 1687 enable_nmi_blocking(vcpu); 1688 1689 SVM_CTR0(vcpu, "Injecting vNMI"); 1690 } 1691 } 1692 1693 extint_pending = vm_extint_pending(vcpu->vcpu); 1694 if (!extint_pending) { 1695 if (!vlapic_pending_intr(vlapic, &vector)) 1696 goto done; 1697 KASSERT(vector >= 16 && vector <= 255, 1698 ("invalid vector %d from local APIC", vector)); 1699 } else { 1700 /* Ask the legacy pic for a vector to inject */ 1701 vatpic_pending_intr(sc->vm, &vector); 1702 KASSERT(vector >= 0 && vector <= 255, 1703 ("invalid vector %d from INTR", vector)); 1704 } 1705 1706 /* 1707 * If the guest has disabled interrupts or is in an interrupt shadow 1708 * then we cannot inject the pending interrupt. 1709 */ 1710 if ((state->rflags & PSL_I) == 0) { 1711 SVM_CTR2(vcpu, "Cannot inject vector %d due to " 1712 "rflags %#lx", vector, state->rflags); 1713 need_intr_window = 1; 1714 goto done; 1715 } 1716 1717 if (ctrl->intr_shadow) { 1718 SVM_CTR1(vcpu, "Cannot inject vector %d due to " 1719 "interrupt shadow", vector); 1720 need_intr_window = 1; 1721 goto done; 1722 } 1723 1724 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1725 SVM_CTR2(vcpu, "Cannot inject vector %d due to " 1726 "eventinj %#lx", vector, ctrl->eventinj); 1727 need_intr_window = 1; 1728 goto done; 1729 } 1730 1731 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1732 1733 if (!extint_pending) { 1734 vlapic_intr_accepted(vlapic, vector); 1735 } else { 1736 vm_extint_clear(vcpu->vcpu); 1737 vatpic_intr_accepted(sc->vm, vector); 1738 } 1739 1740 /* 1741 * Force a VM-exit as soon as the vcpu is ready to accept another 1742 * interrupt. This is done because the PIC might have another vector 1743 * that it wants to inject. Also, if the APIC has a pending interrupt 1744 * that was preempted by the ExtInt then it allows us to inject the 1745 * APIC vector as soon as possible. 1746 */ 1747 need_intr_window = 1; 1748 done: 1749 /* 1750 * The guest can modify the TPR by writing to %CR8. In guest mode 1751 * the processor reflects this write to V_TPR without hypervisor 1752 * intervention. 1753 * 1754 * The guest can also modify the TPR by writing to it via the memory 1755 * mapped APIC page. In this case, the write will be emulated by the 1756 * hypervisor. For this reason V_TPR must be updated before every 1757 * VMRUN. 1758 */ 1759 v_tpr = vlapic_get_cr8(vlapic); 1760 KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); 1761 if (ctrl->v_tpr != v_tpr) { 1762 SVM_CTR2(vcpu, "VMCB V_TPR changed from %#x to %#x", 1763 ctrl->v_tpr, v_tpr); 1764 ctrl->v_tpr = v_tpr; 1765 svm_set_dirty(vcpu, VMCB_CACHE_TPR); 1766 } 1767 1768 if (need_intr_window) { 1769 /* 1770 * We use V_IRQ in conjunction with the VINTR intercept to 1771 * trap into the hypervisor as soon as a virtual interrupt 1772 * can be delivered. 1773 * 1774 * Since injected events are not subject to intercept checks 1775 * we need to ensure that the V_IRQ is not actually going to 1776 * be delivered on VM entry. The KASSERT below enforces this. 
1777 */ 1778 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1779 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1780 ("Bogus intr_window_exiting: eventinj (%#lx), " 1781 "intr_shadow (%u), rflags (%#lx)", 1782 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1783 enable_intr_window_exiting(vcpu); 1784 } else { 1785 disable_intr_window_exiting(vcpu); 1786 } 1787 } 1788 1789 static __inline void 1790 restore_host_tss(void) 1791 { 1792 struct system_segment_descriptor *tss_sd; 1793 1794 /* 1795 * The TSS descriptor was in use prior to launching the guest so it 1796 * has been marked busy. 1797 * 1798 * 'ltr' requires the descriptor to be marked available so change the 1799 * type to "64-bit available TSS". 1800 */ 1801 tss_sd = PCPU_GET(tss); 1802 tss_sd->sd_type = SDT_SYSTSS; 1803 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1804 } 1805 1806 static void 1807 svm_pmap_activate(struct svm_vcpu *vcpu, pmap_t pmap) 1808 { 1809 struct vmcb_ctrl *ctrl; 1810 long eptgen; 1811 int cpu; 1812 bool alloc_asid; 1813 1814 cpu = curcpu; 1815 CPU_SET_ATOMIC(cpu, &pmap->pm_active); 1816 smr_enter(pmap->pm_eptsmr); 1817 1818 ctrl = svm_get_vmcb_ctrl(vcpu); 1819 1820 /* 1821 * The TLB entries associated with the vcpu's ASID are not valid 1822 * if either of the following conditions is true: 1823 * 1824 * 1. The vcpu's ASID generation is different than the host cpu's 1825 * ASID generation. This happens when the vcpu migrates to a new 1826 * host cpu. It can also happen when the number of vcpus executing 1827 * on a host cpu is greater than the number of ASIDs available. 1828 * 1829 * 2. The pmap generation number is different than the value cached in 1830 * the 'vcpustate'. This happens when the host invalidates pages 1831 * belonging to the guest. 1832 * 1833 * asidgen eptgen Action 1834 * mismatch mismatch 1835 * 0 0 (a) 1836 * 0 1 (b1) or (b2) 1837 * 1 0 (c) 1838 * 1 1 (d) 1839 * 1840 * (a) There is no mismatch in eptgen or ASID generation and therefore 1841 * no further action is needed. 1842 * 1843 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1844 * retained and the TLB entries associated with this ASID 1845 * are flushed by VMRUN. 1846 * 1847 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1848 * allocated. 1849 * 1850 * (c) A new ASID is allocated. 1851 * 1852 * (d) A new ASID is allocated. 1853 */ 1854 1855 alloc_asid = false; 1856 eptgen = atomic_load_long(&pmap->pm_eptgen); 1857 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1858 1859 if (vcpu->asid.gen != asid[cpu].gen) { 1860 alloc_asid = true; /* (c) and (d) */ 1861 } else if (vcpu->eptgen != eptgen) { 1862 if (flush_by_asid()) 1863 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1864 else 1865 alloc_asid = true; /* (b2) */ 1866 } else { 1867 /* 1868 * This is the common case (a). 1869 */ 1870 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1871 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1872 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1873 } 1874 1875 if (alloc_asid) { 1876 if (++asid[cpu].num >= nasid) { 1877 asid[cpu].num = 1; 1878 if (++asid[cpu].gen == 0) 1879 asid[cpu].gen = 1; 1880 /* 1881 * If this cpu does not support "flush-by-asid" 1882 * then flush the entire TLB on a generation 1883 * bump. Subsequent ASID allocation in this 1884 * generation can be done without a TLB flush. 
1885 */ 1886 if (!flush_by_asid()) 1887 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 1888 } 1889 vcpu->asid.gen = asid[cpu].gen; 1890 vcpu->asid.num = asid[cpu].num; 1891 1892 ctrl->asid = vcpu->asid.num; 1893 svm_set_dirty(vcpu, VMCB_CACHE_ASID); 1894 /* 1895 * If this cpu supports "flush-by-asid" then the TLB 1896 * was not flushed after the generation bump. The TLB 1897 * is flushed selectively after every new ASID allocation. 1898 */ 1899 if (flush_by_asid()) 1900 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 1901 } 1902 vcpu->eptgen = eptgen; 1903 1904 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 1905 KASSERT(ctrl->asid == vcpu->asid.num, 1906 ("ASID mismatch: %u/%u", ctrl->asid, vcpu->asid.num)); 1907 } 1908 1909 static void 1910 svm_pmap_deactivate(pmap_t pmap) 1911 { 1912 smr_exit(pmap->pm_eptsmr); 1913 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); 1914 } 1915 1916 static __inline void 1917 disable_gintr(void) 1918 { 1919 1920 __asm __volatile("clgi"); 1921 } 1922 1923 static __inline void 1924 enable_gintr(void) 1925 { 1926 1927 __asm __volatile("stgi"); 1928 } 1929 1930 static __inline void 1931 svm_dr_enter_guest(struct svm_regctx *gctx) 1932 { 1933 1934 /* Save host control debug registers. */ 1935 gctx->host_dr7 = rdr7(); 1936 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 1937 1938 /* 1939 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 1940 * exceptions in the host based on the guest DRx values. The 1941 * guest DR6, DR7, and DEBUGCTL are saved/restored in the 1942 * VMCB. 1943 */ 1944 load_dr7(0); 1945 wrmsr(MSR_DEBUGCTLMSR, 0); 1946 1947 /* Save host debug registers. */ 1948 gctx->host_dr0 = rdr0(); 1949 gctx->host_dr1 = rdr1(); 1950 gctx->host_dr2 = rdr2(); 1951 gctx->host_dr3 = rdr3(); 1952 gctx->host_dr6 = rdr6(); 1953 1954 /* Restore guest debug registers. */ 1955 load_dr0(gctx->sctx_dr0); 1956 load_dr1(gctx->sctx_dr1); 1957 load_dr2(gctx->sctx_dr2); 1958 load_dr3(gctx->sctx_dr3); 1959 } 1960 1961 static __inline void 1962 svm_dr_leave_guest(struct svm_regctx *gctx) 1963 { 1964 1965 /* Save guest debug registers. */ 1966 gctx->sctx_dr0 = rdr0(); 1967 gctx->sctx_dr1 = rdr1(); 1968 gctx->sctx_dr2 = rdr2(); 1969 gctx->sctx_dr3 = rdr3(); 1970 1971 /* 1972 * Restore host debug registers. Restore DR7 and DEBUGCTL 1973 * last. 1974 */ 1975 load_dr0(gctx->host_dr0); 1976 load_dr1(gctx->host_dr1); 1977 load_dr2(gctx->host_dr2); 1978 load_dr3(gctx->host_dr3); 1979 load_dr6(gctx->host_dr6); 1980 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl); 1981 load_dr7(gctx->host_dr7); 1982 } 1983 1984 /* 1985 * Start vcpu with specified RIP. 1986 */ 1987 static int 1988 svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo) 1989 { 1990 struct svm_regctx *gctx; 1991 struct svm_softc *svm_sc; 1992 struct svm_vcpu *vcpu; 1993 struct vmcb_state *state; 1994 struct vmcb_ctrl *ctrl; 1995 struct vm_exit *vmexit; 1996 struct vlapic *vlapic; 1997 uint64_t vmcb_pa; 1998 int handled; 1999 uint16_t ldt_sel; 2000 2001 vcpu = vcpui; 2002 svm_sc = vcpu->sc; 2003 state = svm_get_vmcb_state(vcpu); 2004 ctrl = svm_get_vmcb_ctrl(vcpu); 2005 vmexit = vm_exitinfo(vcpu->vcpu); 2006 vlapic = vm_lapic(vcpu->vcpu); 2007 2008 gctx = svm_get_guest_regctx(vcpu); 2009 vmcb_pa = vcpu->vmcb_pa; 2010 2011 if (vcpu->lastcpu != curcpu) { 2012 /* 2013 * Force new ASID allocation by invalidating the generation. 2014 */ 2015 vcpu->asid.gen = 0; 2016 2017 /* 2018 * Invalidate the VMCB state cache by marking all fields dirty. 
/*
 * Start vcpu with specified RIP.
 */
static int
svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
{
	struct svm_regctx *gctx;
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpu;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	uint64_t vmcb_pa;
	int handled;
	uint16_t ldt_sel;

	vcpu = vcpui;
	svm_sc = vcpu->sc;
	state = svm_get_vmcb_state(vcpu);
	ctrl = svm_get_vmcb_ctrl(vcpu);
	vmexit = vm_exitinfo(vcpu->vcpu);
	vlapic = vm_lapic(vcpu->vcpu);

	gctx = svm_get_guest_regctx(vcpu);
	vmcb_pa = vcpu->vmcb_pa;

	if (vcpu->lastcpu != curcpu) {
		/*
		 * Force new ASID allocation by invalidating the generation.
		 */
		vcpu->asid.gen = 0;

		/*
		 * Invalidate the VMCB state cache by marking all fields dirty.
		 */
		svm_set_dirty(vcpu, 0xffffffff);

		/*
		 * XXX
		 * Setting 'vcpu->lastcpu' here is a bit premature because
		 * we may return from this function without actually executing
		 * the VMRUN instruction. This could happen if a rendezvous
		 * or an AST is pending on the first time through the loop.
		 *
		 * This works for now but any new side-effects of vcpu
		 * migration should take this case into account.
		 */
		vcpu->lastcpu = curcpu;
		vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1);
	}

	svm_msr_guest_enter(vcpu);

	/* Update Guest RIP */
	state->rip = rip;

	do {
		/*
		 * Disable global interrupts to guarantee atomicity during
		 * loading of guest state. This includes not only the state
		 * loaded by the "vmrun" instruction but also software state
		 * maintained by the hypervisor: suspended and rendezvous
		 * state, NPT generation number, vlapic interrupts, etc.
		 */
		disable_gintr();

		if (vcpu_suspended(evinfo)) {
			enable_gintr();
			vm_exit_suspended(vcpu->vcpu, state->rip);
			break;
		}

		if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) {
			enable_gintr();
			vm_exit_rendezvous(vcpu->vcpu, state->rip);
			break;
		}

		if (vcpu_reqidle(evinfo)) {
			enable_gintr();
			vm_exit_reqidle(vcpu->vcpu, state->rip);
			break;
		}

		/* The scheduler has asked us to yield the cpu. */
		if (vcpu_should_yield(vcpu->vcpu)) {
			enable_gintr();
			vm_exit_astpending(vcpu->vcpu, state->rip);
			break;
		}

		if (vcpu_debugged(vcpu->vcpu)) {
			enable_gintr();
			vm_exit_debug(vcpu->vcpu, state->rip);
			break;
		}

		/*
		 * #VMEXIT resumes the host with the guest LDTR, so
		 * save the current LDT selector so it can be restored
		 * after an exit. The userspace hypervisor probably
		 * doesn't use an LDT, but save and restore it to be
		 * safe.
		 */
		ldt_sel = sldt();

		svm_inj_interrupts(svm_sc, vcpu, vlapic);

		/*
		 * Check the pmap generation and the ASID generation to
		 * ensure that the vcpu does not use stale TLB mappings.
		 */
		svm_pmap_activate(vcpu, pmap);

		ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty;
		vcpu->dirty = 0;
		SVM_CTR1(vcpu, "vmcb clean %#x", ctrl->vmcb_clean);

		/* Launch Virtual Machine. */
		SVM_CTR1(vcpu, "Resume execution at %#lx", state->rip);
		svm_dr_enter_guest(gctx);
		svm_launch(vmcb_pa, gctx, get_pcpu());
		svm_dr_leave_guest(gctx);

		svm_pmap_deactivate(pmap);

		/*
		 * The host GDTR and IDTR are saved by VMRUN and restored
		 * automatically on #VMEXIT. However, the host TSS needs
		 * to be restored explicitly.
		 */
		restore_host_tss();

		/* Restore host LDTR. */
		lldt(ldt_sel);

		/* #VMEXIT disables interrupts so re-enable them here. */
		enable_gintr();

		/* Update 'nextrip' */
		vcpu->nextrip = state->rip;

		/* Handle #VMEXIT and, if required, return to user space. */
		handled = svm_vmexit(svm_sc, vcpu, vmexit);
	} while (handled);

	svm_msr_guest_exit(vcpu);

	return (0);
}

static void
svm_vcpu_cleanup(void *vcpui)
{
	struct svm_vcpu *vcpu = vcpui;

	free(vcpu->vmcb, M_SVM);
	free(vcpu, M_SVM);
}

static void
svm_cleanup(void *vmi)
{
	struct svm_softc *sc = vmi;

	contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
	contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
	free(sc, M_SVM);
}

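/*
 * Editor's note: the software register context holds the guest registers
 * that VMRUN/#VMEXIT do not keep in the VMCB; they are swapped in and out
 * around svm_launch() instead.  svm_getreg()/svm_setreg() therefore try the
 * VMCB first (vmcb_read()/vmcb_write()) and fall back to this table, e.g.
 * VM_REG_GUEST_RIP is satisfied from the VMCB while VM_REG_GUEST_R10 comes
 * from the software context.
 */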
static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RBX:
		return (&regctx->sctx_rbx);
	case VM_REG_GUEST_RCX:
		return (&regctx->sctx_rcx);
	case VM_REG_GUEST_RDX:
		return (&regctx->sctx_rdx);
	case VM_REG_GUEST_RDI:
		return (&regctx->sctx_rdi);
	case VM_REG_GUEST_RSI:
		return (&regctx->sctx_rsi);
	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
	case VM_REG_GUEST_R8:
		return (&regctx->sctx_r8);
	case VM_REG_GUEST_R9:
		return (&regctx->sctx_r9);
	case VM_REG_GUEST_R10:
		return (&regctx->sctx_r10);
	case VM_REG_GUEST_R11:
		return (&regctx->sctx_r11);
	case VM_REG_GUEST_R12:
		return (&regctx->sctx_r12);
	case VM_REG_GUEST_R13:
		return (&regctx->sctx_r13);
	case VM_REG_GUEST_R14:
		return (&regctx->sctx_r14);
	case VM_REG_GUEST_R15:
		return (&regctx->sctx_r15);
	case VM_REG_GUEST_DR0:
		return (&regctx->sctx_dr0);
	case VM_REG_GUEST_DR1:
		return (&regctx->sctx_dr1);
	case VM_REG_GUEST_DR2:
		return (&regctx->sctx_dr2);
	case VM_REG_GUEST_DR3:
		return (&regctx->sctx_dr3);
	default:
		return (NULL);
	}
}

static int
svm_getreg(void *vcpui, int ident, uint64_t *val)
{
	struct svm_vcpu *vcpu;
	register_t *reg;

	vcpu = vcpui;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_get_intr_shadow(vcpu, val));
	}

	if (vmcb_read(vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
	}

	SVM_CTR1(vcpu, "svm_getreg: unknown register %#x", ident);
	return (EINVAL);
}

static int
svm_setreg(void *vcpui, int ident, uint64_t val)
{
	struct svm_vcpu *vcpu;
	register_t *reg;

	vcpu = vcpui;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_modify_intr_shadow(vcpu, val));
	}

	/* Do not permit user write access to VMCB fields by offset. */
	if (!VMCB_ACCESS_OK(ident)) {
		if (vmcb_write(vcpu, ident, val) == 0) {
			return (0);
		}
	}

	reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
	}

	if (ident == VM_REG_GUEST_ENTRY_INST_LENGTH) {
		/* Ignore. */
		return (0);
	}

	/*
	 * XXX deal with CR3 and invalidate TLB entries tagged with the
	 * vcpu's ASID. This needs to be treated differently depending on
	 * whether 'running' is true/false.
	 */

	SVM_CTR1(vcpu, "svm_setreg: unknown register %#x", ident);
	return (EINVAL);
}

static int
svm_getdesc(void *vcpui, int reg, struct seg_desc *desc)
{
	return (vmcb_getdesc(vcpui, reg, desc));
}

static int
svm_setdesc(void *vcpui, int reg, struct seg_desc *desc)
{
	return (vmcb_setdesc(vcpui, reg, desc));
}

#ifdef BHYVE_SNAPSHOT
static int
svm_snapshot_reg(void *vcpui, int ident, struct vm_snapshot_meta *meta)
{
	int ret;
	uint64_t val;

	if (meta->op == VM_SNAPSHOT_SAVE) {
		ret = svm_getreg(vcpui, ident, &val);
		if (ret != 0)
			goto done;

		SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
	} else if (meta->op == VM_SNAPSHOT_RESTORE) {
		SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);

		ret = svm_setreg(vcpui, ident, val);
		if (ret != 0)
			goto done;
	} else {
		ret = EINVAL;
		goto done;
	}

done:
	return (ret);
}
#endif

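/*
 * Editor's note: VM_CAP_HALT_EXIT and VM_CAP_PAUSE_EXIT are implemented by
 * toggling the HLT/PAUSE intercepts in the VMCB, so a change takes effect
 * the next time the vcpu executes VMRUN.  Unrestricted guest execution is
 * inherent to SVM and can only be reported as enabled, and VM_CAP_IPI_EXIT
 * simply flips the vlapic's 'ipi_exit' flag.
 */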
static int
svm_setcap(void *vcpui, int type, int val)
{
	struct svm_vcpu *vcpu;
	struct vlapic *vlapic;
	int error;

	vcpu = vcpui;
	error = 0;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT, val);
		break;
	case VM_CAP_PAUSE_EXIT:
		svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE, val);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		/* Unrestricted guest execution cannot be disabled in SVM */
		if (val == 0)
			error = EINVAL;
		break;
	case VM_CAP_IPI_EXIT:
		vlapic = vm_lapic(vcpu->vcpu);
		vlapic->ipi_exit = val;
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static int
svm_getcap(void *vcpui, int type, int *retval)
{
	struct svm_vcpu *vcpu;
	struct vlapic *vlapic;
	int error;

	vcpu = vcpui;
	error = 0;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		*retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT);
		break;
	case VM_CAP_PAUSE_EXIT:
		*retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		*retval = 1;	/* unrestricted guest is always enabled */
		break;
	case VM_CAP_IPI_EXIT:
		vlapic = vm_lapic(vcpu->vcpu);
		*retval = vlapic->ipi_exit;
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static struct vmspace *
svm_vmspace_alloc(vm_offset_t min, vm_offset_t max)
{
	return (svm_npt_alloc(min, max));
}

static void
svm_vmspace_free(struct vmspace *vmspace)
{
	svm_npt_free(vmspace);
}

static struct vlapic *
svm_vlapic_init(void *vcpui)
{
	struct svm_vcpu *vcpu;
	struct vlapic *vlapic;

	vcpu = vcpui;
	vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = vcpu->sc->vm;
	vlapic->vcpu = vcpu->vcpu;
	vlapic->vcpuid = vcpu->vcpuid;
	vlapic->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_SVM_VLAPIC,
	    M_WAITOK | M_ZERO);

	vlapic_init(vlapic);

	return (vlapic);
}

static void
svm_vlapic_cleanup(struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic->apic_page, M_SVM_VLAPIC);
	free(vlapic, M_SVM_VLAPIC);
}

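/*
 * Editor's note: the snapshot support below saves (or restores, depending on
 * meta->op) the vcpu state in three groups: registers and descriptors that
 * live in the VMCB, raw VMCB fields addressed by VMCB_ACCESS() offsets, and
 * finally the software register context plus per-vcpu bookkeeping (nextrip,
 * lastcpu, dirty, eptgen, asid).  On restore, every VMCB cache field is
 * marked dirty so the next VMRUN reloads the full state.
 */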
#ifdef BHYVE_SNAPSHOT
static int
svm_vcpu_snapshot(void *vcpui, struct vm_snapshot_meta *meta)
{
	struct svm_vcpu *vcpu;
	int err, running, hostcpu;

	vcpu = vcpui;
	err = 0;

	running = vcpu_is_running(vcpu->vcpu, &hostcpu);
	if (running && hostcpu != curcpu) {
		printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm),
		    vcpu->vcpuid);
		return (EINVAL);
	}

	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR0, meta);
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR2, meta);
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR3, meta);
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR4, meta);

	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR6, meta);
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR7, meta);

	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RAX, meta);

	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RSP, meta);
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RIP, meta);
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RFLAGS, meta);

	/* Guest segments */
	/* ES */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_ES, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_ES, meta);

	/* CS */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CS, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_CS, meta);

	/* SS */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_SS, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_SS, meta);

	/* DS */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DS, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_DS, meta);

	/* FS */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_FS, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_FS, meta);

	/* GS */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_GS, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GS, meta);

	/* TR */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_TR, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_TR, meta);

	/* LDTR */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_LDTR, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_LDTR, meta);

	/* EFER */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_EFER, meta);

	/* IDTR and GDTR */
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_IDTR, meta);
	err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GDTR, meta);

	/* Specific AMD registers */
	err += svm_snapshot_reg(vcpu, VM_REG_GUEST_INTR_SHADOW, meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_CR_INTERCEPT, 4), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_DR_INTERCEPT, 4), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_EXC_INTERCEPT, 4), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_INST1_INTERCEPT, 4), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_INST2_INTERCEPT, 4), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_PAUSE_FILTHRESH, 2), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_PAUSE_FILCNT, 2), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_ASID, 4), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_TLB_CTRL, 4), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_VIRQ, 8), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_EXIT_REASON, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_EXITINFO1, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_EXITINFO2, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_EXITINTINFO, 8), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_NP_ENABLE, 1), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_AVIC_BAR, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_AVIC_PAGE, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_AVIC_LT, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_AVIC_PT, 8), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_CPL, 1), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_STAR, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_LSTAR, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_CSTAR, 8), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_SFMASK, 8), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_KERNELGBASE, 8), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_SYSENTER_CS, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_SYSENTER_ESP, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_SYSENTER_EIP, 8), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_GUEST_PAT, 8), meta);

	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_DBGCTL, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_BR_FROM, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_BR_TO, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_INT_FROM, 8), meta);
	err += vmcb_snapshot_any(vcpu,
	    VMCB_ACCESS(VMCB_OFF_INT_TO, 8), meta);
	if (err != 0)
		goto done;

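	/*
	 * Editor's note: the fields below mirror swctx_regptr(); they are the
	 * guest registers kept in the software context rather than the VMCB.
	 */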
	/* Snapshot swctx for virtual cpu */
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, err, done);

	/* Save/restore other svm_vcpu struct fields */

	/* NEXTRIP field */
	SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, err, done);

	/* lastcpu and VMCB dirty fields */
	SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, err, done);

	/* EPTGEN field - EPT is Extended Page Table */
	SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, err, done);

	SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, err, done);

	/* Set all caches dirty */
	if (meta->op == VM_SNAPSHOT_RESTORE)
		svm_set_dirty(vcpu, 0xffffffff);

done:
	return (err);
}

static int
svm_restore_tsc(void *vcpui, uint64_t offset)
{
	struct svm_vcpu *vcpu = vcpui;

	svm_set_tsc_offset(vcpu, offset);

	return (0);
}
#endif

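/*
 * Editor's note: this is the AMD/SVM implementation of the vmm_ops interface
 * consumed by the machine-independent vmm code.
 */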
const struct vmm_ops vmm_ops_amd = {
	.modinit	= svm_modinit,
	.modcleanup	= svm_modcleanup,
	.modresume	= svm_modresume,
	.init		= svm_init,
	.run		= svm_run,
	.cleanup	= svm_cleanup,
	.vcpu_init	= svm_vcpu_init,
	.vcpu_cleanup	= svm_vcpu_cleanup,
	.getreg		= svm_getreg,
	.setreg		= svm_setreg,
	.getdesc	= svm_getdesc,
	.setdesc	= svm_setdesc,
	.getcap		= svm_getcap,
	.setcap		= svm_setcap,
	.vmspace_alloc	= svm_vmspace_alloc,
	.vmspace_free	= svm_vmspace_free,
	.vlapic_init	= svm_vlapic_init,
	.vlapic_cleanup	= svm_vlapic_cleanup,
#ifdef BHYVE_SNAPSHOT
	.vcpu_snapshot	= svm_vcpu_snapshot,
	.restore_tsc	= svm_restore_tsc,
#endif
};