1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 #include "opt_bhyve_snapshot.h" 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/smp.h> 35 #include <sys/kernel.h> 36 #include <sys/malloc.h> 37 #include <sys/pcpu.h> 38 #include <sys/proc.h> 39 #include <sys/reg.h> 40 #include <sys/smr.h> 41 #include <sys/sysctl.h> 42 43 #include <vm/vm.h> 44 #include <vm/vm_extern.h> 45 #include <vm/pmap.h> 46 47 #include <machine/cpufunc.h> 48 #include <machine/psl.h> 49 #include <machine/md_var.h> 50 #include <machine/specialreg.h> 51 #include <machine/smp.h> 52 #include <machine/vmm.h> 53 #include <machine/vmm_dev.h> 54 #include <machine/vmm_instruction_emul.h> 55 #include <machine/vmm_snapshot.h> 56 57 #include <dev/vmm/vmm_ktr.h> 58 59 #include "vmm_lapic.h" 60 #include "vmm_stat.h" 61 #include "vmm_ioport.h" 62 #include "vatpic.h" 63 #include "vlapic.h" 64 #include "vlapic_priv.h" 65 66 #include "x86.h" 67 #include "vmcb.h" 68 #include "svm.h" 69 #include "svm_softc.h" 70 #include "svm_msr.h" 71 #include "npt.h" 72 73 SYSCTL_DECL(_hw_vmm); 74 SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 75 NULL); 76 77 /* 78 * SVM CPUID function 0x8000_000A, edx bit decoding. 79 */ 80 #define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ 81 #define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ 82 #define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ 83 #define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ 84 #define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ 85 #define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ 86 #define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ 87 #define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ 88 #define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. */ 89 #define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ 90 #define AMD_CPUID_SVM_AVIC BIT(13) /* AVIC present */ 91 92 #define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ 93 VMCB_CACHE_IOPM | \ 94 VMCB_CACHE_I | \ 95 VMCB_CACHE_TPR | \ 96 VMCB_CACHE_CR2 | \ 97 VMCB_CACHE_CR | \ 98 VMCB_CACHE_DR | \ 99 VMCB_CACHE_DT | \ 100 VMCB_CACHE_SEG | \ 101 VMCB_CACHE_NP) 102 103 static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT; 104 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean, 105 0, NULL); 106 107 static MALLOC_DEFINE(M_SVM, "svm", "svm"); 108 static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); 109 110 static uint32_t svm_feature = ~0U; /* AMD SVM features. */ 111 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0, 112 "SVM features advertised by CPUID.8000000AH:EDX"); 113 114 static int disable_npf_assist; 115 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN, 116 &disable_npf_assist, 0, NULL); 117 118 /* Maximum ASIDs supported by the processor */ 119 static uint32_t nasid; 120 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0, 121 "Number of ASIDs supported by this processor"); 122 123 /* Current ASID generation for each host cpu */ 124 static struct asid asid[MAXCPU]; 125 126 /* SVM host state saved area of size 4KB for each physical core. */ 127 static uint8_t *hsave; 128 129 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery"); 130 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry"); 131 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window"); 132 133 static int svm_getdesc(void *vcpui, int reg, struct seg_desc *desc); 134 static int svm_setreg(void *vcpui, int ident, uint64_t val); 135 static int svm_getreg(void *vcpui, int ident, uint64_t *val); 136 static __inline int 137 flush_by_asid(void) 138 { 139 140 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); 141 } 142 143 static __inline int 144 decode_assist(void) 145 { 146 147 return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST); 148 } 149 150 static void 151 svm_disable(void *arg __unused) 152 { 153 uint64_t efer; 154 155 efer = rdmsr(MSR_EFER); 156 efer &= ~EFER_SVM; 157 wrmsr(MSR_EFER, efer); 158 } 159 160 /* 161 * Disable SVM on all CPUs. 162 */ 163 static int 164 svm_modcleanup(void) 165 { 166 167 smp_rendezvous(NULL, svm_disable, NULL, NULL); 168 169 if (hsave != NULL) 170 kmem_free(hsave, (mp_maxid + 1) * PAGE_SIZE); 171 172 return (0); 173 } 174 175 /* 176 * Verify that all the features required by bhyve are available. 177 */ 178 static int 179 check_svm_features(void) 180 { 181 u_int regs[4]; 182 183 /* CPUID Fn8000_000A is for SVM */ 184 do_cpuid(0x8000000A, regs); 185 svm_feature &= regs[3]; 186 187 /* 188 * The number of ASIDs can be configured to be less than what is 189 * supported by the hardware but not more. 190 */ 191 if (nasid == 0 || nasid > regs[1]) 192 nasid = regs[1]; 193 KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid)); 194 195 /* bhyve requires the Nested Paging feature */ 196 if (!(svm_feature & AMD_CPUID_SVM_NP)) { 197 printf("SVM: Nested Paging feature not available.\n"); 198 return (ENXIO); 199 } 200 201 /* bhyve requires the NRIP Save feature */ 202 if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) { 203 printf("SVM: NRIP Save feature not available.\n"); 204 return (ENXIO); 205 } 206 207 return (0); 208 } 209 210 static void 211 svm_enable(void *arg __unused) 212 { 213 uint64_t efer; 214 215 efer = rdmsr(MSR_EFER); 216 efer |= EFER_SVM; 217 wrmsr(MSR_EFER, efer); 218 219 wrmsr(MSR_VM_HSAVE_PA, vtophys(&hsave[curcpu * PAGE_SIZE])); 220 } 221 222 /* 223 * Return 1 if SVM is enabled on this processor and 0 otherwise. 224 */ 225 static int 226 svm_available(void) 227 { 228 uint64_t msr; 229 230 /* Section 15.4 Enabling SVM from APM2. */ 231 if ((amd_feature2 & AMDID2_SVM) == 0) { 232 printf("SVM: not available.\n"); 233 return (0); 234 } 235 236 msr = rdmsr(MSR_VM_CR); 237 if ((msr & VM_CR_SVMDIS) != 0) { 238 printf("SVM: disabled by BIOS.\n"); 239 return (0); 240 } 241 242 return (1); 243 } 244 245 static int 246 svm_modinit(int ipinum) 247 { 248 int error, cpu; 249 250 if (!svm_available()) 251 return (ENXIO); 252 253 error = check_svm_features(); 254 if (error) 255 return (error); 256 257 vmcb_clean &= VMCB_CACHE_DEFAULT; 258 259 for (cpu = 0; cpu < MAXCPU; cpu++) { 260 /* 261 * Initialize the host ASIDs to their "highest" valid values. 262 * 263 * The next ASID allocation will rollover both 'gen' and 'num' 264 * and start off the sequence at {1,1}. 265 */ 266 asid[cpu].gen = ~0UL; 267 asid[cpu].num = nasid - 1; 268 } 269 270 svm_msr_init(); 271 svm_npt_init(ipinum); 272 273 /* Enable SVM on all CPUs */ 274 hsave = kmem_malloc((mp_maxid + 1) * PAGE_SIZE, M_WAITOK | M_ZERO); 275 smp_rendezvous(NULL, svm_enable, NULL, NULL); 276 277 return (0); 278 } 279 280 static void 281 svm_modsuspend(void) 282 { 283 } 284 285 static void 286 svm_modresume(void) 287 { 288 289 svm_enable(NULL); 290 } 291 292 #ifdef BHYVE_SNAPSHOT 293 void 294 svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset) 295 { 296 struct vmcb_ctrl *ctrl; 297 298 ctrl = svm_get_vmcb_ctrl(vcpu); 299 ctrl->tsc_offset = offset; 300 301 svm_set_dirty(vcpu, VMCB_CACHE_I); 302 SVM_CTR1(vcpu, "tsc offset changed to %#lx", offset); 303 304 vm_set_tsc_offset(vcpu->vcpu, offset); 305 } 306 #endif 307 308 /* Pentium compatible MSRs */ 309 #define MSR_PENTIUM_START 0 310 #define MSR_PENTIUM_END 0x1FFF 311 /* AMD 6th generation and Intel compatible MSRs */ 312 #define MSR_AMD6TH_START 0xC0000000UL 313 #define MSR_AMD6TH_END 0xC0001FFFUL 314 /* AMD 7th and 8th generation compatible MSRs */ 315 #define MSR_AMD7TH_START 0xC0010000UL 316 #define MSR_AMD7TH_END 0xC0011FFFUL 317 318 /* 319 * Get the index and bit position for a MSR in permission bitmap. 320 * Two bits are used for each MSR: lower bit for read and higher bit for write. 321 */ 322 static int 323 svm_msr_index(uint64_t msr, int *index, int *bit) 324 { 325 uint32_t base, off; 326 327 *index = -1; 328 *bit = (msr % 4) * 2; 329 base = 0; 330 331 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { 332 *index = msr / 4; 333 return (0); 334 } 335 336 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 337 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { 338 off = (msr - MSR_AMD6TH_START); 339 *index = (off + base) / 4; 340 return (0); 341 } 342 343 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); 344 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { 345 off = (msr - MSR_AMD7TH_START); 346 *index = (off + base) / 4; 347 return (0); 348 } 349 350 return (EINVAL); 351 } 352 353 /* 354 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor. 355 */ 356 static void 357 svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) 358 { 359 int index, bit, error __diagused; 360 361 error = svm_msr_index(msr, &index, &bit); 362 KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr)); 363 KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE, 364 ("%s: invalid index %d for msr %#lx", __func__, index, msr)); 365 KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d " 366 "msr %#lx", __func__, bit, msr)); 367 368 if (read) 369 perm_bitmap[index] &= ~(1UL << bit); 370 371 if (write) 372 perm_bitmap[index] &= ~(2UL << bit); 373 } 374 375 static void 376 svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) 377 { 378 379 svm_msr_perm(perm_bitmap, msr, true, true); 380 } 381 382 static void 383 svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) 384 { 385 386 svm_msr_perm(perm_bitmap, msr, true, false); 387 } 388 389 static __inline int 390 svm_get_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask) 391 { 392 struct vmcb_ctrl *ctrl; 393 394 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 395 396 ctrl = svm_get_vmcb_ctrl(vcpu); 397 return (ctrl->intercept[idx] & bitmask ? 1 : 0); 398 } 399 400 static __inline void 401 svm_set_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask, int enabled) 402 { 403 struct vmcb_ctrl *ctrl; 404 uint32_t oldval; 405 406 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 407 408 ctrl = svm_get_vmcb_ctrl(vcpu); 409 oldval = ctrl->intercept[idx]; 410 411 if (enabled) 412 ctrl->intercept[idx] |= bitmask; 413 else 414 ctrl->intercept[idx] &= ~bitmask; 415 416 if (ctrl->intercept[idx] != oldval) { 417 svm_set_dirty(vcpu, VMCB_CACHE_I); 418 SVM_CTR3(vcpu, "intercept[%d] modified from %#x to %#x", idx, 419 oldval, ctrl->intercept[idx]); 420 } 421 } 422 423 static __inline void 424 svm_disable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask) 425 { 426 427 svm_set_intercept(vcpu, off, bitmask, 0); 428 } 429 430 static __inline void 431 svm_enable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask) 432 { 433 434 svm_set_intercept(vcpu, off, bitmask, 1); 435 } 436 437 static void 438 vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa, 439 uint64_t msrpm_base_pa, uint64_t np_pml4) 440 { 441 struct vmcb_ctrl *ctrl; 442 struct vmcb_state *state; 443 uint32_t mask; 444 int n; 445 446 ctrl = svm_get_vmcb_ctrl(vcpu); 447 state = svm_get_vmcb_state(vcpu); 448 449 ctrl->iopm_base_pa = iopm_base_pa; 450 ctrl->msrpm_base_pa = msrpm_base_pa; 451 452 /* Enable nested paging */ 453 ctrl->np_enable = 1; 454 ctrl->n_cr3 = np_pml4; 455 456 /* 457 * Intercept accesses to the control registers that are not shadowed 458 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. 459 */ 460 for (n = 0; n < 16; n++) { 461 mask = (BIT(n) << 16) | BIT(n); 462 if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8) 463 svm_disable_intercept(vcpu, VMCB_CR_INTCPT, mask); 464 else 465 svm_enable_intercept(vcpu, VMCB_CR_INTCPT, mask); 466 } 467 468 /* 469 * Intercept everything when tracing guest exceptions otherwise 470 * just intercept machine check exception. 471 */ 472 if (vcpu_trace_exceptions(vcpu->vcpu)) { 473 for (n = 0; n < 32; n++) { 474 /* 475 * Skip unimplemented vectors in the exception bitmap. 476 */ 477 if (n == 2 || n == 9) { 478 continue; 479 } 480 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(n)); 481 } 482 } else { 483 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); 484 } 485 486 /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ 487 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); 488 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); 489 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); 490 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); 491 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); 492 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); 493 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI); 494 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN); 495 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_FERR_FREEZE); 496 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD); 497 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA); 498 499 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR); 500 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT); 501 502 /* 503 * Intercept SVM instructions since AMD enables them in guests otherwise. 504 * Non-intercepted VMMCALL causes #UD, skip it. 505 */ 506 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD); 507 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE); 508 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI); 509 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI); 510 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT); 511 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP); 512 if (vcpu_trap_wbinvd(vcpu->vcpu)) { 513 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, 514 VMCB_INTCPT_WBINVD); 515 } 516 517 /* 518 * From section "Canonicalization and Consistency Checks" in APMv2 519 * the VMRUN intercept bit must be set to pass the consistency check. 520 */ 521 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN); 522 523 /* 524 * The ASID will be set to a non-zero value just before VMRUN. 525 */ 526 ctrl->asid = 0; 527 528 /* 529 * Section 15.21.1, Interrupt Masking in EFLAGS 530 * Section 15.21.2, Virtualizing APIC.TPR 531 * 532 * This must be set for %rflag and %cr8 isolation of guest and host. 533 */ 534 ctrl->v_intr_masking = 1; 535 536 /* Enable Last Branch Record aka LBR for debugging */ 537 ctrl->lbr_virt_en = 1; 538 state->dbgctl = BIT(0); 539 540 /* EFER_SVM must always be set when the guest is executing */ 541 state->efer = EFER_SVM; 542 543 /* Set up the PAT to power-on state */ 544 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | 545 PAT_VALUE(1, PAT_WRITE_THROUGH) | 546 PAT_VALUE(2, PAT_UNCACHED) | 547 PAT_VALUE(3, PAT_UNCACHEABLE) | 548 PAT_VALUE(4, PAT_WRITE_BACK) | 549 PAT_VALUE(5, PAT_WRITE_THROUGH) | 550 PAT_VALUE(6, PAT_UNCACHED) | 551 PAT_VALUE(7, PAT_UNCACHEABLE); 552 553 /* Set up DR6/7 to power-on state */ 554 state->dr6 = DBREG_DR6_RESERVED1; 555 state->dr7 = DBREG_DR7_RESERVED1; 556 } 557 558 /* 559 * Initialize a virtual machine. 560 */ 561 static void * 562 svm_init(struct vm *vm, pmap_t pmap) 563 { 564 struct svm_softc *svm_sc; 565 566 svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO); 567 568 svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM, 569 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0); 570 if (svm_sc->msr_bitmap == NULL) 571 panic("contigmalloc of SVM MSR bitmap failed"); 572 svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM, 573 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0); 574 if (svm_sc->iopm_bitmap == NULL) 575 panic("contigmalloc of SVM IO bitmap failed"); 576 577 svm_sc->vm = vm; 578 svm_sc->nptp = vtophys(pmap->pm_pmltop); 579 580 /* 581 * Intercept read and write accesses to all MSRs. 582 */ 583 memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE); 584 585 /* 586 * Access to the following MSRs is redirected to the VMCB when the 587 * guest is executing. Therefore it is safe to allow the guest to 588 * read/write these MSRs directly without hypervisor involvement. 589 */ 590 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); 591 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); 592 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); 593 594 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); 595 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); 596 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); 597 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); 598 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); 599 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); 600 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); 601 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); 602 603 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); 604 605 /* 606 * Intercept writes to make sure that the EFER_SVM bit is not cleared. 607 */ 608 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); 609 610 /* Intercept access to all I/O ports. */ 611 memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE); 612 613 return (svm_sc); 614 } 615 616 static void * 617 svm_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid) 618 { 619 struct svm_softc *sc = vmi; 620 struct svm_vcpu *vcpu; 621 622 vcpu = malloc(sizeof(*vcpu), M_SVM, M_WAITOK | M_ZERO); 623 vcpu->sc = sc; 624 vcpu->vcpu = vcpu1; 625 vcpu->vcpuid = vcpuid; 626 vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE, M_SVM, 627 M_WAITOK | M_ZERO); 628 vcpu->nextrip = ~0; 629 vcpu->lastcpu = NOCPU; 630 vcpu->vmcb_pa = vtophys(vcpu->vmcb); 631 vmcb_init(sc, vcpu, vtophys(sc->iopm_bitmap), vtophys(sc->msr_bitmap), 632 sc->nptp); 633 svm_msr_guest_init(sc, vcpu); 634 return (vcpu); 635 } 636 637 /* 638 * Collateral for a generic SVM VM-exit. 639 */ 640 static void 641 vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) 642 { 643 644 vme->exitcode = VM_EXITCODE_SVM; 645 vme->u.svm.exitcode = code; 646 vme->u.svm.exitinfo1 = info1; 647 vme->u.svm.exitinfo2 = info2; 648 } 649 650 static int 651 svm_cpl(struct vmcb_state *state) 652 { 653 654 /* 655 * From APMv2: 656 * "Retrieve the CPL from the CPL field in the VMCB, not 657 * from any segment DPL" 658 */ 659 return (state->cpl); 660 } 661 662 static enum vm_cpu_mode 663 svm_vcpu_mode(struct vmcb *vmcb) 664 { 665 struct vmcb_segment seg; 666 struct vmcb_state *state; 667 int error __diagused; 668 669 state = &vmcb->state; 670 671 if (state->efer & EFER_LMA) { 672 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 673 KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__, 674 error)); 675 676 /* 677 * Section 4.8.1 for APM2, check if Code Segment has 678 * Long attribute set in descriptor. 679 */ 680 if (seg.attrib & VMCB_CS_ATTRIB_L) 681 return (CPU_MODE_64BIT); 682 else 683 return (CPU_MODE_COMPATIBILITY); 684 } else if (state->cr0 & CR0_PE) { 685 return (CPU_MODE_PROTECTED); 686 } else { 687 return (CPU_MODE_REAL); 688 } 689 } 690 691 static enum vm_paging_mode 692 svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) 693 { 694 695 if ((cr0 & CR0_PG) == 0) 696 return (PAGING_MODE_FLAT); 697 if ((cr4 & CR4_PAE) == 0) 698 return (PAGING_MODE_32); 699 if (efer & EFER_LME) 700 return (PAGING_MODE_64); 701 else 702 return (PAGING_MODE_PAE); 703 } 704 705 /* 706 * ins/outs utility routines 707 */ 708 static uint64_t 709 svm_inout_str_index(struct svm_regctx *regs, int in) 710 { 711 uint64_t val; 712 713 val = in ? regs->sctx_rdi : regs->sctx_rsi; 714 715 return (val); 716 } 717 718 static uint64_t 719 svm_inout_str_count(struct svm_regctx *regs, int rep) 720 { 721 uint64_t val; 722 723 val = rep ? regs->sctx_rcx : 1; 724 725 return (val); 726 } 727 728 static void 729 svm_inout_str_seginfo(struct svm_vcpu *vcpu, int64_t info1, int in, 730 struct vm_inout_str *vis) 731 { 732 int error __diagused, s; 733 734 if (in) { 735 vis->seg_name = VM_REG_GUEST_ES; 736 } else { 737 /* The segment field has standard encoding */ 738 s = (info1 >> 10) & 0x7; 739 vis->seg_name = vm_segment_name(s); 740 } 741 742 error = svm_getdesc(vcpu, vis->seg_name, &vis->seg_desc); 743 KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); 744 } 745 746 static int 747 svm_inout_str_addrsize(uint64_t info1) 748 { 749 uint32_t size; 750 751 size = (info1 >> 7) & 0x7; 752 switch (size) { 753 case 1: 754 return (2); /* 16 bit */ 755 case 2: 756 return (4); /* 32 bit */ 757 case 4: 758 return (8); /* 64 bit */ 759 default: 760 panic("%s: invalid size encoding %d", __func__, size); 761 } 762 } 763 764 static void 765 svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) 766 { 767 struct vmcb_state *state; 768 769 state = &vmcb->state; 770 paging->cr3 = state->cr3; 771 paging->cpl = svm_cpl(state); 772 paging->cpu_mode = svm_vcpu_mode(vmcb); 773 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, 774 state->efer); 775 } 776 777 #define UNHANDLED 0 778 779 /* 780 * Handle guest I/O intercept. 781 */ 782 static int 783 svm_handle_io(struct svm_vcpu *vcpu, struct vm_exit *vmexit) 784 { 785 struct vmcb_ctrl *ctrl; 786 struct vmcb_state *state; 787 struct svm_regctx *regs; 788 struct vm_inout_str *vis; 789 uint64_t info1; 790 int inout_string; 791 792 state = svm_get_vmcb_state(vcpu); 793 ctrl = svm_get_vmcb_ctrl(vcpu); 794 regs = svm_get_guest_regctx(vcpu); 795 796 info1 = ctrl->exitinfo1; 797 inout_string = info1 & BIT(2) ? 1 : 0; 798 799 /* 800 * The effective segment number in EXITINFO1[12:10] is populated 801 * only if the processor has the DecodeAssist capability. 802 * 803 * XXX this is not specified explicitly in APMv2 but can be verified 804 * empirically. 805 */ 806 if (inout_string && !decode_assist()) 807 return (UNHANDLED); 808 809 vmexit->exitcode = VM_EXITCODE_INOUT; 810 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; 811 vmexit->u.inout.string = inout_string; 812 vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0; 813 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; 814 vmexit->u.inout.port = (uint16_t)(info1 >> 16); 815 vmexit->u.inout.eax = (uint32_t)(state->rax); 816 817 if (inout_string) { 818 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 819 vis = &vmexit->u.inout_str; 820 svm_paging_info(svm_get_vmcb(vcpu), &vis->paging); 821 vis->rflags = state->rflags; 822 vis->cr0 = state->cr0; 823 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); 824 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); 825 vis->addrsize = svm_inout_str_addrsize(info1); 826 svm_inout_str_seginfo(vcpu, info1, vmexit->u.inout.in, vis); 827 } 828 829 return (UNHANDLED); 830 } 831 832 static int 833 npf_fault_type(uint64_t exitinfo1) 834 { 835 836 if (exitinfo1 & VMCB_NPF_INFO1_W) 837 return (VM_PROT_WRITE); 838 else if (exitinfo1 & VMCB_NPF_INFO1_ID) 839 return (VM_PROT_EXECUTE); 840 else 841 return (VM_PROT_READ); 842 } 843 844 static bool 845 svm_npf_emul_fault(uint64_t exitinfo1) 846 { 847 848 if (exitinfo1 & VMCB_NPF_INFO1_ID) { 849 return (false); 850 } 851 852 if (exitinfo1 & VMCB_NPF_INFO1_GPT) { 853 return (false); 854 } 855 856 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { 857 return (false); 858 } 859 860 return (true); 861 } 862 863 static void 864 svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) 865 { 866 struct vm_guest_paging *paging; 867 struct vmcb_segment seg; 868 struct vmcb_ctrl *ctrl; 869 char *inst_bytes; 870 int error __diagused, inst_len; 871 872 ctrl = &vmcb->ctrl; 873 paging = &vmexit->u.inst_emul.paging; 874 875 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 876 vmexit->u.inst_emul.gpa = gpa; 877 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; 878 svm_paging_info(vmcb, paging); 879 880 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 881 KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error)); 882 883 switch(paging->cpu_mode) { 884 case CPU_MODE_REAL: 885 vmexit->u.inst_emul.cs_base = seg.base; 886 vmexit->u.inst_emul.cs_d = 0; 887 break; 888 case CPU_MODE_PROTECTED: 889 case CPU_MODE_COMPATIBILITY: 890 vmexit->u.inst_emul.cs_base = seg.base; 891 892 /* 893 * Section 4.8.1 of APM2, Default Operand Size or D bit. 894 */ 895 vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ? 896 1 : 0; 897 break; 898 default: 899 vmexit->u.inst_emul.cs_base = 0; 900 vmexit->u.inst_emul.cs_d = 0; 901 break; 902 } 903 904 /* 905 * Copy the instruction bytes into 'vie' if available. 906 */ 907 if (decode_assist() && !disable_npf_assist) { 908 inst_len = ctrl->inst_len; 909 inst_bytes = ctrl->inst_bytes; 910 } else { 911 inst_len = 0; 912 inst_bytes = NULL; 913 } 914 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); 915 } 916 917 #ifdef KTR 918 static const char * 919 intrtype_to_str(int intr_type) 920 { 921 switch (intr_type) { 922 case VMCB_EVENTINJ_TYPE_INTR: 923 return ("hwintr"); 924 case VMCB_EVENTINJ_TYPE_NMI: 925 return ("nmi"); 926 case VMCB_EVENTINJ_TYPE_INTn: 927 return ("swintr"); 928 case VMCB_EVENTINJ_TYPE_EXCEPTION: 929 return ("exception"); 930 default: 931 panic("%s: unknown intr_type %d", __func__, intr_type); 932 } 933 } 934 #endif 935 936 /* 937 * Inject an event to vcpu as described in section 15.20, "Event injection". 938 */ 939 static void 940 svm_eventinject(struct svm_vcpu *vcpu, int intr_type, int vector, 941 uint32_t error, bool ec_valid) 942 { 943 struct vmcb_ctrl *ctrl; 944 945 ctrl = svm_get_vmcb_ctrl(vcpu); 946 947 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, 948 ("%s: event already pending %#lx", __func__, ctrl->eventinj)); 949 950 KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d", 951 __func__, vector)); 952 953 switch (intr_type) { 954 case VMCB_EVENTINJ_TYPE_INTR: 955 case VMCB_EVENTINJ_TYPE_NMI: 956 case VMCB_EVENTINJ_TYPE_INTn: 957 break; 958 case VMCB_EVENTINJ_TYPE_EXCEPTION: 959 if (vector >= 0 && vector <= 31 && vector != 2) 960 break; 961 /* FALLTHROUGH */ 962 default: 963 panic("%s: invalid intr_type/vector: %d/%d", __func__, 964 intr_type, vector); 965 } 966 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; 967 if (ec_valid) { 968 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; 969 ctrl->eventinj |= (uint64_t)error << 32; 970 SVM_CTR3(vcpu, "Injecting %s at vector %d errcode %#x", 971 intrtype_to_str(intr_type), vector, error); 972 } else { 973 SVM_CTR2(vcpu, "Injecting %s at vector %d", 974 intrtype_to_str(intr_type), vector); 975 } 976 } 977 978 static void 979 svm_update_virqinfo(struct svm_vcpu *vcpu) 980 { 981 struct vlapic *vlapic; 982 struct vmcb_ctrl *ctrl; 983 984 vlapic = vm_lapic(vcpu->vcpu); 985 ctrl = svm_get_vmcb_ctrl(vcpu); 986 987 /* Update %cr8 in the emulated vlapic */ 988 vlapic_set_cr8(vlapic, ctrl->v_tpr); 989 990 /* Virtual interrupt injection is not used. */ 991 KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid " 992 "v_intr_vector %d", __func__, ctrl->v_intr_vector)); 993 } 994 995 static void 996 svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu) 997 { 998 struct vmcb_ctrl *ctrl; 999 uint64_t intinfo; 1000 1001 ctrl = svm_get_vmcb_ctrl(vcpu); 1002 intinfo = ctrl->exitintinfo; 1003 if (!VMCB_EXITINTINFO_VALID(intinfo)) 1004 return; 1005 1006 /* 1007 * From APMv2, Section "Intercepts during IDT interrupt delivery" 1008 * 1009 * If a #VMEXIT happened during event delivery then record the event 1010 * that was being delivered. 1011 */ 1012 SVM_CTR2(vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", intinfo, 1013 VMCB_EXITINTINFO_VECTOR(intinfo)); 1014 vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1); 1015 vm_exit_intinfo(vcpu->vcpu, intinfo); 1016 } 1017 1018 #ifdef INVARIANTS 1019 static __inline int 1020 vintr_intercept_enabled(struct svm_vcpu *vcpu) 1021 { 1022 1023 return (svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR)); 1024 } 1025 #endif 1026 1027 static __inline void 1028 enable_intr_window_exiting(struct svm_vcpu *vcpu) 1029 { 1030 struct vmcb_ctrl *ctrl; 1031 1032 ctrl = svm_get_vmcb_ctrl(vcpu); 1033 1034 if (ctrl->v_irq && ctrl->v_intr_vector == 0) { 1035 KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); 1036 KASSERT(vintr_intercept_enabled(vcpu), 1037 ("%s: vintr intercept should be enabled", __func__)); 1038 return; 1039 } 1040 1041 SVM_CTR0(vcpu, "Enable intr window exiting"); 1042 ctrl->v_irq = 1; 1043 ctrl->v_ign_tpr = 1; 1044 ctrl->v_intr_vector = 0; 1045 svm_set_dirty(vcpu, VMCB_CACHE_TPR); 1046 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1047 } 1048 1049 static __inline void 1050 disable_intr_window_exiting(struct svm_vcpu *vcpu) 1051 { 1052 struct vmcb_ctrl *ctrl; 1053 1054 ctrl = svm_get_vmcb_ctrl(vcpu); 1055 1056 if (!ctrl->v_irq && ctrl->v_intr_vector == 0) { 1057 KASSERT(!vintr_intercept_enabled(vcpu), 1058 ("%s: vintr intercept should be disabled", __func__)); 1059 return; 1060 } 1061 1062 SVM_CTR0(vcpu, "Disable intr window exiting"); 1063 ctrl->v_irq = 0; 1064 ctrl->v_intr_vector = 0; 1065 svm_set_dirty(vcpu, VMCB_CACHE_TPR); 1066 svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1067 } 1068 1069 static int 1070 svm_modify_intr_shadow(struct svm_vcpu *vcpu, uint64_t val) 1071 { 1072 struct vmcb_ctrl *ctrl; 1073 int oldval, newval; 1074 1075 ctrl = svm_get_vmcb_ctrl(vcpu); 1076 oldval = ctrl->intr_shadow; 1077 newval = val ? 1 : 0; 1078 if (newval != oldval) { 1079 ctrl->intr_shadow = newval; 1080 SVM_CTR1(vcpu, "Setting intr_shadow to %d", newval); 1081 } 1082 return (0); 1083 } 1084 1085 static int 1086 svm_get_intr_shadow(struct svm_vcpu *vcpu, uint64_t *val) 1087 { 1088 struct vmcb_ctrl *ctrl; 1089 1090 ctrl = svm_get_vmcb_ctrl(vcpu); 1091 *val = ctrl->intr_shadow; 1092 return (0); 1093 } 1094 1095 /* 1096 * Once an NMI is injected it blocks delivery of further NMIs until the handler 1097 * executes an IRET. The IRET intercept is enabled when an NMI is injected to 1098 * to track when the vcpu is done handling the NMI. 1099 */ 1100 static int 1101 nmi_blocked(struct svm_vcpu *vcpu) 1102 { 1103 int blocked; 1104 1105 blocked = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1106 return (blocked); 1107 } 1108 1109 static void 1110 enable_nmi_blocking(struct svm_vcpu *vcpu) 1111 { 1112 1113 KASSERT(!nmi_blocked(vcpu), ("vNMI already blocked")); 1114 SVM_CTR0(vcpu, "vNMI blocking enabled"); 1115 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1116 } 1117 1118 static void 1119 clear_nmi_blocking(struct svm_vcpu *vcpu) 1120 { 1121 int error __diagused; 1122 1123 KASSERT(nmi_blocked(vcpu), ("vNMI already unblocked")); 1124 SVM_CTR0(vcpu, "vNMI blocking cleared"); 1125 /* 1126 * When the IRET intercept is cleared the vcpu will attempt to execute 1127 * the "iret" when it runs next. However, it is possible to inject 1128 * another NMI into the vcpu before the "iret" has actually executed. 1129 * 1130 * For e.g. if the "iret" encounters a #NPF when accessing the stack 1131 * it will trap back into the hypervisor. If an NMI is pending for 1132 * the vcpu it will be injected into the guest. 1133 * 1134 * XXX this needs to be fixed 1135 */ 1136 svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1137 1138 /* 1139 * Set 'intr_shadow' to prevent an NMI from being injected on the 1140 * immediate VMRUN. 1141 */ 1142 error = svm_modify_intr_shadow(vcpu, 1); 1143 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); 1144 } 1145 1146 #define EFER_MBZ_BITS 0xFFFFFFFFFFFF0200UL 1147 1148 static int 1149 svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval, 1150 bool *retu) 1151 { 1152 struct vm_exit *vme; 1153 struct vmcb_state *state; 1154 uint64_t changed, lma, oldval; 1155 int error __diagused; 1156 1157 state = svm_get_vmcb_state(vcpu); 1158 1159 oldval = state->efer; 1160 SVM_CTR2(vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval); 1161 1162 newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */ 1163 changed = oldval ^ newval; 1164 1165 if (newval & EFER_MBZ_BITS) 1166 goto gpf; 1167 1168 /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */ 1169 if (changed & EFER_LME) { 1170 if (state->cr0 & CR0_PG) 1171 goto gpf; 1172 } 1173 1174 /* EFER.LMA = EFER.LME & CR0.PG */ 1175 if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) 1176 lma = EFER_LMA; 1177 else 1178 lma = 0; 1179 1180 if ((newval & EFER_LMA) != lma) 1181 goto gpf; 1182 1183 if (newval & EFER_NXE) { 1184 if (!vm_cpuid_capability(vcpu->vcpu, VCC_NO_EXECUTE)) 1185 goto gpf; 1186 } 1187 1188 /* 1189 * XXX bhyve does not enforce segment limits in 64-bit mode. Until 1190 * this is fixed flag guest attempt to set EFER_LMSLE as an error. 1191 */ 1192 if (newval & EFER_LMSLE) { 1193 vme = vm_exitinfo(vcpu->vcpu); 1194 vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0); 1195 *retu = true; 1196 return (0); 1197 } 1198 1199 if (newval & EFER_FFXSR) { 1200 if (!vm_cpuid_capability(vcpu->vcpu, VCC_FFXSR)) 1201 goto gpf; 1202 } 1203 1204 if (newval & EFER_TCE) { 1205 if (!vm_cpuid_capability(vcpu->vcpu, VCC_TCE)) 1206 goto gpf; 1207 } 1208 1209 error = svm_setreg(vcpu, VM_REG_GUEST_EFER, newval); 1210 KASSERT(error == 0, ("%s: error %d updating efer", __func__, error)); 1211 return (0); 1212 gpf: 1213 vm_inject_gp(vcpu->vcpu); 1214 return (0); 1215 } 1216 1217 static int 1218 emulate_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num, 1219 uint64_t val, bool *retu) 1220 { 1221 int error; 1222 1223 if (lapic_msr(num)) 1224 error = lapic_wrmsr(vcpu->vcpu, num, val, retu); 1225 else if (num == MSR_EFER) 1226 error = svm_write_efer(sc, vcpu, val, retu); 1227 else 1228 error = svm_wrmsr(vcpu, num, val, retu); 1229 1230 return (error); 1231 } 1232 1233 static int 1234 emulate_rdmsr(struct svm_vcpu *vcpu, u_int num, bool *retu) 1235 { 1236 struct vmcb_state *state; 1237 struct svm_regctx *ctx; 1238 uint64_t result; 1239 int error; 1240 1241 if (lapic_msr(num)) 1242 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu); 1243 else 1244 error = svm_rdmsr(vcpu, num, &result, retu); 1245 1246 if (error == 0) { 1247 state = svm_get_vmcb_state(vcpu); 1248 ctx = svm_get_guest_regctx(vcpu); 1249 state->rax = result & 0xffffffff; 1250 ctx->sctx_rdx = result >> 32; 1251 } 1252 1253 return (error); 1254 } 1255 1256 #ifdef KTR 1257 static const char * 1258 exit_reason_to_str(uint64_t reason) 1259 { 1260 int i; 1261 static char reasonbuf[32]; 1262 static const struct { 1263 int reason; 1264 const char *str; 1265 } reasons[] = { 1266 { .reason = VMCB_EXIT_INVALID, .str = "invalvmcb" }, 1267 { .reason = VMCB_EXIT_SHUTDOWN, .str = "shutdown" }, 1268 { .reason = VMCB_EXIT_NPF, .str = "nptfault" }, 1269 { .reason = VMCB_EXIT_PAUSE, .str = "pause" }, 1270 { .reason = VMCB_EXIT_HLT, .str = "hlt" }, 1271 { .reason = VMCB_EXIT_CPUID, .str = "cpuid" }, 1272 { .reason = VMCB_EXIT_IO, .str = "inout" }, 1273 { .reason = VMCB_EXIT_MC, .str = "mchk" }, 1274 { .reason = VMCB_EXIT_INTR, .str = "extintr" }, 1275 { .reason = VMCB_EXIT_NMI, .str = "nmi" }, 1276 { .reason = VMCB_EXIT_VINTR, .str = "vintr" }, 1277 { .reason = VMCB_EXIT_MSR, .str = "msr" }, 1278 { .reason = VMCB_EXIT_IRET, .str = "iret" }, 1279 { .reason = VMCB_EXIT_MONITOR, .str = "monitor" }, 1280 { .reason = VMCB_EXIT_MWAIT, .str = "mwait" }, 1281 { .reason = VMCB_EXIT_VMRUN, .str = "vmrun" }, 1282 { .reason = VMCB_EXIT_VMMCALL, .str = "vmmcall" }, 1283 { .reason = VMCB_EXIT_VMLOAD, .str = "vmload" }, 1284 { .reason = VMCB_EXIT_VMSAVE, .str = "vmsave" }, 1285 { .reason = VMCB_EXIT_STGI, .str = "stgi" }, 1286 { .reason = VMCB_EXIT_CLGI, .str = "clgi" }, 1287 { .reason = VMCB_EXIT_SKINIT, .str = "skinit" }, 1288 { .reason = VMCB_EXIT_ICEBP, .str = "icebp" }, 1289 { .reason = VMCB_EXIT_INVD, .str = "invd" }, 1290 { .reason = VMCB_EXIT_INVLPGA, .str = "invlpga" }, 1291 { .reason = VMCB_EXIT_POPF, .str = "popf" }, 1292 { .reason = VMCB_EXIT_PUSHF, .str = "pushf" }, 1293 }; 1294 1295 for (i = 0; i < nitems(reasons); i++) { 1296 if (reasons[i].reason == reason) 1297 return (reasons[i].str); 1298 } 1299 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); 1300 return (reasonbuf); 1301 } 1302 #endif /* KTR */ 1303 1304 /* 1305 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs 1306 * that are due to instruction intercepts as well as MSR and IOIO intercepts 1307 * and exceptions caused by INT3, INTO and BOUND instructions. 1308 * 1309 * Return 1 if the nRIP is valid and 0 otherwise. 1310 */ 1311 static int 1312 nrip_valid(uint64_t exitcode) 1313 { 1314 switch (exitcode) { 1315 case 0x00 ... 0x0F: /* read of CR0 through CR15 */ 1316 case 0x10 ... 0x1F: /* write of CR0 through CR15 */ 1317 case 0x20 ... 0x2F: /* read of DR0 through DR15 */ 1318 case 0x30 ... 0x3F: /* write of DR0 through DR15 */ 1319 case 0x43: /* INT3 */ 1320 case 0x44: /* INTO */ 1321 case 0x45: /* BOUND */ 1322 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */ 1323 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */ 1324 return (1); 1325 default: 1326 return (0); 1327 } 1328 } 1329 1330 static int 1331 svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu, 1332 struct vm_exit *vmexit) 1333 { 1334 struct vmcb *vmcb; 1335 struct vmcb_state *state; 1336 struct vmcb_ctrl *ctrl; 1337 struct svm_regctx *ctx; 1338 uint64_t code, info1, info2, val; 1339 uint32_t eax, ecx, edx; 1340 int error __diagused, errcode_valid, handled, idtvec, reflect; 1341 bool retu; 1342 1343 ctx = svm_get_guest_regctx(vcpu); 1344 vmcb = svm_get_vmcb(vcpu); 1345 state = &vmcb->state; 1346 ctrl = &vmcb->ctrl; 1347 1348 handled = 0; 1349 code = ctrl->exitcode; 1350 info1 = ctrl->exitinfo1; 1351 info2 = ctrl->exitinfo2; 1352 1353 vmexit->exitcode = VM_EXITCODE_BOGUS; 1354 vmexit->rip = state->rip; 1355 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0; 1356 1357 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1); 1358 1359 /* 1360 * #VMEXIT(INVALID) needs to be handled early because the VMCB is 1361 * in an inconsistent state and can trigger assertions that would 1362 * never happen otherwise. 1363 */ 1364 if (code == VMCB_EXIT_INVALID) { 1365 vm_exit_svm(vmexit, code, info1, info2); 1366 return (0); 1367 } 1368 1369 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 1370 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); 1371 1372 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, 1373 ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)", 1374 vmexit->inst_length, code, info1, info2)); 1375 1376 svm_update_virqinfo(vcpu); 1377 svm_save_intinfo(svm_sc, vcpu); 1378 1379 switch (code) { 1380 case VMCB_EXIT_IRET: 1381 /* 1382 * Restart execution at "iret" but with the intercept cleared. 1383 */ 1384 vmexit->inst_length = 0; 1385 clear_nmi_blocking(vcpu); 1386 handled = 1; 1387 break; 1388 case VMCB_EXIT_VINTR: /* interrupt window exiting */ 1389 vmm_stat_incr(vcpu->vcpu, VMEXIT_VINTR, 1); 1390 handled = 1; 1391 break; 1392 case VMCB_EXIT_INTR: /* external interrupt */ 1393 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1); 1394 handled = 1; 1395 break; 1396 case VMCB_EXIT_NMI: /* external NMI */ 1397 handled = 1; 1398 break; 1399 case 0x40 ... 0x5F: 1400 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1); 1401 reflect = 1; 1402 idtvec = code - 0x40; 1403 switch (idtvec) { 1404 case IDT_MC: 1405 /* 1406 * Call the machine check handler by hand. Also don't 1407 * reflect the machine check back into the guest. 1408 */ 1409 reflect = 0; 1410 SVM_CTR0(vcpu, "Vectoring to MCE handler"); 1411 __asm __volatile("int $18"); 1412 break; 1413 case IDT_PF: 1414 error = svm_setreg(vcpu, VM_REG_GUEST_CR2, info2); 1415 KASSERT(error == 0, ("%s: error %d updating cr2", 1416 __func__, error)); 1417 /* fallthru */ 1418 case IDT_NP: 1419 case IDT_SS: 1420 case IDT_GP: 1421 case IDT_AC: 1422 case IDT_TS: 1423 errcode_valid = 1; 1424 break; 1425 1426 case IDT_DF: 1427 errcode_valid = 1; 1428 info1 = 0; 1429 break; 1430 case IDT_DB: { 1431 /* 1432 * Check if we are being stepped (RFLAGS.TF) 1433 * and bounce vmexit to userland. 1434 */ 1435 bool stepped = 0; 1436 uint64_t dr6 = 0; 1437 1438 svm_getreg(vcpu, VM_REG_GUEST_DR6, &dr6); 1439 stepped = !!(dr6 & DBREG_DR6_BS); 1440 if (stepped && (vcpu->caps & (1 << VM_CAP_RFLAGS_TF))) { 1441 vmexit->exitcode = VM_EXITCODE_DB; 1442 vmexit->u.dbg.trace_trap = 1; 1443 vmexit->u.dbg.pushf_intercept = 0; 1444 1445 if (vcpu->dbg.popf_sstep) { 1446 /* 1447 * DB# exit was caused by stepping over 1448 * popf. 1449 */ 1450 uint64_t rflags; 1451 1452 vcpu->dbg.popf_sstep = 0; 1453 1454 /* 1455 * Update shadowed TF bit so the next 1456 * setcap(..., RFLAGS_SSTEP, 0) restores 1457 * the correct value 1458 */ 1459 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, 1460 &rflags); 1461 vcpu->dbg.rflags_tf = rflags & PSL_T; 1462 } else if (vcpu->dbg.pushf_sstep) { 1463 /* 1464 * DB# exit was caused by stepping over 1465 * pushf. 1466 */ 1467 vcpu->dbg.pushf_sstep = 0; 1468 1469 /* 1470 * Adjusting the pushed rflags after a 1471 * restarted pushf instruction must be 1472 * handled outside of svm.c due to the 1473 * critical_enter() lock being held. 1474 */ 1475 vmexit->u.dbg.pushf_intercept = 1; 1476 vmexit->u.dbg.tf_shadow_val = 1477 vcpu->dbg.rflags_tf; 1478 svm_paging_info(svm_get_vmcb(vcpu), 1479 &vmexit->u.dbg.paging); 1480 } 1481 1482 /* Clear DR6 "single-step" bit. */ 1483 dr6 &= ~DBREG_DR6_BS; 1484 error = svm_setreg(vcpu, VM_REG_GUEST_DR6, dr6); 1485 KASSERT(error == 0, 1486 ("%s: error %d updating DR6\r\n", __func__, 1487 error)); 1488 1489 reflect = 0; 1490 } 1491 break; 1492 } 1493 case IDT_BP: 1494 vmexit->exitcode = VM_EXITCODE_BPT; 1495 vmexit->u.bpt.inst_length = vmexit->inst_length; 1496 vmexit->inst_length = 0; 1497 1498 reflect = 0; 1499 break; 1500 case IDT_OF: 1501 case IDT_BR: 1502 /* 1503 * The 'nrip' field is populated for INT3, INTO and 1504 * BOUND exceptions and this also implies that 1505 * 'inst_length' is non-zero. 1506 * 1507 * Reset 'inst_length' to zero so the guest %rip at 1508 * event injection is identical to what it was when 1509 * the exception originally happened. 1510 */ 1511 SVM_CTR2(vcpu, "Reset inst_length from %d " 1512 "to zero before injecting exception %d", 1513 vmexit->inst_length, idtvec); 1514 vmexit->inst_length = 0; 1515 /* fallthru */ 1516 default: 1517 errcode_valid = 0; 1518 info1 = 0; 1519 break; 1520 } 1521 1522 if (reflect) { 1523 KASSERT(vmexit->inst_length == 0, 1524 ("invalid inst_length (%d) " 1525 "when reflecting exception %d into guest", 1526 vmexit->inst_length, idtvec)); 1527 /* Reflect the exception back into the guest */ 1528 SVM_CTR2(vcpu, "Reflecting exception " 1529 "%d/%#x into the guest", idtvec, (int)info1); 1530 error = vm_inject_exception(vcpu->vcpu, idtvec, 1531 errcode_valid, info1, 0); 1532 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 1533 __func__, error)); 1534 handled = 1; 1535 } 1536 break; 1537 case VMCB_EXIT_MSR: /* MSR access. */ 1538 eax = state->rax; 1539 ecx = ctx->sctx_rcx; 1540 edx = ctx->sctx_rdx; 1541 retu = false; 1542 1543 if (info1) { 1544 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1); 1545 val = (uint64_t)edx << 32 | eax; 1546 SVM_CTR2(vcpu, "wrmsr %#x val %#lx", ecx, val); 1547 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) { 1548 vmexit->exitcode = VM_EXITCODE_WRMSR; 1549 vmexit->u.msr.code = ecx; 1550 vmexit->u.msr.wval = val; 1551 } else if (!retu) { 1552 handled = 1; 1553 } else { 1554 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1555 ("emulate_wrmsr retu with bogus exitcode")); 1556 } 1557 } else { 1558 SVM_CTR1(vcpu, "rdmsr %#x", ecx); 1559 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1); 1560 if (emulate_rdmsr(vcpu, ecx, &retu)) { 1561 vmexit->exitcode = VM_EXITCODE_RDMSR; 1562 vmexit->u.msr.code = ecx; 1563 } else if (!retu) { 1564 handled = 1; 1565 } else { 1566 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1567 ("emulate_rdmsr retu with bogus exitcode")); 1568 } 1569 } 1570 break; 1571 case VMCB_EXIT_IO: 1572 handled = svm_handle_io(vcpu, vmexit); 1573 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1); 1574 break; 1575 case VMCB_EXIT_CPUID: 1576 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1); 1577 handled = x86_emulate_cpuid(vcpu->vcpu, 1578 &state->rax, &ctx->sctx_rbx, &ctx->sctx_rcx, 1579 &ctx->sctx_rdx); 1580 break; 1581 case VMCB_EXIT_HLT: 1582 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1); 1583 vmexit->exitcode = VM_EXITCODE_HLT; 1584 vmexit->u.hlt.rflags = state->rflags; 1585 break; 1586 case VMCB_EXIT_PAUSE: 1587 vmexit->exitcode = VM_EXITCODE_PAUSE; 1588 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1); 1589 break; 1590 case VMCB_EXIT_NPF: 1591 /* EXITINFO2 contains the faulting guest physical address */ 1592 if (info1 & VMCB_NPF_INFO1_RSV) { 1593 SVM_CTR2(vcpu, "nested page fault with " 1594 "reserved bits set: info1(%#lx) info2(%#lx)", 1595 info1, info2); 1596 } else if (vm_mem_allocated(vcpu->vcpu, info2)) { 1597 vmexit->exitcode = VM_EXITCODE_PAGING; 1598 vmexit->u.paging.gpa = info2; 1599 vmexit->u.paging.fault_type = npf_fault_type(info1); 1600 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1); 1601 SVM_CTR3(vcpu, "nested page fault " 1602 "on gpa %#lx/%#lx at rip %#lx", 1603 info2, info1, state->rip); 1604 } else if (svm_npf_emul_fault(info1)) { 1605 svm_handle_inst_emul(vmcb, info2, vmexit); 1606 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1); 1607 SVM_CTR3(vcpu, "inst_emul fault " 1608 "for gpa %#lx/%#lx at rip %#lx", 1609 info2, info1, state->rip); 1610 } 1611 break; 1612 case VMCB_EXIT_MONITOR: 1613 vmexit->exitcode = VM_EXITCODE_MONITOR; 1614 break; 1615 case VMCB_EXIT_MWAIT: 1616 vmexit->exitcode = VM_EXITCODE_MWAIT; 1617 break; 1618 case VMCB_EXIT_PUSHF: { 1619 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { 1620 uint64_t rflags; 1621 1622 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags); 1623 /* Restart this instruction. */ 1624 vmexit->inst_length = 0; 1625 /* Disable PUSHF intercepts - avoid a loop. */ 1626 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, 1627 VMCB_INTCPT_PUSHF, 0); 1628 /* Trace restarted instruction. */ 1629 svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T)); 1630 /* Let the IDT_DB handler know that pushf was stepped. 1631 */ 1632 vcpu->dbg.pushf_sstep = 1; 1633 handled = 1; 1634 } 1635 break; 1636 } 1637 case VMCB_EXIT_POPF: { 1638 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { 1639 uint64_t rflags; 1640 1641 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags); 1642 /* Restart this instruction */ 1643 vmexit->inst_length = 0; 1644 /* Disable POPF intercepts - avoid a loop*/ 1645 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, 1646 VMCB_INTCPT_POPF, 0); 1647 /* Trace restarted instruction */ 1648 svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T)); 1649 vcpu->dbg.popf_sstep = 1; 1650 handled = 1; 1651 } 1652 break; 1653 } 1654 case VMCB_EXIT_SHUTDOWN: 1655 case VMCB_EXIT_VMRUN: 1656 case VMCB_EXIT_VMMCALL: 1657 case VMCB_EXIT_VMLOAD: 1658 case VMCB_EXIT_VMSAVE: 1659 case VMCB_EXIT_STGI: 1660 case VMCB_EXIT_CLGI: 1661 case VMCB_EXIT_SKINIT: 1662 case VMCB_EXIT_ICEBP: 1663 case VMCB_EXIT_INVLPGA: 1664 vm_inject_ud(vcpu->vcpu); 1665 handled = 1; 1666 break; 1667 case VMCB_EXIT_INVD: 1668 case VMCB_EXIT_WBINVD: 1669 /* ignore exit */ 1670 handled = 1; 1671 break; 1672 default: 1673 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1); 1674 break; 1675 } 1676 1677 SVM_CTR4(vcpu, "%s %s vmexit at %#lx/%d", 1678 handled ? "handled" : "unhandled", exit_reason_to_str(code), 1679 vmexit->rip, vmexit->inst_length); 1680 1681 if (handled) { 1682 vmexit->rip += vmexit->inst_length; 1683 vmexit->inst_length = 0; 1684 state->rip = vmexit->rip; 1685 } else { 1686 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 1687 /* 1688 * If this VM exit was not claimed by anybody then 1689 * treat it as a generic SVM exit. 1690 */ 1691 vm_exit_svm(vmexit, code, info1, info2); 1692 } else { 1693 /* 1694 * The exitcode and collateral have been populated. 1695 * The VM exit will be processed further in userland. 1696 */ 1697 } 1698 } 1699 return (handled); 1700 } 1701 1702 static void 1703 svm_inj_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu) 1704 { 1705 uint64_t intinfo; 1706 1707 if (!vm_entry_intinfo(vcpu->vcpu, &intinfo)) 1708 return; 1709 1710 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1711 "valid: %#lx", __func__, intinfo)); 1712 1713 svm_eventinject(vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1714 VMCB_EXITINTINFO_VECTOR(intinfo), 1715 VMCB_EXITINTINFO_EC(intinfo), 1716 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1717 vmm_stat_incr(vcpu->vcpu, VCPU_INTINFO_INJECTED, 1); 1718 SVM_CTR1(vcpu, "Injected entry intinfo: %#lx", intinfo); 1719 } 1720 1721 /* 1722 * Inject event to virtual cpu. 1723 */ 1724 static void 1725 svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu, 1726 struct vlapic *vlapic) 1727 { 1728 struct vmcb_ctrl *ctrl; 1729 struct vmcb_state *state; 1730 uint8_t v_tpr; 1731 int vector, need_intr_window; 1732 int extint_pending; 1733 1734 if (vcpu->caps & (1 << VM_CAP_MASK_HWINTR)) { 1735 return; 1736 } 1737 1738 state = svm_get_vmcb_state(vcpu); 1739 ctrl = svm_get_vmcb_ctrl(vcpu); 1740 1741 need_intr_window = 0; 1742 1743 if (vcpu->nextrip != state->rip) { 1744 ctrl->intr_shadow = 0; 1745 SVM_CTR2(vcpu, "Guest interrupt blocking " 1746 "cleared due to rip change: %#lx/%#lx", 1747 vcpu->nextrip, state->rip); 1748 } 1749 1750 /* 1751 * Inject pending events or exceptions for this vcpu. 1752 * 1753 * An event might be pending because the previous #VMEXIT happened 1754 * during event delivery (i.e. ctrl->exitintinfo). 1755 * 1756 * An event might also be pending because an exception was injected 1757 * by the hypervisor (e.g. #PF during instruction emulation). 1758 */ 1759 svm_inj_intinfo(sc, vcpu); 1760 1761 /* NMI event has priority over interrupts. */ 1762 if (vm_nmi_pending(vcpu->vcpu)) { 1763 if (nmi_blocked(vcpu)) { 1764 /* 1765 * Can't inject another NMI if the guest has not 1766 * yet executed an "iret" after the last NMI. 1767 */ 1768 SVM_CTR0(vcpu, "Cannot inject NMI due " 1769 "to NMI-blocking"); 1770 } else if (ctrl->intr_shadow) { 1771 /* 1772 * Can't inject an NMI if the vcpu is in an intr_shadow. 1773 */ 1774 SVM_CTR0(vcpu, "Cannot inject NMI due to " 1775 "interrupt shadow"); 1776 need_intr_window = 1; 1777 goto done; 1778 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1779 /* 1780 * If there is already an exception/interrupt pending 1781 * then defer the NMI until after that. 1782 */ 1783 SVM_CTR1(vcpu, "Cannot inject NMI due to " 1784 "eventinj %#lx", ctrl->eventinj); 1785 1786 /* 1787 * Use self-IPI to trigger a VM-exit as soon as 1788 * possible after the event injection is completed. 1789 * 1790 * This works only if the external interrupt exiting 1791 * is at a lower priority than the event injection. 1792 * 1793 * Although not explicitly specified in APMv2 the 1794 * relative priorities were verified empirically. 1795 */ 1796 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */ 1797 } else { 1798 vm_nmi_clear(vcpu->vcpu); 1799 1800 /* Inject NMI, vector number is not used */ 1801 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_NMI, 1802 IDT_NMI, 0, false); 1803 1804 /* virtual NMI blocking is now in effect */ 1805 enable_nmi_blocking(vcpu); 1806 1807 SVM_CTR0(vcpu, "Injecting vNMI"); 1808 } 1809 } 1810 1811 extint_pending = vm_extint_pending(vcpu->vcpu); 1812 if (!extint_pending) { 1813 if (!vlapic_pending_intr(vlapic, &vector)) 1814 goto done; 1815 KASSERT(vector >= 16 && vector <= 255, 1816 ("invalid vector %d from local APIC", vector)); 1817 } else { 1818 /* Ask the legacy pic for a vector to inject */ 1819 vatpic_pending_intr(sc->vm, &vector); 1820 KASSERT(vector >= 0 && vector <= 255, 1821 ("invalid vector %d from INTR", vector)); 1822 } 1823 1824 /* 1825 * If the guest has disabled interrupts or is in an interrupt shadow 1826 * then we cannot inject the pending interrupt. 1827 */ 1828 if ((state->rflags & PSL_I) == 0) { 1829 SVM_CTR2(vcpu, "Cannot inject vector %d due to " 1830 "rflags %#lx", vector, state->rflags); 1831 need_intr_window = 1; 1832 goto done; 1833 } 1834 1835 if (ctrl->intr_shadow) { 1836 SVM_CTR1(vcpu, "Cannot inject vector %d due to " 1837 "interrupt shadow", vector); 1838 need_intr_window = 1; 1839 goto done; 1840 } 1841 1842 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1843 SVM_CTR2(vcpu, "Cannot inject vector %d due to " 1844 "eventinj %#lx", vector, ctrl->eventinj); 1845 need_intr_window = 1; 1846 goto done; 1847 } 1848 1849 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1850 1851 if (!extint_pending) { 1852 vlapic_intr_accepted(vlapic, vector); 1853 } else { 1854 vm_extint_clear(vcpu->vcpu); 1855 vatpic_intr_accepted(sc->vm, vector); 1856 } 1857 1858 /* 1859 * Force a VM-exit as soon as the vcpu is ready to accept another 1860 * interrupt. This is done because the PIC might have another vector 1861 * that it wants to inject. Also, if the APIC has a pending interrupt 1862 * that was preempted by the ExtInt then it allows us to inject the 1863 * APIC vector as soon as possible. 1864 */ 1865 need_intr_window = 1; 1866 done: 1867 /* 1868 * The guest can modify the TPR by writing to %CR8. In guest mode 1869 * the processor reflects this write to V_TPR without hypervisor 1870 * intervention. 1871 * 1872 * The guest can also modify the TPR by writing to it via the memory 1873 * mapped APIC page. In this case, the write will be emulated by the 1874 * hypervisor. For this reason V_TPR must be updated before every 1875 * VMRUN. 1876 */ 1877 v_tpr = vlapic_get_cr8(vlapic); 1878 KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); 1879 if (ctrl->v_tpr != v_tpr) { 1880 SVM_CTR2(vcpu, "VMCB V_TPR changed from %#x to %#x", 1881 ctrl->v_tpr, v_tpr); 1882 ctrl->v_tpr = v_tpr; 1883 svm_set_dirty(vcpu, VMCB_CACHE_TPR); 1884 } 1885 1886 if (need_intr_window) { 1887 /* 1888 * We use V_IRQ in conjunction with the VINTR intercept to 1889 * trap into the hypervisor as soon as a virtual interrupt 1890 * can be delivered. 1891 * 1892 * Since injected events are not subject to intercept checks 1893 * we need to ensure that the V_IRQ is not actually going to 1894 * be delivered on VM entry. The KASSERT below enforces this. 1895 */ 1896 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1897 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1898 ("Bogus intr_window_exiting: eventinj (%#lx), " 1899 "intr_shadow (%u), rflags (%#lx)", 1900 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1901 enable_intr_window_exiting(vcpu); 1902 } else { 1903 disable_intr_window_exiting(vcpu); 1904 } 1905 } 1906 1907 static __inline void 1908 restore_host_tss(void) 1909 { 1910 struct system_segment_descriptor *tss_sd; 1911 1912 /* 1913 * The TSS descriptor was in use prior to launching the guest so it 1914 * has been marked busy. 1915 * 1916 * 'ltr' requires the descriptor to be marked available so change the 1917 * type to "64-bit available TSS". 1918 */ 1919 tss_sd = PCPU_GET(tss); 1920 tss_sd->sd_type = SDT_SYSTSS; 1921 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1922 } 1923 1924 static void 1925 svm_pmap_activate(struct svm_vcpu *vcpu, pmap_t pmap) 1926 { 1927 struct vmcb_ctrl *ctrl; 1928 long eptgen; 1929 int cpu; 1930 bool alloc_asid; 1931 1932 cpu = curcpu; 1933 CPU_SET_ATOMIC(cpu, &pmap->pm_active); 1934 smr_enter(pmap->pm_eptsmr); 1935 1936 ctrl = svm_get_vmcb_ctrl(vcpu); 1937 1938 /* 1939 * The TLB entries associated with the vcpu's ASID are not valid 1940 * if either of the following conditions is true: 1941 * 1942 * 1. The vcpu's ASID generation is different than the host cpu's 1943 * ASID generation. This happens when the vcpu migrates to a new 1944 * host cpu. It can also happen when the number of vcpus executing 1945 * on a host cpu is greater than the number of ASIDs available. 1946 * 1947 * 2. The pmap generation number is different than the value cached in 1948 * the 'vcpustate'. This happens when the host invalidates pages 1949 * belonging to the guest. 1950 * 1951 * asidgen eptgen Action 1952 * mismatch mismatch 1953 * 0 0 (a) 1954 * 0 1 (b1) or (b2) 1955 * 1 0 (c) 1956 * 1 1 (d) 1957 * 1958 * (a) There is no mismatch in eptgen or ASID generation and therefore 1959 * no further action is needed. 1960 * 1961 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1962 * retained and the TLB entries associated with this ASID 1963 * are flushed by VMRUN. 1964 * 1965 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1966 * allocated. 1967 * 1968 * (c) A new ASID is allocated. 1969 * 1970 * (d) A new ASID is allocated. 1971 */ 1972 1973 alloc_asid = false; 1974 eptgen = atomic_load_long(&pmap->pm_eptgen); 1975 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1976 1977 if (vcpu->asid.gen != asid[cpu].gen) { 1978 alloc_asid = true; /* (c) and (d) */ 1979 } else if (vcpu->eptgen != eptgen) { 1980 if (flush_by_asid()) 1981 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1982 else 1983 alloc_asid = true; /* (b2) */ 1984 } else { 1985 /* 1986 * This is the common case (a). 1987 */ 1988 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1989 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1990 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1991 } 1992 1993 if (alloc_asid) { 1994 if (++asid[cpu].num >= nasid) { 1995 asid[cpu].num = 1; 1996 if (++asid[cpu].gen == 0) 1997 asid[cpu].gen = 1; 1998 /* 1999 * If this cpu does not support "flush-by-asid" 2000 * then flush the entire TLB on a generation 2001 * bump. Subsequent ASID allocation in this 2002 * generation can be done without a TLB flush. 2003 */ 2004 if (!flush_by_asid()) 2005 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 2006 } 2007 vcpu->asid.gen = asid[cpu].gen; 2008 vcpu->asid.num = asid[cpu].num; 2009 2010 ctrl->asid = vcpu->asid.num; 2011 svm_set_dirty(vcpu, VMCB_CACHE_ASID); 2012 /* 2013 * If this cpu supports "flush-by-asid" then the TLB 2014 * was not flushed after the generation bump. The TLB 2015 * is flushed selectively after every new ASID allocation. 2016 */ 2017 if (flush_by_asid()) 2018 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 2019 } 2020 vcpu->eptgen = eptgen; 2021 2022 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 2023 KASSERT(ctrl->asid == vcpu->asid.num, 2024 ("ASID mismatch: %u/%u", ctrl->asid, vcpu->asid.num)); 2025 } 2026 2027 static void 2028 svm_pmap_deactivate(pmap_t pmap) 2029 { 2030 smr_exit(pmap->pm_eptsmr); 2031 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); 2032 } 2033 2034 static __inline void 2035 disable_gintr(void) 2036 { 2037 2038 __asm __volatile("clgi"); 2039 } 2040 2041 static __inline void 2042 enable_gintr(void) 2043 { 2044 2045 __asm __volatile("stgi"); 2046 } 2047 2048 static __inline void 2049 svm_dr_enter_guest(struct svm_regctx *gctx) 2050 { 2051 2052 /* Save host control debug registers. */ 2053 gctx->host_dr7 = rdr7(); 2054 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 2055 2056 /* 2057 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 2058 * exceptions in the host based on the guest DRx values. The 2059 * guest DR6, DR7, and DEBUGCTL are saved/restored in the 2060 * VMCB. 2061 */ 2062 load_dr7(0); 2063 wrmsr(MSR_DEBUGCTLMSR, 0); 2064 2065 /* Save host debug registers. */ 2066 gctx->host_dr0 = rdr0(); 2067 gctx->host_dr1 = rdr1(); 2068 gctx->host_dr2 = rdr2(); 2069 gctx->host_dr3 = rdr3(); 2070 gctx->host_dr6 = rdr6(); 2071 2072 /* Restore guest debug registers. */ 2073 load_dr0(gctx->sctx_dr0); 2074 load_dr1(gctx->sctx_dr1); 2075 load_dr2(gctx->sctx_dr2); 2076 load_dr3(gctx->sctx_dr3); 2077 } 2078 2079 static __inline void 2080 svm_dr_leave_guest(struct svm_regctx *gctx) 2081 { 2082 2083 /* Save guest debug registers. */ 2084 gctx->sctx_dr0 = rdr0(); 2085 gctx->sctx_dr1 = rdr1(); 2086 gctx->sctx_dr2 = rdr2(); 2087 gctx->sctx_dr3 = rdr3(); 2088 2089 /* 2090 * Restore host debug registers. Restore DR7 and DEBUGCTL 2091 * last. 2092 */ 2093 load_dr0(gctx->host_dr0); 2094 load_dr1(gctx->host_dr1); 2095 load_dr2(gctx->host_dr2); 2096 load_dr3(gctx->host_dr3); 2097 load_dr6(gctx->host_dr6); 2098 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl); 2099 load_dr7(gctx->host_dr7); 2100 } 2101 2102 /* 2103 * Start vcpu with specified RIP. 2104 */ 2105 static int 2106 svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo) 2107 { 2108 struct svm_regctx *gctx; 2109 struct svm_softc *svm_sc; 2110 struct svm_vcpu *vcpu; 2111 struct vmcb_state *state; 2112 struct vmcb_ctrl *ctrl; 2113 struct vm_exit *vmexit; 2114 struct vlapic *vlapic; 2115 uint64_t vmcb_pa; 2116 int handled; 2117 uint16_t ldt_sel; 2118 2119 vcpu = vcpui; 2120 svm_sc = vcpu->sc; 2121 state = svm_get_vmcb_state(vcpu); 2122 ctrl = svm_get_vmcb_ctrl(vcpu); 2123 vmexit = vm_exitinfo(vcpu->vcpu); 2124 vlapic = vm_lapic(vcpu->vcpu); 2125 2126 gctx = svm_get_guest_regctx(vcpu); 2127 vmcb_pa = vcpu->vmcb_pa; 2128 2129 if (vcpu->lastcpu != curcpu) { 2130 /* 2131 * Force new ASID allocation by invalidating the generation. 2132 */ 2133 vcpu->asid.gen = 0; 2134 2135 /* 2136 * Invalidate the VMCB state cache by marking all fields dirty. 2137 */ 2138 svm_set_dirty(vcpu, 0xffffffff); 2139 2140 /* 2141 * XXX 2142 * Setting 'vcpu->lastcpu' here is bit premature because 2143 * we may return from this function without actually executing 2144 * the VMRUN instruction. This could happen if a rendezvous 2145 * or an AST is pending on the first time through the loop. 2146 * 2147 * This works for now but any new side-effects of vcpu 2148 * migration should take this case into account. 2149 */ 2150 vcpu->lastcpu = curcpu; 2151 vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1); 2152 } 2153 2154 svm_msr_guest_enter(vcpu); 2155 2156 /* Update Guest RIP */ 2157 state->rip = rip; 2158 2159 do { 2160 /* 2161 * Disable global interrupts to guarantee atomicity during 2162 * loading of guest state. This includes not only the state 2163 * loaded by the "vmrun" instruction but also software state 2164 * maintained by the hypervisor: suspended and rendezvous 2165 * state, NPT generation number, vlapic interrupts etc. 2166 */ 2167 disable_gintr(); 2168 2169 if (vcpu_suspended(evinfo)) { 2170 enable_gintr(); 2171 vm_exit_suspended(vcpu->vcpu, state->rip); 2172 break; 2173 } 2174 2175 if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) { 2176 enable_gintr(); 2177 vm_exit_rendezvous(vcpu->vcpu, state->rip); 2178 break; 2179 } 2180 2181 if (vcpu_reqidle(evinfo)) { 2182 enable_gintr(); 2183 vm_exit_reqidle(vcpu->vcpu, state->rip); 2184 break; 2185 } 2186 2187 /* We are asked to give the cpu by scheduler. */ 2188 if (vcpu_should_yield(vcpu->vcpu)) { 2189 enable_gintr(); 2190 vm_exit_astpending(vcpu->vcpu, state->rip); 2191 break; 2192 } 2193 2194 if (vcpu_debugged(vcpu->vcpu)) { 2195 enable_gintr(); 2196 vm_exit_debug(vcpu->vcpu, state->rip); 2197 break; 2198 } 2199 2200 /* 2201 * #VMEXIT resumes the host with the guest LDTR, so 2202 * save the current LDT selector so it can be restored 2203 * after an exit. The userspace hypervisor probably 2204 * doesn't use a LDT, but save and restore it to be 2205 * safe. 2206 */ 2207 ldt_sel = sldt(); 2208 2209 svm_inj_interrupts(svm_sc, vcpu, vlapic); 2210 2211 /* 2212 * Check the pmap generation and the ASID generation to 2213 * ensure that the vcpu does not use stale TLB mappings. 2214 */ 2215 svm_pmap_activate(vcpu, pmap); 2216 2217 ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty; 2218 vcpu->dirty = 0; 2219 SVM_CTR1(vcpu, "vmcb clean %#x", ctrl->vmcb_clean); 2220 2221 /* Launch Virtual Machine. */ 2222 SVM_CTR1(vcpu, "Resume execution at %#lx", state->rip); 2223 svm_dr_enter_guest(gctx); 2224 svm_launch(vmcb_pa, gctx, get_pcpu()); 2225 svm_dr_leave_guest(gctx); 2226 2227 svm_pmap_deactivate(pmap); 2228 2229 /* 2230 * The host GDTR and IDTR is saved by VMRUN and restored 2231 * automatically on #VMEXIT. However, the host TSS needs 2232 * to be restored explicitly. 2233 */ 2234 restore_host_tss(); 2235 2236 /* Restore host LDTR. */ 2237 lldt(ldt_sel); 2238 2239 /* #VMEXIT disables interrupts so re-enable them here. */ 2240 enable_gintr(); 2241 2242 /* Update 'nextrip' */ 2243 vcpu->nextrip = state->rip; 2244 2245 /* Handle #VMEXIT and if required return to user space. */ 2246 handled = svm_vmexit(svm_sc, vcpu, vmexit); 2247 } while (handled); 2248 2249 svm_msr_guest_exit(vcpu); 2250 2251 return (0); 2252 } 2253 2254 static void 2255 svm_vcpu_cleanup(void *vcpui) 2256 { 2257 struct svm_vcpu *vcpu = vcpui; 2258 2259 free(vcpu->vmcb, M_SVM); 2260 free(vcpu, M_SVM); 2261 } 2262 2263 static void 2264 svm_cleanup(void *vmi) 2265 { 2266 struct svm_softc *sc = vmi; 2267 2268 free(sc->iopm_bitmap, M_SVM); 2269 free(sc->msr_bitmap, M_SVM); 2270 free(sc, M_SVM); 2271 } 2272 2273 static register_t * 2274 swctx_regptr(struct svm_regctx *regctx, int reg) 2275 { 2276 2277 switch (reg) { 2278 case VM_REG_GUEST_RBX: 2279 return (®ctx->sctx_rbx); 2280 case VM_REG_GUEST_RCX: 2281 return (®ctx->sctx_rcx); 2282 case VM_REG_GUEST_RDX: 2283 return (®ctx->sctx_rdx); 2284 case VM_REG_GUEST_RDI: 2285 return (®ctx->sctx_rdi); 2286 case VM_REG_GUEST_RSI: 2287 return (®ctx->sctx_rsi); 2288 case VM_REG_GUEST_RBP: 2289 return (®ctx->sctx_rbp); 2290 case VM_REG_GUEST_R8: 2291 return (®ctx->sctx_r8); 2292 case VM_REG_GUEST_R9: 2293 return (®ctx->sctx_r9); 2294 case VM_REG_GUEST_R10: 2295 return (®ctx->sctx_r10); 2296 case VM_REG_GUEST_R11: 2297 return (®ctx->sctx_r11); 2298 case VM_REG_GUEST_R12: 2299 return (®ctx->sctx_r12); 2300 case VM_REG_GUEST_R13: 2301 return (®ctx->sctx_r13); 2302 case VM_REG_GUEST_R14: 2303 return (®ctx->sctx_r14); 2304 case VM_REG_GUEST_R15: 2305 return (®ctx->sctx_r15); 2306 case VM_REG_GUEST_DR0: 2307 return (®ctx->sctx_dr0); 2308 case VM_REG_GUEST_DR1: 2309 return (®ctx->sctx_dr1); 2310 case VM_REG_GUEST_DR2: 2311 return (®ctx->sctx_dr2); 2312 case VM_REG_GUEST_DR3: 2313 return (®ctx->sctx_dr3); 2314 default: 2315 return (NULL); 2316 } 2317 } 2318 2319 static int 2320 svm_getreg(void *vcpui, int ident, uint64_t *val) 2321 { 2322 struct svm_vcpu *vcpu; 2323 register_t *reg; 2324 2325 vcpu = vcpui; 2326 2327 if (ident == VM_REG_GUEST_INTR_SHADOW) { 2328 return (svm_get_intr_shadow(vcpu, val)); 2329 } 2330 2331 if (vmcb_read(vcpu, ident, val) == 0) { 2332 return (0); 2333 } 2334 2335 reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident); 2336 2337 if (reg != NULL) { 2338 *val = *reg; 2339 return (0); 2340 } 2341 2342 SVM_CTR1(vcpu, "svm_getreg: unknown register %#x", ident); 2343 return (EINVAL); 2344 } 2345 2346 static int 2347 svm_setreg(void *vcpui, int ident, uint64_t val) 2348 { 2349 struct svm_vcpu *vcpu; 2350 register_t *reg; 2351 2352 vcpu = vcpui; 2353 2354 if (ident == VM_REG_GUEST_INTR_SHADOW) { 2355 return (svm_modify_intr_shadow(vcpu, val)); 2356 } 2357 2358 /* Do not permit user write access to VMCB fields by offset. */ 2359 if (!VMCB_ACCESS_OK(ident)) { 2360 if (vmcb_write(vcpu, ident, val) == 0) { 2361 return (0); 2362 } 2363 } 2364 2365 reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident); 2366 2367 if (reg != NULL) { 2368 *reg = val; 2369 return (0); 2370 } 2371 2372 if (ident == VM_REG_GUEST_ENTRY_INST_LENGTH) { 2373 /* Ignore. */ 2374 return (0); 2375 } 2376 2377 /* 2378 * XXX deal with CR3 and invalidate TLB entries tagged with the 2379 * vcpu's ASID. This needs to be treated differently depending on 2380 * whether 'running' is true/false. 2381 */ 2382 2383 SVM_CTR1(vcpu, "svm_setreg: unknown register %#x", ident); 2384 return (EINVAL); 2385 } 2386 2387 static int 2388 svm_getdesc(void *vcpui, int reg, struct seg_desc *desc) 2389 { 2390 return (vmcb_getdesc(vcpui, reg, desc)); 2391 } 2392 2393 static int 2394 svm_setdesc(void *vcpui, int reg, struct seg_desc *desc) 2395 { 2396 return (vmcb_setdesc(vcpui, reg, desc)); 2397 } 2398 2399 #ifdef BHYVE_SNAPSHOT 2400 static int 2401 svm_snapshot_reg(void *vcpui, int ident, struct vm_snapshot_meta *meta) 2402 { 2403 int ret; 2404 uint64_t val; 2405 2406 if (meta->op == VM_SNAPSHOT_SAVE) { 2407 ret = svm_getreg(vcpui, ident, &val); 2408 if (ret != 0) 2409 goto done; 2410 2411 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2412 } else if (meta->op == VM_SNAPSHOT_RESTORE) { 2413 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2414 2415 ret = svm_setreg(vcpui, ident, val); 2416 if (ret != 0) 2417 goto done; 2418 } else { 2419 ret = EINVAL; 2420 goto done; 2421 } 2422 2423 done: 2424 return (ret); 2425 } 2426 #endif 2427 2428 static int 2429 svm_setcap(void *vcpui, int type, int val) 2430 { 2431 struct svm_vcpu *vcpu; 2432 struct vlapic *vlapic; 2433 int error; 2434 2435 vcpu = vcpui; 2436 error = 0; 2437 2438 switch (type) { 2439 case VM_CAP_HALT_EXIT: 2440 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, 2441 VMCB_INTCPT_HLT, val); 2442 break; 2443 case VM_CAP_PAUSE_EXIT: 2444 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, 2445 VMCB_INTCPT_PAUSE, val); 2446 break; 2447 case VM_CAP_UNRESTRICTED_GUEST: 2448 /* Unrestricted guest execution cannot be disabled in SVM */ 2449 if (val == 0) 2450 error = EINVAL; 2451 break; 2452 case VM_CAP_BPT_EXIT: 2453 svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP), val); 2454 break; 2455 case VM_CAP_IPI_EXIT: 2456 vlapic = vm_lapic(vcpu->vcpu); 2457 vlapic->ipi_exit = val; 2458 break; 2459 case VM_CAP_MASK_HWINTR: 2460 vcpu->caps &= ~(1 << VM_CAP_MASK_HWINTR); 2461 vcpu->caps |= (val << VM_CAP_MASK_HWINTR); 2462 break; 2463 case VM_CAP_RFLAGS_TF: { 2464 uint64_t rflags; 2465 2466 /* Fetch RFLAGS. */ 2467 if (svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags)) { 2468 error = (EINVAL); 2469 break; 2470 } 2471 if (val) { 2472 /* Save current TF bit. */ 2473 vcpu->dbg.rflags_tf = rflags & PSL_T; 2474 /* Trace next instruction. */ 2475 if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, 2476 (rflags | PSL_T))) { 2477 error = (EINVAL); 2478 break; 2479 } 2480 vcpu->caps |= (1 << VM_CAP_RFLAGS_TF); 2481 } else { 2482 /* 2483 * Restore shadowed RFLAGS.TF only if vCPU was 2484 * previously stepped 2485 */ 2486 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { 2487 rflags &= ~PSL_T; 2488 rflags |= vcpu->dbg.rflags_tf; 2489 vcpu->dbg.rflags_tf = 0; 2490 2491 if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, 2492 rflags)) { 2493 error = (EINVAL); 2494 break; 2495 } 2496 vcpu->caps &= ~(1 << VM_CAP_RFLAGS_TF); 2497 } 2498 } 2499 2500 svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_DB), val); 2501 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_POPF, 2502 val); 2503 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_PUSHF, 2504 val); 2505 break; 2506 } 2507 default: 2508 error = ENOENT; 2509 break; 2510 } 2511 return (error); 2512 } 2513 2514 static int 2515 svm_getcap(void *vcpui, int type, int *retval) 2516 { 2517 struct svm_vcpu *vcpu; 2518 struct vlapic *vlapic; 2519 int error; 2520 2521 vcpu = vcpui; 2522 error = 0; 2523 2524 switch (type) { 2525 case VM_CAP_HALT_EXIT: 2526 *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, 2527 VMCB_INTCPT_HLT); 2528 break; 2529 case VM_CAP_PAUSE_EXIT: 2530 *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, 2531 VMCB_INTCPT_PAUSE); 2532 break; 2533 case VM_CAP_UNRESTRICTED_GUEST: 2534 *retval = 1; /* unrestricted guest is always enabled */ 2535 break; 2536 case VM_CAP_BPT_EXIT: 2537 *retval = svm_get_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP)); 2538 break; 2539 case VM_CAP_IPI_EXIT: 2540 vlapic = vm_lapic(vcpu->vcpu); 2541 *retval = vlapic->ipi_exit; 2542 break; 2543 case VM_CAP_RFLAGS_TF: 2544 *retval = !!(vcpu->caps & (1 << VM_CAP_RFLAGS_TF)); 2545 break; 2546 case VM_CAP_MASK_HWINTR: 2547 *retval = !!(vcpu->caps & (1 << VM_CAP_MASK_HWINTR)); 2548 break; 2549 default: 2550 error = ENOENT; 2551 break; 2552 } 2553 return (error); 2554 } 2555 2556 static struct vmspace * 2557 svm_vmspace_alloc(vm_offset_t min, vm_offset_t max) 2558 { 2559 return (svm_npt_alloc(min, max)); 2560 } 2561 2562 static void 2563 svm_vmspace_free(struct vmspace *vmspace) 2564 { 2565 svm_npt_free(vmspace); 2566 } 2567 2568 static struct vlapic * 2569 svm_vlapic_init(void *vcpui) 2570 { 2571 struct svm_vcpu *vcpu; 2572 struct vlapic *vlapic; 2573 2574 vcpu = vcpui; 2575 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); 2576 vlapic->vm = vcpu->sc->vm; 2577 vlapic->vcpu = vcpu->vcpu; 2578 vlapic->vcpuid = vcpu->vcpuid; 2579 vlapic->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_SVM_VLAPIC, 2580 M_WAITOK | M_ZERO); 2581 2582 vlapic_init(vlapic); 2583 2584 return (vlapic); 2585 } 2586 2587 static void 2588 svm_vlapic_cleanup(struct vlapic *vlapic) 2589 { 2590 2591 vlapic_cleanup(vlapic); 2592 free(vlapic->apic_page, M_SVM_VLAPIC); 2593 free(vlapic, M_SVM_VLAPIC); 2594 } 2595 2596 #ifdef BHYVE_SNAPSHOT 2597 static int 2598 svm_vcpu_snapshot(void *vcpui, struct vm_snapshot_meta *meta) 2599 { 2600 struct svm_vcpu *vcpu; 2601 int err, running, hostcpu; 2602 2603 vcpu = vcpui; 2604 err = 0; 2605 2606 running = vcpu_is_running(vcpu->vcpu, &hostcpu); 2607 if (running && hostcpu != curcpu) { 2608 printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm), 2609 vcpu->vcpuid); 2610 return (EINVAL); 2611 } 2612 2613 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR0, meta); 2614 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR2, meta); 2615 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR3, meta); 2616 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR4, meta); 2617 2618 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR6, meta); 2619 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR7, meta); 2620 2621 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RAX, meta); 2622 2623 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RSP, meta); 2624 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RIP, meta); 2625 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RFLAGS, meta); 2626 2627 /* Guest segments */ 2628 /* ES */ 2629 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_ES, meta); 2630 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_ES, meta); 2631 2632 /* CS */ 2633 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CS, meta); 2634 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_CS, meta); 2635 2636 /* SS */ 2637 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_SS, meta); 2638 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_SS, meta); 2639 2640 /* DS */ 2641 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DS, meta); 2642 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_DS, meta); 2643 2644 /* FS */ 2645 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_FS, meta); 2646 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_FS, meta); 2647 2648 /* GS */ 2649 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_GS, meta); 2650 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GS, meta); 2651 2652 /* TR */ 2653 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_TR, meta); 2654 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_TR, meta); 2655 2656 /* LDTR */ 2657 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_LDTR, meta); 2658 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_LDTR, meta); 2659 2660 /* EFER */ 2661 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_EFER, meta); 2662 2663 /* IDTR and GDTR */ 2664 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_IDTR, meta); 2665 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GDTR, meta); 2666 2667 /* Specific AMD registers */ 2668 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_INTR_SHADOW, meta); 2669 2670 err += vmcb_snapshot_any(vcpu, 2671 VMCB_ACCESS(VMCB_OFF_CR_INTERCEPT, 4), meta); 2672 err += vmcb_snapshot_any(vcpu, 2673 VMCB_ACCESS(VMCB_OFF_DR_INTERCEPT, 4), meta); 2674 err += vmcb_snapshot_any(vcpu, 2675 VMCB_ACCESS(VMCB_OFF_EXC_INTERCEPT, 4), meta); 2676 err += vmcb_snapshot_any(vcpu, 2677 VMCB_ACCESS(VMCB_OFF_INST1_INTERCEPT, 4), meta); 2678 err += vmcb_snapshot_any(vcpu, 2679 VMCB_ACCESS(VMCB_OFF_INST2_INTERCEPT, 4), meta); 2680 2681 err += vmcb_snapshot_any(vcpu, 2682 VMCB_ACCESS(VMCB_OFF_PAUSE_FILTHRESH, 2), meta); 2683 err += vmcb_snapshot_any(vcpu, 2684 VMCB_ACCESS(VMCB_OFF_PAUSE_FILCNT, 2), meta); 2685 2686 err += vmcb_snapshot_any(vcpu, 2687 VMCB_ACCESS(VMCB_OFF_ASID, 4), meta); 2688 2689 err += vmcb_snapshot_any(vcpu, 2690 VMCB_ACCESS(VMCB_OFF_TLB_CTRL, 4), meta); 2691 2692 err += vmcb_snapshot_any(vcpu, 2693 VMCB_ACCESS(VMCB_OFF_VIRQ, 8), meta); 2694 2695 err += vmcb_snapshot_any(vcpu, 2696 VMCB_ACCESS(VMCB_OFF_EXIT_REASON, 8), meta); 2697 err += vmcb_snapshot_any(vcpu, 2698 VMCB_ACCESS(VMCB_OFF_EXITINFO1, 8), meta); 2699 err += vmcb_snapshot_any(vcpu, 2700 VMCB_ACCESS(VMCB_OFF_EXITINFO2, 8), meta); 2701 err += vmcb_snapshot_any(vcpu, 2702 VMCB_ACCESS(VMCB_OFF_EXITINTINFO, 8), meta); 2703 2704 err += vmcb_snapshot_any(vcpu, 2705 VMCB_ACCESS(VMCB_OFF_NP_ENABLE, 1), meta); 2706 2707 err += vmcb_snapshot_any(vcpu, 2708 VMCB_ACCESS(VMCB_OFF_AVIC_BAR, 8), meta); 2709 err += vmcb_snapshot_any(vcpu, 2710 VMCB_ACCESS(VMCB_OFF_AVIC_PAGE, 8), meta); 2711 err += vmcb_snapshot_any(vcpu, 2712 VMCB_ACCESS(VMCB_OFF_AVIC_LT, 8), meta); 2713 err += vmcb_snapshot_any(vcpu, 2714 VMCB_ACCESS(VMCB_OFF_AVIC_PT, 8), meta); 2715 2716 err += vmcb_snapshot_any(vcpu, 2717 VMCB_ACCESS(VMCB_OFF_CPL, 1), meta); 2718 2719 err += vmcb_snapshot_any(vcpu, 2720 VMCB_ACCESS(VMCB_OFF_STAR, 8), meta); 2721 err += vmcb_snapshot_any(vcpu, 2722 VMCB_ACCESS(VMCB_OFF_LSTAR, 8), meta); 2723 err += vmcb_snapshot_any(vcpu, 2724 VMCB_ACCESS(VMCB_OFF_CSTAR, 8), meta); 2725 2726 err += vmcb_snapshot_any(vcpu, 2727 VMCB_ACCESS(VMCB_OFF_SFMASK, 8), meta); 2728 2729 err += vmcb_snapshot_any(vcpu, 2730 VMCB_ACCESS(VMCB_OFF_KERNELGBASE, 8), meta); 2731 2732 err += vmcb_snapshot_any(vcpu, 2733 VMCB_ACCESS(VMCB_OFF_SYSENTER_CS, 8), meta); 2734 err += vmcb_snapshot_any(vcpu, 2735 VMCB_ACCESS(VMCB_OFF_SYSENTER_ESP, 8), meta); 2736 err += vmcb_snapshot_any(vcpu, 2737 VMCB_ACCESS(VMCB_OFF_SYSENTER_EIP, 8), meta); 2738 2739 err += vmcb_snapshot_any(vcpu, 2740 VMCB_ACCESS(VMCB_OFF_GUEST_PAT, 8), meta); 2741 2742 err += vmcb_snapshot_any(vcpu, 2743 VMCB_ACCESS(VMCB_OFF_DBGCTL, 8), meta); 2744 err += vmcb_snapshot_any(vcpu, 2745 VMCB_ACCESS(VMCB_OFF_BR_FROM, 8), meta); 2746 err += vmcb_snapshot_any(vcpu, 2747 VMCB_ACCESS(VMCB_OFF_BR_TO, 8), meta); 2748 err += vmcb_snapshot_any(vcpu, 2749 VMCB_ACCESS(VMCB_OFF_INT_FROM, 8), meta); 2750 err += vmcb_snapshot_any(vcpu, 2751 VMCB_ACCESS(VMCB_OFF_INT_TO, 8), meta); 2752 if (err != 0) 2753 goto done; 2754 2755 /* Snapshot swctx for virtual cpu */ 2756 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, err, done); 2757 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, err, done); 2758 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, err, done); 2759 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, err, done); 2760 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, err, done); 2761 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, err, done); 2762 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, err, done); 2763 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, err, done); 2764 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, err, done); 2765 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, err, done); 2766 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, err, done); 2767 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, err, done); 2768 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, err, done); 2769 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, err, done); 2770 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, err, done); 2771 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, err, done); 2772 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, err, done); 2773 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, err, done); 2774 2775 /* Restore other svm_vcpu struct fields */ 2776 2777 /* Restore NEXTRIP field */ 2778 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, err, done); 2779 2780 /* Restore lastcpu field */ 2781 SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, err, done); 2782 SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, err, done); 2783 2784 /* Restore EPTGEN field - EPT is Extended Page Table */ 2785 SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, err, done); 2786 2787 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, err, done); 2788 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, err, done); 2789 2790 SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, sizeof(vcpu->mtrr), meta, err, done); 2791 2792 /* Set all caches dirty */ 2793 if (meta->op == VM_SNAPSHOT_RESTORE) 2794 svm_set_dirty(vcpu, 0xffffffff); 2795 2796 done: 2797 return (err); 2798 } 2799 2800 static int 2801 svm_restore_tsc(void *vcpui, uint64_t offset) 2802 { 2803 struct svm_vcpu *vcpu = vcpui; 2804 2805 svm_set_tsc_offset(vcpu, offset); 2806 2807 return (0); 2808 } 2809 #endif 2810 2811 const struct vmm_ops vmm_ops_amd = { 2812 .modinit = svm_modinit, 2813 .modcleanup = svm_modcleanup, 2814 .modresume = svm_modresume, 2815 .modsuspend = svm_modsuspend, 2816 .init = svm_init, 2817 .run = svm_run, 2818 .cleanup = svm_cleanup, 2819 .vcpu_init = svm_vcpu_init, 2820 .vcpu_cleanup = svm_vcpu_cleanup, 2821 .getreg = svm_getreg, 2822 .setreg = svm_setreg, 2823 .getdesc = svm_getdesc, 2824 .setdesc = svm_setdesc, 2825 .getcap = svm_getcap, 2826 .setcap = svm_setcap, 2827 .vmspace_alloc = svm_vmspace_alloc, 2828 .vmspace_free = svm_vmspace_free, 2829 .vlapic_init = svm_vlapic_init, 2830 .vlapic_cleanup = svm_vlapic_cleanup, 2831 #ifdef BHYVE_SNAPSHOT 2832 .vcpu_snapshot = svm_vcpu_snapshot, 2833 .restore_tsc = svm_restore_tsc, 2834 #endif 2835 }; 2836