/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_bhyve_snapshot.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/reg.h>
#include <sys/smr.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>

#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>
#include <machine/vmm_snapshot.h>

#include <dev/vmm/vmm_ktr.h>

#include "vmm_lapic.h"
#include "vmm_stat.h"
#include "vmm_ioport.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "x86.h"
#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"
#include "svm_msr.h"
#include "npt.h"

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

/*
 * SVM CPUID function 0x8000_000A, edx bit decoding.
 */
#define	AMD_CPUID_SVM_NP		BIT(0)	/* Nested paging or RVI */
#define	AMD_CPUID_SVM_LBR		BIT(1)	/* Last branch virtualization */
#define	AMD_CPUID_SVM_SVML		BIT(2)	/* SVM lock */
#define	AMD_CPUID_SVM_NRIP_SAVE		BIT(3)	/* Next RIP is saved */
#define	AMD_CPUID_SVM_TSC_RATE		BIT(4)	/* TSC rate control. */
#define	AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)	/* VMCB state caching */
#define	AMD_CPUID_SVM_FLUSH_BY_ASID	BIT(6)	/* Flush by ASID */
#define	AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)	/* Decode assist */
#define	AMD_CPUID_SVM_PAUSE_INC		BIT(10)	/* Pause intercept filter. */
#define	AMD_CPUID_SVM_PAUSE_FTH		BIT(12)	/* Pause filter threshold */
#define	AMD_CPUID_SVM_AVIC		BIT(13)	/* AVIC present */
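/*
 * Note (illustrative): svm_feature below starts out as ~0U and is ANDed with
 * CPUID.8000000AH:EDX in check_svm_features(), so the hw.vmm.svm.features
 * loader tunable can only mask capabilities off, never add them.  For
 * example, booting with that tunable set to a value with BIT(7) clear would
 * hide decode assist from the code in this file.
 */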

#define	VMCB_CACHE_DEFAULT	(VMCB_CACHE_ASID	|	\
				VMCB_CACHE_IOPM		|	\
				VMCB_CACHE_I		|	\
				VMCB_CACHE_TPR		|	\
				VMCB_CACHE_CR2		|	\
				VMCB_CACHE_CR		|	\
				VMCB_CACHE_DR		|	\
				VMCB_CACHE_DT		|	\
				VMCB_CACHE_SEG		|	\
				VMCB_CACHE_NP)

static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
    0, NULL);

static MALLOC_DEFINE(M_SVM, "svm", "svm");
static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");

static uint32_t svm_feature = ~0U;	/* AMD SVM features. */
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0,
    "SVM features advertised by CPUID.8000000AH:EDX");

static int disable_npf_assist;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
    &disable_npf_assist, 0, NULL);

/* Maximum ASIDs supported by the processor */
static uint32_t nasid;
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0,
    "Number of ASIDs supported by this processor");

/* Current ASID generation for each host cpu */
static struct asid asid[MAXCPU];

/* SVM host state saved area of size 4KB for each physical core. */
static uint8_t *hsave;

static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");

static int svm_getdesc(void *vcpui, int reg, struct seg_desc *desc);
static int svm_setreg(void *vcpui, int ident, uint64_t val);
static int svm_getreg(void *vcpui, int ident, uint64_t *val);

static __inline int
flush_by_asid(void)
{

	return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
}

static __inline int
decode_assist(void)
{

	return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
}

static void
svm_disable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer &= ~EFER_SVM;
	wrmsr(MSR_EFER, efer);
}

/*
 * Disable SVM on all CPUs.
 */
static int
svm_modcleanup(void)
{

	smp_rendezvous(NULL, svm_disable, NULL, NULL);

	if (hsave != NULL)
		kmem_free(hsave, (mp_maxid + 1) * PAGE_SIZE);

	return (0);
}

/*
 * Verify that all the features required by bhyve are available.
 */
static int
check_svm_features(void)
{
	u_int regs[4];

	/* CPUID Fn8000_000A is for SVM */
	do_cpuid(0x8000000A, regs);
	svm_feature &= regs[3];

	/*
	 * The number of ASIDs can be configured to be less than what is
	 * supported by the hardware but not more.
	 */
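	/*
	 * For example, setting the loader tunable hw.vmm.svm.num_asids to 64
	 * limits guest ASID allocation to 64 even if CPUID reports a larger
	 * count; a value of 0 (the default) or anything above the
	 * CPUID-reported count simply uses the hardware maximum (regs[1]).
	 */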
	if (nasid == 0 || nasid > regs[1])
		nasid = regs[1];
	KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));

	/* bhyve requires the Nested Paging feature */
	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
		printf("SVM: Nested Paging feature not available.\n");
		return (ENXIO);
	}

	/* bhyve requires the NRIP Save feature */
	if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
		printf("SVM: NRIP Save feature not available.\n");
		return (ENXIO);
	}

	return (0);
}

static void
svm_enable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer |= EFER_SVM;
	wrmsr(MSR_EFER, efer);

	wrmsr(MSR_VM_HSAVE_PA, vtophys(&hsave[curcpu * PAGE_SIZE]));
}

/*
 * Return 1 if SVM is enabled on this processor and 0 otherwise.
 */
static int
svm_available(void)
{
	uint64_t msr;

	/* Section 15.4 Enabling SVM from APM2. */
	if ((amd_feature2 & AMDID2_SVM) == 0) {
		printf("SVM: not available.\n");
		return (0);
	}

	msr = rdmsr(MSR_VM_CR);
	if ((msr & VM_CR_SVMDIS) != 0) {
		printf("SVM: disabled by BIOS.\n");
		return (0);
	}

	return (1);
}

static int
svm_modinit(int ipinum)
{
	int error, cpu;

	if (!svm_available())
		return (ENXIO);

	error = check_svm_features();
	if (error)
		return (error);

	vmcb_clean &= VMCB_CACHE_DEFAULT;

	for (cpu = 0; cpu < MAXCPU; cpu++) {
		/*
		 * Initialize the host ASIDs to their "highest" valid values.
		 *
		 * The next ASID allocation will rollover both 'gen' and 'num'
		 * and start off the sequence at {1,1}.
		 */
		asid[cpu].gen = ~0UL;
		asid[cpu].num = nasid - 1;
	}

	svm_msr_init();
	svm_npt_init(ipinum);

	/* Enable SVM on all CPUs */
	hsave = kmem_malloc((mp_maxid + 1) * PAGE_SIZE, M_WAITOK | M_ZERO);
	smp_rendezvous(NULL, svm_enable, NULL, NULL);

	return (0);
}

static void
svm_modresume(void)
{

	svm_enable(NULL);
}

#ifdef BHYVE_SNAPSHOT
void
svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(vcpu);
	ctrl->tsc_offset = offset;

	svm_set_dirty(vcpu, VMCB_CACHE_I);
	SVM_CTR1(vcpu, "tsc offset changed to %#lx", offset);

	vm_set_tsc_offset(vcpu->vcpu, offset);
}
#endif

/* Pentium compatible MSRs */
#define	MSR_PENTIUM_START	0
#define	MSR_PENTIUM_END		0x1FFF
/* AMD 6th generation and Intel compatible MSRs */
#define	MSR_AMD6TH_START	0xC0000000UL
#define	MSR_AMD6TH_END		0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
#define	MSR_AMD7TH_START	0xC0010000UL
#define	MSR_AMD7TH_END		0xC0011FFFUL

/*
 * Get the index and bit position for an MSR in the permission bitmap.
 * Two bits are used for each MSR: the lower bit for read and the higher
 * bit for write.
 */
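/*
 * Worked example (illustrative): for MSR_STAR (0xC0000081) the offset into
 * the AMD6TH range is 0x81 and the Pentium range before it accounts for
 * base = 0x2000 MSRs, so *index = (0x81 + 0x2000) / 4 = 2080 and
 * *bit = (0xC0000081 % 4) * 2 = 2.  Read permission is therefore bit 2 and
 * write permission bit 3 of perm_bitmap[2080].
 */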
static int
svm_msr_index(uint64_t msr, int *index, int *bit)
{
	uint32_t base, off;

	*index = -1;
	*bit = (msr % 4) * 2;
	base = 0;

	if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
		*index = msr / 4;
		return (0);
	}

	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
		off = (msr - MSR_AMD6TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
		off = (msr - MSR_AMD7TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	return (EINVAL);
}

/*
 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
 */
static void
svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
{
	int index, bit, error __diagused;

	error = svm_msr_index(msr, &index, &bit);
	KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
	KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
	    ("%s: invalid index %d for msr %#lx", __func__, index, msr));
	KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
	    "msr %#lx", __func__, bit, msr));

	if (read)
		perm_bitmap[index] &= ~(1UL << bit);

	if (write)
		perm_bitmap[index] &= ~(2UL << bit);
}

static void
svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, true);
}

static void
svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, false);
}

static __inline int
svm_get_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(vcpu);
	return (ctrl->intercept[idx] & bitmask ? 1 : 0);
}
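/*
 * Note: 'idx' selects one of the five 32-bit intercept vectors kept in the
 * VMCB control area; the ones referenced in this file are VMCB_CR_INTCPT,
 * VMCB_EXC_INTCPT, VMCB_CTRL1_INTCPT and VMCB_CTRL2_INTCPT, which is why
 * the helpers above and below assert idx < 5.
 */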
static __inline void
svm_set_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask, int enabled)
{
	struct vmcb_ctrl *ctrl;
	uint32_t oldval;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(vcpu);
	oldval = ctrl->intercept[idx];

	if (enabled)
		ctrl->intercept[idx] |= bitmask;
	else
		ctrl->intercept[idx] &= ~bitmask;

	if (ctrl->intercept[idx] != oldval) {
		svm_set_dirty(vcpu, VMCB_CACHE_I);
		SVM_CTR3(vcpu, "intercept[%d] modified from %#x to %#x", idx,
		    oldval, ctrl->intercept[idx]);
	}
}

static __inline void
svm_disable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(vcpu, off, bitmask, 0);
}

static __inline void
svm_enable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(vcpu, off, bitmask, 1);
}

static void
vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa,
    uint64_t msrpm_base_pa, uint64_t np_pml4)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	uint32_t mask;
	int n;

	ctrl = svm_get_vmcb_ctrl(vcpu);
	state = svm_get_vmcb_state(vcpu);

	ctrl->iopm_base_pa = iopm_base_pa;
	ctrl->msrpm_base_pa = msrpm_base_pa;

	/* Enable nested paging */
	ctrl->np_enable = 1;
	ctrl->n_cr3 = np_pml4;

	/*
	 * Intercept accesses to the control registers that are not shadowed
	 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
	 */
	for (n = 0; n < 16; n++) {
		mask = (BIT(n) << 16) | BIT(n);
		if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
			svm_disable_intercept(vcpu, VMCB_CR_INTCPT, mask);
		else
			svm_enable_intercept(vcpu, VMCB_CR_INTCPT, mask);
	}

	/*
	 * Intercept everything when tracing guest exceptions, otherwise
	 * just intercept the machine check exception.
	 */
	if (vcpu_trace_exceptions(vcpu->vcpu)) {
		for (n = 0; n < 32; n++) {
			/*
			 * Skip unimplemented vectors in the exception bitmap.
			 */
			if (n == 2 || n == 9) {
				continue;
			}
			svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(n));
		}
	} else {
		svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
	}

	/* Intercept various events (e.g. I/O, MSR and CPUID accesses) */
	svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
	svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
	svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
	svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
	svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
	svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
	svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
	svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
	svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_FERR_FREEZE);
	svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
	svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);

	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);

	/*
	 * Intercept SVM instructions, since AMD enables them in guests
	 * otherwise.  Non-intercepted VMMCALL causes #UD, skip it.
	 */
	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
	if (vcpu_trap_wbinvd(vcpu->vcpu)) {
		svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT,
		    VMCB_INTCPT_WBINVD);
	}

	/*
	 * From section "Canonicalization and Consistency Checks" in APMv2
	 * the VMRUN intercept bit must be set to pass the consistency check.
	 */
	svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);

	/*
	 * The ASID will be set to a non-zero value just before VMRUN.
	 */
	ctrl->asid = 0;

	/*
	 * Section 15.21.1, Interrupt Masking in EFLAGS
	 * Section 15.21.2, Virtualizing APIC.TPR
	 *
	 * This must be set for %rflags and %cr8 isolation of guest and host.
	 */
	ctrl->v_intr_masking = 1;

	/* Enable Last Branch Record aka LBR for debugging */
	ctrl->lbr_virt_en = 1;
	state->dbgctl = BIT(0);

	/* EFER_SVM must always be set when the guest is executing */
	state->efer = EFER_SVM;

	/* Set up the PAT to power-on state */
	state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK)	|
	    PAT_VALUE(1, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(2, PAT_UNCACHED)		|
	    PAT_VALUE(3, PAT_UNCACHEABLE)	|
	    PAT_VALUE(4, PAT_WRITE_BACK)	|
	    PAT_VALUE(5, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(6, PAT_UNCACHED)		|
	    PAT_VALUE(7, PAT_UNCACHEABLE);

	/* Set up DR6/7 to power-on state */
	state->dr6 = DBREG_DR6_RESERVED1;
	state->dr7 = DBREG_DR7_RESERVED1;
}

/*
 * Initialize a virtual machine.
 */
static void *
svm_init(struct vm *vm, pmap_t pmap)
{
	struct svm_softc *svm_sc;

	svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO);

	svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->msr_bitmap == NULL)
		panic("contigmalloc of SVM MSR bitmap failed");
	svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM,
	    M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
	if (svm_sc->iopm_bitmap == NULL)
		panic("contigmalloc of SVM IO bitmap failed");

	svm_sc->vm = vm;
	svm_sc->nptp = vtophys(pmap->pm_pmltop);

	/*
	 * Intercept read and write accesses to all MSRs.
	 */
	memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE);

	/*
	 * Access to the following MSRs is redirected to the VMCB when the
	 * guest is executing. Therefore it is safe to allow the guest to
	 * read/write these MSRs directly without hypervisor involvement.
	 */
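	/*
	 * These MSRs live in the VMCB state-save area and are presumably
	 * swapped around VMRUN (VMLOAD/VMSAVE in svm_launch() and the
	 * svm_msr_guest_enter()/exit() paths), so passing them through does
	 * not expose host values to the guest.
	 */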
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);

	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);

	/*
	 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
	 */
	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);

	/* Intercept access to all I/O ports. */
	memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE);

	return (svm_sc);
}

static void *
svm_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid)
{
	struct svm_softc *sc = vmi;
	struct svm_vcpu *vcpu;

	vcpu = malloc(sizeof(*vcpu), M_SVM, M_WAITOK | M_ZERO);
	vcpu->sc = sc;
	vcpu->vcpu = vcpu1;
	vcpu->vcpuid = vcpuid;
	vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE, M_SVM,
	    M_WAITOK | M_ZERO);
	vcpu->nextrip = ~0;
	vcpu->lastcpu = NOCPU;
	vcpu->vmcb_pa = vtophys(vcpu->vmcb);
	vmcb_init(sc, vcpu, vtophys(sc->iopm_bitmap), vtophys(sc->msr_bitmap),
	    sc->nptp);
	svm_msr_guest_init(sc, vcpu);
	return (vcpu);
}

/*
 * Collateral for a generic SVM VM-exit.
 */
static void
vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
{

	vme->exitcode = VM_EXITCODE_SVM;
	vme->u.svm.exitcode = code;
	vme->u.svm.exitinfo1 = info1;
	vme->u.svm.exitinfo2 = info2;
}

static int
svm_cpl(struct vmcb_state *state)
{

	/*
	 * From APMv2:
	 *   "Retrieve the CPL from the CPL field in the VMCB, not
	 *    from any segment DPL"
	 */
	return (state->cpl);
}

static enum vm_cpu_mode
svm_vcpu_mode(struct vmcb *vmcb)
{
	struct vmcb_segment seg;
	struct vmcb_state *state;
	int error __diagused;

	state = &vmcb->state;

	if (state->efer & EFER_LMA) {
		error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
		KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
		    error));

		/*
		 * Section 4.8.1 of APM2, check if the Code Segment has the
		 * Long attribute set in its descriptor.
		 */
		if (seg.attrib & VMCB_CS_ATTRIB_L)
			return (CPU_MODE_64BIT);
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (state->cr0 & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
{

	if ((cr0 & CR0_PG) == 0)
		return (PAGING_MODE_FLAT);
	if ((cr4 & CR4_PAE) == 0)
		return (PAGING_MODE_32);
	if (efer & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

/*
 * ins/outs utility routines
 */
static uint64_t
svm_inout_str_index(struct svm_regctx *regs, int in)
{
	uint64_t val;

	val = in ? regs->sctx_rdi : regs->sctx_rsi;

	return (val);
}

static uint64_t
svm_inout_str_count(struct svm_regctx *regs, int rep)
{
	uint64_t val;

	val = rep ? regs->sctx_rcx : 1;

	return (val);
}
static void
svm_inout_str_seginfo(struct svm_vcpu *vcpu, int64_t info1, int in,
    struct vm_inout_str *vis)
{
	int error __diagused, s;

	if (in) {
		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		/* The segment field has standard encoding */
		s = (info1 >> 10) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}

	error = svm_getdesc(vcpu, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
}

static int
svm_inout_str_addrsize(uint64_t info1)
{
	uint32_t size;

	size = (info1 >> 7) & 0x7;
	switch (size) {
	case 1:
		return (2);	/* 16 bit */
	case 2:
		return (4);	/* 32 bit */
	case 4:
		return (8);	/* 64 bit */
	default:
		panic("%s: invalid size encoding %d", __func__, size);
	}
}

static void
svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
{
	struct vmcb_state *state;

	state = &vmcb->state;
	paging->cr3 = state->cr3;
	paging->cpl = svm_cpl(state);
	paging->cpu_mode = svm_vcpu_mode(vmcb);
	paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
	    state->efer);
}

#define	UNHANDLED 0

/*
 * Handle guest I/O intercept.
 */
static int
svm_handle_io(struct svm_vcpu *vcpu, struct vm_exit *vmexit)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_regctx *regs;
	struct vm_inout_str *vis;
	uint64_t info1;
	int inout_string;

	state = svm_get_vmcb_state(vcpu);
	ctrl = svm_get_vmcb_ctrl(vcpu);
	regs = svm_get_guest_regctx(vcpu);
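	/*
	 * EXITINFO1 layout for IOIO intercepts (per APMv2): bit 0 is the
	 * direction (1 = IN), bit 2 the string flag, bit 3 the REP prefix,
	 * bits 6:4 a one-hot operand size (1/2/4 bytes), bits 9:7 the
	 * address size and bits 31:16 the port number.  The decoding below
	 * follows this layout.
	 */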
	info1 = ctrl->exitinfo1;
	inout_string = info1 & BIT(2) ? 1 : 0;

	/*
	 * The effective segment number in EXITINFO1[12:10] is populated
	 * only if the processor has the DecodeAssist capability.
	 *
	 * XXX this is not specified explicitly in APMv2 but can be verified
	 * empirically.
	 */
	if (inout_string && !decode_assist())
		return (UNHANDLED);

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
	vmexit->u.inout.string = inout_string;
	vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
	vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
	vmexit->u.inout.port = (uint16_t)(info1 >> 16);
	vmexit->u.inout.eax = (uint32_t)(state->rax);

	if (inout_string) {
		vmexit->exitcode = VM_EXITCODE_INOUT_STR;
		vis = &vmexit->u.inout_str;
		svm_paging_info(svm_get_vmcb(vcpu), &vis->paging);
		vis->rflags = state->rflags;
		vis->cr0 = state->cr0;
		vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
		vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
		vis->addrsize = svm_inout_str_addrsize(info1);
		svm_inout_str_seginfo(vcpu, info1, vmexit->u.inout.in, vis);
	}

	return (UNHANDLED);
}

static int
npf_fault_type(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_W)
		return (VM_PROT_WRITE);
	else if (exitinfo1 & VMCB_NPF_INFO1_ID)
		return (VM_PROT_EXECUTE);
	else
		return (VM_PROT_READ);
}

static bool
svm_npf_emul_fault(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
		return (false);
	}

	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
		return (false);
	}

	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
		return (false);
	}

	return (true);
}

static void
svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
{
	struct vm_guest_paging *paging;
	struct vmcb_segment seg;
	struct vmcb_ctrl *ctrl;
	char *inst_bytes;
	int error __diagused, inst_len;

	ctrl = &vmcb->ctrl;
	paging = &vmexit->u.inst_emul.paging;

	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
	vmexit->u.inst_emul.gpa = gpa;
	vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
	svm_paging_info(vmcb, paging);

	error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
	KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));

	switch (paging->cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.inst_emul.cs_base = seg.base;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.inst_emul.cs_base = seg.base;

		/*
		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
		 */
		vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
		    1 : 0;
		break;
	default:
		vmexit->u.inst_emul.cs_base = 0;
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}

	/*
	 * Copy the instruction bytes into 'vie' if available.
	 */
	if (decode_assist() && !disable_npf_assist) {
		inst_len = ctrl->inst_len;
		inst_bytes = ctrl->inst_bytes;
	} else {
		inst_len = 0;
		inst_bytes = NULL;
	}
	vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
}

#ifdef KTR
static const char *
intrtype_to_str(int intr_type)
{
	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
		return ("hwintr");
	case VMCB_EVENTINJ_TYPE_NMI:
		return ("nmi");
	case VMCB_EVENTINJ_TYPE_INTn:
		return ("swintr");
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		return ("exception");
	default:
		panic("%s: unknown intr_type %d", __func__, intr_type);
	}
}
#endif

/*
 * Inject an event to vcpu as described in section 15.20, "Event injection".
 */
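/*
 * The EVENTINJ field constructed below follows the APMv2 encoding: bits 7:0
 * hold the vector, bits 10:8 the event type, bit 11 the error-code-valid
 * flag, bit 31 the valid bit and bits 63:32 the error code itself.
 */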
static void
svm_eventinject(struct svm_vcpu *vcpu, int intr_type, int vector,
    uint32_t error, bool ec_valid)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(vcpu);

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
	    ("%s: event already pending %#lx", __func__, ctrl->eventinj));

	KASSERT(vector >= 0 && vector <= 255, ("%s: invalid vector %d",
	    __func__, vector));

	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
	case VMCB_EVENTINJ_TYPE_NMI:
	case VMCB_EVENTINJ_TYPE_INTn:
		break;
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		if (vector >= 0 && vector <= 31 && vector != 2)
			break;
		/* FALLTHROUGH */
	default:
		panic("%s: invalid intr_type/vector: %d/%d", __func__,
		    intr_type, vector);
	}
	ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID;
	if (ec_valid) {
		ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
		ctrl->eventinj |= (uint64_t)error << 32;
		SVM_CTR3(vcpu, "Injecting %s at vector %d errcode %#x",
		    intrtype_to_str(intr_type), vector, error);
	} else {
		SVM_CTR2(vcpu, "Injecting %s at vector %d",
		    intrtype_to_str(intr_type), vector);
	}
}

static void
svm_update_virqinfo(struct svm_vcpu *vcpu)
{
	struct vlapic *vlapic;
	struct vmcb_ctrl *ctrl;

	vlapic = vm_lapic(vcpu->vcpu);
	ctrl = svm_get_vmcb_ctrl(vcpu);

	/* Update %cr8 in the emulated vlapic */
	vlapic_set_cr8(vlapic, ctrl->v_tpr);

	/* Virtual interrupt injection is not used. */
	KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid "
	    "v_intr_vector %d", __func__, ctrl->v_intr_vector));
}

static void
svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
{
	struct vmcb_ctrl *ctrl;
	uint64_t intinfo;

	ctrl = svm_get_vmcb_ctrl(vcpu);
	intinfo = ctrl->exitintinfo;
	if (!VMCB_EXITINTINFO_VALID(intinfo))
		return;

	/*
	 * From APMv2, Section "Intercepts during IDT interrupt delivery"
	 *
	 * If a #VMEXIT happened during event delivery then record the event
	 * that was being delivered.
	 */
	SVM_CTR2(vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", intinfo,
	    VMCB_EXITINTINFO_VECTOR(intinfo));
	vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1);
	vm_exit_intinfo(vcpu->vcpu, intinfo);
}

#ifdef INVARIANTS
static __inline int
vintr_intercept_enabled(struct svm_vcpu *vcpu)
{

	return (svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR));
}
#endif

static __inline void
enable_intr_window_exiting(struct svm_vcpu *vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(vcpu);

	if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
		KASSERT(vintr_intercept_enabled(vcpu),
		    ("%s: vintr intercept should be enabled", __func__));
		return;
	}

	SVM_CTR0(vcpu, "Enable intr window exiting");
	ctrl->v_irq = 1;
	ctrl->v_ign_tpr = 1;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(vcpu, VMCB_CACHE_TPR);
	svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static __inline void
disable_intr_window_exiting(struct svm_vcpu *vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(vcpu);

	if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(!vintr_intercept_enabled(vcpu),
		    ("%s: vintr intercept should be disabled", __func__));
		return;
	}

	SVM_CTR0(vcpu, "Disable intr window exiting");
	ctrl->v_irq = 0;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(vcpu, VMCB_CACHE_TPR);
	svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static int
svm_modify_intr_shadow(struct svm_vcpu *vcpu, uint64_t val)
{
	struct vmcb_ctrl *ctrl;
	int oldval, newval;

	ctrl = svm_get_vmcb_ctrl(vcpu);
	oldval = ctrl->intr_shadow;
	newval = val ? 1 : 0;
	if (newval != oldval) {
		ctrl->intr_shadow = newval;
		SVM_CTR1(vcpu, "Setting intr_shadow to %d", newval);
	}
	return (0);
}

static int
svm_get_intr_shadow(struct svm_vcpu *vcpu, uint64_t *val)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(vcpu);
	*val = ctrl->intr_shadow;
	return (0);
}

/*
 * Once an NMI is injected it blocks delivery of further NMIs until the handler
 * executes an IRET. The IRET intercept is enabled when an NMI is injected to
 * track when the vcpu is done handling the NMI.
 */
static int
nmi_blocked(struct svm_vcpu *vcpu)
{
	int blocked;

	blocked = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
	return (blocked);
}

static void
enable_nmi_blocking(struct svm_vcpu *vcpu)
{

	KASSERT(!nmi_blocked(vcpu), ("vNMI already blocked"));
	SVM_CTR0(vcpu, "vNMI blocking enabled");
	svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
}

static void
clear_nmi_blocking(struct svm_vcpu *vcpu)
{
	int error __diagused;

	KASSERT(nmi_blocked(vcpu), ("vNMI already unblocked"));
	SVM_CTR0(vcpu, "vNMI blocking cleared");
	/*
	 * When the IRET intercept is cleared the vcpu will attempt to execute
	 * the "iret" when it runs next. However, it is possible to inject
	 * another NMI into the vcpu before the "iret" has actually executed.
	 *
	 * For example, if the "iret" encounters a #NPF when accessing the
	 * stack it will trap back into the hypervisor. If an NMI is pending
	 * for the vcpu it will be injected into the guest.
	 *
	 * XXX this needs to be fixed
	 */
	svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);

	/*
	 * Set 'intr_shadow' to prevent an NMI from being injected on the
	 * immediate VMRUN.
	 */
	error = svm_modify_intr_shadow(vcpu, 1);
	KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
}

#define	EFER_MBZ_BITS	0xFFFFFFFFFFFF0200UL

static int
svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval,
    bool *retu)
{
	struct vm_exit *vme;
	struct vmcb_state *state;
	uint64_t changed, lma, oldval;
	int error __diagused;

	state = svm_get_vmcb_state(vcpu);

	oldval = state->efer;
	SVM_CTR2(vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);

	newval &= ~0xFE;		/* clear the Read-As-Zero (RAZ) bits */
	changed = oldval ^ newval;

	if (newval & EFER_MBZ_BITS)
		goto gpf;

	/* APMv2 Table 14-5 "Long-Mode Consistency Checks" */
	if (changed & EFER_LME) {
		if (state->cr0 & CR0_PG)
			goto gpf;
	}

	/* EFER.LMA = EFER.LME & CR0.PG */
	if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0)
		lma = EFER_LMA;
	else
		lma = 0;

	if ((newval & EFER_LMA) != lma)
		goto gpf;

	if (newval & EFER_NXE) {
		if (!vm_cpuid_capability(vcpu->vcpu, VCC_NO_EXECUTE))
			goto gpf;
	}

	/*
	 * XXX bhyve does not enforce segment limits in 64-bit mode. Until
	 * this is fixed flag a guest attempt to set EFER_LMSLE as an error.
	 */
	if (newval & EFER_LMSLE) {
		vme = vm_exitinfo(vcpu->vcpu);
		vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0);
		*retu = true;
		return (0);
	}

	if (newval & EFER_FFXSR) {
		if (!vm_cpuid_capability(vcpu->vcpu, VCC_FFXSR))
			goto gpf;
	}

	if (newval & EFER_TCE) {
		if (!vm_cpuid_capability(vcpu->vcpu, VCC_TCE))
			goto gpf;
	}

	error = svm_setreg(vcpu, VM_REG_GUEST_EFER, newval);
	KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));
	return (0);
gpf:
	vm_inject_gp(vcpu->vcpu);
	return (0);
}

static int
emulate_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
    uint64_t val, bool *retu)
{
	int error;

	if (lapic_msr(num))
		error = lapic_wrmsr(vcpu->vcpu, num, val, retu);
	else if (num == MSR_EFER)
		error = svm_write_efer(sc, vcpu, val, retu);
	else
		error = svm_wrmsr(vcpu, num, val, retu);

	return (error);
}

static int
emulate_rdmsr(struct svm_vcpu *vcpu, u_int num, bool *retu)
{
	struct vmcb_state *state;
	struct svm_regctx *ctx;
	uint64_t result;
	int error;

	if (lapic_msr(num))
		error = lapic_rdmsr(vcpu->vcpu, num, &result, retu);
	else
		error = svm_rdmsr(vcpu, num, &result, retu);

	if (error == 0) {
		state = svm_get_vmcb_state(vcpu);
		ctx = svm_get_guest_regctx(vcpu);
		state->rax = result & 0xffffffff;
		ctx->sctx_rdx = result >> 32;
	}

	return (error);
}

#ifdef KTR
static const char *
exit_reason_to_str(uint64_t reason)
{
	int i;
	static char reasonbuf[32];
	static const struct {
		int reason;
		const char *str;
	} reasons[] = {
		{ .reason = VMCB_EXIT_INVALID,	.str = "invalvmcb" },
		{ .reason = VMCB_EXIT_SHUTDOWN,	.str = "shutdown" },
		{ .reason = VMCB_EXIT_NPF,	.str = "nptfault" },
		{ .reason = VMCB_EXIT_PAUSE,	.str = "pause" },
		{ .reason = VMCB_EXIT_HLT,	.str = "hlt" },
		{ .reason = VMCB_EXIT_CPUID,	.str = "cpuid" },
		{ .reason = VMCB_EXIT_IO,	.str = "inout" },
		{ .reason = VMCB_EXIT_MC,	.str = "mchk" },
		{ .reason = VMCB_EXIT_INTR,	.str = "extintr" },
		{ .reason = VMCB_EXIT_NMI,	.str = "nmi" },
		{ .reason = VMCB_EXIT_VINTR,	.str = "vintr" },
		{ .reason = VMCB_EXIT_MSR,	.str = "msr" },
		{ .reason = VMCB_EXIT_IRET,	.str = "iret" },
		{ .reason = VMCB_EXIT_MONITOR,	.str = "monitor" },
		{ .reason = VMCB_EXIT_MWAIT,	.str = "mwait" },
		{ .reason = VMCB_EXIT_VMRUN,	.str = "vmrun" },
		{ .reason = VMCB_EXIT_VMMCALL,	.str = "vmmcall" },
		{ .reason = VMCB_EXIT_VMLOAD,	.str = "vmload" },
		{ .reason = VMCB_EXIT_VMSAVE,	.str = "vmsave" },
		{ .reason = VMCB_EXIT_STGI,	.str = "stgi" },
		{ .reason = VMCB_EXIT_CLGI,	.str = "clgi" },
		{ .reason = VMCB_EXIT_SKINIT,	.str = "skinit" },
		{ .reason = VMCB_EXIT_ICEBP,	.str = "icebp" },
		{ .reason = VMCB_EXIT_INVD,	.str = "invd" },
		{ .reason = VMCB_EXIT_INVLPGA,	.str = "invlpga" },
		{ .reason = VMCB_EXIT_POPF,	.str = "popf" },
		{ .reason = VMCB_EXIT_PUSHF,	.str = "pushf" },
	};

	for (i = 0; i < nitems(reasons); i++) {
		if (reasons[i].reason == reason)
			return (reasons[i].str);
	}
	snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
	return (reasonbuf);
}
#endif	/* KTR */

/*
 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs
 * that are due to instruction intercepts as well as MSR and IOIO intercepts
 * and exceptions caused by INT3, INTO and BOUND instructions.
 *
 * Return 1 if the nRIP is valid and 0 otherwise.
 */
static int
nrip_valid(uint64_t exitcode)
{
	switch (exitcode) {
	case 0x00 ... 0x0F:	/* read of CR0 through CR15 */
	case 0x10 ... 0x1F:	/* write of CR0 through CR15 */
	case 0x20 ... 0x2F:	/* read of DR0 through DR15 */
	case 0x30 ... 0x3F:	/* write of DR0 through DR15 */
	case 0x43:		/* INT3 */
	case 0x44:		/* INTO */
	case 0x45:		/* BOUND */
	case 0x65 ... 0x7C:	/* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */
	case 0x80 ... 0x8D:	/* VMEXIT_VMRUN ... VMEXIT_XSETBV */
		return (1);
	default:
		return (0);
	}
}

static int
svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
    struct vm_exit *vmexit)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct svm_regctx *ctx;
	uint64_t code, info1, info2, val;
	uint32_t eax, ecx, edx;
	int error __diagused, errcode_valid, handled, idtvec, reflect;
	bool retu;

	ctx = svm_get_guest_regctx(vcpu);
	vmcb = svm_get_vmcb(vcpu);
	state = &vmcb->state;
	ctrl = &vmcb->ctrl;

	handled = 0;
	code = ctrl->exitcode;
	info1 = ctrl->exitinfo1;
	info2 = ctrl->exitinfo2;

	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmexit->rip = state->rip;
	vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;

	vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1);

	/*
	 * #VMEXIT(INVALID) needs to be handled early because the VMCB is
	 * in an inconsistent state and can trigger assertions that would
	 * never happen otherwise.
	 */
	if (code == VMCB_EXIT_INVALID) {
		vm_exit_svm(vmexit, code, info1, info2);
		return (0);
	}

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
	    "injection valid bit is set %#lx", __func__, ctrl->eventinj));

	KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
	    ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)",
	    vmexit->inst_length, code, info1, info2));

	svm_update_virqinfo(vcpu);
	svm_save_intinfo(svm_sc, vcpu);

	switch (code) {
	case VMCB_EXIT_IRET:
		/*
		 * Restart execution at "iret" but with the intercept cleared.
		 */
		vmexit->inst_length = 0;
		clear_nmi_blocking(vcpu);
		handled = 1;
		break;
	case VMCB_EXIT_VINTR:	/* interrupt window exiting */
		vmm_stat_incr(vcpu->vcpu, VMEXIT_VINTR, 1);
		handled = 1;
		break;
	case VMCB_EXIT_INTR:	/* external interrupt */
		vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1);
		handled = 1;
		break;
	case VMCB_EXIT_NMI:	/* external NMI */
		handled = 1;
		break;
	case 0x40 ... 0x5F:
		vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1);
		reflect = 1;
		idtvec = code - 0x40;
		switch (idtvec) {
		case IDT_MC:
			/*
			 * Call the machine check handler by hand. Also don't
			 * reflect the machine check back into the guest.
			 */
			reflect = 0;
			SVM_CTR0(vcpu, "Vectoring to MCE handler");
			__asm __volatile("int $18");
			break;
		case IDT_PF:
			error = svm_setreg(vcpu, VM_REG_GUEST_CR2, info2);
			KASSERT(error == 0, ("%s: error %d updating cr2",
			    __func__, error));
			/* fallthru */
		case IDT_NP:
		case IDT_SS:
		case IDT_GP:
		case IDT_AC:
		case IDT_TS:
			errcode_valid = 1;
			break;

		case IDT_DF:
			errcode_valid = 1;
			info1 = 0;
			break;
		case IDT_DB: {
			/*
			 * Check if we are being stepped (RFLAGS.TF)
			 * and bounce vmexit to userland.
			 */
			bool stepped = 0;
			uint64_t dr6 = 0;

			svm_getreg(vcpu, VM_REG_GUEST_DR6, &dr6);
			stepped = !!(dr6 & DBREG_DR6_BS);
			if (stepped && (vcpu->caps & (1 << VM_CAP_RFLAGS_TF))) {
				vmexit->exitcode = VM_EXITCODE_DB;
				vmexit->u.dbg.trace_trap = 1;
				vmexit->u.dbg.pushf_intercept = 0;

				if (vcpu->dbg.popf_sstep) {
					/*
					 * DB# exit was caused by stepping over
					 * popf.
					 */
					uint64_t rflags;

					vcpu->dbg.popf_sstep = 0;

					/*
					 * Update shadowed TF bit so the next
					 * setcap(..., RFLAGS_SSTEP, 0) restores
					 * the correct value
					 */
					svm_getreg(vcpu, VM_REG_GUEST_RFLAGS,
					    &rflags);
					vcpu->dbg.rflags_tf = rflags & PSL_T;
				} else if (vcpu->dbg.pushf_sstep) {
					/*
					 * DB# exit was caused by stepping over
					 * pushf.
					 */
					vcpu->dbg.pushf_sstep = 0;

					/*
					 * Adjusting the pushed rflags after a
					 * restarted pushf instruction must be
					 * handled outside of svm.c due to the
					 * critical_enter() lock being held.
					 */
					vmexit->u.dbg.pushf_intercept = 1;
					vmexit->u.dbg.tf_shadow_val =
					    vcpu->dbg.rflags_tf;
					svm_paging_info(svm_get_vmcb(vcpu),
					    &vmexit->u.dbg.paging);
				}

				/* Clear DR6 "single-step" bit. */
				dr6 &= ~DBREG_DR6_BS;
				error = svm_setreg(vcpu, VM_REG_GUEST_DR6, dr6);
				KASSERT(error == 0,
				    ("%s: error %d updating DR6\r\n", __func__,
				    error));

				reflect = 0;
			}
			break;
		}
		case IDT_BP:
			vmexit->exitcode = VM_EXITCODE_BPT;
			vmexit->u.bpt.inst_length = vmexit->inst_length;
			vmexit->inst_length = 0;

			reflect = 0;
			break;
		case IDT_OF:
		case IDT_BR:
			/*
			 * The 'nrip' field is populated for INT3, INTO and
			 * BOUND exceptions and this also implies that
			 * 'inst_length' is non-zero.
			 *
			 * Reset 'inst_length' to zero so the guest %rip at
			 * event injection is identical to what it was when
			 * the exception originally happened.
			 */
			SVM_CTR2(vcpu, "Reset inst_length from %d "
			    "to zero before injecting exception %d",
			    vmexit->inst_length, idtvec);
			vmexit->inst_length = 0;
			/* fallthru */
		default:
			errcode_valid = 0;
			info1 = 0;
			break;
		}

		if (reflect) {
			KASSERT(vmexit->inst_length == 0,
			    ("invalid inst_length (%d) "
			    "when reflecting exception %d into guest",
			    vmexit->inst_length, idtvec));
			/* Reflect the exception back into the guest */
			SVM_CTR2(vcpu, "Reflecting exception "
			    "%d/%#x into the guest", idtvec, (int)info1);
			error = vm_inject_exception(vcpu->vcpu, idtvec,
			    errcode_valid, info1, 0);
			KASSERT(error == 0, ("%s: vm_inject_exception error %d",
			    __func__, error));
			handled = 1;
		}
		break;
	case VMCB_EXIT_MSR:	/* MSR access. */
		eax = state->rax;
		ecx = ctx->sctx_rcx;
		edx = ctx->sctx_rdx;
		retu = false;

		if (info1) {
			vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1);
			val = (uint64_t)edx << 32 | eax;
			SVM_CTR2(vcpu, "wrmsr %#x val %#lx", ecx, val);
			if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
				vmexit->exitcode = VM_EXITCODE_WRMSR;
				vmexit->u.msr.code = ecx;
				vmexit->u.msr.wval = val;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_wrmsr retu with bogus exitcode"));
			}
		} else {
			SVM_CTR1(vcpu, "rdmsr %#x", ecx);
			vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1);
			if (emulate_rdmsr(vcpu, ecx, &retu)) {
				vmexit->exitcode = VM_EXITCODE_RDMSR;
				vmexit->u.msr.code = ecx;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_rdmsr retu with bogus exitcode"));
			}
		}
		break;
	case VMCB_EXIT_IO:
		handled = svm_handle_io(vcpu, vmexit);
		vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1);
		break;
	case VMCB_EXIT_CPUID:
		vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1);
		handled = x86_emulate_cpuid(vcpu->vcpu,
		    &state->rax, &ctx->sctx_rbx, &ctx->sctx_rcx,
		    &ctx->sctx_rdx);
		break;
	case VMCB_EXIT_HLT:
		vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = state->rflags;
		break;
	case VMCB_EXIT_PAUSE:
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1);
		break;
	case VMCB_EXIT_NPF:
		/* EXITINFO2 contains the faulting guest physical address */
		if (info1 & VMCB_NPF_INFO1_RSV) {
			SVM_CTR2(vcpu, "nested page fault with "
			    "reserved bits set: info1(%#lx) info2(%#lx)",
			    info1, info2);
		} else if (vm_mem_allocated(vcpu->vcpu, info2)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = info2;
			vmexit->u.paging.fault_type = npf_fault_type(info1);
			vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1);
			SVM_CTR3(vcpu, "nested page fault "
			    "on gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		} else if (svm_npf_emul_fault(info1)) {
			svm_handle_inst_emul(vmcb, info2, vmexit);
			vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1);
			SVM_CTR3(vcpu, "inst_emul fault "
			    "for gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		}
		break;
	case VMCB_EXIT_MONITOR:
		vmexit->exitcode = VM_EXITCODE_MONITOR;
		break;
	case VMCB_EXIT_MWAIT:
		vmexit->exitcode = VM_EXITCODE_MWAIT;
		break;
	case VMCB_EXIT_PUSHF: {
		if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) {
			uint64_t rflags;

			svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
			/* Restart this instruction. */
			vmexit->inst_length = 0;
			/* Disable PUSHF intercepts - avoid a loop. */
			svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
			    VMCB_INTCPT_PUSHF, 0);
			/* Trace restarted instruction. */
			svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T));
			/* Let the IDT_DB handler know that pushf was stepped.
			 */
			vcpu->dbg.pushf_sstep = 1;
			handled = 1;
		}
		break;
	}
	case VMCB_EXIT_POPF: {
		if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) {
			uint64_t rflags;

			svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
			/* Restart this instruction */
			vmexit->inst_length = 0;
			/* Disable POPF intercepts - avoid a loop. */
			svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
			    VMCB_INTCPT_POPF, 0);
			/* Trace restarted instruction */
			svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T));
			vcpu->dbg.popf_sstep = 1;
			handled = 1;
		}
		break;
	}
	case VMCB_EXIT_SHUTDOWN:
	case VMCB_EXIT_VMRUN:
	case VMCB_EXIT_VMMCALL:
	case VMCB_EXIT_VMLOAD:
	case VMCB_EXIT_VMSAVE:
	case VMCB_EXIT_STGI:
	case VMCB_EXIT_CLGI:
	case VMCB_EXIT_SKINIT:
	case VMCB_EXIT_ICEBP:
	case VMCB_EXIT_INVLPGA:
		vm_inject_ud(vcpu->vcpu);
		handled = 1;
		break;
	case VMCB_EXIT_INVD:
	case VMCB_EXIT_WBINVD:
		/* ignore exit */
		handled = 1;
		break;
	default:
		vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	SVM_CTR4(vcpu, "%s %s vmexit at %#lx/%d",
	    handled ? "handled" : "unhandled", exit_reason_to_str(code),
	    vmexit->rip, vmexit->inst_length);

	if (handled) {
		vmexit->rip += vmexit->inst_length;
		vmexit->inst_length = 0;
		state->rip = vmexit->rip;
	} else {
		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
			/*
			 * If this VM exit was not claimed by anybody then
			 * treat it as a generic SVM exit.
			 */
			vm_exit_svm(vmexit, code, info1, info2);
		} else {
			/*
			 * The exitcode and collateral have been populated.
			 * The VM exit will be processed further in userland.
			 */
1691 */ 1692 } 1693 } 1694 return (handled); 1695 } 1696 1697 static void 1698 svm_inj_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu) 1699 { 1700 uint64_t intinfo; 1701 1702 if (!vm_entry_intinfo(vcpu->vcpu, &intinfo)) 1703 return; 1704 1705 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1706 "valid: %#lx", __func__, intinfo)); 1707 1708 svm_eventinject(vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1709 VMCB_EXITINTINFO_VECTOR(intinfo), 1710 VMCB_EXITINTINFO_EC(intinfo), 1711 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1712 vmm_stat_incr(vcpu->vcpu, VCPU_INTINFO_INJECTED, 1); 1713 SVM_CTR1(vcpu, "Injected entry intinfo: %#lx", intinfo); 1714 } 1715 1716 /* 1717 * Inject event to virtual cpu. 1718 */ 1719 static void 1720 svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu, 1721 struct vlapic *vlapic) 1722 { 1723 struct vmcb_ctrl *ctrl; 1724 struct vmcb_state *state; 1725 uint8_t v_tpr; 1726 int vector, need_intr_window; 1727 int extint_pending; 1728 1729 if (vcpu->caps & (1 << VM_CAP_MASK_HWINTR)) { 1730 return; 1731 } 1732 1733 state = svm_get_vmcb_state(vcpu); 1734 ctrl = svm_get_vmcb_ctrl(vcpu); 1735 1736 need_intr_window = 0; 1737 1738 if (vcpu->nextrip != state->rip) { 1739 ctrl->intr_shadow = 0; 1740 SVM_CTR2(vcpu, "Guest interrupt blocking " 1741 "cleared due to rip change: %#lx/%#lx", 1742 vcpu->nextrip, state->rip); 1743 } 1744 1745 /* 1746 * Inject pending events or exceptions for this vcpu. 1747 * 1748 * An event might be pending because the previous #VMEXIT happened 1749 * during event delivery (i.e. ctrl->exitintinfo). 1750 * 1751 * An event might also be pending because an exception was injected 1752 * by the hypervisor (e.g. #PF during instruction emulation). 1753 */ 1754 svm_inj_intinfo(sc, vcpu); 1755 1756 /* NMI event has priority over interrupts. */ 1757 if (vm_nmi_pending(vcpu->vcpu)) { 1758 if (nmi_blocked(vcpu)) { 1759 /* 1760 * Can't inject another NMI if the guest has not 1761 * yet executed an "iret" after the last NMI. 1762 */ 1763 SVM_CTR0(vcpu, "Cannot inject NMI due " 1764 "to NMI-blocking"); 1765 } else if (ctrl->intr_shadow) { 1766 /* 1767 * Can't inject an NMI if the vcpu is in an intr_shadow. 1768 */ 1769 SVM_CTR0(vcpu, "Cannot inject NMI due to " 1770 "interrupt shadow"); 1771 need_intr_window = 1; 1772 goto done; 1773 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1774 /* 1775 * If there is already an exception/interrupt pending 1776 * then defer the NMI until after that. 1777 */ 1778 SVM_CTR1(vcpu, "Cannot inject NMI due to " 1779 "eventinj %#lx", ctrl->eventinj); 1780 1781 /* 1782 * Use self-IPI to trigger a VM-exit as soon as 1783 * possible after the event injection is completed. 1784 * 1785 * This works only if the external interrupt exiting 1786 * is at a lower priority than the event injection. 1787 * 1788 * Although not explicitly specified in APMv2 the 1789 * relative priorities were verified empirically. 1790 */ 1791 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? 
		} else {
			vm_nmi_clear(vcpu->vcpu);

			/* Inject NMI, vector number is not used */
			svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_NMI,
			    IDT_NMI, 0, false);

			/* virtual NMI blocking is now in effect */
			enable_nmi_blocking(vcpu);

			SVM_CTR0(vcpu, "Injecting vNMI");
		}
	}

	extint_pending = vm_extint_pending(vcpu->vcpu);
	if (!extint_pending) {
		if (!vlapic_pending_intr(vlapic, &vector))
			goto done;
		KASSERT(vector >= 16 && vector <= 255,
		    ("invalid vector %d from local APIC", vector));
	} else {
		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(sc->vm, &vector);
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));
	}

	/*
	 * If the guest has disabled interrupts or is in an interrupt shadow
	 * then we cannot inject the pending interrupt.
	 */
	if ((state->rflags & PSL_I) == 0) {
		SVM_CTR2(vcpu, "Cannot inject vector %d due to "
		    "rflags %#lx", vector, state->rflags);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->intr_shadow) {
		SVM_CTR1(vcpu, "Cannot inject vector %d due to "
		    "interrupt shadow", vector);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
		SVM_CTR2(vcpu, "Cannot inject vector %d due to "
		    "eventinj %#lx", vector, ctrl->eventinj);
		need_intr_window = 1;
		goto done;
	}

	svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);

	if (!extint_pending) {
		vlapic_intr_accepted(vlapic, vector);
	} else {
		vm_extint_clear(vcpu->vcpu);
		vatpic_intr_accepted(sc->vm, vector);
	}

	/*
	 * Force a VM-exit as soon as the vcpu is ready to accept another
	 * interrupt. This is done because the PIC might have another vector
	 * that it wants to inject. Also, if the APIC has a pending interrupt
	 * that was preempted by the ExtInt then it allows us to inject the
	 * APIC vector as soon as possible.
	 */
	need_intr_window = 1;
done:
	/*
	 * The guest can modify the TPR by writing to %CR8. In guest mode
	 * the processor reflects this write to V_TPR without hypervisor
	 * intervention.
	 *
	 * The guest can also modify the TPR by writing to it via the memory
	 * mapped APIC page. In this case, the write will be emulated by the
	 * hypervisor. For this reason V_TPR must be updated before every
	 * VMRUN.
	 */
	v_tpr = vlapic_get_cr8(vlapic);
	KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr));
	if (ctrl->v_tpr != v_tpr) {
		SVM_CTR2(vcpu, "VMCB V_TPR changed from %#x to %#x",
		    ctrl->v_tpr, v_tpr);
		ctrl->v_tpr = v_tpr;
		svm_set_dirty(vcpu, VMCB_CACHE_TPR);
	}

	if (need_intr_window) {
		/*
		 * We use V_IRQ in conjunction with the VINTR intercept to
		 * trap into the hypervisor as soon as a virtual interrupt
		 * can be delivered.
		 *
		 * Since injected events are not subject to intercept checks
		 * we need to ensure that the V_IRQ is not actually going to
		 * be delivered on VM entry. The KASSERT below enforces this.
		 */
1890 */ 1891 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1892 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1893 ("Bogus intr_window_exiting: eventinj (%#lx), " 1894 "intr_shadow (%u), rflags (%#lx)", 1895 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1896 enable_intr_window_exiting(vcpu); 1897 } else { 1898 disable_intr_window_exiting(vcpu); 1899 } 1900 } 1901 1902 static __inline void 1903 restore_host_tss(void) 1904 { 1905 struct system_segment_descriptor *tss_sd; 1906 1907 /* 1908 * The TSS descriptor was in use prior to launching the guest so it 1909 * has been marked busy. 1910 * 1911 * 'ltr' requires the descriptor to be marked available so change the 1912 * type to "64-bit available TSS". 1913 */ 1914 tss_sd = PCPU_GET(tss); 1915 tss_sd->sd_type = SDT_SYSTSS; 1916 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1917 } 1918 1919 static void 1920 svm_pmap_activate(struct svm_vcpu *vcpu, pmap_t pmap) 1921 { 1922 struct vmcb_ctrl *ctrl; 1923 long eptgen; 1924 int cpu; 1925 bool alloc_asid; 1926 1927 cpu = curcpu; 1928 CPU_SET_ATOMIC(cpu, &pmap->pm_active); 1929 smr_enter(pmap->pm_eptsmr); 1930 1931 ctrl = svm_get_vmcb_ctrl(vcpu); 1932 1933 /* 1934 * The TLB entries associated with the vcpu's ASID are not valid 1935 * if either of the following conditions is true: 1936 * 1937 * 1. The vcpu's ASID generation is different than the host cpu's 1938 * ASID generation. This happens when the vcpu migrates to a new 1939 * host cpu. It can also happen when the number of vcpus executing 1940 * on a host cpu is greater than the number of ASIDs available. 1941 * 1942 * 2. The pmap generation number is different than the value cached in 1943 * the 'vcpustate'. This happens when the host invalidates pages 1944 * belonging to the guest. 1945 * 1946 * asidgen eptgen Action 1947 * mismatch mismatch 1948 * 0 0 (a) 1949 * 0 1 (b1) or (b2) 1950 * 1 0 (c) 1951 * 1 1 (d) 1952 * 1953 * (a) There is no mismatch in eptgen or ASID generation and therefore 1954 * no further action is needed. 1955 * 1956 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1957 * retained and the TLB entries associated with this ASID 1958 * are flushed by VMRUN. 1959 * 1960 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1961 * allocated. 1962 * 1963 * (c) A new ASID is allocated. 1964 * 1965 * (d) A new ASID is allocated. 1966 */ 1967 1968 alloc_asid = false; 1969 eptgen = atomic_load_long(&pmap->pm_eptgen); 1970 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1971 1972 if (vcpu->asid.gen != asid[cpu].gen) { 1973 alloc_asid = true; /* (c) and (d) */ 1974 } else if (vcpu->eptgen != eptgen) { 1975 if (flush_by_asid()) 1976 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1977 else 1978 alloc_asid = true; /* (b2) */ 1979 } else { 1980 /* 1981 * This is the common case (a). 1982 */ 1983 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1984 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1985 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1986 } 1987 1988 if (alloc_asid) { 1989 if (++asid[cpu].num >= nasid) { 1990 asid[cpu].num = 1; 1991 if (++asid[cpu].gen == 0) 1992 asid[cpu].gen = 1; 1993 /* 1994 * If this cpu does not support "flush-by-asid" 1995 * then flush the entire TLB on a generation 1996 * bump. Subsequent ASID allocation in this 1997 * generation can be done without a TLB flush. 
1998 */ 1999 if (!flush_by_asid()) 2000 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 2001 } 2002 vcpu->asid.gen = asid[cpu].gen; 2003 vcpu->asid.num = asid[cpu].num; 2004 2005 ctrl->asid = vcpu->asid.num; 2006 svm_set_dirty(vcpu, VMCB_CACHE_ASID); 2007 /* 2008 * If this cpu supports "flush-by-asid" then the TLB 2009 * was not flushed after the generation bump. The TLB 2010 * is flushed selectively after every new ASID allocation. 2011 */ 2012 if (flush_by_asid()) 2013 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 2014 } 2015 vcpu->eptgen = eptgen; 2016 2017 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 2018 KASSERT(ctrl->asid == vcpu->asid.num, 2019 ("ASID mismatch: %u/%u", ctrl->asid, vcpu->asid.num)); 2020 } 2021 2022 static void 2023 svm_pmap_deactivate(pmap_t pmap) 2024 { 2025 smr_exit(pmap->pm_eptsmr); 2026 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); 2027 } 2028 2029 static __inline void 2030 disable_gintr(void) 2031 { 2032 2033 __asm __volatile("clgi"); 2034 } 2035 2036 static __inline void 2037 enable_gintr(void) 2038 { 2039 2040 __asm __volatile("stgi"); 2041 } 2042 2043 static __inline void 2044 svm_dr_enter_guest(struct svm_regctx *gctx) 2045 { 2046 2047 /* Save host control debug registers. */ 2048 gctx->host_dr7 = rdr7(); 2049 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 2050 2051 /* 2052 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 2053 * exceptions in the host based on the guest DRx values. The 2054 * guest DR6, DR7, and DEBUGCTL are saved/restored in the 2055 * VMCB. 2056 */ 2057 load_dr7(0); 2058 wrmsr(MSR_DEBUGCTLMSR, 0); 2059 2060 /* Save host debug registers. */ 2061 gctx->host_dr0 = rdr0(); 2062 gctx->host_dr1 = rdr1(); 2063 gctx->host_dr2 = rdr2(); 2064 gctx->host_dr3 = rdr3(); 2065 gctx->host_dr6 = rdr6(); 2066 2067 /* Restore guest debug registers. */ 2068 load_dr0(gctx->sctx_dr0); 2069 load_dr1(gctx->sctx_dr1); 2070 load_dr2(gctx->sctx_dr2); 2071 load_dr3(gctx->sctx_dr3); 2072 } 2073 2074 static __inline void 2075 svm_dr_leave_guest(struct svm_regctx *gctx) 2076 { 2077 2078 /* Save guest debug registers. */ 2079 gctx->sctx_dr0 = rdr0(); 2080 gctx->sctx_dr1 = rdr1(); 2081 gctx->sctx_dr2 = rdr2(); 2082 gctx->sctx_dr3 = rdr3(); 2083 2084 /* 2085 * Restore host debug registers. Restore DR7 and DEBUGCTL 2086 * last. 2087 */ 2088 load_dr0(gctx->host_dr0); 2089 load_dr1(gctx->host_dr1); 2090 load_dr2(gctx->host_dr2); 2091 load_dr3(gctx->host_dr3); 2092 load_dr6(gctx->host_dr6); 2093 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl); 2094 load_dr7(gctx->host_dr7); 2095 } 2096 2097 /* 2098 * Start vcpu with specified RIP. 2099 */ 2100 static int 2101 svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo) 2102 { 2103 struct svm_regctx *gctx; 2104 struct svm_softc *svm_sc; 2105 struct svm_vcpu *vcpu; 2106 struct vmcb_state *state; 2107 struct vmcb_ctrl *ctrl; 2108 struct vm_exit *vmexit; 2109 struct vlapic *vlapic; 2110 uint64_t vmcb_pa; 2111 int handled; 2112 uint16_t ldt_sel; 2113 2114 vcpu = vcpui; 2115 svm_sc = vcpu->sc; 2116 state = svm_get_vmcb_state(vcpu); 2117 ctrl = svm_get_vmcb_ctrl(vcpu); 2118 vmexit = vm_exitinfo(vcpu->vcpu); 2119 vlapic = vm_lapic(vcpu->vcpu); 2120 2121 gctx = svm_get_guest_regctx(vcpu); 2122 vmcb_pa = vcpu->vmcb_pa; 2123 2124 if (vcpu->lastcpu != curcpu) { 2125 /* 2126 * Force new ASID allocation by invalidating the generation. 2127 */ 2128 vcpu->asid.gen = 0; 2129 2130 /* 2131 * Invalidate the VMCB state cache by marking all fields dirty. 
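		 *
		 * The clean bits only tell the processor which VMCB state it
		 * may reuse from its internal cache, and that cache is per
		 * physical cpu. After a migration none of it corresponds to
		 * this VMCB, so every field must be reloaded from memory on
		 * the next VMRUN.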
2132 */ 2133 svm_set_dirty(vcpu, 0xffffffff); 2134 2135 /* 2136 * XXX 2137 * Setting 'vcpu->lastcpu' here is bit premature because 2138 * we may return from this function without actually executing 2139 * the VMRUN instruction. This could happen if a rendezvous 2140 * or an AST is pending on the first time through the loop. 2141 * 2142 * This works for now but any new side-effects of vcpu 2143 * migration should take this case into account. 2144 */ 2145 vcpu->lastcpu = curcpu; 2146 vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1); 2147 } 2148 2149 svm_msr_guest_enter(vcpu); 2150 2151 /* Update Guest RIP */ 2152 state->rip = rip; 2153 2154 do { 2155 /* 2156 * Disable global interrupts to guarantee atomicity during 2157 * loading of guest state. This includes not only the state 2158 * loaded by the "vmrun" instruction but also software state 2159 * maintained by the hypervisor: suspended and rendezvous 2160 * state, NPT generation number, vlapic interrupts etc. 2161 */ 2162 disable_gintr(); 2163 2164 if (vcpu_suspended(evinfo)) { 2165 enable_gintr(); 2166 vm_exit_suspended(vcpu->vcpu, state->rip); 2167 break; 2168 } 2169 2170 if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) { 2171 enable_gintr(); 2172 vm_exit_rendezvous(vcpu->vcpu, state->rip); 2173 break; 2174 } 2175 2176 if (vcpu_reqidle(evinfo)) { 2177 enable_gintr(); 2178 vm_exit_reqidle(vcpu->vcpu, state->rip); 2179 break; 2180 } 2181 2182 /* We are asked to give the cpu by scheduler. */ 2183 if (vcpu_should_yield(vcpu->vcpu)) { 2184 enable_gintr(); 2185 vm_exit_astpending(vcpu->vcpu, state->rip); 2186 break; 2187 } 2188 2189 if (vcpu_debugged(vcpu->vcpu)) { 2190 enable_gintr(); 2191 vm_exit_debug(vcpu->vcpu, state->rip); 2192 break; 2193 } 2194 2195 /* 2196 * #VMEXIT resumes the host with the guest LDTR, so 2197 * save the current LDT selector so it can be restored 2198 * after an exit. The userspace hypervisor probably 2199 * doesn't use a LDT, but save and restore it to be 2200 * safe. 2201 */ 2202 ldt_sel = sldt(); 2203 2204 svm_inj_interrupts(svm_sc, vcpu, vlapic); 2205 2206 /* 2207 * Check the pmap generation and the ASID generation to 2208 * ensure that the vcpu does not use stale TLB mappings. 2209 */ 2210 svm_pmap_activate(vcpu, pmap); 2211 2212 ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty; 2213 vcpu->dirty = 0; 2214 SVM_CTR1(vcpu, "vmcb clean %#x", ctrl->vmcb_clean); 2215 2216 /* Launch Virtual Machine. */ 2217 SVM_CTR1(vcpu, "Resume execution at %#lx", state->rip); 2218 svm_dr_enter_guest(gctx); 2219 svm_launch(vmcb_pa, gctx, get_pcpu()); 2220 svm_dr_leave_guest(gctx); 2221 2222 svm_pmap_deactivate(pmap); 2223 2224 /* 2225 * The host GDTR and IDTR is saved by VMRUN and restored 2226 * automatically on #VMEXIT. However, the host TSS needs 2227 * to be restored explicitly. 2228 */ 2229 restore_host_tss(); 2230 2231 /* Restore host LDTR. */ 2232 lldt(ldt_sel); 2233 2234 /* #VMEXIT disables interrupts so re-enable them here. */ 2235 enable_gintr(); 2236 2237 /* Update 'nextrip' */ 2238 vcpu->nextrip = state->rip; 2239 2240 /* Handle #VMEXIT and if required return to user space. 
		handled = svm_vmexit(svm_sc, vcpu, vmexit);
	} while (handled);

	svm_msr_guest_exit(vcpu);

	return (0);
}

static void
svm_vcpu_cleanup(void *vcpui)
{
	struct svm_vcpu *vcpu = vcpui;

	free(vcpu->vmcb, M_SVM);
	free(vcpu, M_SVM);
}

static void
svm_cleanup(void *vmi)
{
	struct svm_softc *sc = vmi;

	free(sc->iopm_bitmap, M_SVM);
	free(sc->msr_bitmap, M_SVM);
	free(sc, M_SVM);
}

static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RBX:
		return (&regctx->sctx_rbx);
	case VM_REG_GUEST_RCX:
		return (&regctx->sctx_rcx);
	case VM_REG_GUEST_RDX:
		return (&regctx->sctx_rdx);
	case VM_REG_GUEST_RDI:
		return (&regctx->sctx_rdi);
	case VM_REG_GUEST_RSI:
		return (&regctx->sctx_rsi);
	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
	case VM_REG_GUEST_R8:
		return (&regctx->sctx_r8);
	case VM_REG_GUEST_R9:
		return (&regctx->sctx_r9);
	case VM_REG_GUEST_R10:
		return (&regctx->sctx_r10);
	case VM_REG_GUEST_R11:
		return (&regctx->sctx_r11);
	case VM_REG_GUEST_R12:
		return (&regctx->sctx_r12);
	case VM_REG_GUEST_R13:
		return (&regctx->sctx_r13);
	case VM_REG_GUEST_R14:
		return (&regctx->sctx_r14);
	case VM_REG_GUEST_R15:
		return (&regctx->sctx_r15);
	case VM_REG_GUEST_DR0:
		return (&regctx->sctx_dr0);
	case VM_REG_GUEST_DR1:
		return (&regctx->sctx_dr1);
	case VM_REG_GUEST_DR2:
		return (&regctx->sctx_dr2);
	case VM_REG_GUEST_DR3:
		return (&regctx->sctx_dr3);
	default:
		return (NULL);
	}
}

static int
svm_getreg(void *vcpui, int ident, uint64_t *val)
{
	struct svm_vcpu *vcpu;
	register_t *reg;

	vcpu = vcpui;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_get_intr_shadow(vcpu, val));
	}

	if (vmcb_read(vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
	}

	SVM_CTR1(vcpu, "svm_getreg: unknown register %#x", ident);
	return (EINVAL);
}

static int
svm_setreg(void *vcpui, int ident, uint64_t val)
{
	struct svm_vcpu *vcpu;
	register_t *reg;

	vcpu = vcpui;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_modify_intr_shadow(vcpu, val));
	}

	/* Do not permit user write access to VMCB fields by offset. */
	if (!VMCB_ACCESS_OK(ident)) {
		if (vmcb_write(vcpu, ident, val) == 0) {
			return (0);
		}
	}

	reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
	}

	if (ident == VM_REG_GUEST_ENTRY_INST_LENGTH) {
		/* Ignore. */
		return (0);
	}

	/*
	 * XXX deal with CR3 and invalidate TLB entries tagged with the
	 * vcpu's ASID. This needs to be treated differently depending on
	 * whether 'running' is true/false.
2376 */ 2377 2378 SVM_CTR1(vcpu, "svm_setreg: unknown register %#x", ident); 2379 return (EINVAL); 2380 } 2381 2382 static int 2383 svm_getdesc(void *vcpui, int reg, struct seg_desc *desc) 2384 { 2385 return (vmcb_getdesc(vcpui, reg, desc)); 2386 } 2387 2388 static int 2389 svm_setdesc(void *vcpui, int reg, struct seg_desc *desc) 2390 { 2391 return (vmcb_setdesc(vcpui, reg, desc)); 2392 } 2393 2394 #ifdef BHYVE_SNAPSHOT 2395 static int 2396 svm_snapshot_reg(void *vcpui, int ident, struct vm_snapshot_meta *meta) 2397 { 2398 int ret; 2399 uint64_t val; 2400 2401 if (meta->op == VM_SNAPSHOT_SAVE) { 2402 ret = svm_getreg(vcpui, ident, &val); 2403 if (ret != 0) 2404 goto done; 2405 2406 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2407 } else if (meta->op == VM_SNAPSHOT_RESTORE) { 2408 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2409 2410 ret = svm_setreg(vcpui, ident, val); 2411 if (ret != 0) 2412 goto done; 2413 } else { 2414 ret = EINVAL; 2415 goto done; 2416 } 2417 2418 done: 2419 return (ret); 2420 } 2421 #endif 2422 2423 static int 2424 svm_setcap(void *vcpui, int type, int val) 2425 { 2426 struct svm_vcpu *vcpu; 2427 struct vlapic *vlapic; 2428 int error; 2429 2430 vcpu = vcpui; 2431 error = 0; 2432 2433 switch (type) { 2434 case VM_CAP_HALT_EXIT: 2435 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, 2436 VMCB_INTCPT_HLT, val); 2437 break; 2438 case VM_CAP_PAUSE_EXIT: 2439 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, 2440 VMCB_INTCPT_PAUSE, val); 2441 break; 2442 case VM_CAP_UNRESTRICTED_GUEST: 2443 /* Unrestricted guest execution cannot be disabled in SVM */ 2444 if (val == 0) 2445 error = EINVAL; 2446 break; 2447 case VM_CAP_BPT_EXIT: 2448 svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP), val); 2449 break; 2450 case VM_CAP_IPI_EXIT: 2451 vlapic = vm_lapic(vcpu->vcpu); 2452 vlapic->ipi_exit = val; 2453 break; 2454 case VM_CAP_MASK_HWINTR: 2455 vcpu->caps &= ~(1 << VM_CAP_MASK_HWINTR); 2456 vcpu->caps |= (val << VM_CAP_MASK_HWINTR); 2457 break; 2458 case VM_CAP_RFLAGS_TF: { 2459 uint64_t rflags; 2460 2461 /* Fetch RFLAGS. */ 2462 if (svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags)) { 2463 error = (EINVAL); 2464 break; 2465 } 2466 if (val) { 2467 /* Save current TF bit. */ 2468 vcpu->dbg.rflags_tf = rflags & PSL_T; 2469 /* Trace next instruction. 
*/ 2470 if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, 2471 (rflags | PSL_T))) { 2472 error = (EINVAL); 2473 break; 2474 } 2475 vcpu->caps |= (1 << VM_CAP_RFLAGS_TF); 2476 } else { 2477 /* 2478 * Restore shadowed RFLAGS.TF only if vCPU was 2479 * previously stepped 2480 */ 2481 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { 2482 rflags &= ~PSL_T; 2483 rflags |= vcpu->dbg.rflags_tf; 2484 vcpu->dbg.rflags_tf = 0; 2485 2486 if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, 2487 rflags)) { 2488 error = (EINVAL); 2489 break; 2490 } 2491 vcpu->caps &= ~(1 << VM_CAP_RFLAGS_TF); 2492 } 2493 } 2494 2495 svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_DB), val); 2496 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_POPF, 2497 val); 2498 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_PUSHF, 2499 val); 2500 break; 2501 } 2502 default: 2503 error = ENOENT; 2504 break; 2505 } 2506 return (error); 2507 } 2508 2509 static int 2510 svm_getcap(void *vcpui, int type, int *retval) 2511 { 2512 struct svm_vcpu *vcpu; 2513 struct vlapic *vlapic; 2514 int error; 2515 2516 vcpu = vcpui; 2517 error = 0; 2518 2519 switch (type) { 2520 case VM_CAP_HALT_EXIT: 2521 *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, 2522 VMCB_INTCPT_HLT); 2523 break; 2524 case VM_CAP_PAUSE_EXIT: 2525 *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, 2526 VMCB_INTCPT_PAUSE); 2527 break; 2528 case VM_CAP_UNRESTRICTED_GUEST: 2529 *retval = 1; /* unrestricted guest is always enabled */ 2530 break; 2531 case VM_CAP_BPT_EXIT: 2532 *retval = svm_get_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP)); 2533 break; 2534 case VM_CAP_IPI_EXIT: 2535 vlapic = vm_lapic(vcpu->vcpu); 2536 *retval = vlapic->ipi_exit; 2537 break; 2538 case VM_CAP_RFLAGS_TF: 2539 *retval = !!(vcpu->caps & (1 << VM_CAP_RFLAGS_TF)); 2540 break; 2541 case VM_CAP_MASK_HWINTR: 2542 *retval = !!(vcpu->caps & (1 << VM_CAP_MASK_HWINTR)); 2543 break; 2544 default: 2545 error = ENOENT; 2546 break; 2547 } 2548 return (error); 2549 } 2550 2551 static struct vmspace * 2552 svm_vmspace_alloc(vm_offset_t min, vm_offset_t max) 2553 { 2554 return (svm_npt_alloc(min, max)); 2555 } 2556 2557 static void 2558 svm_vmspace_free(struct vmspace *vmspace) 2559 { 2560 svm_npt_free(vmspace); 2561 } 2562 2563 static struct vlapic * 2564 svm_vlapic_init(void *vcpui) 2565 { 2566 struct svm_vcpu *vcpu; 2567 struct vlapic *vlapic; 2568 2569 vcpu = vcpui; 2570 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); 2571 vlapic->vm = vcpu->sc->vm; 2572 vlapic->vcpu = vcpu->vcpu; 2573 vlapic->vcpuid = vcpu->vcpuid; 2574 vlapic->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_SVM_VLAPIC, 2575 M_WAITOK | M_ZERO); 2576 2577 vlapic_init(vlapic); 2578 2579 return (vlapic); 2580 } 2581 2582 static void 2583 svm_vlapic_cleanup(struct vlapic *vlapic) 2584 { 2585 2586 vlapic_cleanup(vlapic); 2587 free(vlapic->apic_page, M_SVM_VLAPIC); 2588 free(vlapic, M_SVM_VLAPIC); 2589 } 2590 2591 #ifdef BHYVE_SNAPSHOT 2592 static int 2593 svm_vcpu_snapshot(void *vcpui, struct vm_snapshot_meta *meta) 2594 { 2595 struct svm_vcpu *vcpu; 2596 int err, running, hostcpu; 2597 2598 vcpu = vcpui; 2599 err = 0; 2600 2601 running = vcpu_is_running(vcpu->vcpu, &hostcpu); 2602 if (running && hostcpu != curcpu) { 2603 printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm), 2604 vcpu->vcpuid); 2605 return (EINVAL); 2606 } 2607 2608 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR0, meta); 2609 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR2, meta); 2610 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR3, meta); 2611 err += 
svm_snapshot_reg(vcpu, VM_REG_GUEST_CR4, meta); 2612 2613 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR6, meta); 2614 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR7, meta); 2615 2616 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RAX, meta); 2617 2618 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RSP, meta); 2619 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RIP, meta); 2620 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RFLAGS, meta); 2621 2622 /* Guest segments */ 2623 /* ES */ 2624 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_ES, meta); 2625 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_ES, meta); 2626 2627 /* CS */ 2628 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CS, meta); 2629 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_CS, meta); 2630 2631 /* SS */ 2632 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_SS, meta); 2633 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_SS, meta); 2634 2635 /* DS */ 2636 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DS, meta); 2637 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_DS, meta); 2638 2639 /* FS */ 2640 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_FS, meta); 2641 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_FS, meta); 2642 2643 /* GS */ 2644 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_GS, meta); 2645 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GS, meta); 2646 2647 /* TR */ 2648 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_TR, meta); 2649 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_TR, meta); 2650 2651 /* LDTR */ 2652 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_LDTR, meta); 2653 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_LDTR, meta); 2654 2655 /* EFER */ 2656 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_EFER, meta); 2657 2658 /* IDTR and GDTR */ 2659 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_IDTR, meta); 2660 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GDTR, meta); 2661 2662 /* Specific AMD registers */ 2663 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_INTR_SHADOW, meta); 2664 2665 err += vmcb_snapshot_any(vcpu, 2666 VMCB_ACCESS(VMCB_OFF_CR_INTERCEPT, 4), meta); 2667 err += vmcb_snapshot_any(vcpu, 2668 VMCB_ACCESS(VMCB_OFF_DR_INTERCEPT, 4), meta); 2669 err += vmcb_snapshot_any(vcpu, 2670 VMCB_ACCESS(VMCB_OFF_EXC_INTERCEPT, 4), meta); 2671 err += vmcb_snapshot_any(vcpu, 2672 VMCB_ACCESS(VMCB_OFF_INST1_INTERCEPT, 4), meta); 2673 err += vmcb_snapshot_any(vcpu, 2674 VMCB_ACCESS(VMCB_OFF_INST2_INTERCEPT, 4), meta); 2675 2676 err += vmcb_snapshot_any(vcpu, 2677 VMCB_ACCESS(VMCB_OFF_PAUSE_FILTHRESH, 2), meta); 2678 err += vmcb_snapshot_any(vcpu, 2679 VMCB_ACCESS(VMCB_OFF_PAUSE_FILCNT, 2), meta); 2680 2681 err += vmcb_snapshot_any(vcpu, 2682 VMCB_ACCESS(VMCB_OFF_ASID, 4), meta); 2683 2684 err += vmcb_snapshot_any(vcpu, 2685 VMCB_ACCESS(VMCB_OFF_TLB_CTRL, 4), meta); 2686 2687 err += vmcb_snapshot_any(vcpu, 2688 VMCB_ACCESS(VMCB_OFF_VIRQ, 8), meta); 2689 2690 err += vmcb_snapshot_any(vcpu, 2691 VMCB_ACCESS(VMCB_OFF_EXIT_REASON, 8), meta); 2692 err += vmcb_snapshot_any(vcpu, 2693 VMCB_ACCESS(VMCB_OFF_EXITINFO1, 8), meta); 2694 err += vmcb_snapshot_any(vcpu, 2695 VMCB_ACCESS(VMCB_OFF_EXITINFO2, 8), meta); 2696 err += vmcb_snapshot_any(vcpu, 2697 VMCB_ACCESS(VMCB_OFF_EXITINTINFO, 8), meta); 2698 2699 err += vmcb_snapshot_any(vcpu, 2700 VMCB_ACCESS(VMCB_OFF_NP_ENABLE, 1), meta); 2701 2702 err += vmcb_snapshot_any(vcpu, 2703 VMCB_ACCESS(VMCB_OFF_AVIC_BAR, 8), meta); 2704 err += vmcb_snapshot_any(vcpu, 2705 VMCB_ACCESS(VMCB_OFF_AVIC_PAGE, 8), meta); 2706 err += vmcb_snapshot_any(vcpu, 2707 VMCB_ACCESS(VMCB_OFF_AVIC_LT, 8), meta); 2708 err += vmcb_snapshot_any(vcpu, 2709 
VMCB_ACCESS(VMCB_OFF_AVIC_PT, 8), meta); 2710 2711 err += vmcb_snapshot_any(vcpu, 2712 VMCB_ACCESS(VMCB_OFF_CPL, 1), meta); 2713 2714 err += vmcb_snapshot_any(vcpu, 2715 VMCB_ACCESS(VMCB_OFF_STAR, 8), meta); 2716 err += vmcb_snapshot_any(vcpu, 2717 VMCB_ACCESS(VMCB_OFF_LSTAR, 8), meta); 2718 err += vmcb_snapshot_any(vcpu, 2719 VMCB_ACCESS(VMCB_OFF_CSTAR, 8), meta); 2720 2721 err += vmcb_snapshot_any(vcpu, 2722 VMCB_ACCESS(VMCB_OFF_SFMASK, 8), meta); 2723 2724 err += vmcb_snapshot_any(vcpu, 2725 VMCB_ACCESS(VMCB_OFF_KERNELGBASE, 8), meta); 2726 2727 err += vmcb_snapshot_any(vcpu, 2728 VMCB_ACCESS(VMCB_OFF_SYSENTER_CS, 8), meta); 2729 err += vmcb_snapshot_any(vcpu, 2730 VMCB_ACCESS(VMCB_OFF_SYSENTER_ESP, 8), meta); 2731 err += vmcb_snapshot_any(vcpu, 2732 VMCB_ACCESS(VMCB_OFF_SYSENTER_EIP, 8), meta); 2733 2734 err += vmcb_snapshot_any(vcpu, 2735 VMCB_ACCESS(VMCB_OFF_GUEST_PAT, 8), meta); 2736 2737 err += vmcb_snapshot_any(vcpu, 2738 VMCB_ACCESS(VMCB_OFF_DBGCTL, 8), meta); 2739 err += vmcb_snapshot_any(vcpu, 2740 VMCB_ACCESS(VMCB_OFF_BR_FROM, 8), meta); 2741 err += vmcb_snapshot_any(vcpu, 2742 VMCB_ACCESS(VMCB_OFF_BR_TO, 8), meta); 2743 err += vmcb_snapshot_any(vcpu, 2744 VMCB_ACCESS(VMCB_OFF_INT_FROM, 8), meta); 2745 err += vmcb_snapshot_any(vcpu, 2746 VMCB_ACCESS(VMCB_OFF_INT_TO, 8), meta); 2747 if (err != 0) 2748 goto done; 2749 2750 /* Snapshot swctx for virtual cpu */ 2751 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, err, done); 2752 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, err, done); 2753 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, err, done); 2754 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, err, done); 2755 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, err, done); 2756 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, err, done); 2757 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, err, done); 2758 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, err, done); 2759 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, err, done); 2760 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, err, done); 2761 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, err, done); 2762 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, err, done); 2763 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, err, done); 2764 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, err, done); 2765 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, err, done); 2766 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, err, done); 2767 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, err, done); 2768 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, err, done); 2769 2770 /* Restore other svm_vcpu struct fields */ 2771 2772 /* Restore NEXTRIP field */ 2773 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, err, done); 2774 2775 /* Restore lastcpu field */ 2776 SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, err, done); 2777 SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, err, done); 2778 2779 /* Restore EPTGEN field - EPT is Extended Page Table */ 2780 SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, err, done); 2781 2782 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, err, done); 2783 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, err, done); 2784 2785 SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, sizeof(vcpu->mtrr), meta, err, done); 2786 2787 /* Set all caches dirty */ 2788 if (meta->op == VM_SNAPSHOT_RESTORE) 2789 svm_set_dirty(vcpu, 0xffffffff); 2790 2791 done: 2792 return (err); 2793 } 2794 2795 static int 2796 svm_restore_tsc(void *vcpui, uint64_t offset) 2797 { 2798 struct svm_vcpu *vcpu = vcpui; 2799 2800 svm_set_tsc_offset(vcpu, 
offset); 2801 2802 return (0); 2803 } 2804 #endif 2805 2806 const struct vmm_ops vmm_ops_amd = { 2807 .modinit = svm_modinit, 2808 .modcleanup = svm_modcleanup, 2809 .modresume = svm_modresume, 2810 .init = svm_init, 2811 .run = svm_run, 2812 .cleanup = svm_cleanup, 2813 .vcpu_init = svm_vcpu_init, 2814 .vcpu_cleanup = svm_vcpu_cleanup, 2815 .getreg = svm_getreg, 2816 .setreg = svm_setreg, 2817 .getdesc = svm_getdesc, 2818 .setdesc = svm_setdesc, 2819 .getcap = svm_getcap, 2820 .setcap = svm_setcap, 2821 .vmspace_alloc = svm_vmspace_alloc, 2822 .vmspace_free = svm_vmspace_free, 2823 .vlapic_init = svm_vlapic_init, 2824 .vlapic_cleanup = svm_vlapic_cleanup, 2825 #ifdef BHYVE_SNAPSHOT 2826 .vcpu_snapshot = svm_vcpu_snapshot, 2827 .restore_tsc = svm_restore_tsc, 2828 #endif 2829 }; 2830
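/*
 * Illustrative sketch (not part of the driver): a minimal user-space model of
 * the per-host-cpu ASID generation scheme used by svm_pmap_activate() above.
 * It models only the 'gen'/'num' bookkeeping, not the eptgen check or the
 * flush-by-asid variations, and the names (demo_asid, demo_vcpu, DEMO_NASID)
 * and the ASID count are made up for the example. The block is kept under
 * "#if 0" so it is never compiled as part of this file; it builds as a
 * standalone C program if extracted.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define	DEMO_NASID	8		/* pretend the CPU reports 8 ASIDs */

struct demo_asid {
	unsigned long	gen;		/* current generation, never 0 */
	unsigned int	num;		/* last ASID handed out */
};

struct demo_vcpu {
	unsigned long	gen;		/* generation of the cached ASID */
	unsigned int	num;		/* cached ASID */
};

/* One allocator per host cpu; a single instance is enough for the demo. */
static struct demo_asid host_asid = { .gen = 1, .num = 0 };

/*
 * Give 'vcpu' a valid ASID, mirroring the 'alloc_asid' path above.
 * Returns true when a fresh ASID had to be allocated.
 */
static bool
demo_asid_activate(struct demo_vcpu *vcpu)
{

	if (vcpu->gen == host_asid.gen)
		return (false);		/* cached ASID is still valid */

	if (++host_asid.num >= DEMO_NASID) {
		/* Wrap: restart at ASID 1 and start a new generation. */
		host_asid.num = 1;
		if (++host_asid.gen == 0)
			host_asid.gen = 1;
	}
	vcpu->gen = host_asid.gen;
	vcpu->num = host_asid.num;
	return (true);
}

int
main(void)
{
	struct demo_vcpu vcpus[10] = { { 0 } };
	int i;

	/* Ten vcpus competing for seven usable ASIDs forces one wrap. */
	for (i = 0; i < 10; i++) {
		bool fresh = demo_asid_activate(&vcpus[i]);

		printf("vcpu%d: asid %u gen %lu%s\n", i, vcpus[i].num,
		    vcpus[i].gen, fresh ? " (new)" : "");
	}
	return (0);
}
#endif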