1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 #include "opt_bhyve_snapshot.h" 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/smp.h> 35 #include <sys/kernel.h> 36 #include <sys/malloc.h> 37 #include <sys/pcpu.h> 38 #include <sys/proc.h> 39 #include <sys/reg.h> 40 #include <sys/smr.h> 41 #include <sys/sysctl.h> 42 43 #include <vm/vm.h> 44 #include <vm/vm_extern.h> 45 #include <vm/pmap.h> 46 47 #include <machine/cpufunc.h> 48 #include <machine/psl.h> 49 #include <machine/md_var.h> 50 #include <machine/specialreg.h> 51 #include <machine/smp.h> 52 #include <machine/vmm.h> 53 #include <machine/vmm_dev.h> 54 #include <machine/vmm_instruction_emul.h> 55 #include <machine/vmm_snapshot.h> 56 57 #include <dev/vmm/vmm_ktr.h> 58 #include <dev/vmm/vmm_mem.h> 59 60 #include "vmm_lapic.h" 61 #include "vmm_stat.h" 62 #include "vmm_ioport.h" 63 #include "vatpic.h" 64 #include "vlapic.h" 65 #include "vlapic_priv.h" 66 67 #include "x86.h" 68 #include "vmcb.h" 69 #include "svm.h" 70 #include "svm_softc.h" 71 #include "svm_msr.h" 72 #include "npt.h" 73 #include "io/ppt.h" 74 75 SYSCTL_DECL(_hw_vmm); 76 SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 77 NULL); 78 79 /* 80 * SVM CPUID function 0x8000_000A, edx bit decoding. 81 */ 82 #define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ 83 #define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ 84 #define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ 85 #define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ 86 #define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ 87 #define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ 88 #define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ 89 #define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ 90 #define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. 
*/ 91 #define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ 92 #define AMD_CPUID_SVM_AVIC BIT(13) /* AVIC present */ 93 94 #define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ 95 VMCB_CACHE_IOPM | \ 96 VMCB_CACHE_I | \ 97 VMCB_CACHE_TPR | \ 98 VMCB_CACHE_CR2 | \ 99 VMCB_CACHE_CR | \ 100 VMCB_CACHE_DR | \ 101 VMCB_CACHE_DT | \ 102 VMCB_CACHE_SEG | \ 103 VMCB_CACHE_NP) 104 105 static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT; 106 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean, 107 0, NULL); 108 109 static MALLOC_DEFINE(M_SVM, "svm", "svm"); 110 static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); 111 112 static uint32_t svm_feature = ~0U; /* AMD SVM features. */ 113 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0, 114 "SVM features advertised by CPUID.8000000AH:EDX"); 115 116 static int disable_npf_assist; 117 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN, 118 &disable_npf_assist, 0, NULL); 119 120 /* Maximum ASIDs supported by the processor */ 121 static uint32_t nasid; 122 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0, 123 "Number of ASIDs supported by this processor"); 124 125 /* Current ASID generation for each host cpu */ 126 static struct asid asid[MAXCPU]; 127 128 /* SVM host state saved area of size 4KB for each physical core. */ 129 static uint8_t *hsave; 130 131 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery"); 132 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry"); 133 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window"); 134 135 static int svm_getdesc(void *vcpui, int reg, struct seg_desc *desc); 136 static int svm_setreg(void *vcpui, int ident, uint64_t val); 137 static int svm_getreg(void *vcpui, int ident, uint64_t *val); 138 static __inline int 139 flush_by_asid(void) 140 { 141 142 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); 143 } 144 145 static __inline int 146 decode_assist(void) 147 { 148 149 return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST); 150 } 151 152 static void 153 svm_disable(void *arg __unused) 154 { 155 uint64_t efer; 156 157 efer = rdmsr(MSR_EFER); 158 efer &= ~EFER_SVM; 159 wrmsr(MSR_EFER, efer); 160 } 161 162 /* 163 * Disable SVM on all CPUs. 164 */ 165 static int 166 svm_modcleanup(void) 167 { 168 169 smp_rendezvous(NULL, svm_disable, NULL, NULL); 170 171 if (hsave != NULL) 172 kmem_free(hsave, (mp_maxid + 1) * PAGE_SIZE); 173 174 return (0); 175 } 176 177 /* 178 * Verify that all the features required by bhyve are available. 179 */ 180 static int 181 check_svm_features(void) 182 { 183 u_int regs[4]; 184 185 /* CPUID Fn8000_000A is for SVM */ 186 do_cpuid(0x8000000A, regs); 187 svm_feature &= regs[3]; 188 189 /* 190 * The number of ASIDs can be configured to be less than what is 191 * supported by the hardware but not more. 
192 */ 193 if (nasid == 0 || nasid > regs[1]) 194 nasid = regs[1]; 195 KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid)); 196 197 /* bhyve requires the Nested Paging feature */ 198 if (!(svm_feature & AMD_CPUID_SVM_NP)) { 199 printf("SVM: Nested Paging feature not available.\n"); 200 return (ENXIO); 201 } 202 203 /* bhyve requires the NRIP Save feature */ 204 if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) { 205 printf("SVM: NRIP Save feature not available.\n"); 206 return (ENXIO); 207 } 208 209 return (0); 210 } 211 212 static void 213 svm_enable(void *arg __unused) 214 { 215 uint64_t efer; 216 217 efer = rdmsr(MSR_EFER); 218 efer |= EFER_SVM; 219 wrmsr(MSR_EFER, efer); 220 221 wrmsr(MSR_VM_HSAVE_PA, vtophys(&hsave[curcpu * PAGE_SIZE])); 222 } 223 224 /* 225 * Return 1 if SVM is enabled on this processor and 0 otherwise. 226 */ 227 static int 228 svm_available(void) 229 { 230 uint64_t msr; 231 232 /* Section 15.4 Enabling SVM from APM2. */ 233 if ((amd_feature2 & AMDID2_SVM) == 0) { 234 printf("SVM: not available.\n"); 235 return (0); 236 } 237 238 msr = rdmsr(MSR_VM_CR); 239 if ((msr & VM_CR_SVMDIS) != 0) { 240 printf("SVM: disabled by BIOS.\n"); 241 return (0); 242 } 243 244 return (1); 245 } 246 247 static int 248 svm_modinit(int ipinum) 249 { 250 int error, cpu; 251 252 if (!svm_available()) 253 return (ENXIO); 254 255 error = check_svm_features(); 256 if (error) 257 return (error); 258 259 vmcb_clean &= VMCB_CACHE_DEFAULT; 260 261 for (cpu = 0; cpu < MAXCPU; cpu++) { 262 /* 263 * Initialize the host ASIDs to their "highest" valid values. 264 * 265 * The next ASID allocation will rollover both 'gen' and 'num' 266 * and start off the sequence at {1,1}. 267 */ 268 asid[cpu].gen = ~0UL; 269 asid[cpu].num = nasid - 1; 270 } 271 272 svm_msr_init(); 273 svm_npt_init(ipinum); 274 275 /* Enable SVM on all CPUs */ 276 hsave = kmem_malloc((mp_maxid + 1) * PAGE_SIZE, M_WAITOK | M_ZERO); 277 smp_rendezvous(NULL, svm_enable, NULL, NULL); 278 279 return (0); 280 } 281 282 static void 283 svm_modsuspend(void) 284 { 285 } 286 287 static void 288 svm_modresume(void) 289 { 290 291 svm_enable(NULL); 292 } 293 294 #ifdef BHYVE_SNAPSHOT 295 void 296 svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset) 297 { 298 struct vmcb_ctrl *ctrl; 299 300 ctrl = svm_get_vmcb_ctrl(vcpu); 301 ctrl->tsc_offset = offset; 302 303 svm_set_dirty(vcpu, VMCB_CACHE_I); 304 SVM_CTR1(vcpu, "tsc offset changed to %#lx", offset); 305 306 vm_set_tsc_offset(vcpu->vcpu, offset); 307 } 308 #endif 309 310 /* Pentium compatible MSRs */ 311 #define MSR_PENTIUM_START 0 312 #define MSR_PENTIUM_END 0x1FFF 313 /* AMD 6th generation and Intel compatible MSRs */ 314 #define MSR_AMD6TH_START 0xC0000000UL 315 #define MSR_AMD6TH_END 0xC0001FFFUL 316 /* AMD 7th and 8th generation compatible MSRs */ 317 #define MSR_AMD7TH_START 0xC0010000UL 318 #define MSR_AMD7TH_END 0xC0011FFFUL 319 320 /* 321 * Get the index and bit position for a MSR in permission bitmap. 322 * Two bits are used for each MSR: lower bit for read and higher bit for write. 
323 */ 324 static int 325 svm_msr_index(uint64_t msr, int *index, int *bit) 326 { 327 uint32_t base, off; 328 329 *index = -1; 330 *bit = (msr % 4) * 2; 331 base = 0; 332 333 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { 334 *index = msr / 4; 335 return (0); 336 } 337 338 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 339 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { 340 off = (msr - MSR_AMD6TH_START); 341 *index = (off + base) / 4; 342 return (0); 343 } 344 345 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); 346 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { 347 off = (msr - MSR_AMD7TH_START); 348 *index = (off + base) / 4; 349 return (0); 350 } 351 352 return (EINVAL); 353 } 354 355 /* 356 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor. 357 */ 358 static void 359 svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) 360 { 361 int index, bit, error __diagused; 362 363 error = svm_msr_index(msr, &index, &bit); 364 KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr)); 365 KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE, 366 ("%s: invalid index %d for msr %#lx", __func__, index, msr)); 367 KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d " 368 "msr %#lx", __func__, bit, msr)); 369 370 if (read) 371 perm_bitmap[index] &= ~(1UL << bit); 372 373 if (write) 374 perm_bitmap[index] &= ~(2UL << bit); 375 } 376 377 static void 378 svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) 379 { 380 381 svm_msr_perm(perm_bitmap, msr, true, true); 382 } 383 384 static void 385 svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) 386 { 387 388 svm_msr_perm(perm_bitmap, msr, true, false); 389 } 390 391 static __inline int 392 svm_get_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask) 393 { 394 struct vmcb_ctrl *ctrl; 395 396 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 397 398 ctrl = svm_get_vmcb_ctrl(vcpu); 399 return (ctrl->intercept[idx] & bitmask ? 
1 : 0); 400 } 401 402 static __inline void 403 svm_set_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask, int enabled) 404 { 405 struct vmcb_ctrl *ctrl; 406 uint32_t oldval; 407 408 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 409 410 ctrl = svm_get_vmcb_ctrl(vcpu); 411 oldval = ctrl->intercept[idx]; 412 413 if (enabled) 414 ctrl->intercept[idx] |= bitmask; 415 else 416 ctrl->intercept[idx] &= ~bitmask; 417 418 if (ctrl->intercept[idx] != oldval) { 419 svm_set_dirty(vcpu, VMCB_CACHE_I); 420 SVM_CTR3(vcpu, "intercept[%d] modified from %#x to %#x", idx, 421 oldval, ctrl->intercept[idx]); 422 } 423 } 424 425 static __inline void 426 svm_disable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask) 427 { 428 429 svm_set_intercept(vcpu, off, bitmask, 0); 430 } 431 432 static __inline void 433 svm_enable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask) 434 { 435 436 svm_set_intercept(vcpu, off, bitmask, 1); 437 } 438 439 static void 440 vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa, 441 uint64_t msrpm_base_pa, uint64_t np_pml4) 442 { 443 struct vmcb_ctrl *ctrl; 444 struct vmcb_state *state; 445 uint32_t mask; 446 int n; 447 448 ctrl = svm_get_vmcb_ctrl(vcpu); 449 state = svm_get_vmcb_state(vcpu); 450 451 ctrl->iopm_base_pa = iopm_base_pa; 452 ctrl->msrpm_base_pa = msrpm_base_pa; 453 454 /* Enable nested paging */ 455 ctrl->np_enable = 1; 456 ctrl->n_cr3 = np_pml4; 457 458 /* 459 * Intercept accesses to the control registers that are not shadowed 460 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. 461 */ 462 for (n = 0; n < 16; n++) { 463 mask = (BIT(n) << 16) | BIT(n); 464 if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8) 465 svm_disable_intercept(vcpu, VMCB_CR_INTCPT, mask); 466 else 467 svm_enable_intercept(vcpu, VMCB_CR_INTCPT, mask); 468 } 469 470 /* 471 * Intercept everything when tracing guest exceptions otherwise 472 * just intercept machine check exception. 473 */ 474 if (vcpu_trace_exceptions(vcpu->vcpu)) { 475 for (n = 0; n < 32; n++) { 476 /* 477 * Skip unimplemented vectors in the exception bitmap. 478 */ 479 if (n == 2 || n == 9) { 480 continue; 481 } 482 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(n)); 483 } 484 } else { 485 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); 486 } 487 488 /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ 489 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); 490 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); 491 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); 492 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); 493 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); 494 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); 495 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI); 496 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN); 497 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_FERR_FREEZE); 498 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD); 499 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA); 500 501 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR); 502 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT); 503 504 /* 505 * Intercept SVM instructions since AMD enables them in guests otherwise. 506 * Non-intercepted VMMCALL causes #UD, skip it. 
507 */ 508 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD); 509 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE); 510 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI); 511 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI); 512 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT); 513 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP); 514 if (vcpu_trap_wbinvd(vcpu->vcpu)) { 515 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, 516 VMCB_INTCPT_WBINVD); 517 } 518 519 /* 520 * From section "Canonicalization and Consistency Checks" in APMv2 521 * the VMRUN intercept bit must be set to pass the consistency check. 522 */ 523 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN); 524 525 /* 526 * The ASID will be set to a non-zero value just before VMRUN. 527 */ 528 ctrl->asid = 0; 529 530 /* 531 * Section 15.21.1, Interrupt Masking in EFLAGS 532 * Section 15.21.2, Virtualizing APIC.TPR 533 * 534 * This must be set for %rflag and %cr8 isolation of guest and host. 535 */ 536 ctrl->v_intr_masking = 1; 537 538 /* Enable Last Branch Record aka LBR for debugging */ 539 ctrl->lbr_virt_en = 1; 540 state->dbgctl = BIT(0); 541 542 /* EFER_SVM must always be set when the guest is executing */ 543 state->efer = EFER_SVM; 544 545 /* Set up the PAT to power-on state */ 546 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | 547 PAT_VALUE(1, PAT_WRITE_THROUGH) | 548 PAT_VALUE(2, PAT_UNCACHED) | 549 PAT_VALUE(3, PAT_UNCACHEABLE) | 550 PAT_VALUE(4, PAT_WRITE_BACK) | 551 PAT_VALUE(5, PAT_WRITE_THROUGH) | 552 PAT_VALUE(6, PAT_UNCACHED) | 553 PAT_VALUE(7, PAT_UNCACHEABLE); 554 555 /* Set up DR6/7 to power-on state */ 556 state->dr6 = DBREG_DR6_RESERVED1; 557 state->dr7 = DBREG_DR7_RESERVED1; 558 } 559 560 /* 561 * Initialize a virtual machine. 562 */ 563 static void * 564 svm_init(struct vm *vm, pmap_t pmap) 565 { 566 struct svm_softc *svm_sc; 567 568 svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO); 569 570 svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM, 571 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0); 572 if (svm_sc->msr_bitmap == NULL) 573 panic("contigmalloc of SVM MSR bitmap failed"); 574 svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM, 575 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0); 576 if (svm_sc->iopm_bitmap == NULL) 577 panic("contigmalloc of SVM IO bitmap failed"); 578 579 svm_sc->vm = vm; 580 svm_sc->nptp = vtophys(pmap->pm_pmltop); 581 582 /* 583 * Intercept read and write accesses to all MSRs. 584 */ 585 memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE); 586 587 /* 588 * Access to the following MSRs is redirected to the VMCB when the 589 * guest is executing. Therefore it is safe to allow the guest to 590 * read/write these MSRs directly without hypervisor involvement. 
591 */ 592 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); 593 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); 594 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); 595 596 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); 597 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); 598 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); 599 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); 600 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); 601 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); 602 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); 603 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); 604 605 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); 606 607 /* 608 * Intercept writes to make sure that the EFER_SVM bit is not cleared. 609 */ 610 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); 611 612 /* Intercept access to all I/O ports. */ 613 memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE); 614 615 return (svm_sc); 616 } 617 618 static void * 619 svm_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid) 620 { 621 struct svm_softc *sc = vmi; 622 struct svm_vcpu *vcpu; 623 624 vcpu = malloc(sizeof(*vcpu), M_SVM, M_WAITOK | M_ZERO); 625 vcpu->sc = sc; 626 vcpu->vcpu = vcpu1; 627 vcpu->vcpuid = vcpuid; 628 vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE, M_SVM, 629 M_WAITOK | M_ZERO); 630 vcpu->nextrip = ~0; 631 vcpu->lastcpu = NOCPU; 632 vcpu->vmcb_pa = vtophys(vcpu->vmcb); 633 vmcb_init(sc, vcpu, vtophys(sc->iopm_bitmap), vtophys(sc->msr_bitmap), 634 sc->nptp); 635 svm_msr_guest_init(sc, vcpu); 636 return (vcpu); 637 } 638 639 /* 640 * Collateral for a generic SVM VM-exit. 641 */ 642 static void 643 vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) 644 { 645 646 vme->exitcode = VM_EXITCODE_SVM; 647 vme->u.svm.exitcode = code; 648 vme->u.svm.exitinfo1 = info1; 649 vme->u.svm.exitinfo2 = info2; 650 } 651 652 static int 653 svm_cpl(struct vmcb_state *state) 654 { 655 656 /* 657 * From APMv2: 658 * "Retrieve the CPL from the CPL field in the VMCB, not 659 * from any segment DPL" 660 */ 661 return (state->cpl); 662 } 663 664 static enum vm_cpu_mode 665 svm_vcpu_mode(struct vmcb *vmcb) 666 { 667 struct vmcb_segment seg; 668 struct vmcb_state *state; 669 int error __diagused; 670 671 state = &vmcb->state; 672 673 if (state->efer & EFER_LMA) { 674 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 675 KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__, 676 error)); 677 678 /* 679 * Section 4.8.1 for APM2, check if Code Segment has 680 * Long attribute set in descriptor. 681 */ 682 if (seg.attrib & VMCB_CS_ATTRIB_L) 683 return (CPU_MODE_64BIT); 684 else 685 return (CPU_MODE_COMPATIBILITY); 686 } else if (state->cr0 & CR0_PE) { 687 return (CPU_MODE_PROTECTED); 688 } else { 689 return (CPU_MODE_REAL); 690 } 691 } 692 693 static enum vm_paging_mode 694 svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) 695 { 696 697 if ((cr0 & CR0_PG) == 0) 698 return (PAGING_MODE_FLAT); 699 if ((cr4 & CR4_PAE) == 0) 700 return (PAGING_MODE_32); 701 if (efer & EFER_LME) 702 return (PAGING_MODE_64); 703 else 704 return (PAGING_MODE_PAE); 705 } 706 707 /* 708 * ins/outs utility routines 709 */ 710 static uint64_t 711 svm_inout_str_index(struct svm_regctx *regs, int in) 712 { 713 uint64_t val; 714 715 val = in ? regs->sctx_rdi : regs->sctx_rsi; 716 717 return (val); 718 } 719 720 static uint64_t 721 svm_inout_str_count(struct svm_regctx *regs, int rep) 722 { 723 uint64_t val; 724 725 val = rep ? 
regs->sctx_rcx : 1; 726 727 return (val); 728 } 729 730 static void 731 svm_inout_str_seginfo(struct svm_vcpu *vcpu, int64_t info1, int in, 732 struct vm_inout_str *vis) 733 { 734 int error __diagused, s; 735 736 if (in) { 737 vis->seg_name = VM_REG_GUEST_ES; 738 } else { 739 /* The segment field has standard encoding */ 740 s = (info1 >> 10) & 0x7; 741 vis->seg_name = vm_segment_name(s); 742 } 743 744 error = svm_getdesc(vcpu, vis->seg_name, &vis->seg_desc); 745 KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); 746 } 747 748 static int 749 svm_inout_str_addrsize(uint64_t info1) 750 { 751 uint32_t size; 752 753 size = (info1 >> 7) & 0x7; 754 switch (size) { 755 case 1: 756 return (2); /* 16 bit */ 757 case 2: 758 return (4); /* 32 bit */ 759 case 4: 760 return (8); /* 64 bit */ 761 default: 762 panic("%s: invalid size encoding %d", __func__, size); 763 } 764 } 765 766 static void 767 svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) 768 { 769 struct vmcb_state *state; 770 771 state = &vmcb->state; 772 paging->cr3 = state->cr3; 773 paging->cpl = svm_cpl(state); 774 paging->cpu_mode = svm_vcpu_mode(vmcb); 775 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, 776 state->efer); 777 } 778 779 #define UNHANDLED 0 780 781 /* 782 * Handle guest I/O intercept. 783 */ 784 static int 785 svm_handle_io(struct svm_vcpu *vcpu, struct vm_exit *vmexit) 786 { 787 struct vmcb_ctrl *ctrl; 788 struct vmcb_state *state; 789 struct svm_regctx *regs; 790 struct vm_inout_str *vis; 791 uint64_t info1; 792 int inout_string; 793 794 state = svm_get_vmcb_state(vcpu); 795 ctrl = svm_get_vmcb_ctrl(vcpu); 796 regs = svm_get_guest_regctx(vcpu); 797 798 info1 = ctrl->exitinfo1; 799 inout_string = info1 & BIT(2) ? 1 : 0; 800 801 /* 802 * The effective segment number in EXITINFO1[12:10] is populated 803 * only if the processor has the DecodeAssist capability. 804 * 805 * XXX this is not specified explicitly in APMv2 but can be verified 806 * empirically. 807 */ 808 if (inout_string && !decode_assist()) 809 return (UNHANDLED); 810 811 vmexit->exitcode = VM_EXITCODE_INOUT; 812 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; 813 vmexit->u.inout.string = inout_string; 814 vmexit->u.inout.rep = (info1 & BIT(3)) ? 
1 : 0; 815 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; 816 vmexit->u.inout.port = (uint16_t)(info1 >> 16); 817 vmexit->u.inout.eax = (uint32_t)(state->rax); 818 819 if (inout_string) { 820 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 821 vis = &vmexit->u.inout_str; 822 svm_paging_info(svm_get_vmcb(vcpu), &vis->paging); 823 vis->rflags = state->rflags; 824 vis->cr0 = state->cr0; 825 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); 826 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); 827 vis->addrsize = svm_inout_str_addrsize(info1); 828 svm_inout_str_seginfo(vcpu, info1, vmexit->u.inout.in, vis); 829 } 830 831 return (UNHANDLED); 832 } 833 834 static int 835 npf_fault_type(uint64_t exitinfo1) 836 { 837 838 if (exitinfo1 & VMCB_NPF_INFO1_W) 839 return (VM_PROT_WRITE); 840 else if (exitinfo1 & VMCB_NPF_INFO1_ID) 841 return (VM_PROT_EXECUTE); 842 else 843 return (VM_PROT_READ); 844 } 845 846 static bool 847 svm_npf_emul_fault(uint64_t exitinfo1) 848 { 849 850 if (exitinfo1 & VMCB_NPF_INFO1_ID) { 851 return (false); 852 } 853 854 if (exitinfo1 & VMCB_NPF_INFO1_GPT) { 855 return (false); 856 } 857 858 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { 859 return (false); 860 } 861 862 return (true); 863 } 864 865 static void 866 svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) 867 { 868 struct vm_guest_paging *paging; 869 struct vmcb_segment seg; 870 struct vmcb_ctrl *ctrl; 871 char *inst_bytes; 872 int error __diagused, inst_len; 873 874 ctrl = &vmcb->ctrl; 875 paging = &vmexit->u.inst_emul.paging; 876 877 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 878 vmexit->u.inst_emul.gpa = gpa; 879 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; 880 svm_paging_info(vmcb, paging); 881 882 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 883 KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error)); 884 885 switch(paging->cpu_mode) { 886 case CPU_MODE_REAL: 887 vmexit->u.inst_emul.cs_base = seg.base; 888 vmexit->u.inst_emul.cs_d = 0; 889 break; 890 case CPU_MODE_PROTECTED: 891 case CPU_MODE_COMPATIBILITY: 892 vmexit->u.inst_emul.cs_base = seg.base; 893 894 /* 895 * Section 4.8.1 of APM2, Default Operand Size or D bit. 896 */ 897 vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ? 898 1 : 0; 899 break; 900 default: 901 vmexit->u.inst_emul.cs_base = 0; 902 vmexit->u.inst_emul.cs_d = 0; 903 break; 904 } 905 906 /* 907 * Copy the instruction bytes into 'vie' if available. 908 */ 909 if (decode_assist() && !disable_npf_assist) { 910 inst_len = ctrl->inst_len; 911 inst_bytes = ctrl->inst_bytes; 912 } else { 913 inst_len = 0; 914 inst_bytes = NULL; 915 } 916 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); 917 } 918 919 #ifdef KTR 920 static const char * 921 intrtype_to_str(int intr_type) 922 { 923 switch (intr_type) { 924 case VMCB_EVENTINJ_TYPE_INTR: 925 return ("hwintr"); 926 case VMCB_EVENTINJ_TYPE_NMI: 927 return ("nmi"); 928 case VMCB_EVENTINJ_TYPE_INTn: 929 return ("swintr"); 930 case VMCB_EVENTINJ_TYPE_EXCEPTION: 931 return ("exception"); 932 default: 933 panic("%s: unknown intr_type %d", __func__, intr_type); 934 } 935 } 936 #endif 937 938 /* 939 * Inject an event to vcpu as described in section 15.20, "Event injection". 
940 */ 941 static void 942 svm_eventinject(struct svm_vcpu *vcpu, int intr_type, int vector, 943 uint32_t error, bool ec_valid) 944 { 945 struct vmcb_ctrl *ctrl; 946 947 ctrl = svm_get_vmcb_ctrl(vcpu); 948 949 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, 950 ("%s: event already pending %#lx", __func__, ctrl->eventinj)); 951 952 KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d", 953 __func__, vector)); 954 955 switch (intr_type) { 956 case VMCB_EVENTINJ_TYPE_INTR: 957 case VMCB_EVENTINJ_TYPE_NMI: 958 case VMCB_EVENTINJ_TYPE_INTn: 959 break; 960 case VMCB_EVENTINJ_TYPE_EXCEPTION: 961 if (vector >= 0 && vector <= 31 && vector != 2) 962 break; 963 /* FALLTHROUGH */ 964 default: 965 panic("%s: invalid intr_type/vector: %d/%d", __func__, 966 intr_type, vector); 967 } 968 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; 969 if (ec_valid) { 970 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; 971 ctrl->eventinj |= (uint64_t)error << 32; 972 SVM_CTR3(vcpu, "Injecting %s at vector %d errcode %#x", 973 intrtype_to_str(intr_type), vector, error); 974 } else { 975 SVM_CTR2(vcpu, "Injecting %s at vector %d", 976 intrtype_to_str(intr_type), vector); 977 } 978 } 979 980 static void 981 svm_update_virqinfo(struct svm_vcpu *vcpu) 982 { 983 struct vlapic *vlapic; 984 struct vmcb_ctrl *ctrl; 985 986 vlapic = vm_lapic(vcpu->vcpu); 987 ctrl = svm_get_vmcb_ctrl(vcpu); 988 989 /* Update %cr8 in the emulated vlapic */ 990 vlapic_set_cr8(vlapic, ctrl->v_tpr); 991 992 /* Virtual interrupt injection is not used. */ 993 KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid " 994 "v_intr_vector %d", __func__, ctrl->v_intr_vector)); 995 } 996 997 static void 998 svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu) 999 { 1000 struct vmcb_ctrl *ctrl; 1001 uint64_t intinfo; 1002 1003 ctrl = svm_get_vmcb_ctrl(vcpu); 1004 intinfo = ctrl->exitintinfo; 1005 if (!VMCB_EXITINTINFO_VALID(intinfo)) 1006 return; 1007 1008 /* 1009 * From APMv2, Section "Intercepts during IDT interrupt delivery" 1010 * 1011 * If a #VMEXIT happened during event delivery then record the event 1012 * that was being delivered. 
1013 */ 1014 SVM_CTR2(vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", intinfo, 1015 VMCB_EXITINTINFO_VECTOR(intinfo)); 1016 vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1); 1017 vm_exit_intinfo(vcpu->vcpu, intinfo); 1018 } 1019 1020 #ifdef INVARIANTS 1021 static __inline int 1022 vintr_intercept_enabled(struct svm_vcpu *vcpu) 1023 { 1024 1025 return (svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR)); 1026 } 1027 #endif 1028 1029 static __inline void 1030 enable_intr_window_exiting(struct svm_vcpu *vcpu) 1031 { 1032 struct vmcb_ctrl *ctrl; 1033 1034 ctrl = svm_get_vmcb_ctrl(vcpu); 1035 1036 if (ctrl->v_irq && ctrl->v_intr_vector == 0) { 1037 KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); 1038 KASSERT(vintr_intercept_enabled(vcpu), 1039 ("%s: vintr intercept should be enabled", __func__)); 1040 return; 1041 } 1042 1043 SVM_CTR0(vcpu, "Enable intr window exiting"); 1044 ctrl->v_irq = 1; 1045 ctrl->v_ign_tpr = 1; 1046 ctrl->v_intr_vector = 0; 1047 svm_set_dirty(vcpu, VMCB_CACHE_TPR); 1048 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1049 } 1050 1051 static __inline void 1052 disable_intr_window_exiting(struct svm_vcpu *vcpu) 1053 { 1054 struct vmcb_ctrl *ctrl; 1055 1056 ctrl = svm_get_vmcb_ctrl(vcpu); 1057 1058 if (!ctrl->v_irq && ctrl->v_intr_vector == 0) { 1059 KASSERT(!vintr_intercept_enabled(vcpu), 1060 ("%s: vintr intercept should be disabled", __func__)); 1061 return; 1062 } 1063 1064 SVM_CTR0(vcpu, "Disable intr window exiting"); 1065 ctrl->v_irq = 0; 1066 ctrl->v_intr_vector = 0; 1067 svm_set_dirty(vcpu, VMCB_CACHE_TPR); 1068 svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1069 } 1070 1071 static int 1072 svm_modify_intr_shadow(struct svm_vcpu *vcpu, uint64_t val) 1073 { 1074 struct vmcb_ctrl *ctrl; 1075 int oldval, newval; 1076 1077 ctrl = svm_get_vmcb_ctrl(vcpu); 1078 oldval = ctrl->intr_shadow; 1079 newval = val ? 1 : 0; 1080 if (newval != oldval) { 1081 ctrl->intr_shadow = newval; 1082 SVM_CTR1(vcpu, "Setting intr_shadow to %d", newval); 1083 } 1084 return (0); 1085 } 1086 1087 static int 1088 svm_get_intr_shadow(struct svm_vcpu *vcpu, uint64_t *val) 1089 { 1090 struct vmcb_ctrl *ctrl; 1091 1092 ctrl = svm_get_vmcb_ctrl(vcpu); 1093 *val = ctrl->intr_shadow; 1094 return (0); 1095 } 1096 1097 /* 1098 * Once an NMI is injected it blocks delivery of further NMIs until the handler 1099 * executes an IRET. The IRET intercept is enabled when an NMI is injected to 1100 * to track when the vcpu is done handling the NMI. 1101 */ 1102 static int 1103 nmi_blocked(struct svm_vcpu *vcpu) 1104 { 1105 int blocked; 1106 1107 blocked = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1108 return (blocked); 1109 } 1110 1111 static void 1112 enable_nmi_blocking(struct svm_vcpu *vcpu) 1113 { 1114 1115 KASSERT(!nmi_blocked(vcpu), ("vNMI already blocked")); 1116 SVM_CTR0(vcpu, "vNMI blocking enabled"); 1117 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1118 } 1119 1120 static void 1121 clear_nmi_blocking(struct svm_vcpu *vcpu) 1122 { 1123 int error __diagused; 1124 1125 KASSERT(nmi_blocked(vcpu), ("vNMI already unblocked")); 1126 SVM_CTR0(vcpu, "vNMI blocking cleared"); 1127 /* 1128 * When the IRET intercept is cleared the vcpu will attempt to execute 1129 * the "iret" when it runs next. However, it is possible to inject 1130 * another NMI into the vcpu before the "iret" has actually executed. 1131 * 1132 * For e.g. 
if the "iret" encounters a #NPF when accessing the stack 1133 * it will trap back into the hypervisor. If an NMI is pending for 1134 * the vcpu it will be injected into the guest. 1135 * 1136 * XXX this needs to be fixed 1137 */ 1138 svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1139 1140 /* 1141 * Set 'intr_shadow' to prevent an NMI from being injected on the 1142 * immediate VMRUN. 1143 */ 1144 error = svm_modify_intr_shadow(vcpu, 1); 1145 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); 1146 } 1147 1148 #define EFER_MBZ_BITS 0xFFFFFFFFFFFF0200UL 1149 1150 static int 1151 svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval, 1152 bool *retu) 1153 { 1154 struct vm_exit *vme; 1155 struct vmcb_state *state; 1156 uint64_t changed, lma, oldval; 1157 int error __diagused; 1158 1159 state = svm_get_vmcb_state(vcpu); 1160 1161 oldval = state->efer; 1162 SVM_CTR2(vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval); 1163 1164 newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */ 1165 changed = oldval ^ newval; 1166 1167 if (newval & EFER_MBZ_BITS) 1168 goto gpf; 1169 1170 /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */ 1171 if (changed & EFER_LME) { 1172 if (state->cr0 & CR0_PG) 1173 goto gpf; 1174 } 1175 1176 /* EFER.LMA = EFER.LME & CR0.PG */ 1177 if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) 1178 lma = EFER_LMA; 1179 else 1180 lma = 0; 1181 1182 if ((newval & EFER_LMA) != lma) 1183 goto gpf; 1184 1185 if (newval & EFER_NXE) { 1186 if (!vm_cpuid_capability(vcpu->vcpu, VCC_NO_EXECUTE)) 1187 goto gpf; 1188 } 1189 1190 /* 1191 * XXX bhyve does not enforce segment limits in 64-bit mode. Until 1192 * this is fixed flag guest attempt to set EFER_LMSLE as an error. 1193 */ 1194 if (newval & EFER_LMSLE) { 1195 vme = vm_exitinfo(vcpu->vcpu); 1196 vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0); 1197 *retu = true; 1198 return (0); 1199 } 1200 1201 if (newval & EFER_FFXSR) { 1202 if (!vm_cpuid_capability(vcpu->vcpu, VCC_FFXSR)) 1203 goto gpf; 1204 } 1205 1206 if (newval & EFER_TCE) { 1207 if (!vm_cpuid_capability(vcpu->vcpu, VCC_TCE)) 1208 goto gpf; 1209 } 1210 1211 error = svm_setreg(vcpu, VM_REG_GUEST_EFER, newval); 1212 KASSERT(error == 0, ("%s: error %d updating efer", __func__, error)); 1213 return (0); 1214 gpf: 1215 vm_inject_gp(vcpu->vcpu); 1216 return (0); 1217 } 1218 1219 static int 1220 emulate_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num, 1221 uint64_t val, bool *retu) 1222 { 1223 int error; 1224 1225 if (lapic_msr(num)) 1226 error = lapic_wrmsr(vcpu->vcpu, num, val, retu); 1227 else if (num == MSR_EFER) 1228 error = svm_write_efer(sc, vcpu, val, retu); 1229 else 1230 error = svm_wrmsr(vcpu, num, val, retu); 1231 1232 return (error); 1233 } 1234 1235 static int 1236 emulate_rdmsr(struct svm_vcpu *vcpu, u_int num, bool *retu) 1237 { 1238 struct vmcb_state *state; 1239 struct svm_regctx *ctx; 1240 uint64_t result; 1241 int error; 1242 1243 if (lapic_msr(num)) 1244 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu); 1245 else 1246 error = svm_rdmsr(vcpu, num, &result, retu); 1247 1248 if (error == 0) { 1249 state = svm_get_vmcb_state(vcpu); 1250 ctx = svm_get_guest_regctx(vcpu); 1251 state->rax = result & 0xffffffff; 1252 ctx->sctx_rdx = result >> 32; 1253 } 1254 1255 return (error); 1256 } 1257 1258 #ifdef KTR 1259 static const char * 1260 exit_reason_to_str(uint64_t reason) 1261 { 1262 int i; 1263 static char reasonbuf[32]; 1264 static const struct { 1265 int reason; 1266 const char *str; 1267 } reasons[] 
= { 1268 { .reason = VMCB_EXIT_INVALID, .str = "invalvmcb" }, 1269 { .reason = VMCB_EXIT_SHUTDOWN, .str = "shutdown" }, 1270 { .reason = VMCB_EXIT_NPF, .str = "nptfault" }, 1271 { .reason = VMCB_EXIT_PAUSE, .str = "pause" }, 1272 { .reason = VMCB_EXIT_HLT, .str = "hlt" }, 1273 { .reason = VMCB_EXIT_CPUID, .str = "cpuid" }, 1274 { .reason = VMCB_EXIT_IO, .str = "inout" }, 1275 { .reason = VMCB_EXIT_MC, .str = "mchk" }, 1276 { .reason = VMCB_EXIT_INTR, .str = "extintr" }, 1277 { .reason = VMCB_EXIT_NMI, .str = "nmi" }, 1278 { .reason = VMCB_EXIT_VINTR, .str = "vintr" }, 1279 { .reason = VMCB_EXIT_MSR, .str = "msr" }, 1280 { .reason = VMCB_EXIT_IRET, .str = "iret" }, 1281 { .reason = VMCB_EXIT_MONITOR, .str = "monitor" }, 1282 { .reason = VMCB_EXIT_MWAIT, .str = "mwait" }, 1283 { .reason = VMCB_EXIT_VMRUN, .str = "vmrun" }, 1284 { .reason = VMCB_EXIT_VMMCALL, .str = "vmmcall" }, 1285 { .reason = VMCB_EXIT_VMLOAD, .str = "vmload" }, 1286 { .reason = VMCB_EXIT_VMSAVE, .str = "vmsave" }, 1287 { .reason = VMCB_EXIT_STGI, .str = "stgi" }, 1288 { .reason = VMCB_EXIT_CLGI, .str = "clgi" }, 1289 { .reason = VMCB_EXIT_SKINIT, .str = "skinit" }, 1290 { .reason = VMCB_EXIT_ICEBP, .str = "icebp" }, 1291 { .reason = VMCB_EXIT_INVD, .str = "invd" }, 1292 { .reason = VMCB_EXIT_INVLPGA, .str = "invlpga" }, 1293 { .reason = VMCB_EXIT_POPF, .str = "popf" }, 1294 { .reason = VMCB_EXIT_PUSHF, .str = "pushf" }, 1295 }; 1296 1297 for (i = 0; i < nitems(reasons); i++) { 1298 if (reasons[i].reason == reason) 1299 return (reasons[i].str); 1300 } 1301 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); 1302 return (reasonbuf); 1303 } 1304 #endif /* KTR */ 1305 1306 /* 1307 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs 1308 * that are due to instruction intercepts as well as MSR and IOIO intercepts 1309 * and exceptions caused by INT3, INTO and BOUND instructions. 1310 * 1311 * Return 1 if the nRIP is valid and 0 otherwise. 1312 */ 1313 static int 1314 nrip_valid(uint64_t exitcode) 1315 { 1316 switch (exitcode) { 1317 case 0x00 ... 0x0F: /* read of CR0 through CR15 */ 1318 case 0x10 ... 0x1F: /* write of CR0 through CR15 */ 1319 case 0x20 ... 0x2F: /* read of DR0 through DR15 */ 1320 case 0x30 ... 0x3F: /* write of DR0 through DR15 */ 1321 case 0x43: /* INT3 */ 1322 case 0x44: /* INTO */ 1323 case 0x45: /* BOUND */ 1324 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */ 1325 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */ 1326 return (1); 1327 default: 1328 return (0); 1329 } 1330 } 1331 1332 static int 1333 svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu, 1334 struct vm_exit *vmexit) 1335 { 1336 struct vmcb *vmcb; 1337 struct vmcb_state *state; 1338 struct vmcb_ctrl *ctrl; 1339 struct svm_regctx *ctx; 1340 uint64_t code, info1, info2, val; 1341 uint32_t eax, ecx, edx; 1342 int error __diagused, errcode_valid, handled, idtvec, reflect; 1343 bool retu; 1344 1345 ctx = svm_get_guest_regctx(vcpu); 1346 vmcb = svm_get_vmcb(vcpu); 1347 state = &vmcb->state; 1348 ctrl = &vmcb->ctrl; 1349 1350 handled = 0; 1351 code = ctrl->exitcode; 1352 info1 = ctrl->exitinfo1; 1353 info2 = ctrl->exitinfo2; 1354 1355 vmexit->exitcode = VM_EXITCODE_BOGUS; 1356 vmexit->rip = state->rip; 1357 vmexit->inst_length = nrip_valid(code) ? 
ctrl->nrip - state->rip : 0; 1358 1359 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1); 1360 1361 /* 1362 * #VMEXIT(INVALID) needs to be handled early because the VMCB is 1363 * in an inconsistent state and can trigger assertions that would 1364 * never happen otherwise. 1365 */ 1366 if (code == VMCB_EXIT_INVALID) { 1367 vm_exit_svm(vmexit, code, info1, info2); 1368 return (0); 1369 } 1370 1371 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 1372 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); 1373 1374 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, 1375 ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)", 1376 vmexit->inst_length, code, info1, info2)); 1377 1378 svm_update_virqinfo(vcpu); 1379 svm_save_intinfo(svm_sc, vcpu); 1380 1381 switch (code) { 1382 case VMCB_EXIT_IRET: 1383 /* 1384 * Restart execution at "iret" but with the intercept cleared. 1385 */ 1386 vmexit->inst_length = 0; 1387 clear_nmi_blocking(vcpu); 1388 handled = 1; 1389 break; 1390 case VMCB_EXIT_VINTR: /* interrupt window exiting */ 1391 vmm_stat_incr(vcpu->vcpu, VMEXIT_VINTR, 1); 1392 handled = 1; 1393 break; 1394 case VMCB_EXIT_INTR: /* external interrupt */ 1395 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1); 1396 handled = 1; 1397 break; 1398 case VMCB_EXIT_NMI: /* external NMI */ 1399 handled = 1; 1400 break; 1401 case 0x40 ... 0x5F: 1402 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1); 1403 reflect = 1; 1404 idtvec = code - 0x40; 1405 switch (idtvec) { 1406 case IDT_MC: 1407 /* 1408 * Call the machine check handler by hand. Also don't 1409 * reflect the machine check back into the guest. 1410 */ 1411 reflect = 0; 1412 SVM_CTR0(vcpu, "Vectoring to MCE handler"); 1413 __asm __volatile("int $18"); 1414 break; 1415 case IDT_PF: 1416 error = svm_setreg(vcpu, VM_REG_GUEST_CR2, info2); 1417 KASSERT(error == 0, ("%s: error %d updating cr2", 1418 __func__, error)); 1419 /* fallthru */ 1420 case IDT_NP: 1421 case IDT_SS: 1422 case IDT_GP: 1423 case IDT_AC: 1424 case IDT_TS: 1425 errcode_valid = 1; 1426 break; 1427 1428 case IDT_DF: 1429 errcode_valid = 1; 1430 info1 = 0; 1431 break; 1432 case IDT_DB: { 1433 /* 1434 * Check if we are being stepped (RFLAGS.TF) 1435 * and bounce vmexit to userland. 1436 */ 1437 bool stepped = 0; 1438 uint64_t dr6 = 0; 1439 1440 svm_getreg(vcpu, VM_REG_GUEST_DR6, &dr6); 1441 stepped = !!(dr6 & DBREG_DR6_BS); 1442 if (stepped && (vcpu->caps & (1 << VM_CAP_RFLAGS_TF))) { 1443 vmexit->exitcode = VM_EXITCODE_DB; 1444 vmexit->u.dbg.trace_trap = 1; 1445 vmexit->u.dbg.pushf_intercept = 0; 1446 1447 if (vcpu->dbg.popf_sstep) { 1448 /* 1449 * DB# exit was caused by stepping over 1450 * popf. 1451 */ 1452 uint64_t rflags; 1453 1454 vcpu->dbg.popf_sstep = 0; 1455 1456 /* 1457 * Update shadowed TF bit so the next 1458 * setcap(..., RFLAGS_SSTEP, 0) restores 1459 * the correct value 1460 */ 1461 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, 1462 &rflags); 1463 vcpu->dbg.rflags_tf = rflags & PSL_T; 1464 } else if (vcpu->dbg.pushf_sstep) { 1465 /* 1466 * DB# exit was caused by stepping over 1467 * pushf. 1468 */ 1469 vcpu->dbg.pushf_sstep = 0; 1470 1471 /* 1472 * Adjusting the pushed rflags after a 1473 * restarted pushf instruction must be 1474 * handled outside of svm.c due to the 1475 * critical_enter() lock being held. 1476 */ 1477 vmexit->u.dbg.pushf_intercept = 1; 1478 vmexit->u.dbg.tf_shadow_val = 1479 vcpu->dbg.rflags_tf; 1480 svm_paging_info(svm_get_vmcb(vcpu), 1481 &vmexit->u.dbg.paging); 1482 } 1483 1484 /* Clear DR6 "single-step" bit. 
*/ 1485 dr6 &= ~DBREG_DR6_BS; 1486 error = svm_setreg(vcpu, VM_REG_GUEST_DR6, dr6); 1487 KASSERT(error == 0, 1488 ("%s: error %d updating DR6\r\n", __func__, 1489 error)); 1490 1491 reflect = 0; 1492 } 1493 break; 1494 } 1495 case IDT_BP: 1496 vmexit->exitcode = VM_EXITCODE_BPT; 1497 vmexit->u.bpt.inst_length = vmexit->inst_length; 1498 vmexit->inst_length = 0; 1499 1500 reflect = 0; 1501 break; 1502 case IDT_OF: 1503 case IDT_BR: 1504 /* 1505 * The 'nrip' field is populated for INT3, INTO and 1506 * BOUND exceptions and this also implies that 1507 * 'inst_length' is non-zero. 1508 * 1509 * Reset 'inst_length' to zero so the guest %rip at 1510 * event injection is identical to what it was when 1511 * the exception originally happened. 1512 */ 1513 SVM_CTR2(vcpu, "Reset inst_length from %d " 1514 "to zero before injecting exception %d", 1515 vmexit->inst_length, idtvec); 1516 vmexit->inst_length = 0; 1517 /* fallthru */ 1518 default: 1519 errcode_valid = 0; 1520 info1 = 0; 1521 break; 1522 } 1523 1524 if (reflect) { 1525 KASSERT(vmexit->inst_length == 0, 1526 ("invalid inst_length (%d) " 1527 "when reflecting exception %d into guest", 1528 vmexit->inst_length, idtvec)); 1529 /* Reflect the exception back into the guest */ 1530 SVM_CTR2(vcpu, "Reflecting exception " 1531 "%d/%#x into the guest", idtvec, (int)info1); 1532 error = vm_inject_exception(vcpu->vcpu, idtvec, 1533 errcode_valid, info1, 0); 1534 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 1535 __func__, error)); 1536 handled = 1; 1537 } 1538 break; 1539 case VMCB_EXIT_MSR: /* MSR access. */ 1540 eax = state->rax; 1541 ecx = ctx->sctx_rcx; 1542 edx = ctx->sctx_rdx; 1543 retu = false; 1544 1545 if (info1) { 1546 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1); 1547 val = (uint64_t)edx << 32 | eax; 1548 SVM_CTR2(vcpu, "wrmsr %#x val %#lx", ecx, val); 1549 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) { 1550 vmexit->exitcode = VM_EXITCODE_WRMSR; 1551 vmexit->u.msr.code = ecx; 1552 vmexit->u.msr.wval = val; 1553 } else if (!retu) { 1554 handled = 1; 1555 } else { 1556 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1557 ("emulate_wrmsr retu with bogus exitcode")); 1558 } 1559 } else { 1560 SVM_CTR1(vcpu, "rdmsr %#x", ecx); 1561 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1); 1562 if (emulate_rdmsr(vcpu, ecx, &retu)) { 1563 vmexit->exitcode = VM_EXITCODE_RDMSR; 1564 vmexit->u.msr.code = ecx; 1565 } else if (!retu) { 1566 handled = 1; 1567 } else { 1568 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1569 ("emulate_rdmsr retu with bogus exitcode")); 1570 } 1571 } 1572 break; 1573 case VMCB_EXIT_IO: 1574 handled = svm_handle_io(vcpu, vmexit); 1575 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1); 1576 break; 1577 case VMCB_EXIT_CPUID: 1578 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1); 1579 handled = x86_emulate_cpuid(vcpu->vcpu, 1580 &state->rax, &ctx->sctx_rbx, &ctx->sctx_rcx, 1581 &ctx->sctx_rdx); 1582 break; 1583 case VMCB_EXIT_HLT: 1584 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1); 1585 vmexit->exitcode = VM_EXITCODE_HLT; 1586 vmexit->u.hlt.rflags = state->rflags; 1587 break; 1588 case VMCB_EXIT_PAUSE: 1589 vmexit->exitcode = VM_EXITCODE_PAUSE; 1590 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1); 1591 break; 1592 case VMCB_EXIT_NPF: 1593 /* EXITINFO2 contains the faulting guest physical address */ 1594 if (info1 & VMCB_NPF_INFO1_RSV) { 1595 SVM_CTR2(vcpu, "nested page fault with " 1596 "reserved bits set: info1(%#lx) info2(%#lx)", 1597 info1, info2); 1598 } else if (vm_mem_allocated(vcpu->vcpu, info2) || 1599 ppt_is_mmio(svm_sc->vm, info2)) { 1600 
vmexit->exitcode = VM_EXITCODE_PAGING; 1601 vmexit->u.paging.gpa = info2; 1602 vmexit->u.paging.fault_type = npf_fault_type(info1); 1603 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1); 1604 SVM_CTR3(vcpu, "nested page fault " 1605 "on gpa %#lx/%#lx at rip %#lx", 1606 info2, info1, state->rip); 1607 } else if (svm_npf_emul_fault(info1)) { 1608 svm_handle_inst_emul(vmcb, info2, vmexit); 1609 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1); 1610 SVM_CTR3(vcpu, "inst_emul fault " 1611 "for gpa %#lx/%#lx at rip %#lx", 1612 info2, info1, state->rip); 1613 } 1614 break; 1615 case VMCB_EXIT_MONITOR: 1616 vmexit->exitcode = VM_EXITCODE_MONITOR; 1617 break; 1618 case VMCB_EXIT_MWAIT: 1619 vmexit->exitcode = VM_EXITCODE_MWAIT; 1620 break; 1621 case VMCB_EXIT_PUSHF: { 1622 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { 1623 uint64_t rflags; 1624 1625 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags); 1626 /* Restart this instruction. */ 1627 vmexit->inst_length = 0; 1628 /* Disable PUSHF intercepts - avoid a loop. */ 1629 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, 1630 VMCB_INTCPT_PUSHF, 0); 1631 /* Trace restarted instruction. */ 1632 svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T)); 1633 /* Let the IDT_DB handler know that pushf was stepped. 1634 */ 1635 vcpu->dbg.pushf_sstep = 1; 1636 handled = 1; 1637 } 1638 break; 1639 } 1640 case VMCB_EXIT_POPF: { 1641 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { 1642 uint64_t rflags; 1643 1644 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags); 1645 /* Restart this instruction */ 1646 vmexit->inst_length = 0; 1647 /* Disable POPF intercepts - avoid a loop*/ 1648 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, 1649 VMCB_INTCPT_POPF, 0); 1650 /* Trace restarted instruction */ 1651 svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T)); 1652 vcpu->dbg.popf_sstep = 1; 1653 handled = 1; 1654 } 1655 break; 1656 } 1657 case VMCB_EXIT_SHUTDOWN: 1658 case VMCB_EXIT_VMRUN: 1659 case VMCB_EXIT_VMMCALL: 1660 case VMCB_EXIT_VMLOAD: 1661 case VMCB_EXIT_VMSAVE: 1662 case VMCB_EXIT_STGI: 1663 case VMCB_EXIT_CLGI: 1664 case VMCB_EXIT_SKINIT: 1665 case VMCB_EXIT_ICEBP: 1666 case VMCB_EXIT_INVLPGA: 1667 vm_inject_ud(vcpu->vcpu); 1668 handled = 1; 1669 break; 1670 case VMCB_EXIT_INVD: 1671 case VMCB_EXIT_WBINVD: 1672 /* ignore exit */ 1673 handled = 1; 1674 break; 1675 default: 1676 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1); 1677 break; 1678 } 1679 1680 SVM_CTR4(vcpu, "%s %s vmexit at %#lx/%d", 1681 handled ? "handled" : "unhandled", exit_reason_to_str(code), 1682 vmexit->rip, vmexit->inst_length); 1683 1684 if (handled) { 1685 vmexit->rip += vmexit->inst_length; 1686 vmexit->inst_length = 0; 1687 state->rip = vmexit->rip; 1688 } else { 1689 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 1690 /* 1691 * If this VM exit was not claimed by anybody then 1692 * treat it as a generic SVM exit. 1693 */ 1694 vm_exit_svm(vmexit, code, info1, info2); 1695 } else { 1696 /* 1697 * The exitcode and collateral have been populated. 1698 * The VM exit will be processed further in userland. 
1699 */ 1700 } 1701 } 1702 return (handled); 1703 } 1704 1705 static void 1706 svm_inj_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu) 1707 { 1708 uint64_t intinfo; 1709 1710 if (!vm_entry_intinfo(vcpu->vcpu, &intinfo)) 1711 return; 1712 1713 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1714 "valid: %#lx", __func__, intinfo)); 1715 1716 svm_eventinject(vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1717 VMCB_EXITINTINFO_VECTOR(intinfo), 1718 VMCB_EXITINTINFO_EC(intinfo), 1719 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1720 vmm_stat_incr(vcpu->vcpu, VCPU_INTINFO_INJECTED, 1); 1721 SVM_CTR1(vcpu, "Injected entry intinfo: %#lx", intinfo); 1722 } 1723 1724 /* 1725 * Inject event to virtual cpu. 1726 */ 1727 static void 1728 svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu, 1729 struct vlapic *vlapic) 1730 { 1731 struct vmcb_ctrl *ctrl; 1732 struct vmcb_state *state; 1733 uint8_t v_tpr; 1734 int vector, need_intr_window; 1735 int extint_pending; 1736 1737 if (vcpu->caps & (1 << VM_CAP_MASK_HWINTR)) { 1738 return; 1739 } 1740 1741 state = svm_get_vmcb_state(vcpu); 1742 ctrl = svm_get_vmcb_ctrl(vcpu); 1743 1744 need_intr_window = 0; 1745 1746 if (vcpu->nextrip != state->rip) { 1747 ctrl->intr_shadow = 0; 1748 SVM_CTR2(vcpu, "Guest interrupt blocking " 1749 "cleared due to rip change: %#lx/%#lx", 1750 vcpu->nextrip, state->rip); 1751 } 1752 1753 /* 1754 * Inject pending events or exceptions for this vcpu. 1755 * 1756 * An event might be pending because the previous #VMEXIT happened 1757 * during event delivery (i.e. ctrl->exitintinfo). 1758 * 1759 * An event might also be pending because an exception was injected 1760 * by the hypervisor (e.g. #PF during instruction emulation). 1761 */ 1762 svm_inj_intinfo(sc, vcpu); 1763 1764 /* NMI event has priority over interrupts. */ 1765 if (vm_nmi_pending(vcpu->vcpu)) { 1766 if (nmi_blocked(vcpu)) { 1767 /* 1768 * Can't inject another NMI if the guest has not 1769 * yet executed an "iret" after the last NMI. 1770 */ 1771 SVM_CTR0(vcpu, "Cannot inject NMI due " 1772 "to NMI-blocking"); 1773 } else if (ctrl->intr_shadow) { 1774 /* 1775 * Can't inject an NMI if the vcpu is in an intr_shadow. 1776 */ 1777 SVM_CTR0(vcpu, "Cannot inject NMI due to " 1778 "interrupt shadow"); 1779 need_intr_window = 1; 1780 goto done; 1781 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1782 /* 1783 * If there is already an exception/interrupt pending 1784 * then defer the NMI until after that. 1785 */ 1786 SVM_CTR1(vcpu, "Cannot inject NMI due to " 1787 "eventinj %#lx", ctrl->eventinj); 1788 1789 /* 1790 * Use self-IPI to trigger a VM-exit as soon as 1791 * possible after the event injection is completed. 1792 * 1793 * This works only if the external interrupt exiting 1794 * is at a lower priority than the event injection. 1795 * 1796 * Although not explicitly specified in APMv2 the 1797 * relative priorities were verified empirically. 1798 */ 1799 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? 
*/ 1800 } else { 1801 vm_nmi_clear(vcpu->vcpu); 1802 1803 /* Inject NMI, vector number is not used */ 1804 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_NMI, 1805 IDT_NMI, 0, false); 1806 1807 /* virtual NMI blocking is now in effect */ 1808 enable_nmi_blocking(vcpu); 1809 1810 SVM_CTR0(vcpu, "Injecting vNMI"); 1811 } 1812 } 1813 1814 extint_pending = vm_extint_pending(vcpu->vcpu); 1815 if (!extint_pending) { 1816 if (!vlapic_pending_intr(vlapic, &vector)) 1817 goto done; 1818 KASSERT(vector >= 16 && vector <= 255, 1819 ("invalid vector %d from local APIC", vector)); 1820 } else { 1821 /* Ask the legacy pic for a vector to inject */ 1822 vatpic_pending_intr(sc->vm, &vector); 1823 KASSERT(vector >= 0 && vector <= 255, 1824 ("invalid vector %d from INTR", vector)); 1825 } 1826 1827 /* 1828 * If the guest has disabled interrupts or is in an interrupt shadow 1829 * then we cannot inject the pending interrupt. 1830 */ 1831 if ((state->rflags & PSL_I) == 0) { 1832 SVM_CTR2(vcpu, "Cannot inject vector %d due to " 1833 "rflags %#lx", vector, state->rflags); 1834 need_intr_window = 1; 1835 goto done; 1836 } 1837 1838 if (ctrl->intr_shadow) { 1839 SVM_CTR1(vcpu, "Cannot inject vector %d due to " 1840 "interrupt shadow", vector); 1841 need_intr_window = 1; 1842 goto done; 1843 } 1844 1845 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1846 SVM_CTR2(vcpu, "Cannot inject vector %d due to " 1847 "eventinj %#lx", vector, ctrl->eventinj); 1848 need_intr_window = 1; 1849 goto done; 1850 } 1851 1852 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1853 1854 if (!extint_pending) { 1855 vlapic_intr_accepted(vlapic, vector); 1856 } else { 1857 vm_extint_clear(vcpu->vcpu); 1858 vatpic_intr_accepted(sc->vm, vector); 1859 } 1860 1861 /* 1862 * Force a VM-exit as soon as the vcpu is ready to accept another 1863 * interrupt. This is done because the PIC might have another vector 1864 * that it wants to inject. Also, if the APIC has a pending interrupt 1865 * that was preempted by the ExtInt then it allows us to inject the 1866 * APIC vector as soon as possible. 1867 */ 1868 need_intr_window = 1; 1869 done: 1870 /* 1871 * The guest can modify the TPR by writing to %CR8. In guest mode 1872 * the processor reflects this write to V_TPR without hypervisor 1873 * intervention. 1874 * 1875 * The guest can also modify the TPR by writing to it via the memory 1876 * mapped APIC page. In this case, the write will be emulated by the 1877 * hypervisor. For this reason V_TPR must be updated before every 1878 * VMRUN. 1879 */ 1880 v_tpr = vlapic_get_cr8(vlapic); 1881 KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); 1882 if (ctrl->v_tpr != v_tpr) { 1883 SVM_CTR2(vcpu, "VMCB V_TPR changed from %#x to %#x", 1884 ctrl->v_tpr, v_tpr); 1885 ctrl->v_tpr = v_tpr; 1886 svm_set_dirty(vcpu, VMCB_CACHE_TPR); 1887 } 1888 1889 if (need_intr_window) { 1890 /* 1891 * We use V_IRQ in conjunction with the VINTR intercept to 1892 * trap into the hypervisor as soon as a virtual interrupt 1893 * can be delivered. 1894 * 1895 * Since injected events are not subject to intercept checks 1896 * we need to ensure that the V_IRQ is not actually going to 1897 * be delivered on VM entry. The KASSERT below enforces this. 
1898 */ 1899 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1900 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1901 ("Bogus intr_window_exiting: eventinj (%#lx), " 1902 "intr_shadow (%u), rflags (%#lx)", 1903 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1904 enable_intr_window_exiting(vcpu); 1905 } else { 1906 disable_intr_window_exiting(vcpu); 1907 } 1908 } 1909 1910 static __inline void 1911 restore_host_tss(void) 1912 { 1913 struct system_segment_descriptor *tss_sd; 1914 1915 /* 1916 * The TSS descriptor was in use prior to launching the guest so it 1917 * has been marked busy. 1918 * 1919 * 'ltr' requires the descriptor to be marked available so change the 1920 * type to "64-bit available TSS". 1921 */ 1922 tss_sd = PCPU_GET(tss); 1923 tss_sd->sd_type = SDT_SYSTSS; 1924 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1925 } 1926 1927 static void 1928 svm_pmap_activate(struct svm_vcpu *vcpu, pmap_t pmap) 1929 { 1930 struct vmcb_ctrl *ctrl; 1931 long eptgen; 1932 int cpu; 1933 bool alloc_asid; 1934 1935 cpu = curcpu; 1936 CPU_SET_ATOMIC(cpu, &pmap->pm_active); 1937 smr_enter(pmap->pm_eptsmr); 1938 1939 ctrl = svm_get_vmcb_ctrl(vcpu); 1940 1941 /* 1942 * The TLB entries associated with the vcpu's ASID are not valid 1943 * if either of the following conditions is true: 1944 * 1945 * 1. The vcpu's ASID generation is different than the host cpu's 1946 * ASID generation. This happens when the vcpu migrates to a new 1947 * host cpu. It can also happen when the number of vcpus executing 1948 * on a host cpu is greater than the number of ASIDs available. 1949 * 1950 * 2. The pmap generation number is different than the value cached in 1951 * the 'vcpustate'. This happens when the host invalidates pages 1952 * belonging to the guest. 1953 * 1954 * asidgen eptgen Action 1955 * mismatch mismatch 1956 * 0 0 (a) 1957 * 0 1 (b1) or (b2) 1958 * 1 0 (c) 1959 * 1 1 (d) 1960 * 1961 * (a) There is no mismatch in eptgen or ASID generation and therefore 1962 * no further action is needed. 1963 * 1964 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1965 * retained and the TLB entries associated with this ASID 1966 * are flushed by VMRUN. 1967 * 1968 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1969 * allocated. 1970 * 1971 * (c) A new ASID is allocated. 1972 * 1973 * (d) A new ASID is allocated. 1974 */ 1975 1976 alloc_asid = false; 1977 eptgen = atomic_load_long(&pmap->pm_eptgen); 1978 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1979 1980 if (vcpu->asid.gen != asid[cpu].gen) { 1981 alloc_asid = true; /* (c) and (d) */ 1982 } else if (vcpu->eptgen != eptgen) { 1983 if (flush_by_asid()) 1984 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 1985 else 1986 alloc_asid = true; /* (b2) */ 1987 } else { 1988 /* 1989 * This is the common case (a). 1990 */ 1991 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 1992 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 1993 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 1994 } 1995 1996 if (alloc_asid) { 1997 if (++asid[cpu].num >= nasid) { 1998 asid[cpu].num = 1; 1999 if (++asid[cpu].gen == 0) 2000 asid[cpu].gen = 1; 2001 /* 2002 * If this cpu does not support "flush-by-asid" 2003 * then flush the entire TLB on a generation 2004 * bump. Subsequent ASID allocation in this 2005 * generation can be done without a TLB flush. 
2006 */ 2007 if (!flush_by_asid()) 2008 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 2009 } 2010 vcpu->asid.gen = asid[cpu].gen; 2011 vcpu->asid.num = asid[cpu].num; 2012 2013 ctrl->asid = vcpu->asid.num; 2014 svm_set_dirty(vcpu, VMCB_CACHE_ASID); 2015 /* 2016 * If this cpu supports "flush-by-asid" then the TLB 2017 * was not flushed after the generation bump. The TLB 2018 * is flushed selectively after every new ASID allocation. 2019 */ 2020 if (flush_by_asid()) 2021 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 2022 } 2023 vcpu->eptgen = eptgen; 2024 2025 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 2026 KASSERT(ctrl->asid == vcpu->asid.num, 2027 ("ASID mismatch: %u/%u", ctrl->asid, vcpu->asid.num)); 2028 } 2029 2030 static void 2031 svm_pmap_deactivate(pmap_t pmap) 2032 { 2033 smr_exit(pmap->pm_eptsmr); 2034 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); 2035 } 2036 2037 static __inline void 2038 disable_gintr(void) 2039 { 2040 2041 __asm __volatile("clgi"); 2042 } 2043 2044 static __inline void 2045 enable_gintr(void) 2046 { 2047 2048 __asm __volatile("stgi"); 2049 } 2050 2051 static __inline void 2052 svm_dr_enter_guest(struct svm_regctx *gctx) 2053 { 2054 2055 /* Save host control debug registers. */ 2056 gctx->host_dr7 = rdr7(); 2057 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 2058 2059 /* 2060 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 2061 * exceptions in the host based on the guest DRx values. The 2062 * guest DR6, DR7, and DEBUGCTL are saved/restored in the 2063 * VMCB. 2064 */ 2065 load_dr7(0); 2066 wrmsr(MSR_DEBUGCTLMSR, 0); 2067 2068 /* Save host debug registers. */ 2069 gctx->host_dr0 = rdr0(); 2070 gctx->host_dr1 = rdr1(); 2071 gctx->host_dr2 = rdr2(); 2072 gctx->host_dr3 = rdr3(); 2073 gctx->host_dr6 = rdr6(); 2074 2075 /* Restore guest debug registers. */ 2076 load_dr0(gctx->sctx_dr0); 2077 load_dr1(gctx->sctx_dr1); 2078 load_dr2(gctx->sctx_dr2); 2079 load_dr3(gctx->sctx_dr3); 2080 } 2081 2082 static __inline void 2083 svm_dr_leave_guest(struct svm_regctx *gctx) 2084 { 2085 2086 /* Save guest debug registers. */ 2087 gctx->sctx_dr0 = rdr0(); 2088 gctx->sctx_dr1 = rdr1(); 2089 gctx->sctx_dr2 = rdr2(); 2090 gctx->sctx_dr3 = rdr3(); 2091 2092 /* 2093 * Restore host debug registers. Restore DR7 and DEBUGCTL 2094 * last. 2095 */ 2096 load_dr0(gctx->host_dr0); 2097 load_dr1(gctx->host_dr1); 2098 load_dr2(gctx->host_dr2); 2099 load_dr3(gctx->host_dr3); 2100 load_dr6(gctx->host_dr6); 2101 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl); 2102 load_dr7(gctx->host_dr7); 2103 } 2104 2105 /* 2106 * Start vcpu with specified RIP. 2107 */ 2108 static int 2109 svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo) 2110 { 2111 struct svm_regctx *gctx; 2112 struct svm_softc *svm_sc; 2113 struct svm_vcpu *vcpu; 2114 struct vmcb_state *state; 2115 struct vmcb_ctrl *ctrl; 2116 struct vm_exit *vmexit; 2117 struct vlapic *vlapic; 2118 uint64_t vmcb_pa; 2119 int handled; 2120 uint16_t ldt_sel; 2121 2122 vcpu = vcpui; 2123 svm_sc = vcpu->sc; 2124 state = svm_get_vmcb_state(vcpu); 2125 ctrl = svm_get_vmcb_ctrl(vcpu); 2126 vmexit = vm_exitinfo(vcpu->vcpu); 2127 vlapic = vm_lapic(vcpu->vcpu); 2128 2129 gctx = svm_get_guest_regctx(vcpu); 2130 vmcb_pa = vcpu->vmcb_pa; 2131 2132 if (vcpu->lastcpu != curcpu) { 2133 /* 2134 * Force new ASID allocation by invalidating the generation. 2135 */ 2136 vcpu->asid.gen = 0; 2137 2138 /* 2139 * Invalidate the VMCB state cache by marking all fields dirty. 
2140 */ 2141 svm_set_dirty(vcpu, 0xffffffff); 2142 2143 /* 2144 * XXX 2145 * Setting 'vcpu->lastcpu' here is bit premature because 2146 * we may return from this function without actually executing 2147 * the VMRUN instruction. This could happen if a rendezvous 2148 * or an AST is pending on the first time through the loop. 2149 * 2150 * This works for now but any new side-effects of vcpu 2151 * migration should take this case into account. 2152 */ 2153 vcpu->lastcpu = curcpu; 2154 vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1); 2155 } 2156 2157 svm_msr_guest_enter(vcpu); 2158 2159 /* Update Guest RIP */ 2160 state->rip = rip; 2161 2162 do { 2163 /* 2164 * Disable global interrupts to guarantee atomicity during 2165 * loading of guest state. This includes not only the state 2166 * loaded by the "vmrun" instruction but also software state 2167 * maintained by the hypervisor: suspended and rendezvous 2168 * state, NPT generation number, vlapic interrupts etc. 2169 */ 2170 disable_gintr(); 2171 2172 if (vcpu_suspended(evinfo)) { 2173 enable_gintr(); 2174 vm_exit_suspended(vcpu->vcpu, state->rip); 2175 break; 2176 } 2177 2178 if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) { 2179 enable_gintr(); 2180 vm_exit_rendezvous(vcpu->vcpu, state->rip); 2181 break; 2182 } 2183 2184 if (vcpu_reqidle(evinfo)) { 2185 enable_gintr(); 2186 vm_exit_reqidle(vcpu->vcpu, state->rip); 2187 break; 2188 } 2189 2190 /* We are asked to give the cpu by scheduler. */ 2191 if (vcpu_should_yield(vcpu->vcpu)) { 2192 enable_gintr(); 2193 vm_exit_astpending(vcpu->vcpu, state->rip); 2194 break; 2195 } 2196 2197 if (vcpu_debugged(vcpu->vcpu)) { 2198 enable_gintr(); 2199 vm_exit_debug(vcpu->vcpu, state->rip); 2200 break; 2201 } 2202 2203 /* 2204 * #VMEXIT resumes the host with the guest LDTR, so 2205 * save the current LDT selector so it can be restored 2206 * after an exit. The userspace hypervisor probably 2207 * doesn't use a LDT, but save and restore it to be 2208 * safe. 2209 */ 2210 ldt_sel = sldt(); 2211 2212 svm_inj_interrupts(svm_sc, vcpu, vlapic); 2213 2214 /* 2215 * Check the pmap generation and the ASID generation to 2216 * ensure that the vcpu does not use stale TLB mappings. 2217 */ 2218 svm_pmap_activate(vcpu, pmap); 2219 2220 ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty; 2221 vcpu->dirty = 0; 2222 SVM_CTR1(vcpu, "vmcb clean %#x", ctrl->vmcb_clean); 2223 2224 /* Launch Virtual Machine. */ 2225 SVM_CTR1(vcpu, "Resume execution at %#lx", state->rip); 2226 svm_dr_enter_guest(gctx); 2227 svm_launch(vmcb_pa, gctx, get_pcpu()); 2228 svm_dr_leave_guest(gctx); 2229 2230 svm_pmap_deactivate(pmap); 2231 2232 /* 2233 * The host GDTR and IDTR is saved by VMRUN and restored 2234 * automatically on #VMEXIT. However, the host TSS needs 2235 * to be restored explicitly. 2236 */ 2237 restore_host_tss(); 2238 2239 /* Restore host LDTR. */ 2240 lldt(ldt_sel); 2241 2242 /* #VMEXIT disables interrupts so re-enable them here. */ 2243 enable_gintr(); 2244 2245 /* Update 'nextrip' */ 2246 vcpu->nextrip = state->rip; 2247 2248 /* Handle #VMEXIT and if required return to user space. 
*/
2249 handled = svm_vmexit(svm_sc, vcpu, vmexit);
2250 } while (handled);
2251
2252 svm_msr_guest_exit(vcpu);
2253
2254 return (0);
2255 }
2256
2257 static void
2258 svm_vcpu_cleanup(void *vcpui)
2259 {
2260 struct svm_vcpu *vcpu = vcpui;
2261
2262 free(vcpu->vmcb, M_SVM);
2263 free(vcpu, M_SVM);
2264 }
2265
2266 static void
2267 svm_cleanup(void *vmi)
2268 {
2269 struct svm_softc *sc = vmi;
2270
2271 free(sc->iopm_bitmap, M_SVM);
2272 free(sc->msr_bitmap, M_SVM);
2273 free(sc, M_SVM);
2274 }
2275
2276 static register_t *
2277 swctx_regptr(struct svm_regctx *regctx, int reg)
2278 {
2279
2280 switch (reg) {
2281 case VM_REG_GUEST_RBX:
2282 return (&regctx->sctx_rbx);
2283 case VM_REG_GUEST_RCX:
2284 return (&regctx->sctx_rcx);
2285 case VM_REG_GUEST_RDX:
2286 return (&regctx->sctx_rdx);
2287 case VM_REG_GUEST_RDI:
2288 return (&regctx->sctx_rdi);
2289 case VM_REG_GUEST_RSI:
2290 return (&regctx->sctx_rsi);
2291 case VM_REG_GUEST_RBP:
2292 return (&regctx->sctx_rbp);
2293 case VM_REG_GUEST_R8:
2294 return (&regctx->sctx_r8);
2295 case VM_REG_GUEST_R9:
2296 return (&regctx->sctx_r9);
2297 case VM_REG_GUEST_R10:
2298 return (&regctx->sctx_r10);
2299 case VM_REG_GUEST_R11:
2300 return (&regctx->sctx_r11);
2301 case VM_REG_GUEST_R12:
2302 return (&regctx->sctx_r12);
2303 case VM_REG_GUEST_R13:
2304 return (&regctx->sctx_r13);
2305 case VM_REG_GUEST_R14:
2306 return (&regctx->sctx_r14);
2307 case VM_REG_GUEST_R15:
2308 return (&regctx->sctx_r15);
2309 case VM_REG_GUEST_DR0:
2310 return (&regctx->sctx_dr0);
2311 case VM_REG_GUEST_DR1:
2312 return (&regctx->sctx_dr1);
2313 case VM_REG_GUEST_DR2:
2314 return (&regctx->sctx_dr2);
2315 case VM_REG_GUEST_DR3:
2316 return (&regctx->sctx_dr3);
2317 default:
2318 return (NULL);
2319 }
2320 }
2321
2322 static int
2323 svm_getreg(void *vcpui, int ident, uint64_t *val)
2324 {
2325 struct svm_vcpu *vcpu;
2326 register_t *reg;
2327
2328 vcpu = vcpui;
2329
2330 if (ident == VM_REG_GUEST_INTR_SHADOW) {
2331 return (svm_get_intr_shadow(vcpu, val));
2332 }
2333
2334 if (vmcb_read(vcpu, ident, val) == 0) {
2335 return (0);
2336 }
2337
2338 reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);
2339
2340 if (reg != NULL) {
2341 *val = *reg;
2342 return (0);
2343 }
2344
2345 SVM_CTR1(vcpu, "svm_getreg: unknown register %#x", ident);
2346 return (EINVAL);
2347 }
2348
2349 static int
2350 svm_setreg(void *vcpui, int ident, uint64_t val)
2351 {
2352 struct svm_vcpu *vcpu;
2353 register_t *reg;
2354
2355 vcpu = vcpui;
2356
2357 if (ident == VM_REG_GUEST_INTR_SHADOW) {
2358 return (svm_modify_intr_shadow(vcpu, val));
2359 }
2360
2361 /* Do not permit user write access to VMCB fields by offset. */
2362 if (!VMCB_ACCESS_OK(ident)) {
2363 if (vmcb_write(vcpu, ident, val) == 0) {
2364 return (0);
2365 }
2366 }
2367
2368 reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);
2369
2370 if (reg != NULL) {
2371 *reg = val;
2372 return (0);
2373 }
2374
2375 if (ident == VM_REG_GUEST_ENTRY_INST_LENGTH) {
2376 /* Ignore. */
2377 return (0);
2378 }
2379
2380 /*
2381 * XXX deal with CR3 and invalidate TLB entries tagged with the
2382 * vcpu's ASID. This needs to be treated differently depending on
2383 * whether 'running' is true/false.
2384 */ 2385 2386 SVM_CTR1(vcpu, "svm_setreg: unknown register %#x", ident); 2387 return (EINVAL); 2388 } 2389 2390 static int 2391 svm_getdesc(void *vcpui, int reg, struct seg_desc *desc) 2392 { 2393 return (vmcb_getdesc(vcpui, reg, desc)); 2394 } 2395 2396 static int 2397 svm_setdesc(void *vcpui, int reg, struct seg_desc *desc) 2398 { 2399 return (vmcb_setdesc(vcpui, reg, desc)); 2400 } 2401 2402 #ifdef BHYVE_SNAPSHOT 2403 static int 2404 svm_snapshot_reg(void *vcpui, int ident, struct vm_snapshot_meta *meta) 2405 { 2406 int ret; 2407 uint64_t val; 2408 2409 if (meta->op == VM_SNAPSHOT_SAVE) { 2410 ret = svm_getreg(vcpui, ident, &val); 2411 if (ret != 0) 2412 goto done; 2413 2414 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2415 } else if (meta->op == VM_SNAPSHOT_RESTORE) { 2416 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2417 2418 ret = svm_setreg(vcpui, ident, val); 2419 if (ret != 0) 2420 goto done; 2421 } else { 2422 ret = EINVAL; 2423 goto done; 2424 } 2425 2426 done: 2427 return (ret); 2428 } 2429 #endif 2430 2431 static int 2432 svm_setcap(void *vcpui, int type, int val) 2433 { 2434 struct svm_vcpu *vcpu; 2435 struct vlapic *vlapic; 2436 int error; 2437 2438 vcpu = vcpui; 2439 error = 0; 2440 2441 switch (type) { 2442 case VM_CAP_HALT_EXIT: 2443 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, 2444 VMCB_INTCPT_HLT, val); 2445 break; 2446 case VM_CAP_PAUSE_EXIT: 2447 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, 2448 VMCB_INTCPT_PAUSE, val); 2449 break; 2450 case VM_CAP_UNRESTRICTED_GUEST: 2451 /* Unrestricted guest execution cannot be disabled in SVM */ 2452 if (val == 0) 2453 error = EINVAL; 2454 break; 2455 case VM_CAP_BPT_EXIT: 2456 svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP), val); 2457 break; 2458 case VM_CAP_IPI_EXIT: 2459 vlapic = vm_lapic(vcpu->vcpu); 2460 vlapic->ipi_exit = val; 2461 break; 2462 case VM_CAP_MASK_HWINTR: 2463 vcpu->caps &= ~(1 << VM_CAP_MASK_HWINTR); 2464 vcpu->caps |= (val << VM_CAP_MASK_HWINTR); 2465 break; 2466 case VM_CAP_RFLAGS_TF: { 2467 uint64_t rflags; 2468 2469 /* Fetch RFLAGS. */ 2470 if (svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags)) { 2471 error = (EINVAL); 2472 break; 2473 } 2474 if (val) { 2475 /* Save current TF bit. */ 2476 vcpu->dbg.rflags_tf = rflags & PSL_T; 2477 /* Trace next instruction. 
*/ 2478 if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, 2479 (rflags | PSL_T))) { 2480 error = (EINVAL); 2481 break; 2482 } 2483 vcpu->caps |= (1 << VM_CAP_RFLAGS_TF); 2484 } else { 2485 /* 2486 * Restore shadowed RFLAGS.TF only if vCPU was 2487 * previously stepped 2488 */ 2489 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { 2490 rflags &= ~PSL_T; 2491 rflags |= vcpu->dbg.rflags_tf; 2492 vcpu->dbg.rflags_tf = 0; 2493 2494 if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, 2495 rflags)) { 2496 error = (EINVAL); 2497 break; 2498 } 2499 vcpu->caps &= ~(1 << VM_CAP_RFLAGS_TF); 2500 } 2501 } 2502 2503 svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_DB), val); 2504 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_POPF, 2505 val); 2506 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_PUSHF, 2507 val); 2508 break; 2509 } 2510 default: 2511 error = ENOENT; 2512 break; 2513 } 2514 return (error); 2515 } 2516 2517 static int 2518 svm_getcap(void *vcpui, int type, int *retval) 2519 { 2520 struct svm_vcpu *vcpu; 2521 struct vlapic *vlapic; 2522 int error; 2523 2524 vcpu = vcpui; 2525 error = 0; 2526 2527 switch (type) { 2528 case VM_CAP_HALT_EXIT: 2529 *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, 2530 VMCB_INTCPT_HLT); 2531 break; 2532 case VM_CAP_PAUSE_EXIT: 2533 *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, 2534 VMCB_INTCPT_PAUSE); 2535 break; 2536 case VM_CAP_UNRESTRICTED_GUEST: 2537 *retval = 1; /* unrestricted guest is always enabled */ 2538 break; 2539 case VM_CAP_BPT_EXIT: 2540 *retval = svm_get_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP)); 2541 break; 2542 case VM_CAP_IPI_EXIT: 2543 vlapic = vm_lapic(vcpu->vcpu); 2544 *retval = vlapic->ipi_exit; 2545 break; 2546 case VM_CAP_RFLAGS_TF: 2547 *retval = !!(vcpu->caps & (1 << VM_CAP_RFLAGS_TF)); 2548 break; 2549 case VM_CAP_MASK_HWINTR: 2550 *retval = !!(vcpu->caps & (1 << VM_CAP_MASK_HWINTR)); 2551 break; 2552 default: 2553 error = ENOENT; 2554 break; 2555 } 2556 return (error); 2557 } 2558 2559 static struct vmspace * 2560 svm_vmspace_alloc(vm_offset_t min, vm_offset_t max) 2561 { 2562 return (svm_npt_alloc(min, max)); 2563 } 2564 2565 static void 2566 svm_vmspace_free(struct vmspace *vmspace) 2567 { 2568 svm_npt_free(vmspace); 2569 } 2570 2571 static struct vlapic * 2572 svm_vlapic_init(void *vcpui) 2573 { 2574 struct svm_vcpu *vcpu; 2575 struct vlapic *vlapic; 2576 2577 vcpu = vcpui; 2578 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); 2579 vlapic->vm = vcpu->sc->vm; 2580 vlapic->vcpu = vcpu->vcpu; 2581 vlapic->vcpuid = vcpu->vcpuid; 2582 vlapic->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_SVM_VLAPIC, 2583 M_WAITOK | M_ZERO); 2584 2585 vlapic_init(vlapic); 2586 2587 return (vlapic); 2588 } 2589 2590 static void 2591 svm_vlapic_cleanup(struct vlapic *vlapic) 2592 { 2593 2594 vlapic_cleanup(vlapic); 2595 free(vlapic->apic_page, M_SVM_VLAPIC); 2596 free(vlapic, M_SVM_VLAPIC); 2597 } 2598 2599 #ifdef BHYVE_SNAPSHOT 2600 static int 2601 svm_vcpu_snapshot(void *vcpui, struct vm_snapshot_meta *meta) 2602 { 2603 struct svm_vcpu *vcpu; 2604 int err, running, hostcpu; 2605 2606 vcpu = vcpui; 2607 err = 0; 2608 2609 running = vcpu_is_running(vcpu->vcpu, &hostcpu); 2610 if (running && hostcpu != curcpu) { 2611 printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm), 2612 vcpu->vcpuid); 2613 return (EINVAL); 2614 } 2615 2616 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR0, meta); 2617 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR2, meta); 2618 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR3, meta); 2619 err += 
svm_snapshot_reg(vcpu, VM_REG_GUEST_CR4, meta); 2620 2621 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR6, meta); 2622 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR7, meta); 2623 2624 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RAX, meta); 2625 2626 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RSP, meta); 2627 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RIP, meta); 2628 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RFLAGS, meta); 2629 2630 /* Guest segments */ 2631 /* ES */ 2632 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_ES, meta); 2633 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_ES, meta); 2634 2635 /* CS */ 2636 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CS, meta); 2637 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_CS, meta); 2638 2639 /* SS */ 2640 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_SS, meta); 2641 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_SS, meta); 2642 2643 /* DS */ 2644 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DS, meta); 2645 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_DS, meta); 2646 2647 /* FS */ 2648 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_FS, meta); 2649 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_FS, meta); 2650 2651 /* GS */ 2652 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_GS, meta); 2653 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GS, meta); 2654 2655 /* TR */ 2656 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_TR, meta); 2657 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_TR, meta); 2658 2659 /* LDTR */ 2660 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_LDTR, meta); 2661 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_LDTR, meta); 2662 2663 /* EFER */ 2664 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_EFER, meta); 2665 2666 /* IDTR and GDTR */ 2667 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_IDTR, meta); 2668 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GDTR, meta); 2669 2670 /* Specific AMD registers */ 2671 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_INTR_SHADOW, meta); 2672 2673 err += vmcb_snapshot_any(vcpu, 2674 VMCB_ACCESS(VMCB_OFF_CR_INTERCEPT, 4), meta); 2675 err += vmcb_snapshot_any(vcpu, 2676 VMCB_ACCESS(VMCB_OFF_DR_INTERCEPT, 4), meta); 2677 err += vmcb_snapshot_any(vcpu, 2678 VMCB_ACCESS(VMCB_OFF_EXC_INTERCEPT, 4), meta); 2679 err += vmcb_snapshot_any(vcpu, 2680 VMCB_ACCESS(VMCB_OFF_INST1_INTERCEPT, 4), meta); 2681 err += vmcb_snapshot_any(vcpu, 2682 VMCB_ACCESS(VMCB_OFF_INST2_INTERCEPT, 4), meta); 2683 2684 err += vmcb_snapshot_any(vcpu, 2685 VMCB_ACCESS(VMCB_OFF_PAUSE_FILTHRESH, 2), meta); 2686 err += vmcb_snapshot_any(vcpu, 2687 VMCB_ACCESS(VMCB_OFF_PAUSE_FILCNT, 2), meta); 2688 2689 err += vmcb_snapshot_any(vcpu, 2690 VMCB_ACCESS(VMCB_OFF_ASID, 4), meta); 2691 2692 err += vmcb_snapshot_any(vcpu, 2693 VMCB_ACCESS(VMCB_OFF_TLB_CTRL, 4), meta); 2694 2695 err += vmcb_snapshot_any(vcpu, 2696 VMCB_ACCESS(VMCB_OFF_VIRQ, 8), meta); 2697 2698 err += vmcb_snapshot_any(vcpu, 2699 VMCB_ACCESS(VMCB_OFF_EXIT_REASON, 8), meta); 2700 err += vmcb_snapshot_any(vcpu, 2701 VMCB_ACCESS(VMCB_OFF_EXITINFO1, 8), meta); 2702 err += vmcb_snapshot_any(vcpu, 2703 VMCB_ACCESS(VMCB_OFF_EXITINFO2, 8), meta); 2704 err += vmcb_snapshot_any(vcpu, 2705 VMCB_ACCESS(VMCB_OFF_EXITINTINFO, 8), meta); 2706 2707 err += vmcb_snapshot_any(vcpu, 2708 VMCB_ACCESS(VMCB_OFF_NP_ENABLE, 1), meta); 2709 2710 err += vmcb_snapshot_any(vcpu, 2711 VMCB_ACCESS(VMCB_OFF_AVIC_BAR, 8), meta); 2712 err += vmcb_snapshot_any(vcpu, 2713 VMCB_ACCESS(VMCB_OFF_AVIC_PAGE, 8), meta); 2714 err += vmcb_snapshot_any(vcpu, 2715 VMCB_ACCESS(VMCB_OFF_AVIC_LT, 8), meta); 2716 err += vmcb_snapshot_any(vcpu, 2717 
VMCB_ACCESS(VMCB_OFF_AVIC_PT, 8), meta); 2718 2719 err += vmcb_snapshot_any(vcpu, 2720 VMCB_ACCESS(VMCB_OFF_CPL, 1), meta); 2721 2722 err += vmcb_snapshot_any(vcpu, 2723 VMCB_ACCESS(VMCB_OFF_STAR, 8), meta); 2724 err += vmcb_snapshot_any(vcpu, 2725 VMCB_ACCESS(VMCB_OFF_LSTAR, 8), meta); 2726 err += vmcb_snapshot_any(vcpu, 2727 VMCB_ACCESS(VMCB_OFF_CSTAR, 8), meta); 2728 2729 err += vmcb_snapshot_any(vcpu, 2730 VMCB_ACCESS(VMCB_OFF_SFMASK, 8), meta); 2731 2732 err += vmcb_snapshot_any(vcpu, 2733 VMCB_ACCESS(VMCB_OFF_KERNELGBASE, 8), meta); 2734 2735 err += vmcb_snapshot_any(vcpu, 2736 VMCB_ACCESS(VMCB_OFF_SYSENTER_CS, 8), meta); 2737 err += vmcb_snapshot_any(vcpu, 2738 VMCB_ACCESS(VMCB_OFF_SYSENTER_ESP, 8), meta); 2739 err += vmcb_snapshot_any(vcpu, 2740 VMCB_ACCESS(VMCB_OFF_SYSENTER_EIP, 8), meta); 2741 2742 err += vmcb_snapshot_any(vcpu, 2743 VMCB_ACCESS(VMCB_OFF_GUEST_PAT, 8), meta); 2744 2745 err += vmcb_snapshot_any(vcpu, 2746 VMCB_ACCESS(VMCB_OFF_DBGCTL, 8), meta); 2747 err += vmcb_snapshot_any(vcpu, 2748 VMCB_ACCESS(VMCB_OFF_BR_FROM, 8), meta); 2749 err += vmcb_snapshot_any(vcpu, 2750 VMCB_ACCESS(VMCB_OFF_BR_TO, 8), meta); 2751 err += vmcb_snapshot_any(vcpu, 2752 VMCB_ACCESS(VMCB_OFF_INT_FROM, 8), meta); 2753 err += vmcb_snapshot_any(vcpu, 2754 VMCB_ACCESS(VMCB_OFF_INT_TO, 8), meta); 2755 if (err != 0) 2756 goto done; 2757 2758 /* Snapshot swctx for virtual cpu */ 2759 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, err, done); 2760 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, err, done); 2761 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, err, done); 2762 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, err, done); 2763 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, err, done); 2764 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, err, done); 2765 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, err, done); 2766 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, err, done); 2767 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, err, done); 2768 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, err, done); 2769 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, err, done); 2770 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, err, done); 2771 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, err, done); 2772 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, err, done); 2773 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, err, done); 2774 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, err, done); 2775 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, err, done); 2776 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, err, done); 2777 2778 /* Restore other svm_vcpu struct fields */ 2779 2780 /* Restore NEXTRIP field */ 2781 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, err, done); 2782 2783 /* Restore lastcpu field */ 2784 SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, err, done); 2785 SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, err, done); 2786 2787 /* Restore EPTGEN field - EPT is Extended Page Table */ 2788 SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, err, done); 2789 2790 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, err, done); 2791 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, err, done); 2792 2793 SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, sizeof(vcpu->mtrr), meta, err, done); 2794 2795 /* Set all caches dirty */ 2796 if (meta->op == VM_SNAPSHOT_RESTORE) 2797 svm_set_dirty(vcpu, 0xffffffff); 2798 2799 done: 2800 return (err); 2801 } 2802 2803 static int 2804 svm_restore_tsc(void *vcpui, uint64_t offset) 2805 { 2806 struct svm_vcpu *vcpu = vcpui; 2807 2808 svm_set_tsc_offset(vcpu, 
offset); 2809 2810 return (0); 2811 } 2812 #endif 2813 2814 const struct vmm_ops vmm_ops_amd = { 2815 .modinit = svm_modinit, 2816 .modcleanup = svm_modcleanup, 2817 .modresume = svm_modresume, 2818 .modsuspend = svm_modsuspend, 2819 .init = svm_init, 2820 .run = svm_run, 2821 .cleanup = svm_cleanup, 2822 .vcpu_init = svm_vcpu_init, 2823 .vcpu_cleanup = svm_vcpu_cleanup, 2824 .getreg = svm_getreg, 2825 .setreg = svm_setreg, 2826 .getdesc = svm_getdesc, 2827 .setdesc = svm_setdesc, 2828 .getcap = svm_getcap, 2829 .setcap = svm_setcap, 2830 .vmspace_alloc = svm_vmspace_alloc, 2831 .vmspace_free = svm_vmspace_free, 2832 .vlapic_init = svm_vlapic_init, 2833 .vlapic_cleanup = svm_vlapic_cleanup, 2834 #ifdef BHYVE_SNAPSHOT 2835 .vcpu_snapshot = svm_vcpu_snapshot, 2836 .restore_tsc = svm_restore_tsc, 2837 #endif 2838 }; 2839
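/*
 * Editor's note (not part of the original source): vmm_ops_amd above is a
 * function-pointer dispatch table consumed by the machine-independent vmm
 * layer, which selects the AMD/SVM or Intel/VMX backend at module load time
 * and thereafter only calls through the table.  The standalone sketch below
 * illustrates that ops-table pattern in general terms; the names demo_ops,
 * demo_modinit, demo_run, demo_cleanup and vmm_ops_demo are hypothetical and
 * do not exist in the kernel.  It is guarded by #if 0 so it is never built
 * as part of this file.
 */
#if 0
#include <stdio.h>

struct demo_ops {
	int	(*modinit)(void);
	int	(*run)(void *vcpu);
	void	(*cleanup)(void *vm);
};

static int
demo_modinit(void)
{
	printf("modinit\n");
	return (0);
}

static int
demo_run(void *vcpu)
{
	(void)vcpu;
	printf("run\n");
	return (0);
}

static void
demo_cleanup(void *vm)
{
	(void)vm;
	printf("cleanup\n");
}

/* Backend-specific table; the generic code never calls the functions directly. */
static const struct demo_ops vmm_ops_demo = {
	.modinit = demo_modinit,
	.run = demo_run,
	.cleanup = demo_cleanup,
};

int
main(void)
{
	/* Generic layer: dispatch through the selected ops table. */
	vmm_ops_demo.modinit();
	vmm_ops_demo.run(NULL);
	vmm_ops_demo.cleanup(NULL);
	return (0);
}
#endif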