1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 #include "opt_bhyve_snapshot.h" 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/smp.h> 35 #include <sys/kernel.h> 36 #include <sys/malloc.h> 37 #include <sys/pcpu.h> 38 #include <sys/proc.h> 39 #include <sys/reg.h> 40 #include <sys/smr.h> 41 #include <sys/sysctl.h> 42 43 #include <vm/vm.h> 44 #include <vm/vm_extern.h> 45 #include <vm/pmap.h> 46 47 #include <machine/cpufunc.h> 48 #include <machine/psl.h> 49 #include <machine/md_var.h> 50 #include <machine/specialreg.h> 51 #include <machine/smp.h> 52 #include <machine/vmm.h> 53 #include <machine/vmm_dev.h> 54 #include <machine/vmm_instruction_emul.h> 55 #include <machine/vmm_snapshot.h> 56 57 #include <dev/vmm/vmm_ktr.h> 58 #include <dev/vmm/vmm_mem.h> 59 60 #include "vmm_lapic.h" 61 #include "vmm_stat.h" 62 #include "vmm_ioport.h" 63 #include "vatpic.h" 64 #include "vlapic.h" 65 #include "vlapic_priv.h" 66 67 #include "x86.h" 68 #include "vmcb.h" 69 #include "svm.h" 70 #include "svm_softc.h" 71 #include "svm_msr.h" 72 #include "npt.h" 73 #include "io/ppt.h" 74 75 SYSCTL_DECL(_hw_vmm); 76 SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 77 NULL); 78 79 /* 80 * SVM CPUID function 0x8000_000A, edx bit decoding. 81 */ 82 #define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ 83 #define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ 84 #define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ 85 #define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ 86 #define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ 87 #define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ 88 #define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ 89 #define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ 90 #define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. 
*/ 91 #define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ 92 #define AMD_CPUID_SVM_AVIC BIT(13) /* AVIC present */ 93 94 #define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ 95 VMCB_CACHE_IOPM | \ 96 VMCB_CACHE_I | \ 97 VMCB_CACHE_TPR | \ 98 VMCB_CACHE_CR2 | \ 99 VMCB_CACHE_CR | \ 100 VMCB_CACHE_DR | \ 101 VMCB_CACHE_DT | \ 102 VMCB_CACHE_SEG | \ 103 VMCB_CACHE_NP) 104 105 static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT; 106 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean, 107 0, NULL); 108 109 static MALLOC_DEFINE(M_SVM, "svm", "svm"); 110 static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); 111 112 static uint32_t svm_feature = ~0U; /* AMD SVM features. */ 113 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0, 114 "SVM features advertised by CPUID.8000000AH:EDX"); 115 116 static int disable_npf_assist; 117 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN, 118 &disable_npf_assist, 0, NULL); 119 120 /* Maximum ASIDs supported by the processor */ 121 static uint32_t nasid; 122 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0, 123 "Number of ASIDs supported by this processor"); 124 125 /* Current ASID generation for each host cpu */ 126 static struct asid asid[MAXCPU]; 127 128 /* SVM host state saved area of size 4KB for each physical core. */ 129 static uint8_t *hsave; 130 131 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery"); 132 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry"); 133 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window"); 134 135 static int svm_getdesc(void *vcpui, int reg, struct seg_desc *desc); 136 static int svm_setreg(void *vcpui, int ident, uint64_t val); 137 static int svm_getreg(void *vcpui, int ident, uint64_t *val); 138 static __inline int 139 flush_by_asid(void) 140 { 141 142 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); 143 } 144 145 static __inline int 146 decode_assist(void) 147 { 148 149 return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST); 150 } 151 152 static void 153 svm_disable(void *arg __unused) 154 { 155 uint64_t efer; 156 157 efer = rdmsr(MSR_EFER); 158 efer &= ~EFER_SVM; 159 wrmsr(MSR_EFER, efer); 160 } 161 162 /* 163 * Disable SVM on all CPUs. 164 */ 165 static int 166 svm_modcleanup(void) 167 { 168 169 smp_rendezvous(NULL, svm_disable, NULL, NULL); 170 171 if (hsave != NULL) 172 kmem_free(hsave, (mp_maxid + 1) * PAGE_SIZE); 173 174 return (0); 175 } 176 177 /* 178 * Verify that all the features required by bhyve are available. 179 */ 180 static int 181 check_svm_features(void) 182 { 183 u_int regs[4]; 184 185 /* CPUID Fn8000_000A is for SVM */ 186 do_cpuid(0x8000000A, regs); 187 svm_feature &= regs[3]; 188 189 /* 190 * The number of ASIDs can be configured to be less than what is 191 * supported by the hardware but not more. 
192 */ 193 if (nasid == 0 || nasid > regs[1]) 194 nasid = regs[1]; 195 KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid)); 196 197 /* bhyve requires the Nested Paging feature */ 198 if (!(svm_feature & AMD_CPUID_SVM_NP)) { 199 printf("SVM: Nested Paging feature not available.\n"); 200 return (ENXIO); 201 } 202 203 /* bhyve requires the NRIP Save feature */ 204 if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) { 205 printf("SVM: NRIP Save feature not available.\n"); 206 return (ENXIO); 207 } 208 209 return (0); 210 } 211 212 static void 213 svm_enable(void *arg __unused) 214 { 215 uint64_t efer; 216 217 efer = rdmsr(MSR_EFER); 218 efer |= EFER_SVM; 219 wrmsr(MSR_EFER, efer); 220 221 wrmsr(MSR_VM_HSAVE_PA, vtophys(&hsave[curcpu * PAGE_SIZE])); 222 } 223 224 /* 225 * Return 1 if SVM is enabled on this processor and 0 otherwise. 226 */ 227 static int 228 svm_available(void) 229 { 230 uint64_t msr; 231 232 /* Section 15.4 Enabling SVM from APM2. */ 233 if ((amd_feature2 & AMDID2_SVM) == 0) { 234 printf("SVM: not available.\n"); 235 return (0); 236 } 237 238 msr = rdmsr(MSR_VM_CR); 239 if ((msr & VM_CR_SVMDIS) != 0) { 240 printf("SVM: disabled by BIOS.\n"); 241 return (0); 242 } 243 244 return (1); 245 } 246 247 static int 248 svm_modinit(int ipinum) 249 { 250 int error, cpu; 251 252 if (!svm_available()) 253 return (ENXIO); 254 255 error = check_svm_features(); 256 if (error) 257 return (error); 258 259 vmcb_clean &= VMCB_CACHE_DEFAULT; 260 261 for (cpu = 0; cpu < MAXCPU; cpu++) { 262 /* 263 * Initialize the host ASIDs to their "highest" valid values. 264 * 265 * The next ASID allocation will rollover both 'gen' and 'num' 266 * and start off the sequence at {1,1}. 267 */ 268 asid[cpu].gen = ~0UL; 269 asid[cpu].num = nasid - 1; 270 } 271 272 svm_msr_init(); 273 svm_npt_init(ipinum); 274 275 /* Enable SVM on all CPUs */ 276 hsave = kmem_malloc((mp_maxid + 1) * PAGE_SIZE, M_WAITOK | M_ZERO); 277 smp_rendezvous(NULL, svm_enable, NULL, NULL); 278 279 return (0); 280 } 281 282 static void 283 svm_modsuspend(void) 284 { 285 } 286 287 static void 288 svm_modresume(void) 289 { 290 291 svm_enable(NULL); 292 } 293 294 #ifdef BHYVE_SNAPSHOT 295 void 296 svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset) 297 { 298 struct vmcb_ctrl *ctrl; 299 300 ctrl = svm_get_vmcb_ctrl(vcpu); 301 ctrl->tsc_offset = offset; 302 303 svm_set_dirty(vcpu, VMCB_CACHE_I); 304 SVM_CTR1(vcpu, "tsc offset changed to %#lx", offset); 305 306 vm_set_tsc_offset(vcpu->vcpu, offset); 307 } 308 #endif 309 310 /* Pentium compatible MSRs */ 311 #define MSR_PENTIUM_START 0 312 #define MSR_PENTIUM_END 0x1FFF 313 /* AMD 6th generation and Intel compatible MSRs */ 314 #define MSR_AMD6TH_START 0xC0000000UL 315 #define MSR_AMD6TH_END 0xC0001FFFUL 316 /* AMD 7th and 8th generation compatible MSRs */ 317 #define MSR_AMD7TH_START 0xC0010000UL 318 #define MSR_AMD7TH_END 0xC0011FFFUL 319 320 static void 321 svm_get_cs_info(struct vmcb *vmcb, struct vm_guest_paging *paging, int *cs_d, 322 uint64_t *base) 323 { 324 struct vmcb_segment seg; 325 int error __diagused; 326 327 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 328 KASSERT(error == 0, ("%s: vmcb_seg error %d", __func__, error)); 329 330 switch (paging->cpu_mode) { 331 case CPU_MODE_REAL: 332 *base = seg.base; 333 *cs_d = 0; 334 break; 335 case CPU_MODE_PROTECTED: 336 case CPU_MODE_COMPATIBILITY: 337 *cs_d = !!(seg.attrib & VMCB_CS_ATTRIB_D); 338 *base = seg.base; 339 break; 340 default: 341 *base = 0; 342 *cs_d = 0; 343 break; 344 } 345 } 346 347 /* 348 * Get the index and bit 
position for a MSR in permission bitmap. 349 * Two bits are used for each MSR: lower bit for read and higher bit for write. 350 */ 351 static int 352 svm_msr_index(uint64_t msr, int *index, int *bit) 353 { 354 uint32_t base, off; 355 356 *index = -1; 357 *bit = (msr % 4) * 2; 358 base = 0; 359 360 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { 361 *index = msr / 4; 362 return (0); 363 } 364 365 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 366 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { 367 off = (msr - MSR_AMD6TH_START); 368 *index = (off + base) / 4; 369 return (0); 370 } 371 372 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); 373 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { 374 off = (msr - MSR_AMD7TH_START); 375 *index = (off + base) / 4; 376 return (0); 377 } 378 379 return (EINVAL); 380 } 381 382 /* 383 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor. 384 */ 385 static void 386 svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) 387 { 388 int index, bit, error __diagused; 389 390 error = svm_msr_index(msr, &index, &bit); 391 KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr)); 392 KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE, 393 ("%s: invalid index %d for msr %#lx", __func__, index, msr)); 394 KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d " 395 "msr %#lx", __func__, bit, msr)); 396 397 if (read) 398 perm_bitmap[index] &= ~(1UL << bit); 399 400 if (write) 401 perm_bitmap[index] &= ~(2UL << bit); 402 } 403 404 static void 405 svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) 406 { 407 408 svm_msr_perm(perm_bitmap, msr, true, true); 409 } 410 411 static void 412 svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) 413 { 414 415 svm_msr_perm(perm_bitmap, msr, true, false); 416 } 417 418 static __inline int 419 svm_get_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask) 420 { 421 struct vmcb_ctrl *ctrl; 422 423 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 424 425 ctrl = svm_get_vmcb_ctrl(vcpu); 426 return (ctrl->intercept[idx] & bitmask ? 
1 : 0); 427 } 428 429 static __inline void 430 svm_set_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask, int enabled) 431 { 432 struct vmcb_ctrl *ctrl; 433 uint32_t oldval; 434 435 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); 436 437 ctrl = svm_get_vmcb_ctrl(vcpu); 438 oldval = ctrl->intercept[idx]; 439 440 if (enabled) 441 ctrl->intercept[idx] |= bitmask; 442 else 443 ctrl->intercept[idx] &= ~bitmask; 444 445 if (ctrl->intercept[idx] != oldval) { 446 svm_set_dirty(vcpu, VMCB_CACHE_I); 447 SVM_CTR3(vcpu, "intercept[%d] modified from %#x to %#x", idx, 448 oldval, ctrl->intercept[idx]); 449 } 450 } 451 452 static __inline void 453 svm_disable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask) 454 { 455 456 svm_set_intercept(vcpu, off, bitmask, 0); 457 } 458 459 static __inline void 460 svm_enable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask) 461 { 462 463 svm_set_intercept(vcpu, off, bitmask, 1); 464 } 465 466 static void 467 vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa, 468 uint64_t msrpm_base_pa, uint64_t np_pml4) 469 { 470 struct vmcb_ctrl *ctrl; 471 struct vmcb_state *state; 472 uint32_t mask; 473 int n; 474 475 ctrl = svm_get_vmcb_ctrl(vcpu); 476 state = svm_get_vmcb_state(vcpu); 477 478 ctrl->iopm_base_pa = iopm_base_pa; 479 ctrl->msrpm_base_pa = msrpm_base_pa; 480 481 /* Enable nested paging */ 482 ctrl->np_enable = 1; 483 ctrl->n_cr3 = np_pml4; 484 485 /* 486 * Intercept accesses to the control registers that are not shadowed 487 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. 488 */ 489 for (n = 0; n < 16; n++) { 490 mask = (BIT(n) << 16) | BIT(n); 491 if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8) 492 svm_disable_intercept(vcpu, VMCB_CR_INTCPT, mask); 493 else 494 svm_enable_intercept(vcpu, VMCB_CR_INTCPT, mask); 495 } 496 497 /* 498 * Intercept everything when tracing guest exceptions otherwise 499 * just intercept machine check exception. 500 */ 501 if (vcpu_trace_exceptions(vcpu->vcpu)) { 502 for (n = 0; n < 32; n++) { 503 /* 504 * Skip unimplemented vectors in the exception bitmap. 505 */ 506 if (n == 2 || n == 9) { 507 continue; 508 } 509 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(n)); 510 } 511 } else { 512 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); 513 } 514 515 /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ 516 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); 517 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); 518 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); 519 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); 520 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); 521 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); 522 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI); 523 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN); 524 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_FERR_FREEZE); 525 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD); 526 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA); 527 528 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR); 529 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT); 530 531 /* 532 * Intercept SVM instructions since AMD enables them in guests otherwise. 533 * Non-intercepted VMMCALL causes #UD, skip it. 
534 */ 535 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD); 536 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE); 537 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI); 538 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI); 539 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT); 540 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP); 541 if (vcpu_trap_wbinvd(vcpu->vcpu)) { 542 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, 543 VMCB_INTCPT_WBINVD); 544 } 545 546 /* 547 * From section "Canonicalization and Consistency Checks" in APMv2 548 * the VMRUN intercept bit must be set to pass the consistency check. 549 */ 550 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN); 551 552 /* 553 * The ASID will be set to a non-zero value just before VMRUN. 554 */ 555 ctrl->asid = 0; 556 557 /* 558 * Section 15.21.1, Interrupt Masking in EFLAGS 559 * Section 15.21.2, Virtualizing APIC.TPR 560 * 561 * This must be set for %rflag and %cr8 isolation of guest and host. 562 */ 563 ctrl->v_intr_masking = 1; 564 565 /* Enable Last Branch Record aka LBR for debugging */ 566 ctrl->lbr_virt_en = 1; 567 state->dbgctl = BIT(0); 568 569 /* EFER_SVM must always be set when the guest is executing */ 570 state->efer = EFER_SVM; 571 572 /* Set up the PAT to power-on state */ 573 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | 574 PAT_VALUE(1, PAT_WRITE_THROUGH) | 575 PAT_VALUE(2, PAT_UNCACHED) | 576 PAT_VALUE(3, PAT_UNCACHEABLE) | 577 PAT_VALUE(4, PAT_WRITE_BACK) | 578 PAT_VALUE(5, PAT_WRITE_THROUGH) | 579 PAT_VALUE(6, PAT_UNCACHED) | 580 PAT_VALUE(7, PAT_UNCACHEABLE); 581 582 /* Set up DR6/7 to power-on state */ 583 state->dr6 = DBREG_DR6_RESERVED1; 584 state->dr7 = DBREG_DR7_RESERVED1; 585 } 586 587 /* 588 * Initialize a virtual machine. 589 */ 590 static void * 591 svm_init(struct vm *vm, pmap_t pmap) 592 { 593 struct svm_softc *svm_sc; 594 595 svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO); 596 597 svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM, 598 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0); 599 if (svm_sc->msr_bitmap == NULL) 600 panic("contigmalloc of SVM MSR bitmap failed"); 601 svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM, 602 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0); 603 if (svm_sc->iopm_bitmap == NULL) 604 panic("contigmalloc of SVM IO bitmap failed"); 605 606 svm_sc->vm = vm; 607 svm_sc->nptp = vtophys(pmap->pm_pmltop); 608 609 /* 610 * Intercept read and write accesses to all MSRs. 611 */ 612 memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE); 613 614 /* 615 * Access to the following MSRs is redirected to the VMCB when the 616 * guest is executing. Therefore it is safe to allow the guest to 617 * read/write these MSRs directly without hypervisor involvement. 
618 */ 619 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); 620 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); 621 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); 622 623 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); 624 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); 625 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); 626 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); 627 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); 628 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); 629 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); 630 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); 631 632 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); 633 634 /* 635 * Intercept writes to make sure that the EFER_SVM bit is not cleared. 636 */ 637 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); 638 639 /* Intercept access to all I/O ports. */ 640 memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE); 641 642 return (svm_sc); 643 } 644 645 static void * 646 svm_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid) 647 { 648 struct svm_softc *sc = vmi; 649 struct svm_vcpu *vcpu; 650 651 vcpu = malloc(sizeof(*vcpu), M_SVM, M_WAITOK | M_ZERO); 652 vcpu->sc = sc; 653 vcpu->vcpu = vcpu1; 654 vcpu->vcpuid = vcpuid; 655 vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE, M_SVM, 656 M_WAITOK | M_ZERO); 657 vcpu->nextrip = ~0; 658 vcpu->lastcpu = NOCPU; 659 vcpu->vmcb_pa = vtophys(vcpu->vmcb); 660 vmcb_init(sc, vcpu, vtophys(sc->iopm_bitmap), vtophys(sc->msr_bitmap), 661 sc->nptp); 662 svm_msr_guest_init(sc, vcpu); 663 return (vcpu); 664 } 665 666 /* 667 * Collateral for a generic SVM VM-exit. 668 */ 669 static void 670 vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) 671 { 672 673 vme->exitcode = VM_EXITCODE_SVM; 674 vme->u.svm.exitcode = code; 675 vme->u.svm.exitinfo1 = info1; 676 vme->u.svm.exitinfo2 = info2; 677 } 678 679 static int 680 svm_cpl(struct vmcb_state *state) 681 { 682 683 /* 684 * From APMv2: 685 * "Retrieve the CPL from the CPL field in the VMCB, not 686 * from any segment DPL" 687 */ 688 return (state->cpl); 689 } 690 691 static enum vm_cpu_mode 692 svm_vcpu_mode(struct vmcb *vmcb) 693 { 694 struct vmcb_segment seg; 695 struct vmcb_state *state; 696 int error __diagused; 697 698 state = &vmcb->state; 699 700 if (state->efer & EFER_LMA) { 701 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); 702 KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__, 703 error)); 704 705 /* 706 * Section 4.8.1 for APM2, check if Code Segment has 707 * Long attribute set in descriptor. 708 */ 709 if (seg.attrib & VMCB_CS_ATTRIB_L) 710 return (CPU_MODE_64BIT); 711 else 712 return (CPU_MODE_COMPATIBILITY); 713 } else if (state->cr0 & CR0_PE) { 714 return (CPU_MODE_PROTECTED); 715 } else { 716 return (CPU_MODE_REAL); 717 } 718 } 719 720 static enum vm_paging_mode 721 svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) 722 { 723 724 if ((cr0 & CR0_PG) == 0) 725 return (PAGING_MODE_FLAT); 726 if ((cr4 & CR4_PAE) == 0) 727 return (PAGING_MODE_32); 728 if (efer & EFER_LME) 729 return (PAGING_MODE_64); 730 else 731 return (PAGING_MODE_PAE); 732 } 733 734 /* 735 * ins/outs utility routines 736 */ 737 static uint64_t 738 svm_inout_str_index(struct svm_regctx *regs, int in) 739 { 740 uint64_t val; 741 742 val = in ? regs->sctx_rdi : regs->sctx_rsi; 743 744 return (val); 745 } 746 747 static uint64_t 748 svm_inout_str_count(struct svm_regctx *regs, int rep) 749 { 750 uint64_t val; 751 752 val = rep ? 
regs->sctx_rcx : 1; 753 754 return (val); 755 } 756 757 static void 758 svm_inout_str_seginfo(struct svm_vcpu *vcpu, int64_t info1, int in, 759 struct vm_inout_str *vis) 760 { 761 int error __diagused, s; 762 763 if (in) { 764 vis->seg_name = VM_REG_GUEST_ES; 765 } else if (decode_assist()) { 766 /* 767 * The effective segment number in EXITINFO1[12:10] is populated 768 * only if the processor has the DecodeAssist capability. 769 * 770 * XXX this is not specified explicitly in APMv2 but can be 771 * verified empirically. 772 */ 773 s = (info1 >> 10) & 0x7; 774 775 /* The segment field has standard encoding */ 776 vis->seg_name = vm_segment_name(s); 777 } else { 778 /* 779 * The segment register need to be manually decoded by fetching 780 * the instructions near ip. However, we are unable to fetch it 781 * while the interrupts are disabled. Therefore, we leave the 782 * value unset until the generic ins/outs handler runs. 783 */ 784 vis->seg_name = VM_REG_LAST; 785 svm_get_cs_info(vcpu->vmcb, &vis->paging, &vis->cs_d, 786 &vis->cs_base); 787 return; 788 } 789 790 error = svm_getdesc(vcpu, vis->seg_name, &vis->seg_desc); 791 KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); 792 } 793 794 static int 795 svm_inout_str_addrsize(uint64_t info1) 796 { 797 uint32_t size; 798 799 size = (info1 >> 7) & 0x7; 800 switch (size) { 801 case 1: 802 return (2); /* 16 bit */ 803 case 2: 804 return (4); /* 32 bit */ 805 case 4: 806 return (8); /* 64 bit */ 807 default: 808 panic("%s: invalid size encoding %d", __func__, size); 809 } 810 } 811 812 static void 813 svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) 814 { 815 struct vmcb_state *state; 816 817 state = &vmcb->state; 818 paging->cr3 = state->cr3; 819 paging->cpl = svm_cpl(state); 820 paging->cpu_mode = svm_vcpu_mode(vmcb); 821 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, 822 state->efer); 823 } 824 825 #define UNHANDLED 0 826 827 /* 828 * Handle guest I/O intercept. 829 */ 830 static int 831 svm_handle_io(struct svm_vcpu *vcpu, struct vm_exit *vmexit) 832 { 833 struct vmcb_ctrl *ctrl; 834 struct vmcb_state *state; 835 struct svm_regctx *regs; 836 struct vm_inout_str *vis; 837 uint64_t info1; 838 int inout_string; 839 840 state = svm_get_vmcb_state(vcpu); 841 ctrl = svm_get_vmcb_ctrl(vcpu); 842 regs = svm_get_guest_regctx(vcpu); 843 844 info1 = ctrl->exitinfo1; 845 inout_string = info1 & BIT(2) ? 1 : 0; 846 847 vmexit->exitcode = VM_EXITCODE_INOUT; 848 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; 849 vmexit->u.inout.string = inout_string; 850 vmexit->u.inout.rep = (info1 & BIT(3)) ? 
1 : 0; 851 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; 852 vmexit->u.inout.port = (uint16_t)(info1 >> 16); 853 vmexit->u.inout.eax = (uint32_t)(state->rax); 854 855 if (inout_string) { 856 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 857 vis = &vmexit->u.inout_str; 858 svm_paging_info(svm_get_vmcb(vcpu), &vis->paging); 859 vis->rflags = state->rflags; 860 vis->cr0 = state->cr0; 861 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); 862 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); 863 vis->addrsize = svm_inout_str_addrsize(info1); 864 vis->cs_d = 0; 865 vis->cs_base = 0; 866 svm_inout_str_seginfo(vcpu, info1, vmexit->u.inout.in, vis); 867 } 868 869 return (UNHANDLED); 870 } 871 872 static int 873 npf_fault_type(uint64_t exitinfo1) 874 { 875 876 if (exitinfo1 & VMCB_NPF_INFO1_W) 877 return (VM_PROT_WRITE); 878 else if (exitinfo1 & VMCB_NPF_INFO1_ID) 879 return (VM_PROT_EXECUTE); 880 else 881 return (VM_PROT_READ); 882 } 883 884 static bool 885 svm_npf_emul_fault(uint64_t exitinfo1) 886 { 887 888 if (exitinfo1 & VMCB_NPF_INFO1_ID) { 889 return (false); 890 } 891 892 if (exitinfo1 & VMCB_NPF_INFO1_GPT) { 893 return (false); 894 } 895 896 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { 897 return (false); 898 } 899 900 return (true); 901 } 902 903 static void 904 svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) 905 { 906 struct vm_guest_paging *paging; 907 struct vmcb_ctrl *ctrl; 908 char *inst_bytes; 909 int inst_len; 910 911 ctrl = &vmcb->ctrl; 912 paging = &vmexit->u.inst_emul.paging; 913 914 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 915 vmexit->u.inst_emul.gpa = gpa; 916 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; 917 svm_paging_info(vmcb, paging); 918 919 svm_get_cs_info(vmcb, paging, &vmexit->u.inst_emul.cs_d, 920 &vmexit->u.inst_emul.cs_base); 921 922 /* 923 * Copy the instruction bytes into 'vie' if available. 924 */ 925 if (decode_assist() && !disable_npf_assist) { 926 inst_len = ctrl->inst_len; 927 inst_bytes = ctrl->inst_bytes; 928 } else { 929 inst_len = 0; 930 inst_bytes = NULL; 931 } 932 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); 933 } 934 935 #ifdef KTR 936 static const char * 937 intrtype_to_str(int intr_type) 938 { 939 switch (intr_type) { 940 case VMCB_EVENTINJ_TYPE_INTR: 941 return ("hwintr"); 942 case VMCB_EVENTINJ_TYPE_NMI: 943 return ("nmi"); 944 case VMCB_EVENTINJ_TYPE_INTn: 945 return ("swintr"); 946 case VMCB_EVENTINJ_TYPE_EXCEPTION: 947 return ("exception"); 948 default: 949 panic("%s: unknown intr_type %d", __func__, intr_type); 950 } 951 } 952 #endif 953 954 /* 955 * Inject an event to vcpu as described in section 15.20, "Event injection". 
956 */ 957 static void 958 svm_eventinject(struct svm_vcpu *vcpu, int intr_type, int vector, 959 uint32_t error, bool ec_valid) 960 { 961 struct vmcb_ctrl *ctrl; 962 963 ctrl = svm_get_vmcb_ctrl(vcpu); 964 965 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, 966 ("%s: event already pending %#lx", __func__, ctrl->eventinj)); 967 968 KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d", 969 __func__, vector)); 970 971 switch (intr_type) { 972 case VMCB_EVENTINJ_TYPE_INTR: 973 case VMCB_EVENTINJ_TYPE_NMI: 974 case VMCB_EVENTINJ_TYPE_INTn: 975 break; 976 case VMCB_EVENTINJ_TYPE_EXCEPTION: 977 if (vector >= 0 && vector <= 31 && vector != 2) 978 break; 979 /* FALLTHROUGH */ 980 default: 981 panic("%s: invalid intr_type/vector: %d/%d", __func__, 982 intr_type, vector); 983 } 984 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; 985 if (ec_valid) { 986 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; 987 ctrl->eventinj |= (uint64_t)error << 32; 988 SVM_CTR3(vcpu, "Injecting %s at vector %d errcode %#x", 989 intrtype_to_str(intr_type), vector, error); 990 } else { 991 SVM_CTR2(vcpu, "Injecting %s at vector %d", 992 intrtype_to_str(intr_type), vector); 993 } 994 } 995 996 static void 997 svm_update_virqinfo(struct svm_vcpu *vcpu) 998 { 999 struct vlapic *vlapic; 1000 struct vmcb_ctrl *ctrl; 1001 1002 vlapic = vm_lapic(vcpu->vcpu); 1003 ctrl = svm_get_vmcb_ctrl(vcpu); 1004 1005 /* Update %cr8 in the emulated vlapic */ 1006 vlapic_set_cr8(vlapic, ctrl->v_tpr); 1007 1008 /* Virtual interrupt injection is not used. */ 1009 KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid " 1010 "v_intr_vector %d", __func__, ctrl->v_intr_vector)); 1011 } 1012 1013 static void 1014 svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu) 1015 { 1016 struct vmcb_ctrl *ctrl; 1017 uint64_t intinfo; 1018 1019 ctrl = svm_get_vmcb_ctrl(vcpu); 1020 intinfo = ctrl->exitintinfo; 1021 if (!VMCB_EXITINTINFO_VALID(intinfo)) 1022 return; 1023 1024 /* 1025 * From APMv2, Section "Intercepts during IDT interrupt delivery" 1026 * 1027 * If a #VMEXIT happened during event delivery then record the event 1028 * that was being delivered. 
1029 */ 1030 SVM_CTR2(vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", intinfo, 1031 VMCB_EXITINTINFO_VECTOR(intinfo)); 1032 vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1); 1033 vm_exit_intinfo(vcpu->vcpu, intinfo); 1034 } 1035 1036 #ifdef INVARIANTS 1037 static __inline int 1038 vintr_intercept_enabled(struct svm_vcpu *vcpu) 1039 { 1040 1041 return (svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR)); 1042 } 1043 #endif 1044 1045 static __inline void 1046 enable_intr_window_exiting(struct svm_vcpu *vcpu) 1047 { 1048 struct vmcb_ctrl *ctrl; 1049 1050 ctrl = svm_get_vmcb_ctrl(vcpu); 1051 1052 if (ctrl->v_irq && ctrl->v_intr_vector == 0) { 1053 KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); 1054 KASSERT(vintr_intercept_enabled(vcpu), 1055 ("%s: vintr intercept should be enabled", __func__)); 1056 return; 1057 } 1058 1059 SVM_CTR0(vcpu, "Enable intr window exiting"); 1060 ctrl->v_irq = 1; 1061 ctrl->v_ign_tpr = 1; 1062 ctrl->v_intr_vector = 0; 1063 svm_set_dirty(vcpu, VMCB_CACHE_TPR); 1064 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1065 } 1066 1067 static __inline void 1068 disable_intr_window_exiting(struct svm_vcpu *vcpu) 1069 { 1070 struct vmcb_ctrl *ctrl; 1071 1072 ctrl = svm_get_vmcb_ctrl(vcpu); 1073 1074 if (!ctrl->v_irq && ctrl->v_intr_vector == 0) { 1075 KASSERT(!vintr_intercept_enabled(vcpu), 1076 ("%s: vintr intercept should be disabled", __func__)); 1077 return; 1078 } 1079 1080 SVM_CTR0(vcpu, "Disable intr window exiting"); 1081 ctrl->v_irq = 0; 1082 ctrl->v_intr_vector = 0; 1083 svm_set_dirty(vcpu, VMCB_CACHE_TPR); 1084 svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1085 } 1086 1087 static int 1088 svm_modify_intr_shadow(struct svm_vcpu *vcpu, uint64_t val) 1089 { 1090 struct vmcb_ctrl *ctrl; 1091 int oldval, newval; 1092 1093 ctrl = svm_get_vmcb_ctrl(vcpu); 1094 oldval = ctrl->intr_shadow; 1095 newval = val ? 1 : 0; 1096 if (newval != oldval) { 1097 ctrl->intr_shadow = newval; 1098 SVM_CTR1(vcpu, "Setting intr_shadow to %d", newval); 1099 } 1100 return (0); 1101 } 1102 1103 static int 1104 svm_get_intr_shadow(struct svm_vcpu *vcpu, uint64_t *val) 1105 { 1106 struct vmcb_ctrl *ctrl; 1107 1108 ctrl = svm_get_vmcb_ctrl(vcpu); 1109 *val = ctrl->intr_shadow; 1110 return (0); 1111 } 1112 1113 /* 1114 * Once an NMI is injected it blocks delivery of further NMIs until the handler 1115 * executes an IRET. The IRET intercept is enabled when an NMI is injected to 1116 * to track when the vcpu is done handling the NMI. 1117 */ 1118 static int 1119 nmi_blocked(struct svm_vcpu *vcpu) 1120 { 1121 int blocked; 1122 1123 blocked = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1124 return (blocked); 1125 } 1126 1127 static void 1128 enable_nmi_blocking(struct svm_vcpu *vcpu) 1129 { 1130 1131 KASSERT(!nmi_blocked(vcpu), ("vNMI already blocked")); 1132 SVM_CTR0(vcpu, "vNMI blocking enabled"); 1133 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1134 } 1135 1136 static void 1137 clear_nmi_blocking(struct svm_vcpu *vcpu) 1138 { 1139 int error __diagused; 1140 1141 KASSERT(nmi_blocked(vcpu), ("vNMI already unblocked")); 1142 SVM_CTR0(vcpu, "vNMI blocking cleared"); 1143 /* 1144 * When the IRET intercept is cleared the vcpu will attempt to execute 1145 * the "iret" when it runs next. However, it is possible to inject 1146 * another NMI into the vcpu before the "iret" has actually executed. 1147 * 1148 * For e.g. 
if the "iret" encounters a #NPF when accessing the stack 1149 * it will trap back into the hypervisor. If an NMI is pending for 1150 * the vcpu it will be injected into the guest. 1151 * 1152 * XXX this needs to be fixed 1153 */ 1154 svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1155 1156 /* 1157 * Set 'intr_shadow' to prevent an NMI from being injected on the 1158 * immediate VMRUN. 1159 */ 1160 error = svm_modify_intr_shadow(vcpu, 1); 1161 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); 1162 } 1163 1164 #define EFER_MBZ_BITS 0xFFFFFFFFFFFF0200UL 1165 1166 static int 1167 svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval, 1168 bool *retu) 1169 { 1170 struct vm_exit *vme; 1171 struct vmcb_state *state; 1172 uint64_t changed, lma, oldval; 1173 int error __diagused; 1174 1175 state = svm_get_vmcb_state(vcpu); 1176 1177 oldval = state->efer; 1178 SVM_CTR2(vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval); 1179 1180 newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */ 1181 changed = oldval ^ newval; 1182 1183 if (newval & EFER_MBZ_BITS) 1184 goto gpf; 1185 1186 /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */ 1187 if (changed & EFER_LME) { 1188 if (state->cr0 & CR0_PG) 1189 goto gpf; 1190 } 1191 1192 /* EFER.LMA = EFER.LME & CR0.PG */ 1193 if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) 1194 lma = EFER_LMA; 1195 else 1196 lma = 0; 1197 1198 if ((newval & EFER_LMA) != lma) 1199 goto gpf; 1200 1201 if (newval & EFER_NXE) { 1202 if (!vm_cpuid_capability(vcpu->vcpu, VCC_NO_EXECUTE)) 1203 goto gpf; 1204 } 1205 1206 /* 1207 * XXX bhyve does not enforce segment limits in 64-bit mode. Until 1208 * this is fixed flag guest attempt to set EFER_LMSLE as an error. 1209 */ 1210 if (newval & EFER_LMSLE) { 1211 vme = vm_exitinfo(vcpu->vcpu); 1212 vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0); 1213 *retu = true; 1214 return (0); 1215 } 1216 1217 if (newval & EFER_FFXSR) { 1218 if (!vm_cpuid_capability(vcpu->vcpu, VCC_FFXSR)) 1219 goto gpf; 1220 } 1221 1222 if (newval & EFER_TCE) { 1223 if (!vm_cpuid_capability(vcpu->vcpu, VCC_TCE)) 1224 goto gpf; 1225 } 1226 1227 error = svm_setreg(vcpu, VM_REG_GUEST_EFER, newval); 1228 KASSERT(error == 0, ("%s: error %d updating efer", __func__, error)); 1229 return (0); 1230 gpf: 1231 vm_inject_gp(vcpu->vcpu); 1232 return (0); 1233 } 1234 1235 static int 1236 emulate_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num, 1237 uint64_t val, bool *retu) 1238 { 1239 int error; 1240 1241 if (lapic_msr(num)) 1242 error = lapic_wrmsr(vcpu->vcpu, num, val, retu); 1243 else if (num == MSR_EFER) 1244 error = svm_write_efer(sc, vcpu, val, retu); 1245 else 1246 error = svm_wrmsr(vcpu, num, val, retu); 1247 1248 return (error); 1249 } 1250 1251 static int 1252 emulate_rdmsr(struct svm_vcpu *vcpu, u_int num, bool *retu) 1253 { 1254 struct vmcb_state *state; 1255 struct svm_regctx *ctx; 1256 uint64_t result; 1257 int error; 1258 1259 if (lapic_msr(num)) 1260 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu); 1261 else 1262 error = svm_rdmsr(vcpu, num, &result, retu); 1263 1264 if (error == 0) { 1265 state = svm_get_vmcb_state(vcpu); 1266 ctx = svm_get_guest_regctx(vcpu); 1267 state->rax = result & 0xffffffff; 1268 ctx->sctx_rdx = result >> 32; 1269 } 1270 1271 return (error); 1272 } 1273 1274 #ifdef KTR 1275 static const char * 1276 exit_reason_to_str(uint64_t reason) 1277 { 1278 int i; 1279 static char reasonbuf[32]; 1280 static const struct { 1281 int reason; 1282 const char *str; 1283 } reasons[] 
= { 1284 { .reason = VMCB_EXIT_INVALID, .str = "invalvmcb" }, 1285 { .reason = VMCB_EXIT_SHUTDOWN, .str = "shutdown" }, 1286 { .reason = VMCB_EXIT_NPF, .str = "nptfault" }, 1287 { .reason = VMCB_EXIT_PAUSE, .str = "pause" }, 1288 { .reason = VMCB_EXIT_HLT, .str = "hlt" }, 1289 { .reason = VMCB_EXIT_CPUID, .str = "cpuid" }, 1290 { .reason = VMCB_EXIT_IO, .str = "inout" }, 1291 { .reason = VMCB_EXIT_MC, .str = "mchk" }, 1292 { .reason = VMCB_EXIT_INTR, .str = "extintr" }, 1293 { .reason = VMCB_EXIT_NMI, .str = "nmi" }, 1294 { .reason = VMCB_EXIT_VINTR, .str = "vintr" }, 1295 { .reason = VMCB_EXIT_MSR, .str = "msr" }, 1296 { .reason = VMCB_EXIT_IRET, .str = "iret" }, 1297 { .reason = VMCB_EXIT_MONITOR, .str = "monitor" }, 1298 { .reason = VMCB_EXIT_MWAIT, .str = "mwait" }, 1299 { .reason = VMCB_EXIT_VMRUN, .str = "vmrun" }, 1300 { .reason = VMCB_EXIT_VMMCALL, .str = "vmmcall" }, 1301 { .reason = VMCB_EXIT_VMLOAD, .str = "vmload" }, 1302 { .reason = VMCB_EXIT_VMSAVE, .str = "vmsave" }, 1303 { .reason = VMCB_EXIT_STGI, .str = "stgi" }, 1304 { .reason = VMCB_EXIT_CLGI, .str = "clgi" }, 1305 { .reason = VMCB_EXIT_SKINIT, .str = "skinit" }, 1306 { .reason = VMCB_EXIT_ICEBP, .str = "icebp" }, 1307 { .reason = VMCB_EXIT_INVD, .str = "invd" }, 1308 { .reason = VMCB_EXIT_INVLPGA, .str = "invlpga" }, 1309 { .reason = VMCB_EXIT_POPF, .str = "popf" }, 1310 { .reason = VMCB_EXIT_PUSHF, .str = "pushf" }, 1311 }; 1312 1313 for (i = 0; i < nitems(reasons); i++) { 1314 if (reasons[i].reason == reason) 1315 return (reasons[i].str); 1316 } 1317 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); 1318 return (reasonbuf); 1319 } 1320 #endif /* KTR */ 1321 1322 /* 1323 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs 1324 * that are due to instruction intercepts as well as MSR and IOIO intercepts 1325 * and exceptions caused by INT3, INTO and BOUND instructions. 1326 * 1327 * Return 1 if the nRIP is valid and 0 otherwise. 1328 */ 1329 static int 1330 nrip_valid(uint64_t exitcode) 1331 { 1332 switch (exitcode) { 1333 case 0x00 ... 0x0F: /* read of CR0 through CR15 */ 1334 case 0x10 ... 0x1F: /* write of CR0 through CR15 */ 1335 case 0x20 ... 0x2F: /* read of DR0 through DR15 */ 1336 case 0x30 ... 0x3F: /* write of DR0 through DR15 */ 1337 case 0x43: /* INT3 */ 1338 case 0x44: /* INTO */ 1339 case 0x45: /* BOUND */ 1340 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */ 1341 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */ 1342 return (1); 1343 default: 1344 return (0); 1345 } 1346 } 1347 1348 static int 1349 svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu, 1350 struct vm_exit *vmexit) 1351 { 1352 struct vmcb *vmcb; 1353 struct vmcb_state *state; 1354 struct vmcb_ctrl *ctrl; 1355 struct svm_regctx *ctx; 1356 uint64_t code, info1, info2, val; 1357 uint32_t eax, ecx, edx; 1358 int error __diagused, errcode_valid, handled, idtvec, reflect; 1359 bool retu; 1360 1361 ctx = svm_get_guest_regctx(vcpu); 1362 vmcb = svm_get_vmcb(vcpu); 1363 state = &vmcb->state; 1364 ctrl = &vmcb->ctrl; 1365 1366 handled = 0; 1367 code = ctrl->exitcode; 1368 info1 = ctrl->exitinfo1; 1369 info2 = ctrl->exitinfo2; 1370 1371 vmexit->exitcode = VM_EXITCODE_BOGUS; 1372 vmexit->rip = state->rip; 1373 vmexit->inst_length = nrip_valid(code) ? 
ctrl->nrip - state->rip : 0; 1374 1375 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1); 1376 1377 /* 1378 * #VMEXIT(INVALID) needs to be handled early because the VMCB is 1379 * in an inconsistent state and can trigger assertions that would 1380 * never happen otherwise. 1381 */ 1382 if (code == VMCB_EXIT_INVALID) { 1383 vm_exit_svm(vmexit, code, info1, info2); 1384 return (0); 1385 } 1386 1387 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 1388 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); 1389 1390 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, 1391 ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)", 1392 vmexit->inst_length, code, info1, info2)); 1393 1394 svm_update_virqinfo(vcpu); 1395 svm_save_intinfo(svm_sc, vcpu); 1396 1397 switch (code) { 1398 case VMCB_EXIT_IRET: 1399 /* 1400 * Restart execution at "iret" but with the intercept cleared. 1401 */ 1402 vmexit->inst_length = 0; 1403 clear_nmi_blocking(vcpu); 1404 handled = 1; 1405 break; 1406 case VMCB_EXIT_VINTR: /* interrupt window exiting */ 1407 vmm_stat_incr(vcpu->vcpu, VMEXIT_VINTR, 1); 1408 handled = 1; 1409 break; 1410 case VMCB_EXIT_INTR: /* external interrupt */ 1411 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1); 1412 handled = 1; 1413 break; 1414 case VMCB_EXIT_NMI: /* external NMI */ 1415 handled = 1; 1416 break; 1417 case 0x40 ... 0x5F: 1418 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1); 1419 reflect = 1; 1420 idtvec = code - 0x40; 1421 switch (idtvec) { 1422 case IDT_MC: 1423 /* 1424 * Call the machine check handler by hand. Also don't 1425 * reflect the machine check back into the guest. 1426 */ 1427 reflect = 0; 1428 SVM_CTR0(vcpu, "Vectoring to MCE handler"); 1429 __asm __volatile("int $18"); 1430 break; 1431 case IDT_PF: 1432 error = svm_setreg(vcpu, VM_REG_GUEST_CR2, info2); 1433 KASSERT(error == 0, ("%s: error %d updating cr2", 1434 __func__, error)); 1435 /* fallthru */ 1436 case IDT_NP: 1437 case IDT_SS: 1438 case IDT_GP: 1439 case IDT_AC: 1440 case IDT_TS: 1441 errcode_valid = 1; 1442 break; 1443 1444 case IDT_DF: 1445 errcode_valid = 1; 1446 info1 = 0; 1447 break; 1448 case IDT_DB: { 1449 /* 1450 * Check if we are being stepped (RFLAGS.TF) 1451 * and bounce vmexit to userland. 1452 */ 1453 bool stepped = 0; 1454 uint64_t dr6 = 0; 1455 1456 svm_getreg(vcpu, VM_REG_GUEST_DR6, &dr6); 1457 stepped = !!(dr6 & DBREG_DR6_BS); 1458 if (stepped && (vcpu->caps & (1 << VM_CAP_RFLAGS_TF))) { 1459 vmexit->exitcode = VM_EXITCODE_DB; 1460 vmexit->u.dbg.trace_trap = 1; 1461 vmexit->u.dbg.pushf_intercept = 0; 1462 1463 if (vcpu->dbg.popf_sstep) { 1464 /* 1465 * DB# exit was caused by stepping over 1466 * popf. 1467 */ 1468 uint64_t rflags; 1469 1470 vcpu->dbg.popf_sstep = 0; 1471 1472 /* 1473 * Update shadowed TF bit so the next 1474 * setcap(..., RFLAGS_SSTEP, 0) restores 1475 * the correct value 1476 */ 1477 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, 1478 &rflags); 1479 vcpu->dbg.rflags_tf = rflags & PSL_T; 1480 } else if (vcpu->dbg.pushf_sstep) { 1481 /* 1482 * DB# exit was caused by stepping over 1483 * pushf. 1484 */ 1485 vcpu->dbg.pushf_sstep = 0; 1486 1487 /* 1488 * Adjusting the pushed rflags after a 1489 * restarted pushf instruction must be 1490 * handled outside of svm.c due to the 1491 * critical_enter() lock being held. 1492 */ 1493 vmexit->u.dbg.pushf_intercept = 1; 1494 vmexit->u.dbg.tf_shadow_val = 1495 vcpu->dbg.rflags_tf; 1496 svm_paging_info(svm_get_vmcb(vcpu), 1497 &vmexit->u.dbg.paging); 1498 } 1499 1500 /* Clear DR6 "single-step" bit. 
*/ 1501 dr6 &= ~DBREG_DR6_BS; 1502 error = svm_setreg(vcpu, VM_REG_GUEST_DR6, dr6); 1503 KASSERT(error == 0, 1504 ("%s: error %d updating DR6\r\n", __func__, 1505 error)); 1506 1507 reflect = 0; 1508 } 1509 break; 1510 } 1511 case IDT_BP: 1512 vmexit->exitcode = VM_EXITCODE_BPT; 1513 vmexit->u.bpt.inst_length = vmexit->inst_length; 1514 vmexit->inst_length = 0; 1515 1516 reflect = 0; 1517 break; 1518 case IDT_OF: 1519 case IDT_BR: 1520 /* 1521 * The 'nrip' field is populated for INT3, INTO and 1522 * BOUND exceptions and this also implies that 1523 * 'inst_length' is non-zero. 1524 * 1525 * Reset 'inst_length' to zero so the guest %rip at 1526 * event injection is identical to what it was when 1527 * the exception originally happened. 1528 */ 1529 SVM_CTR2(vcpu, "Reset inst_length from %d " 1530 "to zero before injecting exception %d", 1531 vmexit->inst_length, idtvec); 1532 vmexit->inst_length = 0; 1533 /* fallthru */ 1534 default: 1535 errcode_valid = 0; 1536 info1 = 0; 1537 break; 1538 } 1539 1540 if (reflect) { 1541 KASSERT(vmexit->inst_length == 0, 1542 ("invalid inst_length (%d) " 1543 "when reflecting exception %d into guest", 1544 vmexit->inst_length, idtvec)); 1545 /* Reflect the exception back into the guest */ 1546 SVM_CTR2(vcpu, "Reflecting exception " 1547 "%d/%#x into the guest", idtvec, (int)info1); 1548 error = vm_inject_exception(vcpu->vcpu, idtvec, 1549 errcode_valid, info1, 0); 1550 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 1551 __func__, error)); 1552 handled = 1; 1553 } 1554 break; 1555 case VMCB_EXIT_MSR: /* MSR access. */ 1556 eax = state->rax; 1557 ecx = ctx->sctx_rcx; 1558 edx = ctx->sctx_rdx; 1559 retu = false; 1560 1561 if (info1) { 1562 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1); 1563 val = (uint64_t)edx << 32 | eax; 1564 SVM_CTR2(vcpu, "wrmsr %#x val %#lx", ecx, val); 1565 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) { 1566 vmexit->exitcode = VM_EXITCODE_WRMSR; 1567 vmexit->u.msr.code = ecx; 1568 vmexit->u.msr.wval = val; 1569 } else if (!retu) { 1570 handled = 1; 1571 } else { 1572 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1573 ("emulate_wrmsr retu with bogus exitcode")); 1574 } 1575 } else { 1576 SVM_CTR1(vcpu, "rdmsr %#x", ecx); 1577 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1); 1578 if (emulate_rdmsr(vcpu, ecx, &retu)) { 1579 vmexit->exitcode = VM_EXITCODE_RDMSR; 1580 vmexit->u.msr.code = ecx; 1581 } else if (!retu) { 1582 handled = 1; 1583 } else { 1584 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 1585 ("emulate_rdmsr retu with bogus exitcode")); 1586 } 1587 } 1588 break; 1589 case VMCB_EXIT_IO: 1590 handled = svm_handle_io(vcpu, vmexit); 1591 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1); 1592 break; 1593 case VMCB_EXIT_CPUID: 1594 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1); 1595 handled = x86_emulate_cpuid(vcpu->vcpu, 1596 &state->rax, &ctx->sctx_rbx, &ctx->sctx_rcx, 1597 &ctx->sctx_rdx); 1598 break; 1599 case VMCB_EXIT_HLT: 1600 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1); 1601 vmexit->exitcode = VM_EXITCODE_HLT; 1602 vmexit->u.hlt.rflags = state->rflags; 1603 break; 1604 case VMCB_EXIT_PAUSE: 1605 vmexit->exitcode = VM_EXITCODE_PAUSE; 1606 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1); 1607 break; 1608 case VMCB_EXIT_NPF: 1609 /* EXITINFO2 contains the faulting guest physical address */ 1610 if (info1 & VMCB_NPF_INFO1_RSV) { 1611 SVM_CTR2(vcpu, "nested page fault with " 1612 "reserved bits set: info1(%#lx) info2(%#lx)", 1613 info1, info2); 1614 } else if (vm_mem_allocated(vcpu->vcpu, info2) || 1615 ppt_is_mmio(svm_sc->vm, info2)) { 1616 
vmexit->exitcode = VM_EXITCODE_PAGING; 1617 vmexit->u.paging.gpa = info2; 1618 vmexit->u.paging.fault_type = npf_fault_type(info1); 1619 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1); 1620 SVM_CTR3(vcpu, "nested page fault " 1621 "on gpa %#lx/%#lx at rip %#lx", 1622 info2, info1, state->rip); 1623 } else if (svm_npf_emul_fault(info1)) { 1624 svm_handle_inst_emul(vmcb, info2, vmexit); 1625 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1); 1626 SVM_CTR3(vcpu, "inst_emul fault " 1627 "for gpa %#lx/%#lx at rip %#lx", 1628 info2, info1, state->rip); 1629 } 1630 break; 1631 case VMCB_EXIT_MONITOR: 1632 vmexit->exitcode = VM_EXITCODE_MONITOR; 1633 break; 1634 case VMCB_EXIT_MWAIT: 1635 vmexit->exitcode = VM_EXITCODE_MWAIT; 1636 break; 1637 case VMCB_EXIT_PUSHF: { 1638 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { 1639 uint64_t rflags; 1640 1641 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags); 1642 /* Restart this instruction. */ 1643 vmexit->inst_length = 0; 1644 /* Disable PUSHF intercepts - avoid a loop. */ 1645 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, 1646 VMCB_INTCPT_PUSHF, 0); 1647 /* Trace restarted instruction. */ 1648 svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T)); 1649 /* Let the IDT_DB handler know that pushf was stepped. 1650 */ 1651 vcpu->dbg.pushf_sstep = 1; 1652 handled = 1; 1653 } 1654 break; 1655 } 1656 case VMCB_EXIT_POPF: { 1657 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { 1658 uint64_t rflags; 1659 1660 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags); 1661 /* Restart this instruction */ 1662 vmexit->inst_length = 0; 1663 /* Disable POPF intercepts - avoid a loop*/ 1664 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, 1665 VMCB_INTCPT_POPF, 0); 1666 /* Trace restarted instruction */ 1667 svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T)); 1668 vcpu->dbg.popf_sstep = 1; 1669 handled = 1; 1670 } 1671 break; 1672 } 1673 case VMCB_EXIT_SHUTDOWN: 1674 case VMCB_EXIT_VMRUN: 1675 case VMCB_EXIT_VMMCALL: 1676 case VMCB_EXIT_VMLOAD: 1677 case VMCB_EXIT_VMSAVE: 1678 case VMCB_EXIT_STGI: 1679 case VMCB_EXIT_CLGI: 1680 case VMCB_EXIT_SKINIT: 1681 case VMCB_EXIT_ICEBP: 1682 case VMCB_EXIT_INVLPGA: 1683 vm_inject_ud(vcpu->vcpu); 1684 handled = 1; 1685 break; 1686 case VMCB_EXIT_INVD: 1687 case VMCB_EXIT_WBINVD: 1688 /* ignore exit */ 1689 handled = 1; 1690 break; 1691 default: 1692 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1); 1693 break; 1694 } 1695 1696 SVM_CTR4(vcpu, "%s %s vmexit at %#lx/%d", 1697 handled ? "handled" : "unhandled", exit_reason_to_str(code), 1698 vmexit->rip, vmexit->inst_length); 1699 1700 if (handled) { 1701 vmexit->rip += vmexit->inst_length; 1702 vmexit->inst_length = 0; 1703 state->rip = vmexit->rip; 1704 } else { 1705 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 1706 /* 1707 * If this VM exit was not claimed by anybody then 1708 * treat it as a generic SVM exit. 1709 */ 1710 vm_exit_svm(vmexit, code, info1, info2); 1711 } else { 1712 /* 1713 * The exitcode and collateral have been populated. 1714 * The VM exit will be processed further in userland. 
1715 */ 1716 } 1717 } 1718 return (handled); 1719 } 1720 1721 static void 1722 svm_inj_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu) 1723 { 1724 uint64_t intinfo; 1725 1726 if (!vm_entry_intinfo(vcpu->vcpu, &intinfo)) 1727 return; 1728 1729 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " 1730 "valid: %#lx", __func__, intinfo)); 1731 1732 svm_eventinject(vcpu, VMCB_EXITINTINFO_TYPE(intinfo), 1733 VMCB_EXITINTINFO_VECTOR(intinfo), 1734 VMCB_EXITINTINFO_EC(intinfo), 1735 VMCB_EXITINTINFO_EC_VALID(intinfo)); 1736 vmm_stat_incr(vcpu->vcpu, VCPU_INTINFO_INJECTED, 1); 1737 SVM_CTR1(vcpu, "Injected entry intinfo: %#lx", intinfo); 1738 } 1739 1740 /* 1741 * Inject event to virtual cpu. 1742 */ 1743 static void 1744 svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu, 1745 struct vlapic *vlapic) 1746 { 1747 struct vmcb_ctrl *ctrl; 1748 struct vmcb_state *state; 1749 uint8_t v_tpr; 1750 int vector, need_intr_window; 1751 int extint_pending; 1752 1753 if (vcpu->caps & (1 << VM_CAP_MASK_HWINTR)) { 1754 return; 1755 } 1756 1757 state = svm_get_vmcb_state(vcpu); 1758 ctrl = svm_get_vmcb_ctrl(vcpu); 1759 1760 need_intr_window = 0; 1761 1762 if (vcpu->nextrip != state->rip) { 1763 ctrl->intr_shadow = 0; 1764 SVM_CTR2(vcpu, "Guest interrupt blocking " 1765 "cleared due to rip change: %#lx/%#lx", 1766 vcpu->nextrip, state->rip); 1767 } 1768 1769 /* 1770 * Inject pending events or exceptions for this vcpu. 1771 * 1772 * An event might be pending because the previous #VMEXIT happened 1773 * during event delivery (i.e. ctrl->exitintinfo). 1774 * 1775 * An event might also be pending because an exception was injected 1776 * by the hypervisor (e.g. #PF during instruction emulation). 1777 */ 1778 svm_inj_intinfo(sc, vcpu); 1779 1780 /* NMI event has priority over interrupts. */ 1781 if (vm_nmi_pending(vcpu->vcpu)) { 1782 if (nmi_blocked(vcpu)) { 1783 /* 1784 * Can't inject another NMI if the guest has not 1785 * yet executed an "iret" after the last NMI. 1786 */ 1787 SVM_CTR0(vcpu, "Cannot inject NMI due " 1788 "to NMI-blocking"); 1789 } else if (ctrl->intr_shadow) { 1790 /* 1791 * Can't inject an NMI if the vcpu is in an intr_shadow. 1792 */ 1793 SVM_CTR0(vcpu, "Cannot inject NMI due to " 1794 "interrupt shadow"); 1795 need_intr_window = 1; 1796 goto done; 1797 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1798 /* 1799 * If there is already an exception/interrupt pending 1800 * then defer the NMI until after that. 1801 */ 1802 SVM_CTR1(vcpu, "Cannot inject NMI due to " 1803 "eventinj %#lx", ctrl->eventinj); 1804 1805 /* 1806 * Use self-IPI to trigger a VM-exit as soon as 1807 * possible after the event injection is completed. 1808 * 1809 * This works only if the external interrupt exiting 1810 * is at a lower priority than the event injection. 1811 * 1812 * Although not explicitly specified in APMv2 the 1813 * relative priorities were verified empirically. 1814 */ 1815 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? 
*/ 1816 } else { 1817 vm_nmi_clear(vcpu->vcpu); 1818 1819 /* Inject NMI, vector number is not used */ 1820 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_NMI, 1821 IDT_NMI, 0, false); 1822 1823 /* virtual NMI blocking is now in effect */ 1824 enable_nmi_blocking(vcpu); 1825 1826 SVM_CTR0(vcpu, "Injecting vNMI"); 1827 } 1828 } 1829 1830 extint_pending = vm_extint_pending(vcpu->vcpu); 1831 if (!extint_pending) { 1832 if (!vlapic_pending_intr(vlapic, &vector)) 1833 goto done; 1834 KASSERT(vector >= 16 && vector <= 255, 1835 ("invalid vector %d from local APIC", vector)); 1836 } else { 1837 /* Ask the legacy pic for a vector to inject */ 1838 vatpic_pending_intr(sc->vm, &vector); 1839 KASSERT(vector >= 0 && vector <= 255, 1840 ("invalid vector %d from INTR", vector)); 1841 } 1842 1843 /* 1844 * If the guest has disabled interrupts or is in an interrupt shadow 1845 * then we cannot inject the pending interrupt. 1846 */ 1847 if ((state->rflags & PSL_I) == 0) { 1848 SVM_CTR2(vcpu, "Cannot inject vector %d due to " 1849 "rflags %#lx", vector, state->rflags); 1850 need_intr_window = 1; 1851 goto done; 1852 } 1853 1854 if (ctrl->intr_shadow) { 1855 SVM_CTR1(vcpu, "Cannot inject vector %d due to " 1856 "interrupt shadow", vector); 1857 need_intr_window = 1; 1858 goto done; 1859 } 1860 1861 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1862 SVM_CTR2(vcpu, "Cannot inject vector %d due to " 1863 "eventinj %#lx", vector, ctrl->eventinj); 1864 need_intr_window = 1; 1865 goto done; 1866 } 1867 1868 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); 1869 1870 if (!extint_pending) { 1871 vlapic_intr_accepted(vlapic, vector); 1872 } else { 1873 vm_extint_clear(vcpu->vcpu); 1874 vatpic_intr_accepted(sc->vm, vector); 1875 } 1876 1877 /* 1878 * Force a VM-exit as soon as the vcpu is ready to accept another 1879 * interrupt. This is done because the PIC might have another vector 1880 * that it wants to inject. Also, if the APIC has a pending interrupt 1881 * that was preempted by the ExtInt then it allows us to inject the 1882 * APIC vector as soon as possible. 1883 */ 1884 need_intr_window = 1; 1885 done: 1886 /* 1887 * The guest can modify the TPR by writing to %CR8. In guest mode 1888 * the processor reflects this write to V_TPR without hypervisor 1889 * intervention. 1890 * 1891 * The guest can also modify the TPR by writing to it via the memory 1892 * mapped APIC page. In this case, the write will be emulated by the 1893 * hypervisor. For this reason V_TPR must be updated before every 1894 * VMRUN. 1895 */ 1896 v_tpr = vlapic_get_cr8(vlapic); 1897 KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); 1898 if (ctrl->v_tpr != v_tpr) { 1899 SVM_CTR2(vcpu, "VMCB V_TPR changed from %#x to %#x", 1900 ctrl->v_tpr, v_tpr); 1901 ctrl->v_tpr = v_tpr; 1902 svm_set_dirty(vcpu, VMCB_CACHE_TPR); 1903 } 1904 1905 if (need_intr_window) { 1906 /* 1907 * We use V_IRQ in conjunction with the VINTR intercept to 1908 * trap into the hypervisor as soon as a virtual interrupt 1909 * can be delivered. 1910 * 1911 * Since injected events are not subject to intercept checks 1912 * we need to ensure that the V_IRQ is not actually going to 1913 * be delivered on VM entry. The KASSERT below enforces this. 
1914 */ 1915 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 1916 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, 1917 ("Bogus intr_window_exiting: eventinj (%#lx), " 1918 "intr_shadow (%u), rflags (%#lx)", 1919 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); 1920 enable_intr_window_exiting(vcpu); 1921 } else { 1922 disable_intr_window_exiting(vcpu); 1923 } 1924 } 1925 1926 static __inline void 1927 restore_host_tss(void) 1928 { 1929 struct system_segment_descriptor *tss_sd; 1930 1931 /* 1932 * The TSS descriptor was in use prior to launching the guest so it 1933 * has been marked busy. 1934 * 1935 * 'ltr' requires the descriptor to be marked available so change the 1936 * type to "64-bit available TSS". 1937 */ 1938 tss_sd = PCPU_GET(tss); 1939 tss_sd->sd_type = SDT_SYSTSS; 1940 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1941 } 1942 1943 static void 1944 svm_pmap_activate(struct svm_vcpu *vcpu, pmap_t pmap) 1945 { 1946 struct vmcb_ctrl *ctrl; 1947 long eptgen; 1948 int cpu; 1949 bool alloc_asid; 1950 1951 cpu = curcpu; 1952 CPU_SET_ATOMIC(cpu, &pmap->pm_active); 1953 smr_enter(pmap->pm_eptsmr); 1954 1955 ctrl = svm_get_vmcb_ctrl(vcpu); 1956 1957 /* 1958 * The TLB entries associated with the vcpu's ASID are not valid 1959 * if either of the following conditions is true: 1960 * 1961 * 1. The vcpu's ASID generation is different than the host cpu's 1962 * ASID generation. This happens when the vcpu migrates to a new 1963 * host cpu. It can also happen when the number of vcpus executing 1964 * on a host cpu is greater than the number of ASIDs available. 1965 * 1966 * 2. The pmap generation number is different than the value cached in 1967 * the 'vcpustate'. This happens when the host invalidates pages 1968 * belonging to the guest. 1969 * 1970 * asidgen eptgen Action 1971 * mismatch mismatch 1972 * 0 0 (a) 1973 * 0 1 (b1) or (b2) 1974 * 1 0 (c) 1975 * 1 1 (d) 1976 * 1977 * (a) There is no mismatch in eptgen or ASID generation and therefore 1978 * no further action is needed. 1979 * 1980 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is 1981 * retained and the TLB entries associated with this ASID 1982 * are flushed by VMRUN. 1983 * 1984 * (b2) If the cpu does not support FlushByAsid then a new ASID is 1985 * allocated. 1986 * 1987 * (c) A new ASID is allocated. 1988 * 1989 * (d) A new ASID is allocated. 1990 */ 1991 1992 alloc_asid = false; 1993 eptgen = atomic_load_long(&pmap->pm_eptgen); 1994 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; 1995 1996 if (vcpu->asid.gen != asid[cpu].gen) { 1997 alloc_asid = true; /* (c) and (d) */ 1998 } else if (vcpu->eptgen != eptgen) { 1999 if (flush_by_asid()) 2000 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ 2001 else 2002 alloc_asid = true; /* (b2) */ 2003 } else { 2004 /* 2005 * This is the common case (a). 2006 */ 2007 KASSERT(!alloc_asid, ("ASID allocation not necessary")); 2008 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, 2009 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); 2010 } 2011 2012 if (alloc_asid) { 2013 if (++asid[cpu].num >= nasid) { 2014 asid[cpu].num = 1; 2015 if (++asid[cpu].gen == 0) 2016 asid[cpu].gen = 1; 2017 /* 2018 * If this cpu does not support "flush-by-asid" 2019 * then flush the entire TLB on a generation 2020 * bump. Subsequent ASID allocation in this 2021 * generation can be done without a TLB flush. 
2022 */ 2023 if (!flush_by_asid()) 2024 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; 2025 } 2026 vcpu->asid.gen = asid[cpu].gen; 2027 vcpu->asid.num = asid[cpu].num; 2028 2029 ctrl->asid = vcpu->asid.num; 2030 svm_set_dirty(vcpu, VMCB_CACHE_ASID); 2031 /* 2032 * If this cpu supports "flush-by-asid" then the TLB 2033 * was not flushed after the generation bump. The TLB 2034 * is flushed selectively after every new ASID allocation. 2035 */ 2036 if (flush_by_asid()) 2037 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; 2038 } 2039 vcpu->eptgen = eptgen; 2040 2041 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); 2042 KASSERT(ctrl->asid == vcpu->asid.num, 2043 ("ASID mismatch: %u/%u", ctrl->asid, vcpu->asid.num)); 2044 } 2045 2046 static void 2047 svm_pmap_deactivate(pmap_t pmap) 2048 { 2049 smr_exit(pmap->pm_eptsmr); 2050 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); 2051 } 2052 2053 static __inline void 2054 disable_gintr(void) 2055 { 2056 2057 __asm __volatile("clgi"); 2058 } 2059 2060 static __inline void 2061 enable_gintr(void) 2062 { 2063 2064 __asm __volatile("stgi"); 2065 } 2066 2067 static __inline void 2068 svm_dr_enter_guest(struct svm_regctx *gctx) 2069 { 2070 2071 /* Save host control debug registers. */ 2072 gctx->host_dr7 = rdr7(); 2073 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 2074 2075 /* 2076 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 2077 * exceptions in the host based on the guest DRx values. The 2078 * guest DR6, DR7, and DEBUGCTL are saved/restored in the 2079 * VMCB. 2080 */ 2081 load_dr7(0); 2082 wrmsr(MSR_DEBUGCTLMSR, 0); 2083 2084 /* Save host debug registers. */ 2085 gctx->host_dr0 = rdr0(); 2086 gctx->host_dr1 = rdr1(); 2087 gctx->host_dr2 = rdr2(); 2088 gctx->host_dr3 = rdr3(); 2089 gctx->host_dr6 = rdr6(); 2090 2091 /* Restore guest debug registers. */ 2092 load_dr0(gctx->sctx_dr0); 2093 load_dr1(gctx->sctx_dr1); 2094 load_dr2(gctx->sctx_dr2); 2095 load_dr3(gctx->sctx_dr3); 2096 } 2097 2098 static __inline void 2099 svm_dr_leave_guest(struct svm_regctx *gctx) 2100 { 2101 2102 /* Save guest debug registers. */ 2103 gctx->sctx_dr0 = rdr0(); 2104 gctx->sctx_dr1 = rdr1(); 2105 gctx->sctx_dr2 = rdr2(); 2106 gctx->sctx_dr3 = rdr3(); 2107 2108 /* 2109 * Restore host debug registers. Restore DR7 and DEBUGCTL 2110 * last. 2111 */ 2112 load_dr0(gctx->host_dr0); 2113 load_dr1(gctx->host_dr1); 2114 load_dr2(gctx->host_dr2); 2115 load_dr3(gctx->host_dr3); 2116 load_dr6(gctx->host_dr6); 2117 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl); 2118 load_dr7(gctx->host_dr7); 2119 } 2120 2121 /* 2122 * Start vcpu with specified RIP. 2123 */ 2124 static int 2125 svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo) 2126 { 2127 struct svm_regctx *gctx; 2128 struct svm_softc *svm_sc; 2129 struct svm_vcpu *vcpu; 2130 struct vmcb_state *state; 2131 struct vmcb_ctrl *ctrl; 2132 struct vm_exit *vmexit; 2133 struct vlapic *vlapic; 2134 uint64_t vmcb_pa; 2135 int handled; 2136 uint16_t ldt_sel; 2137 2138 vcpu = vcpui; 2139 svm_sc = vcpu->sc; 2140 state = svm_get_vmcb_state(vcpu); 2141 ctrl = svm_get_vmcb_ctrl(vcpu); 2142 vmexit = vm_exitinfo(vcpu->vcpu); 2143 vlapic = vm_lapic(vcpu->vcpu); 2144 2145 gctx = svm_get_guest_regctx(vcpu); 2146 vmcb_pa = vcpu->vmcb_pa; 2147 2148 if (vcpu->lastcpu != curcpu) { 2149 /* 2150 * Force new ASID allocation by invalidating the generation. 2151 */ 2152 vcpu->asid.gen = 0; 2153 2154 /* 2155 * Invalidate the VMCB state cache by marking all fields dirty. 
2156 		 */
2157 		svm_set_dirty(vcpu, 0xffffffff);
2158
2159 		/*
2160 		 * XXX
2161 		 * Setting 'vcpu->lastcpu' here is a bit premature because
2162 		 * we may return from this function without actually executing
2163 		 * the VMRUN instruction. This could happen if a rendezvous
2164 		 * or an AST is pending on the first time through the loop.
2165 		 *
2166 		 * This works for now but any new side-effects of vcpu
2167 		 * migration should take this case into account.
2168 		 */
2169 		vcpu->lastcpu = curcpu;
2170 		vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1);
2171 	}
2172
2173 	svm_msr_guest_enter(vcpu);
2174
2175 	/* Update Guest RIP */
2176 	state->rip = rip;
2177
2178 	do {
2179 		/*
2180 		 * Disable global interrupts to guarantee atomicity during
2181 		 * loading of guest state. This includes not only the state
2182 		 * loaded by the "vmrun" instruction but also software state
2183 		 * maintained by the hypervisor: suspended and rendezvous
2184 		 * state, NPT generation number, vlapic interrupts etc.
2185 		 */
2186 		disable_gintr();
2187
2188 		if (vcpu_suspended(evinfo)) {
2189 			enable_gintr();
2190 			vm_exit_suspended(vcpu->vcpu, state->rip);
2191 			break;
2192 		}
2193
2194 		if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) {
2195 			enable_gintr();
2196 			vm_exit_rendezvous(vcpu->vcpu, state->rip);
2197 			break;
2198 		}
2199
2200 		if (vcpu_reqidle(evinfo)) {
2201 			enable_gintr();
2202 			vm_exit_reqidle(vcpu->vcpu, state->rip);
2203 			break;
2204 		}
2205
2206 		/* The scheduler has asked us to yield the cpu. */
2207 		if (vcpu_should_yield(vcpu->vcpu)) {
2208 			enable_gintr();
2209 			vm_exit_astpending(vcpu->vcpu, state->rip);
2210 			break;
2211 		}
2212
2213 		if (vcpu_debugged(vcpu->vcpu)) {
2214 			enable_gintr();
2215 			vm_exit_debug(vcpu->vcpu, state->rip);
2216 			break;
2217 		}
2218
2219 		/*
2220 		 * #VMEXIT resumes the host with the guest LDTR, so
2221 		 * save the current LDT selector so that it can be restored
2222 		 * after an exit. The userspace hypervisor probably
2223 		 * doesn't use an LDT, but save and restore it to be
2224 		 * safe.
2225 		 */
2226 		ldt_sel = sldt();
2227
2228 		svm_inj_interrupts(svm_sc, vcpu, vlapic);
2229
2230 		/*
2231 		 * Check the pmap generation and the ASID generation to
2232 		 * ensure that the vcpu does not use stale TLB mappings.
2233 		 */
2234 		svm_pmap_activate(vcpu, pmap);
2235
2236 		ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty;
2237 		vcpu->dirty = 0;
2238 		SVM_CTR1(vcpu, "vmcb clean %#x", ctrl->vmcb_clean);
2239
2240 		/* Launch Virtual Machine. */
2241 		SVM_CTR1(vcpu, "Resume execution at %#lx", state->rip);
2242 		svm_dr_enter_guest(gctx);
2243 		svm_launch(vmcb_pa, gctx, get_pcpu());
2244 		svm_dr_leave_guest(gctx);
2245
2246 		svm_pmap_deactivate(pmap);
2247
2248 		/*
2249 		 * The host GDTR and IDTR are saved by VMRUN and restored
2250 		 * automatically on #VMEXIT. However, the host TSS needs
2251 		 * to be restored explicitly.
2252 		 */
2253 		restore_host_tss();
2254
2255 		/* Restore host LDTR. */
2256 		lldt(ldt_sel);
2257
2258 		/* #VMEXIT disables interrupts so re-enable them here. */
2259 		enable_gintr();
2260
2261 		/* Update 'nextrip' */
2262 		vcpu->nextrip = state->rip;
2263
2264 		/* Handle #VMEXIT and, if required, return to user space. */
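		/*
		 * svm_vmexit() returns non-zero when the exit was handled
		 * entirely in the kernel, in which case the loop re-enters
		 * the guest; a zero return breaks out of the loop so the
		 * exit can be reported to userspace via 'vmexit'.
		 */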
2265 		handled = svm_vmexit(svm_sc, vcpu, vmexit);
2266 	} while (handled);
2267
2268 	svm_msr_guest_exit(vcpu);
2269
2270 	return (0);
2271 }
2272
2273 static void
2274 svm_vcpu_cleanup(void *vcpui)
2275 {
2276 	struct svm_vcpu *vcpu = vcpui;
2277
2278 	free(vcpu->vmcb, M_SVM);
2279 	free(vcpu, M_SVM);
2280 }
2281
2282 static void
2283 svm_cleanup(void *vmi)
2284 {
2285 	struct svm_softc *sc = vmi;
2286
2287 	free(sc->iopm_bitmap, M_SVM);
2288 	free(sc->msr_bitmap, M_SVM);
2289 	free(sc, M_SVM);
2290 }
2291
2292 static register_t *
2293 swctx_regptr(struct svm_regctx *regctx, int reg)
2294 {
2295
2296 	switch (reg) {
2297 	case VM_REG_GUEST_RBX:
2298 		return (&regctx->sctx_rbx);
2299 	case VM_REG_GUEST_RCX:
2300 		return (&regctx->sctx_rcx);
2301 	case VM_REG_GUEST_RDX:
2302 		return (&regctx->sctx_rdx);
2303 	case VM_REG_GUEST_RDI:
2304 		return (&regctx->sctx_rdi);
2305 	case VM_REG_GUEST_RSI:
2306 		return (&regctx->sctx_rsi);
2307 	case VM_REG_GUEST_RBP:
2308 		return (&regctx->sctx_rbp);
2309 	case VM_REG_GUEST_R8:
2310 		return (&regctx->sctx_r8);
2311 	case VM_REG_GUEST_R9:
2312 		return (&regctx->sctx_r9);
2313 	case VM_REG_GUEST_R10:
2314 		return (&regctx->sctx_r10);
2315 	case VM_REG_GUEST_R11:
2316 		return (&regctx->sctx_r11);
2317 	case VM_REG_GUEST_R12:
2318 		return (&regctx->sctx_r12);
2319 	case VM_REG_GUEST_R13:
2320 		return (&regctx->sctx_r13);
2321 	case VM_REG_GUEST_R14:
2322 		return (&regctx->sctx_r14);
2323 	case VM_REG_GUEST_R15:
2324 		return (&regctx->sctx_r15);
2325 	case VM_REG_GUEST_DR0:
2326 		return (&regctx->sctx_dr0);
2327 	case VM_REG_GUEST_DR1:
2328 		return (&regctx->sctx_dr1);
2329 	case VM_REG_GUEST_DR2:
2330 		return (&regctx->sctx_dr2);
2331 	case VM_REG_GUEST_DR3:
2332 		return (&regctx->sctx_dr3);
2333 	default:
2334 		return (NULL);
2335 	}
2336 }
2337
2338 static int
2339 svm_getreg(void *vcpui, int ident, uint64_t *val)
2340 {
2341 	struct svm_vcpu *vcpu;
2342 	register_t *reg;
2343
2344 	vcpu = vcpui;
2345
2346 	if (ident == VM_REG_GUEST_INTR_SHADOW) {
2347 		return (svm_get_intr_shadow(vcpu, val));
2348 	}
2349
2350 	if (vmcb_read(vcpu, ident, val) == 0) {
2351 		return (0);
2352 	}
2353
2354 	reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);
2355
2356 	if (reg != NULL) {
2357 		*val = *reg;
2358 		return (0);
2359 	}
2360
2361 	SVM_CTR1(vcpu, "svm_getreg: unknown register %#x", ident);
2362 	return (EINVAL);
2363 }
2364
2365 static int
2366 svm_setreg(void *vcpui, int ident, uint64_t val)
2367 {
2368 	struct svm_vcpu *vcpu;
2369 	register_t *reg;
2370
2371 	vcpu = vcpui;
2372
2373 	if (ident == VM_REG_GUEST_INTR_SHADOW) {
2374 		return (svm_modify_intr_shadow(vcpu, val));
2375 	}
2376
2377 	/* Do not permit user write access to VMCB fields by offset. */
2378 	if (!VMCB_ACCESS_OK(ident)) {
2379 		if (vmcb_write(vcpu, ident, val) == 0) {
2380 			return (0);
2381 		}
2382 	}
2383
2384 	reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);
2385
2386 	if (reg != NULL) {
2387 		*reg = val;
2388 		return (0);
2389 	}
2390
2391 	if (ident == VM_REG_GUEST_ENTRY_INST_LENGTH) {
2392 		/* Ignore. */
2393 		return (0);
2394 	}
2395
2396 	/*
2397 	 * XXX deal with CR3 and invalidate TLB entries tagged with the
2398 	 * vcpu's ASID. This needs to be treated differently depending on
2399 	 * whether 'running' is true/false.
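	 *
	 * (Handling that would presumably mean flushing the TLB entries
	 * tagged with the vcpu's current ASID, either via the flush-by-ASID
	 * capability when available or by forcing a new ASID allocation
	 * otherwise, with extra care needed if the vcpu is currently
	 * executing on another host cpu. Nothing here attempts that; this
	 * note is only a sketch.)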
2400 */ 2401 2402 SVM_CTR1(vcpu, "svm_setreg: unknown register %#x", ident); 2403 return (EINVAL); 2404 } 2405 2406 static int 2407 svm_getdesc(void *vcpui, int reg, struct seg_desc *desc) 2408 { 2409 return (vmcb_getdesc(vcpui, reg, desc)); 2410 } 2411 2412 static int 2413 svm_setdesc(void *vcpui, int reg, struct seg_desc *desc) 2414 { 2415 return (vmcb_setdesc(vcpui, reg, desc)); 2416 } 2417 2418 #ifdef BHYVE_SNAPSHOT 2419 static int 2420 svm_snapshot_reg(void *vcpui, int ident, struct vm_snapshot_meta *meta) 2421 { 2422 int ret; 2423 uint64_t val; 2424 2425 if (meta->op == VM_SNAPSHOT_SAVE) { 2426 ret = svm_getreg(vcpui, ident, &val); 2427 if (ret != 0) 2428 goto done; 2429 2430 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2431 } else if (meta->op == VM_SNAPSHOT_RESTORE) { 2432 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done); 2433 2434 ret = svm_setreg(vcpui, ident, val); 2435 if (ret != 0) 2436 goto done; 2437 } else { 2438 ret = EINVAL; 2439 goto done; 2440 } 2441 2442 done: 2443 return (ret); 2444 } 2445 #endif 2446 2447 static int 2448 svm_setcap(void *vcpui, int type, int val) 2449 { 2450 struct svm_vcpu *vcpu; 2451 struct vlapic *vlapic; 2452 int error; 2453 2454 vcpu = vcpui; 2455 error = 0; 2456 2457 switch (type) { 2458 case VM_CAP_HALT_EXIT: 2459 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, 2460 VMCB_INTCPT_HLT, val); 2461 break; 2462 case VM_CAP_PAUSE_EXIT: 2463 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, 2464 VMCB_INTCPT_PAUSE, val); 2465 break; 2466 case VM_CAP_UNRESTRICTED_GUEST: 2467 /* Unrestricted guest execution cannot be disabled in SVM */ 2468 if (val == 0) 2469 error = EINVAL; 2470 break; 2471 case VM_CAP_BPT_EXIT: 2472 svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP), val); 2473 break; 2474 case VM_CAP_IPI_EXIT: 2475 vlapic = vm_lapic(vcpu->vcpu); 2476 vlapic->ipi_exit = val; 2477 break; 2478 case VM_CAP_MASK_HWINTR: 2479 vcpu->caps &= ~(1 << VM_CAP_MASK_HWINTR); 2480 vcpu->caps |= (val << VM_CAP_MASK_HWINTR); 2481 break; 2482 case VM_CAP_RFLAGS_TF: { 2483 uint64_t rflags; 2484 2485 /* Fetch RFLAGS. */ 2486 if (svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags)) { 2487 error = (EINVAL); 2488 break; 2489 } 2490 if (val) { 2491 /* Save current TF bit. */ 2492 vcpu->dbg.rflags_tf = rflags & PSL_T; 2493 /* Trace next instruction. 
*/ 2494 if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, 2495 (rflags | PSL_T))) { 2496 error = (EINVAL); 2497 break; 2498 } 2499 vcpu->caps |= (1 << VM_CAP_RFLAGS_TF); 2500 } else { 2501 /* 2502 * Restore shadowed RFLAGS.TF only if vCPU was 2503 * previously stepped 2504 */ 2505 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { 2506 rflags &= ~PSL_T; 2507 rflags |= vcpu->dbg.rflags_tf; 2508 vcpu->dbg.rflags_tf = 0; 2509 2510 if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, 2511 rflags)) { 2512 error = (EINVAL); 2513 break; 2514 } 2515 vcpu->caps &= ~(1 << VM_CAP_RFLAGS_TF); 2516 } 2517 } 2518 2519 svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_DB), val); 2520 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_POPF, 2521 val); 2522 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_PUSHF, 2523 val); 2524 break; 2525 } 2526 default: 2527 error = ENOENT; 2528 break; 2529 } 2530 return (error); 2531 } 2532 2533 static int 2534 svm_getcap(void *vcpui, int type, int *retval) 2535 { 2536 struct svm_vcpu *vcpu; 2537 struct vlapic *vlapic; 2538 int error; 2539 2540 vcpu = vcpui; 2541 error = 0; 2542 2543 switch (type) { 2544 case VM_CAP_HALT_EXIT: 2545 *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, 2546 VMCB_INTCPT_HLT); 2547 break; 2548 case VM_CAP_PAUSE_EXIT: 2549 *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, 2550 VMCB_INTCPT_PAUSE); 2551 break; 2552 case VM_CAP_UNRESTRICTED_GUEST: 2553 *retval = 1; /* unrestricted guest is always enabled */ 2554 break; 2555 case VM_CAP_BPT_EXIT: 2556 *retval = svm_get_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP)); 2557 break; 2558 case VM_CAP_IPI_EXIT: 2559 vlapic = vm_lapic(vcpu->vcpu); 2560 *retval = vlapic->ipi_exit; 2561 break; 2562 case VM_CAP_RFLAGS_TF: 2563 *retval = !!(vcpu->caps & (1 << VM_CAP_RFLAGS_TF)); 2564 break; 2565 case VM_CAP_MASK_HWINTR: 2566 *retval = !!(vcpu->caps & (1 << VM_CAP_MASK_HWINTR)); 2567 break; 2568 default: 2569 error = ENOENT; 2570 break; 2571 } 2572 return (error); 2573 } 2574 2575 static struct vmspace * 2576 svm_vmspace_alloc(vm_offset_t min, vm_offset_t max) 2577 { 2578 return (svm_npt_alloc(min, max)); 2579 } 2580 2581 static void 2582 svm_vmspace_free(struct vmspace *vmspace) 2583 { 2584 svm_npt_free(vmspace); 2585 } 2586 2587 static struct vlapic * 2588 svm_vlapic_init(void *vcpui) 2589 { 2590 struct svm_vcpu *vcpu; 2591 struct vlapic *vlapic; 2592 2593 vcpu = vcpui; 2594 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); 2595 vlapic->vm = vcpu->sc->vm; 2596 vlapic->vcpu = vcpu->vcpu; 2597 vlapic->vcpuid = vcpu->vcpuid; 2598 vlapic->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_SVM_VLAPIC, 2599 M_WAITOK | M_ZERO); 2600 2601 vlapic_init(vlapic); 2602 2603 return (vlapic); 2604 } 2605 2606 static void 2607 svm_vlapic_cleanup(struct vlapic *vlapic) 2608 { 2609 2610 vlapic_cleanup(vlapic); 2611 free(vlapic->apic_page, M_SVM_VLAPIC); 2612 free(vlapic, M_SVM_VLAPIC); 2613 } 2614 2615 #ifdef BHYVE_SNAPSHOT 2616 static int 2617 svm_vcpu_snapshot(void *vcpui, struct vm_snapshot_meta *meta) 2618 { 2619 struct svm_vcpu *vcpu; 2620 int err, running, hostcpu; 2621 2622 vcpu = vcpui; 2623 err = 0; 2624 2625 running = vcpu_is_running(vcpu->vcpu, &hostcpu); 2626 if (running && hostcpu != curcpu) { 2627 printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm), 2628 vcpu->vcpuid); 2629 return (EINVAL); 2630 } 2631 2632 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR0, meta); 2633 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR2, meta); 2634 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR3, meta); 2635 err += 
svm_snapshot_reg(vcpu, VM_REG_GUEST_CR4, meta); 2636 2637 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR6, meta); 2638 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR7, meta); 2639 2640 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RAX, meta); 2641 2642 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RSP, meta); 2643 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RIP, meta); 2644 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RFLAGS, meta); 2645 2646 /* Guest segments */ 2647 /* ES */ 2648 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_ES, meta); 2649 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_ES, meta); 2650 2651 /* CS */ 2652 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CS, meta); 2653 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_CS, meta); 2654 2655 /* SS */ 2656 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_SS, meta); 2657 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_SS, meta); 2658 2659 /* DS */ 2660 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DS, meta); 2661 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_DS, meta); 2662 2663 /* FS */ 2664 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_FS, meta); 2665 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_FS, meta); 2666 2667 /* GS */ 2668 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_GS, meta); 2669 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GS, meta); 2670 2671 /* TR */ 2672 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_TR, meta); 2673 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_TR, meta); 2674 2675 /* LDTR */ 2676 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_LDTR, meta); 2677 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_LDTR, meta); 2678 2679 /* EFER */ 2680 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_EFER, meta); 2681 2682 /* IDTR and GDTR */ 2683 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_IDTR, meta); 2684 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GDTR, meta); 2685 2686 /* Specific AMD registers */ 2687 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_INTR_SHADOW, meta); 2688 2689 err += vmcb_snapshot_any(vcpu, 2690 VMCB_ACCESS(VMCB_OFF_CR_INTERCEPT, 4), meta); 2691 err += vmcb_snapshot_any(vcpu, 2692 VMCB_ACCESS(VMCB_OFF_DR_INTERCEPT, 4), meta); 2693 err += vmcb_snapshot_any(vcpu, 2694 VMCB_ACCESS(VMCB_OFF_EXC_INTERCEPT, 4), meta); 2695 err += vmcb_snapshot_any(vcpu, 2696 VMCB_ACCESS(VMCB_OFF_INST1_INTERCEPT, 4), meta); 2697 err += vmcb_snapshot_any(vcpu, 2698 VMCB_ACCESS(VMCB_OFF_INST2_INTERCEPT, 4), meta); 2699 2700 err += vmcb_snapshot_any(vcpu, 2701 VMCB_ACCESS(VMCB_OFF_PAUSE_FILTHRESH, 2), meta); 2702 err += vmcb_snapshot_any(vcpu, 2703 VMCB_ACCESS(VMCB_OFF_PAUSE_FILCNT, 2), meta); 2704 2705 err += vmcb_snapshot_any(vcpu, 2706 VMCB_ACCESS(VMCB_OFF_ASID, 4), meta); 2707 2708 err += vmcb_snapshot_any(vcpu, 2709 VMCB_ACCESS(VMCB_OFF_TLB_CTRL, 4), meta); 2710 2711 err += vmcb_snapshot_any(vcpu, 2712 VMCB_ACCESS(VMCB_OFF_VIRQ, 8), meta); 2713 2714 err += vmcb_snapshot_any(vcpu, 2715 VMCB_ACCESS(VMCB_OFF_EXIT_REASON, 8), meta); 2716 err += vmcb_snapshot_any(vcpu, 2717 VMCB_ACCESS(VMCB_OFF_EXITINFO1, 8), meta); 2718 err += vmcb_snapshot_any(vcpu, 2719 VMCB_ACCESS(VMCB_OFF_EXITINFO2, 8), meta); 2720 err += vmcb_snapshot_any(vcpu, 2721 VMCB_ACCESS(VMCB_OFF_EXITINTINFO, 8), meta); 2722 2723 err += vmcb_snapshot_any(vcpu, 2724 VMCB_ACCESS(VMCB_OFF_NP_ENABLE, 1), meta); 2725 2726 err += vmcb_snapshot_any(vcpu, 2727 VMCB_ACCESS(VMCB_OFF_AVIC_BAR, 8), meta); 2728 err += vmcb_snapshot_any(vcpu, 2729 VMCB_ACCESS(VMCB_OFF_AVIC_PAGE, 8), meta); 2730 err += vmcb_snapshot_any(vcpu, 2731 VMCB_ACCESS(VMCB_OFF_AVIC_LT, 8), meta); 2732 err += vmcb_snapshot_any(vcpu, 2733 
VMCB_ACCESS(VMCB_OFF_AVIC_PT, 8), meta); 2734 2735 err += vmcb_snapshot_any(vcpu, 2736 VMCB_ACCESS(VMCB_OFF_CPL, 1), meta); 2737 2738 err += vmcb_snapshot_any(vcpu, 2739 VMCB_ACCESS(VMCB_OFF_STAR, 8), meta); 2740 err += vmcb_snapshot_any(vcpu, 2741 VMCB_ACCESS(VMCB_OFF_LSTAR, 8), meta); 2742 err += vmcb_snapshot_any(vcpu, 2743 VMCB_ACCESS(VMCB_OFF_CSTAR, 8), meta); 2744 2745 err += vmcb_snapshot_any(vcpu, 2746 VMCB_ACCESS(VMCB_OFF_SFMASK, 8), meta); 2747 2748 err += vmcb_snapshot_any(vcpu, 2749 VMCB_ACCESS(VMCB_OFF_KERNELGBASE, 8), meta); 2750 2751 err += vmcb_snapshot_any(vcpu, 2752 VMCB_ACCESS(VMCB_OFF_SYSENTER_CS, 8), meta); 2753 err += vmcb_snapshot_any(vcpu, 2754 VMCB_ACCESS(VMCB_OFF_SYSENTER_ESP, 8), meta); 2755 err += vmcb_snapshot_any(vcpu, 2756 VMCB_ACCESS(VMCB_OFF_SYSENTER_EIP, 8), meta); 2757 2758 err += vmcb_snapshot_any(vcpu, 2759 VMCB_ACCESS(VMCB_OFF_GUEST_PAT, 8), meta); 2760 2761 err += vmcb_snapshot_any(vcpu, 2762 VMCB_ACCESS(VMCB_OFF_DBGCTL, 8), meta); 2763 err += vmcb_snapshot_any(vcpu, 2764 VMCB_ACCESS(VMCB_OFF_BR_FROM, 8), meta); 2765 err += vmcb_snapshot_any(vcpu, 2766 VMCB_ACCESS(VMCB_OFF_BR_TO, 8), meta); 2767 err += vmcb_snapshot_any(vcpu, 2768 VMCB_ACCESS(VMCB_OFF_INT_FROM, 8), meta); 2769 err += vmcb_snapshot_any(vcpu, 2770 VMCB_ACCESS(VMCB_OFF_INT_TO, 8), meta); 2771 if (err != 0) 2772 goto done; 2773 2774 /* Snapshot swctx for virtual cpu */ 2775 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, err, done); 2776 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, err, done); 2777 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, err, done); 2778 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, err, done); 2779 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, err, done); 2780 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, err, done); 2781 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, err, done); 2782 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, err, done); 2783 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, err, done); 2784 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, err, done); 2785 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, err, done); 2786 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, err, done); 2787 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, err, done); 2788 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, err, done); 2789 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, err, done); 2790 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, err, done); 2791 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, err, done); 2792 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, err, done); 2793 2794 /* Restore other svm_vcpu struct fields */ 2795 2796 /* Restore NEXTRIP field */ 2797 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, err, done); 2798 2799 /* Restore lastcpu field */ 2800 SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, err, done); 2801 SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, err, done); 2802 2803 /* Restore EPTGEN field - EPT is Extended Page Table */ 2804 SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, err, done); 2805 2806 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, err, done); 2807 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, err, done); 2808 2809 SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, sizeof(vcpu->mtrr), meta, err, done); 2810 2811 /* Set all caches dirty */ 2812 if (meta->op == VM_SNAPSHOT_RESTORE) 2813 svm_set_dirty(vcpu, 0xffffffff); 2814 2815 done: 2816 return (err); 2817 } 2818 2819 static int 2820 svm_restore_tsc(void *vcpui, uint64_t offset) 2821 { 2822 struct svm_vcpu *vcpu = vcpui; 2823 2824 svm_set_tsc_offset(vcpu, 
offset); 2825 2826 return (0); 2827 } 2828 #endif 2829 2830 const struct vmm_ops vmm_ops_amd = { 2831 .modinit = svm_modinit, 2832 .modcleanup = svm_modcleanup, 2833 .modresume = svm_modresume, 2834 .modsuspend = svm_modsuspend, 2835 .init = svm_init, 2836 .run = svm_run, 2837 .cleanup = svm_cleanup, 2838 .vcpu_init = svm_vcpu_init, 2839 .vcpu_cleanup = svm_vcpu_cleanup, 2840 .getreg = svm_getreg, 2841 .setreg = svm_setreg, 2842 .getdesc = svm_getdesc, 2843 .setdesc = svm_setdesc, 2844 .getcap = svm_getcap, 2845 .setcap = svm_setcap, 2846 .vmspace_alloc = svm_vmspace_alloc, 2847 .vmspace_free = svm_vmspace_free, 2848 .vlapic_init = svm_vlapic_init, 2849 .vlapic_cleanup = svm_vlapic_cleanup, 2850 #ifdef BHYVE_SNAPSHOT 2851 .vcpu_snapshot = svm_vcpu_snapshot, 2852 .restore_tsc = svm_restore_tsc, 2853 #endif 2854 }; 2855
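/*
 * Illustrative sketch: the generic vmm(4) layer is expected to reach this
 * backend only through 'vmm_ops_amd' above rather than by calling the
 * static functions directly.  A hypothetical caller, using only the
 * signatures defined in this file (with 'vcpui', 'rip', 'pmap' and
 * 'evinfo' supplied by that caller), might look roughly like:
 *
 *	uint64_t val;
 *	int error;
 *
 *	error = vmm_ops_amd.run(vcpui, rip, pmap, &evinfo);
 *	if (error == 0)
 *		error = vmm_ops_amd.getreg(vcpui, VM_REG_GUEST_RIP, &val);
 *
 * The actual dispatch path in the vmm layer is not shown here and may
 * differ.
 */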