1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 /* 30 * This file and its contents are supplied under the terms of the 31 * Common Development and Distribution License ("CDDL"), version 1.0. 32 * You may only use this file in accordance with the terms of version 33 * 1.0 of the CDDL. 34 * 35 * A full copy of the text of the CDDL should have accompanied this 36 * source. A copy of the CDDL is also available via the Internet at 37 * http://www.illumos.org/license/CDDL. 38 * 39 * Copyright 2018 Joyent, Inc. 40 * Copyright 2021 Oxide Computer Company 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/kernel.h> 49 #include <sys/malloc.h> 50 #include <sys/pcpu.h> 51 #include <sys/proc.h> 52 #include <sys/sysctl.h> 53 54 #include <sys/x86_archext.h> 55 #include <sys/trap.h> 56 57 #include <machine/cpufunc.h> 58 #include <machine/psl.h> 59 #include <machine/md_var.h> 60 #include <machine/reg.h> 61 #include <machine/specialreg.h> 62 #include <machine/vmm.h> 63 #include <machine/vmm_dev.h> 64 #include <sys/vmm_instruction_emul.h> 65 #include <sys/vmm_vm.h> 66 #include <sys/vmm_kernel.h> 67 68 #include "vmm_lapic.h" 69 #include "vmm_stat.h" 70 #include "vmm_ktr.h" 71 #include "vmm_ioport.h" 72 #include "vatpic.h" 73 #include "vlapic.h" 74 #include "vlapic_priv.h" 75 76 #include "x86.h" 77 #include "vmcb.h" 78 #include "svm.h" 79 #include "svm_softc.h" 80 #include "svm_msr.h" 81 82 SYSCTL_DECL(_hw_vmm); 83 SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 84 NULL); 85 86 /* 87 * SVM CPUID function 0x8000_000A, edx bit decoding. 88 */ 89 #define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ 90 #define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ 91 #define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ 92 #define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ 93 #define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. 
*/ 94 #define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ 95 #define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ 96 #define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ 97 #define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. */ 98 #define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ 99 #define AMD_CPUID_SVM_AVIC BIT(13) /* AVIC present */ 100 101 #define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ 102 VMCB_CACHE_IOPM | \ 103 VMCB_CACHE_I | \ 104 VMCB_CACHE_TPR | \ 105 VMCB_CACHE_CR2 | \ 106 VMCB_CACHE_CR | \ 107 VMCB_CACHE_DR | \ 108 VMCB_CACHE_DT | \ 109 VMCB_CACHE_SEG | \ 110 VMCB_CACHE_NP) 111 112 static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT; 113 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean, 114 0, NULL); 115 116 static MALLOC_DEFINE(M_SVM, "svm", "svm"); 117 static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); 118 119 /* SVM features advertised by CPUID.8000000AH:EDX */ 120 static uint32_t svm_feature = ~0U; /* AMD SVM features. */ 121 122 static int disable_npf_assist; 123 124 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery"); 125 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry"); 126 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window"); 127 128 static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val); 129 static int svm_getreg(void *arg, int vcpu, int ident, uint64_t *val); 130 static void flush_asid(struct svm_softc *sc, int vcpuid); 131 132 static __inline bool 133 flush_by_asid(void) 134 { 135 return ((svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID) != 0); 136 } 137 138 static __inline bool 139 decode_assist(void) 140 { 141 return ((svm_feature & AMD_CPUID_SVM_DECODE_ASSIST) != 0); 142 } 143 144 static int 145 svm_cleanup(void) 146 { 147 /* This is taken care of by the hma registration */ 148 return (0); 149 } 150 151 static int 152 svm_init(void) 153 { 154 vmcb_clean &= VMCB_CACHE_DEFAULT; 155 156 svm_msr_init(); 157 158 return (0); 159 } 160 161 static void 162 svm_restore(void) 163 { 164 /* No-op on illumos */ 165 } 166 167 /* Pentium compatible MSRs */ 168 #define MSR_PENTIUM_START 0 169 #define MSR_PENTIUM_END 0x1FFF 170 /* AMD 6th generation and Intel compatible MSRs */ 171 #define MSR_AMD6TH_START 0xC0000000UL 172 #define MSR_AMD6TH_END 0xC0001FFFUL 173 /* AMD 7th and 8th generation compatible MSRs */ 174 #define MSR_AMD7TH_START 0xC0010000UL 175 #define MSR_AMD7TH_END 0xC0011FFFUL 176 177 /* 178 * Get the index and bit position for a MSR in permission bitmap. 179 * Two bits are used for each MSR: lower bit for read and higher bit for write. 180 */ 181 static int 182 svm_msr_index(uint64_t msr, int *index, int *bit) 183 { 184 uint32_t base, off; 185 186 *index = -1; 187 *bit = (msr % 4) * 2; 188 base = 0; 189 190 if (msr <= MSR_PENTIUM_END) { 191 *index = msr / 4; 192 return (0); 193 } 194 195 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); 196 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { 197 off = (msr - MSR_AMD6TH_START); 198 *index = (off + base) / 4; 199 return (0); 200 } 201 202 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); 203 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { 204 off = (msr - MSR_AMD7TH_START); 205 *index = (off + base) / 4; 206 return (0); 207 } 208 209 return (EINVAL); 210 } 211 212 /* 213 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor. 
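 *
 * As an illustrative example of the bitmap layout computed by
 * svm_msr_index(): MSR_LSTAR (0xC0000082) lies in the AMD 6th-generation
 * range, so off = 0x82 and base = 0x2000 MSRs, giving
 * index = (0x82 + 0x2000) / 4 = 2080 and bit = (0xC0000082 % 4) * 2 = 4.
 * Clearing bit 4 of perm_bitmap[2080] permits RDMSR of that MSR and
 * clearing bit 5 permits WRMSR.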
214 */ 215 static void 216 svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) 217 { 218 int index, bit, error; 219 220 error = svm_msr_index(msr, &index, &bit); 221 KASSERT(error == 0, ("%s: invalid msr %lx", __func__, msr)); 222 KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE, 223 ("%s: invalid index %d for msr %lx", __func__, index, msr)); 224 KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d " 225 "msr %lx", __func__, bit, msr)); 226 227 if (read) 228 perm_bitmap[index] &= ~(1UL << bit); 229 230 if (write) 231 perm_bitmap[index] &= ~(2UL << bit); 232 } 233 234 static void 235 svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) 236 { 237 238 svm_msr_perm(perm_bitmap, msr, true, true); 239 } 240 241 static void 242 svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) 243 { 244 245 svm_msr_perm(perm_bitmap, msr, true, false); 246 } 247 248 static __inline int 249 svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask) 250 { 251 struct vmcb_ctrl *ctrl; 252 253 KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx)); 254 255 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 256 return (ctrl->intercept[idx] & bitmask ? 1 : 0); 257 } 258 259 static __inline void 260 svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask, 261 int enabled) 262 { 263 struct vmcb_ctrl *ctrl; 264 uint32_t oldval; 265 266 KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx)); 267 268 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 269 oldval = ctrl->intercept[idx]; 270 271 if (enabled) 272 ctrl->intercept[idx] |= bitmask; 273 else 274 ctrl->intercept[idx] &= ~bitmask; 275 276 if (ctrl->intercept[idx] != oldval) { 277 svm_set_dirty(sc, vcpu, VMCB_CACHE_I); 278 VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified " 279 "from %x to %x", idx, oldval, ctrl->intercept[idx]); 280 } 281 } 282 283 static __inline void 284 svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) 285 { 286 287 svm_set_intercept(sc, vcpu, off, bitmask, 0); 288 } 289 290 static __inline void 291 svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) 292 { 293 294 svm_set_intercept(sc, vcpu, off, bitmask, 1); 295 } 296 297 static void 298 vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa, 299 uint64_t msrpm_base_pa, uint64_t np_pml4) 300 { 301 struct vmcb_ctrl *ctrl; 302 struct vmcb_state *state; 303 uint32_t mask; 304 int n; 305 306 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 307 state = svm_get_vmcb_state(sc, vcpu); 308 309 ctrl->iopm_base_pa = iopm_base_pa; 310 ctrl->msrpm_base_pa = msrpm_base_pa; 311 312 /* Enable nested paging */ 313 ctrl->np_ctrl = NP_ENABLE; 314 ctrl->n_cr3 = np_pml4; 315 316 /* 317 * Intercept accesses to the control registers that are not shadowed 318 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. 319 */ 320 for (n = 0; n < 16; n++) { 321 mask = (BIT(n) << 16) | BIT(n); 322 if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8) 323 svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); 324 else 325 svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); 326 } 327 328 /* 329 * Selectively intercept writes to %cr0. This triggers on operations 330 * which would change bits other than TS or MP. 331 */ 332 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 333 VMCB_INTCPT_CR0_WRITE); 334 335 /* 336 * Intercept everything when tracing guest exceptions otherwise 337 * just intercept machine check exception. 
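 *
 * (Of the vectors skipped below, 2 is the NMI, which is handled through the
 * dedicated NMI intercept rather than the exception bitmap, and 9 is the
 * legacy Coprocessor Segment Overrun, reserved on modern CPUs.)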
338 */ 339 if (vcpu_trace_exceptions(sc->vm, vcpu)) { 340 for (n = 0; n < 32; n++) { 341 /* 342 * Skip unimplemented vectors in the exception bitmap. 343 */ 344 if (n == 2 || n == 9) { 345 continue; 346 } 347 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n)); 348 } 349 } else { 350 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); 351 } 352 353 /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ 354 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); 355 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); 356 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); 357 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); 358 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); 359 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); 360 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI); 361 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN); 362 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 363 VMCB_INTCPT_FERR_FREEZE); 364 365 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR); 366 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT); 367 368 /* Intercept privileged invalidation instructions. */ 369 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD); 370 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA); 371 372 /* 373 * Intercept all virtualization-related instructions. 374 * 375 * From section "Canonicalization and Consistency Checks" in APMv2 376 * the VMRUN intercept bit must be set to pass the consistency check. 377 */ 378 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN); 379 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMMCALL); 380 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD); 381 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE); 382 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI); 383 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI); 384 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT); 385 386 /* 387 * The ASID will be set to a non-zero value just before VMRUN. 388 */ 389 ctrl->asid = 0; 390 391 /* 392 * Section 15.21.1, Interrupt Masking in EFLAGS 393 * Section 15.21.2, Virtualizing APIC.TPR 394 * 395 * This must be set for %rflag and %cr8 isolation of guest and host. 396 */ 397 ctrl->v_intr_ctrl |= V_INTR_MASKING; 398 399 /* Enable Last Branch Record aka LBR for debugging */ 400 ctrl->misc_ctrl |= LBR_VIRT_ENABLE; 401 state->dbgctl = BIT(0); 402 403 /* EFER_SVM must always be set when the guest is executing */ 404 state->efer = EFER_SVM; 405 406 /* Set up the PAT to power-on state */ 407 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | 408 PAT_VALUE(1, PAT_WRITE_THROUGH) | 409 PAT_VALUE(2, PAT_UNCACHED) | 410 PAT_VALUE(3, PAT_UNCACHEABLE) | 411 PAT_VALUE(4, PAT_WRITE_BACK) | 412 PAT_VALUE(5, PAT_WRITE_THROUGH) | 413 PAT_VALUE(6, PAT_UNCACHED) | 414 PAT_VALUE(7, PAT_UNCACHEABLE); 415 416 /* Set up DR6/7 to power-on state */ 417 state->dr6 = DBREG_DR6_RESERVED1; 418 state->dr7 = DBREG_DR7_RESERVED1; 419 } 420 421 /* 422 * Initialize a virtual machine. 
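 *
 * This allocates the per-VM softc, sets up the shared MSR and I/O
 * permission bitmaps (which start out intercepting everything), records
 * the nested page table root, and initializes a VMCB and MSR state for
 * each vcpu.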
423 */ 424 static void * 425 svm_vminit(struct vm *vm) 426 { 427 struct svm_softc *svm_sc; 428 struct svm_vcpu *vcpu; 429 vm_paddr_t msrpm_pa, iopm_pa, pml4_pa; 430 int i; 431 uint16_t maxcpus; 432 433 svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO); 434 if (((uintptr_t)svm_sc & PAGE_MASK) != 0) 435 panic("malloc of svm_softc not aligned on page boundary"); 436 437 svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM, 438 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0); 439 if (svm_sc->msr_bitmap == NULL) 440 panic("contigmalloc of SVM MSR bitmap failed"); 441 svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM, 442 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0); 443 if (svm_sc->iopm_bitmap == NULL) 444 panic("contigmalloc of SVM IO bitmap failed"); 445 446 svm_sc->vm = vm; 447 svm_sc->nptp = vmspace_table_root(vm_get_vmspace(vm)); 448 449 /* 450 * Intercept read and write accesses to all MSRs. 451 */ 452 memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE); 453 454 /* 455 * Access to the following MSRs is redirected to the VMCB when the 456 * guest is executing. Therefore it is safe to allow the guest to 457 * read/write these MSRs directly without hypervisor involvement. 458 */ 459 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); 460 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); 461 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); 462 463 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); 464 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); 465 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); 466 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); 467 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); 468 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); 469 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); 470 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); 471 472 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); 473 474 /* 475 * Intercept writes to make sure that the EFER_SVM bit is not cleared. 476 */ 477 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); 478 479 /* Intercept access to all I/O ports. */ 480 memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE); 481 482 iopm_pa = vtophys(svm_sc->iopm_bitmap); 483 msrpm_pa = vtophys(svm_sc->msr_bitmap); 484 pml4_pa = svm_sc->nptp; 485 maxcpus = vm_get_maxcpus(svm_sc->vm); 486 for (i = 0; i < maxcpus; i++) { 487 vcpu = svm_get_vcpu(svm_sc, i); 488 vcpu->nextrip = ~0; 489 vcpu->lastcpu = NOCPU; 490 vcpu->vmcb_pa = vtophys(&vcpu->vmcb); 491 vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa); 492 svm_msr_guest_init(svm_sc, i); 493 } 494 return (svm_sc); 495 } 496 497 /* 498 * Collateral for a generic SVM VM-exit. 499 */ 500 static void 501 vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) 502 { 503 504 vme->exitcode = VM_EXITCODE_SVM; 505 vme->u.svm.exitcode = code; 506 vme->u.svm.exitinfo1 = info1; 507 vme->u.svm.exitinfo2 = info2; 508 } 509 510 static int 511 svm_cpl(struct vmcb_state *state) 512 { 513 514 /* 515 * From APMv2: 516 * "Retrieve the CPL from the CPL field in the VMCB, not 517 * from any segment DPL" 518 */ 519 return (state->cpl); 520 } 521 522 static enum vm_cpu_mode 523 svm_vcpu_mode(struct vmcb *vmcb) 524 { 525 struct vmcb_state *state; 526 527 state = &vmcb->state; 528 529 if (state->efer & EFER_LMA) { 530 struct vmcb_segment *seg; 531 532 /* 533 * Section 4.8.1 for APM2, check if Code Segment has 534 * Long attribute set in descriptor. 
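 *
 * With EFER.LMA set, CS.L = 1 selects 64-bit mode while CS.L = 0 selects
 * 32-bit compatibility mode.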
535 */ 536 seg = vmcb_segptr(vmcb, VM_REG_GUEST_CS); 537 if (seg->attrib & VMCB_CS_ATTRIB_L) 538 return (CPU_MODE_64BIT); 539 else 540 return (CPU_MODE_COMPATIBILITY); 541 } else if (state->cr0 & CR0_PE) { 542 return (CPU_MODE_PROTECTED); 543 } else { 544 return (CPU_MODE_REAL); 545 } 546 } 547 548 static enum vm_paging_mode 549 svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) 550 { 551 552 if ((cr0 & CR0_PG) == 0) 553 return (PAGING_MODE_FLAT); 554 if ((cr4 & CR4_PAE) == 0) 555 return (PAGING_MODE_32); 556 if (efer & EFER_LME) 557 return (PAGING_MODE_64); 558 else 559 return (PAGING_MODE_PAE); 560 } 561 562 /* 563 * ins/outs utility routines 564 */ 565 566 static void 567 svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) 568 { 569 struct vmcb_state *state; 570 571 state = &vmcb->state; 572 paging->cr3 = state->cr3; 573 paging->cpl = svm_cpl(state); 574 paging->cpu_mode = svm_vcpu_mode(vmcb); 575 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, 576 state->efer); 577 } 578 579 #define UNHANDLED 0 580 581 /* 582 * Handle guest I/O intercept. 583 */ 584 static int 585 svm_handle_inout(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 586 { 587 struct vmcb_ctrl *ctrl; 588 struct vmcb_state *state; 589 struct vm_inout *inout; 590 struct vie *vie; 591 uint64_t info1; 592 struct vm_guest_paging paging; 593 594 state = svm_get_vmcb_state(svm_sc, vcpu); 595 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 596 inout = &vmexit->u.inout; 597 info1 = ctrl->exitinfo1; 598 599 inout->bytes = (info1 >> 4) & 0x7; 600 inout->flags = 0; 601 inout->flags |= (info1 & BIT(0)) ? INOUT_IN : 0; 602 inout->flags |= (info1 & BIT(3)) ? INOUT_REP : 0; 603 inout->flags |= (info1 & BIT(2)) ? INOUT_STR : 0; 604 inout->port = (uint16_t)(info1 >> 16); 605 inout->eax = (uint32_t)(state->rax); 606 607 if ((inout->flags & INOUT_STR) != 0) { 608 /* 609 * The effective segment number in EXITINFO1[12:10] is populated 610 * only if the processor has the DecodeAssist capability. 611 * 612 * This is not specified explicitly in APMv2 but can be verified 613 * empirically. 614 */ 615 if (!decode_assist()) { 616 /* 617 * Without decoding assistance, force the task of 618 * emulating the ins/outs on userspace. 619 */ 620 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 621 bzero(&vmexit->u.inst_emul, 622 sizeof (vmexit->u.inst_emul)); 623 return (UNHANDLED); 624 } 625 626 /* 627 * Bits 7-9 encode the address size of ins/outs operations where 628 * the 1/2/4 values correspond to 16/32/64 bit sizes. 629 */ 630 inout->addrsize = 2 * ((info1 >> 7) & 0x7); 631 VERIFY(inout->addrsize == 2 || inout->addrsize == 4 || 632 inout->addrsize == 8); 633 634 if (inout->flags & INOUT_IN) { 635 /* 636 * For INS instructions, %es (encoded as 0) is the 637 * implied segment for the operation. 638 */ 639 inout->segment = 0; 640 } else { 641 /* 642 * Bits 10-12 encode the segment for OUTS. 643 * This value follows the standard x86 segment order. 
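 *
 * That order is ES (0), CS (1), SS (2), DS (3), FS (4), GS (5).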
644 */ 645 inout->segment = (info1 >> 10) & 0x7; 646 } 647 } 648 649 vmexit->exitcode = VM_EXITCODE_INOUT; 650 svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &paging); 651 vie = vm_vie_ctx(svm_sc->vm, vcpu); 652 vie_init_inout(vie, inout, vmexit->inst_length, &paging); 653 654 /* The in/out emulation will handle advancing %rip */ 655 vmexit->inst_length = 0; 656 657 return (UNHANDLED); 658 } 659 660 static int 661 npf_fault_type(uint64_t exitinfo1) 662 { 663 664 if (exitinfo1 & VMCB_NPF_INFO1_W) 665 return (PROT_WRITE); 666 else if (exitinfo1 & VMCB_NPF_INFO1_ID) 667 return (PROT_EXEC); 668 else 669 return (PROT_READ); 670 } 671 672 static bool 673 svm_npf_emul_fault(uint64_t exitinfo1) 674 { 675 if (exitinfo1 & VMCB_NPF_INFO1_ID) { 676 return (false); 677 } 678 679 if (exitinfo1 & VMCB_NPF_INFO1_GPT) { 680 return (false); 681 } 682 683 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { 684 return (false); 685 } 686 687 return (true); 688 } 689 690 static void 691 svm_handle_mmio_emul(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit, 692 uint64_t gpa) 693 { 694 struct vmcb_ctrl *ctrl; 695 struct vmcb *vmcb; 696 struct vie *vie; 697 struct vm_guest_paging paging; 698 struct vmcb_segment *seg; 699 char *inst_bytes = NULL; 700 uint8_t inst_len = 0; 701 702 vmcb = svm_get_vmcb(svm_sc, vcpu); 703 ctrl = &vmcb->ctrl; 704 705 vmexit->exitcode = VM_EXITCODE_MMIO_EMUL; 706 vmexit->u.mmio_emul.gpa = gpa; 707 vmexit->u.mmio_emul.gla = VIE_INVALID_GLA; 708 svm_paging_info(vmcb, &paging); 709 710 switch (paging.cpu_mode) { 711 case CPU_MODE_REAL: 712 seg = vmcb_segptr(vmcb, VM_REG_GUEST_CS); 713 vmexit->u.mmio_emul.cs_base = seg->base; 714 vmexit->u.mmio_emul.cs_d = 0; 715 break; 716 case CPU_MODE_PROTECTED: 717 case CPU_MODE_COMPATIBILITY: 718 seg = vmcb_segptr(vmcb, VM_REG_GUEST_CS); 719 vmexit->u.mmio_emul.cs_base = seg->base; 720 721 /* 722 * Section 4.8.1 of APM2, Default Operand Size or D bit. 723 */ 724 vmexit->u.mmio_emul.cs_d = (seg->attrib & VMCB_CS_ATTRIB_D) ? 725 1 : 0; 726 break; 727 default: 728 vmexit->u.mmio_emul.cs_base = 0; 729 vmexit->u.mmio_emul.cs_d = 0; 730 break; 731 } 732 733 /* 734 * Copy the instruction bytes into 'vie' if available. 735 */ 736 if (decode_assist() && !disable_npf_assist) { 737 inst_len = ctrl->inst_len; 738 inst_bytes = (char *)ctrl->inst_bytes; 739 } 740 vie = vm_vie_ctx(svm_sc->vm, vcpu); 741 vie_init_mmio(vie, inst_bytes, inst_len, &paging, gpa); 742 } 743 744 /* 745 * Do not allow CD, NW, or invalid high bits to be asserted in the value of cr0 746 * which is live in the guest. They are visible via the shadow instead. 747 */ 748 #define SVM_CR0_MASK ~(CR0_CD | CR0_NW | 0xffffffff00000000) 749 750 static void 751 svm_set_cr0(struct svm_softc *svm_sc, int vcpu, uint64_t val, bool guest_write) 752 { 753 struct vmcb_state *state; 754 struct svm_regctx *regctx; 755 uint64_t masked, old, diff; 756 757 state = svm_get_vmcb_state(svm_sc, vcpu); 758 regctx = svm_get_guest_regctx(svm_sc, vcpu); 759 760 old = state->cr0 | (regctx->sctx_cr0_shadow & ~SVM_CR0_MASK); 761 diff = old ^ val; 762 763 /* No further work needed if register contents remain the same */ 764 if (diff == 0) { 765 return; 766 } 767 768 /* Flush the TLB if the paging or write-protect bits are changing */ 769 if ((diff & CR0_PG) != 0 || (diff & CR0_WP) != 0) { 770 flush_asid(svm_sc, vcpu); 771 } 772 773 /* 774 * If the change in %cr0 is due to a guest action (via interception) 775 * then other CPU state updates may be required. 
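 *
 * In particular, long mode activation follows EFER.LMA = EFER.LME & CR0.PG,
 * so toggling CR0.PG while EFER.LME is set must update EFER.LMA to match,
 * as done below.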
776 */ 777 if (guest_write) { 778 if ((diff & CR0_PG) != 0) { 779 uint64_t efer = state->efer; 780 781 /* Keep the long-mode state in EFER in sync */ 782 if ((val & CR0_PG) != 0 && (efer & EFER_LME) != 0) { 783 state->efer |= EFER_LMA; 784 } 785 if ((val & CR0_PG) == 0 && (efer & EFER_LME) != 0) { 786 state->efer &= ~EFER_LMA; 787 } 788 } 789 } 790 791 masked = val & SVM_CR0_MASK; 792 regctx->sctx_cr0_shadow = val; 793 state->cr0 = masked; 794 svm_set_dirty(svm_sc, vcpu, VMCB_CACHE_CR); 795 796 if ((masked ^ val) != 0) { 797 /* 798 * The guest has set bits in %cr0 which we are masking out and 799 * exposing via shadow. 800 * 801 * We must intercept %cr0 reads in order to make the shadowed 802 * view available to the guest. 803 * 804 * Writes to %cr0 must also be intercepted (unconditionally, 805 * unlike the VMCB_INTCPT_CR0_WRITE mechanism) so we can catch 806 * if/when the guest clears those shadowed bits. 807 */ 808 svm_enable_intercept(svm_sc, vcpu, VMCB_CR_INTCPT, 809 BIT(0) | BIT(16)); 810 } else { 811 /* 812 * When no bits remain in %cr0 which require shadowing, the 813 * unconditional intercept of reads/writes to %cr0 can be 814 * disabled. 815 * 816 * The selective write intercept (VMCB_INTCPT_CR0_WRITE) remains 817 * in place so we can be notified of operations which change 818 * bits other than TS or MP. 819 */ 820 svm_disable_intercept(svm_sc, vcpu, VMCB_CR_INTCPT, 821 BIT(0) | BIT(16)); 822 } 823 svm_set_dirty(svm_sc, vcpu, VMCB_CACHE_I); 824 } 825 826 static void 827 svm_get_cr0(struct svm_softc *svm_sc, int vcpu, uint64_t *val) 828 { 829 struct vmcb *vmcb; 830 struct svm_regctx *regctx; 831 832 vmcb = svm_get_vmcb(svm_sc, vcpu); 833 regctx = svm_get_guest_regctx(svm_sc, vcpu); 834 835 /* 836 * Include the %cr0 bits which exist only in the shadow along with those 837 * in the running vCPU state. 838 */ 839 *val = vmcb->state.cr0 | (regctx->sctx_cr0_shadow & ~SVM_CR0_MASK); 840 } 841 842 static void 843 svm_handle_cr0_read(struct svm_softc *svm_sc, int vcpu, enum vm_reg_name reg) 844 { 845 uint64_t val; 846 int err; 847 848 svm_get_cr0(svm_sc, vcpu, &val); 849 err = svm_setreg(svm_sc, vcpu, reg, val); 850 ASSERT(err == 0); 851 } 852 853 static void 854 svm_handle_cr0_write(struct svm_softc *svm_sc, int vcpu, enum vm_reg_name reg) 855 { 856 struct vmcb_state *state; 857 uint64_t val; 858 int err; 859 860 state = svm_get_vmcb_state(svm_sc, vcpu); 861 862 err = svm_getreg(svm_sc, vcpu, reg, &val); 863 ASSERT(err == 0); 864 865 if ((val & CR0_NW) != 0 && (val & CR0_CD) == 0) { 866 /* NW without CD is nonsensical */ 867 vm_inject_gp(svm_sc->vm, vcpu); 868 return; 869 } 870 if ((val & CR0_PG) != 0 && (val & CR0_PE) == 0) { 871 /* PG requires PE */ 872 vm_inject_gp(svm_sc->vm, vcpu); 873 return; 874 } 875 if ((state->cr0 & CR0_PG) == 0 && (val & CR0_PG) != 0) { 876 /* When enabling paging, PAE must be enabled if LME is. 
*/ 877 if ((state->efer & EFER_LME) != 0 && 878 (state->cr4 & CR4_PAE) == 0) { 879 vm_inject_gp(svm_sc->vm, vcpu); 880 return; 881 } 882 } 883 884 svm_set_cr0(svm_sc, vcpu, val, true); 885 } 886 887 static void 888 svm_inst_emul_other(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 889 { 890 struct vie *vie; 891 struct vm_guest_paging paging; 892 893 /* Let the instruction emulation (hopefully in-kernel) handle it */ 894 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 895 bzero(&vmexit->u.inst_emul, sizeof (vmexit->u.inst_emul)); 896 vie = vm_vie_ctx(svm_sc->vm, vcpu); 897 svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &paging); 898 vie_init_other(vie, &paging); 899 900 /* The instruction emulation will handle advancing %rip */ 901 vmexit->inst_length = 0; 902 } 903 904 static void 905 svm_update_virqinfo(struct svm_softc *sc, int vcpu) 906 { 907 struct vm *vm; 908 struct vlapic *vlapic; 909 struct vmcb_ctrl *ctrl; 910 911 vm = sc->vm; 912 vlapic = vm_lapic(vm, vcpu); 913 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 914 915 /* Update %cr8 in the emulated vlapic */ 916 vlapic_set_cr8(vlapic, ctrl->v_tpr); 917 918 /* Virtual interrupt injection is not used. */ 919 KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid " 920 "v_intr_vector %d", __func__, ctrl->v_intr_vector)); 921 } 922 923 static void 924 svm_save_exitintinfo(struct svm_softc *svm_sc, int vcpu) 925 { 926 struct vmcb_ctrl *ctrl; 927 uint64_t intinfo; 928 int err; 929 930 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 931 intinfo = ctrl->exitintinfo; 932 if (!VMCB_EXITINTINFO_VALID(intinfo)) 933 return; 934 935 /* 936 * From APMv2, Section "Intercepts during IDT interrupt delivery" 937 * 938 * If a #VMEXIT happened during event delivery then record the event 939 * that was being delivered. 940 */ 941 VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", 942 intinfo, VMCB_EXITINTINFO_VECTOR(intinfo)); 943 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1); 944 err = vm_exit_intinfo(svm_sc->vm, vcpu, intinfo); 945 VERIFY0(err); 946 } 947 948 static __inline int 949 vintr_intercept_enabled(struct svm_softc *sc, int vcpu) 950 { 951 952 return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 953 VMCB_INTCPT_VINTR)); 954 } 955 956 static void 957 svm_enable_intr_window_exiting(struct svm_softc *sc, int vcpu) 958 { 959 struct vmcb_ctrl *ctrl; 960 struct vmcb_state *state; 961 962 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 963 state = svm_get_vmcb_state(sc, vcpu); 964 965 if ((ctrl->v_irq & V_IRQ) != 0 && ctrl->v_intr_vector == 0) { 966 KASSERT(ctrl->v_intr_prio & V_IGN_TPR, 967 ("%s: invalid v_ign_tpr", __func__)); 968 KASSERT(vintr_intercept_enabled(sc, vcpu), 969 ("%s: vintr intercept should be enabled", __func__)); 970 return; 971 } 972 973 /* 974 * We use V_IRQ in conjunction with the VINTR intercept to trap into the 975 * hypervisor as soon as a virtual interrupt can be delivered. 976 * 977 * Since injected events are not subject to intercept checks we need to 978 * ensure that the V_IRQ is not actually going to be delivered on VM 979 * entry. 
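 *
 * That is guaranteed when at least one of the following holds: an event
 * is already queued in EVENTINJ, the guest has interrupts disabled
 * (RFLAGS.IF clear), or an interrupt shadow is in effect, which is what
 * the VERIFY below asserts.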
980 */ 981 VERIFY((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || 982 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow); 983 984 VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting"); 985 ctrl->v_irq |= V_IRQ; 986 ctrl->v_intr_prio |= V_IGN_TPR; 987 ctrl->v_intr_vector = 0; 988 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 989 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 990 } 991 992 static void 993 svm_disable_intr_window_exiting(struct svm_softc *sc, int vcpu) 994 { 995 struct vmcb_ctrl *ctrl; 996 997 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 998 999 if ((ctrl->v_irq & V_IRQ) == 0 && ctrl->v_intr_vector == 0) { 1000 KASSERT(!vintr_intercept_enabled(sc, vcpu), 1001 ("%s: vintr intercept should be disabled", __func__)); 1002 return; 1003 } 1004 1005 VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting"); 1006 ctrl->v_irq &= ~V_IRQ; 1007 ctrl->v_intr_vector = 0; 1008 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1009 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); 1010 } 1011 1012 /* 1013 * Once an NMI is injected it blocks delivery of further NMIs until the handler 1014 * executes an IRET. The IRET intercept is enabled when an NMI is injected to 1015 * to track when the vcpu is done handling the NMI. 1016 */ 1017 static int 1018 svm_nmi_blocked(struct svm_softc *sc, int vcpu) 1019 { 1020 return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, 1021 VMCB_INTCPT_IRET)); 1022 } 1023 1024 static void 1025 svm_clear_nmi_blocking(struct svm_softc *sc, int vcpu) 1026 { 1027 struct vmcb_ctrl *ctrl; 1028 1029 KASSERT(svm_nmi_blocked(sc, vcpu), ("vNMI already unblocked")); 1030 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared"); 1031 /* 1032 * When the IRET intercept is cleared the vcpu will attempt to execute 1033 * the "iret" when it runs next. However, it is possible to inject 1034 * another NMI into the vcpu before the "iret" has actually executed. 1035 * 1036 * For e.g. if the "iret" encounters a #NPF when accessing the stack 1037 * it will trap back into the hypervisor. If an NMI is pending for 1038 * the vcpu it will be injected into the guest. 1039 * 1040 * XXX this needs to be fixed 1041 */ 1042 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1043 1044 /* 1045 * Set an interrupt shadow to prevent an NMI from being immediately 1046 * injected on the next VMRUN. 1047 */ 1048 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1049 ctrl->intr_shadow = 1; 1050 } 1051 1052 static void 1053 svm_inject_event(struct svm_softc *sc, int vcpu, uint64_t intinfo) 1054 { 1055 struct vmcb_ctrl *ctrl; 1056 uint8_t vector; 1057 uint32_t evtype; 1058 1059 ASSERT(VMCB_EXITINTINFO_VALID(intinfo)); 1060 1061 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1062 vector = VMCB_EXITINTINFO_VECTOR(intinfo); 1063 evtype = VMCB_EXITINTINFO_TYPE(intinfo); 1064 1065 switch (evtype) { 1066 case VMCB_EVENTINJ_TYPE_INTR: 1067 case VMCB_EVENTINJ_TYPE_NMI: 1068 case VMCB_EVENTINJ_TYPE_INTn: 1069 break; 1070 case VMCB_EVENTINJ_TYPE_EXCEPTION: 1071 VERIFY(vector <= 31); 1072 /* 1073 * NMIs are expected to be injected with VMCB_EVENTINJ_TYPE_NMI, 1074 * rather than as an exception with the NMI vector. 
1075 */ 1076 VERIFY(vector != 2); 1077 break; 1078 default: 1079 panic("unexpected event type %x", evtype); 1080 } 1081 1082 ctrl->eventinj = VMCB_EVENTINJ_VALID | evtype | vector; 1083 if (VMCB_EXITINTINFO_EC_VALID(intinfo)) { 1084 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; 1085 ctrl->eventinj |= (uint64_t)VMCB_EXITINTINFO_EC(intinfo) << 32; 1086 } 1087 } 1088 1089 static void 1090 svm_inject_nmi(struct svm_softc *sc, int vcpu) 1091 { 1092 struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1093 1094 ASSERT(!svm_nmi_blocked(sc, vcpu)); 1095 1096 ctrl->eventinj = VMCB_EVENTINJ_VALID | VMCB_EVENTINJ_TYPE_NMI; 1097 vm_nmi_clear(sc->vm, vcpu); 1098 1099 /* 1100 * Virtual NMI blocking is now in effect. 1101 * 1102 * Not only does this block a subsequent NMI injection from taking 1103 * place, it also configures an intercept on the IRET so we can track 1104 * when the next injection can take place. 1105 */ 1106 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); 1107 } 1108 1109 static void 1110 svm_inject_irq(struct svm_softc *sc, int vcpu, int vector) 1111 { 1112 struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1113 1114 ASSERT(vector >= 0 && vector <= 255); 1115 1116 ctrl->eventinj = VMCB_EVENTINJ_VALID | vector; 1117 } 1118 1119 #define EFER_MBZ_BITS 0xFFFFFFFFFFFF0200UL 1120 1121 static vm_msr_result_t 1122 svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval) 1123 { 1124 struct vmcb_state *state = svm_get_vmcb_state(sc, vcpu); 1125 uint64_t lma; 1126 int error; 1127 1128 newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */ 1129 1130 if (newval & EFER_MBZ_BITS) { 1131 return (VMR_GP); 1132 } 1133 1134 /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */ 1135 const uint64_t changed = state->efer ^ newval; 1136 if (changed & EFER_LME) { 1137 if (state->cr0 & CR0_PG) { 1138 return (VMR_GP); 1139 } 1140 } 1141 1142 /* EFER.LMA = EFER.LME & CR0.PG */ 1143 if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) { 1144 lma = EFER_LMA; 1145 } else { 1146 lma = 0; 1147 } 1148 if ((newval & EFER_LMA) != lma) { 1149 return (VMR_GP); 1150 } 1151 1152 if ((newval & EFER_NXE) != 0 && 1153 !vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE)) { 1154 return (VMR_GP); 1155 } 1156 if ((newval & EFER_FFXSR) != 0 && 1157 !vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR)) { 1158 return (VMR_GP); 1159 } 1160 if ((newval & EFER_TCE) != 0 && 1161 !vm_cpuid_capability(sc->vm, vcpu, VCC_TCE)) { 1162 return (VMR_GP); 1163 } 1164 1165 /* 1166 * Until bhyve has proper support for long-mode segment limits, just 1167 * toss a #GP at the guest if they attempt to use it. 
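 *
 * (EFER.LMSLE enables segment-limit checking in long mode; since that is
 * not emulated here, attempts to set the bit are refused.)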
1168 */ 1169 if (newval & EFER_LMSLE) { 1170 return (VMR_GP); 1171 } 1172 1173 error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval); 1174 VERIFY0(error); 1175 return (VMR_OK); 1176 } 1177 1178 static int 1179 svm_handle_msr(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit, 1180 bool is_wrmsr) 1181 { 1182 struct vmcb_state *state = svm_get_vmcb_state(svm_sc, vcpu); 1183 struct svm_regctx *ctx = svm_get_guest_regctx(svm_sc, vcpu); 1184 const uint32_t ecx = ctx->sctx_rcx; 1185 vm_msr_result_t res; 1186 uint64_t val = 0; 1187 1188 if (is_wrmsr) { 1189 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1); 1190 val = ctx->sctx_rdx << 32 | (uint32_t)state->rax; 1191 1192 if (vlapic_owned_msr(ecx)) { 1193 struct vlapic *vlapic = vm_lapic(svm_sc->vm, vcpu); 1194 1195 res = vlapic_wrmsr(vlapic, ecx, val); 1196 } else if (ecx == MSR_EFER) { 1197 res = svm_write_efer(svm_sc, vcpu, val); 1198 } else { 1199 res = svm_wrmsr(svm_sc, vcpu, ecx, val); 1200 } 1201 } else { 1202 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1); 1203 1204 if (vlapic_owned_msr(ecx)) { 1205 struct vlapic *vlapic = vm_lapic(svm_sc->vm, vcpu); 1206 1207 res = vlapic_rdmsr(vlapic, ecx, &val); 1208 } else { 1209 res = svm_rdmsr(svm_sc, vcpu, ecx, &val); 1210 } 1211 } 1212 1213 switch (res) { 1214 case VMR_OK: 1215 /* Store rdmsr result in the appropriate registers */ 1216 if (!is_wrmsr) { 1217 state->rax = (uint32_t)val; 1218 ctx->sctx_rdx = val >> 32; 1219 } 1220 return (1); 1221 case VMR_GP: 1222 vm_inject_gp(svm_sc->vm, vcpu); 1223 return (1); 1224 case VMR_UNHANLDED: 1225 vmexit->exitcode = is_wrmsr ? 1226 VM_EXITCODE_WRMSR : VM_EXITCODE_RDMSR; 1227 vmexit->u.msr.code = ecx; 1228 vmexit->u.msr.wval = val; 1229 return (0); 1230 default: 1231 panic("unexpected msr result %u\n", res); 1232 } 1233 } 1234 1235 /* 1236 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs 1237 * that are due to instruction intercepts as well as MSR and IOIO intercepts 1238 * and exceptions caused by INT3, INTO and BOUND instructions. 1239 * 1240 * Return 1 if the nRIP is valid and 0 otherwise. 1241 */ 1242 static int 1243 nrip_valid(uint64_t exitcode) 1244 { 1245 switch (exitcode) { 1246 case 0x00 ... 0x0F: /* read of CR0 through CR15 */ 1247 case 0x10 ... 0x1F: /* write of CR0 through CR15 */ 1248 case 0x20 ... 0x2F: /* read of DR0 through DR15 */ 1249 case 0x30 ... 0x3F: /* write of DR0 through DR15 */ 1250 case 0x43: /* INT3 */ 1251 case 0x44: /* INTO */ 1252 case 0x45: /* BOUND */ 1253 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */ 1254 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */ 1255 return (1); 1256 default: 1257 return (0); 1258 } 1259 } 1260 1261 static int 1262 svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) 1263 { 1264 struct vmcb *vmcb; 1265 struct vmcb_state *state; 1266 struct vmcb_ctrl *ctrl; 1267 struct svm_regctx *ctx; 1268 uint64_t code, info1, info2; 1269 int error, errcode_valid = 0, handled, idtvec, reflect; 1270 1271 ctx = svm_get_guest_regctx(svm_sc, vcpu); 1272 vmcb = svm_get_vmcb(svm_sc, vcpu); 1273 state = &vmcb->state; 1274 ctrl = &vmcb->ctrl; 1275 1276 handled = 0; 1277 code = ctrl->exitcode; 1278 info1 = ctrl->exitinfo1; 1279 info2 = ctrl->exitinfo2; 1280 1281 vmexit->exitcode = VM_EXITCODE_BOGUS; 1282 vmexit->rip = state->rip; 1283 vmexit->inst_length = nrip_valid(code) ? 
ctrl->nrip - state->rip : 0; 1284 1285 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1); 1286 1287 /* 1288 * #VMEXIT(INVALID) needs to be handled early because the VMCB is 1289 * in an inconsistent state and can trigger assertions that would 1290 * never happen otherwise. 1291 */ 1292 if (code == VMCB_EXIT_INVALID) { 1293 vm_exit_svm(vmexit, code, info1, info2); 1294 return (0); 1295 } 1296 1297 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " 1298 "injection valid bit is set %lx", __func__, ctrl->eventinj)); 1299 1300 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, 1301 ("invalid inst_length %d: code (%lx), info1 (%lx), info2 (%lx)", 1302 vmexit->inst_length, code, info1, info2)); 1303 1304 svm_update_virqinfo(svm_sc, vcpu); 1305 svm_save_exitintinfo(svm_sc, vcpu); 1306 1307 switch (code) { 1308 case VMCB_EXIT_CR0_READ: 1309 if (VMCB_CRx_INFO1_VALID(info1) != 0) { 1310 svm_handle_cr0_read(svm_sc, vcpu, 1311 vie_regnum_map(VMCB_CRx_INFO1_GPR(info1))); 1312 handled = 1; 1313 } else { 1314 /* 1315 * If SMSW is used to read the contents of %cr0, then 1316 * the VALID bit will not be set in `info1`, since the 1317 * handling is different from the mov-to-reg case. 1318 * 1319 * Punt to the instruction emulation to handle it. 1320 */ 1321 svm_inst_emul_other(svm_sc, vcpu, vmexit); 1322 } 1323 break; 1324 case VMCB_EXIT_CR0_WRITE: 1325 case VMCB_EXIT_CR0_SEL_WRITE: 1326 if (VMCB_CRx_INFO1_VALID(info1) != 0) { 1327 svm_handle_cr0_write(svm_sc, vcpu, 1328 vie_regnum_map(VMCB_CRx_INFO1_GPR(info1))); 1329 handled = 1; 1330 } else { 1331 /* 1332 * Writes to %cr0 without VALID being set in `info1` are 1333 * initiated by the LMSW and CLTS instructions. While 1334 * LMSW (like SMSW) sees little use in modern OSes and 1335 * bootloaders, CLTS is still used for handling FPU 1336 * state transitions. 1337 * 1338 * Punt to the instruction emulation to handle them. 1339 */ 1340 svm_inst_emul_other(svm_sc, vcpu, vmexit); 1341 } 1342 break; 1343 case VMCB_EXIT_IRET: 1344 /* 1345 * Restart execution at "iret" but with the intercept cleared. 1346 */ 1347 vmexit->inst_length = 0; 1348 svm_clear_nmi_blocking(svm_sc, vcpu); 1349 handled = 1; 1350 break; 1351 case VMCB_EXIT_VINTR: /* interrupt window exiting */ 1352 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1); 1353 svm_disable_intr_window_exiting(svm_sc, vcpu); 1354 handled = 1; 1355 break; 1356 case VMCB_EXIT_INTR: /* external interrupt */ 1357 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1); 1358 handled = 1; 1359 break; 1360 case VMCB_EXIT_NMI: 1361 case VMCB_EXIT_SMI: 1362 case VMCB_EXIT_INIT: 1363 /* 1364 * For external NMI/SMI and physical INIT interrupts, simply 1365 * continue execution, as those host events will be handled by 1366 * the physical CPU. 1367 */ 1368 handled = 1; 1369 break; 1370 case 0x40 ... 0x5F: 1371 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1); 1372 reflect = 1; 1373 idtvec = code - 0x40; 1374 switch (idtvec) { 1375 case IDT_MC: 1376 /* 1377 * Call the machine check handler by hand. Also don't 1378 * reflect the machine check back into the guest. 
1379 */ 1380 reflect = 0; 1381 VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler"); 1382 vmm_call_trap(T_MCE); 1383 break; 1384 case IDT_PF: 1385 error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2, 1386 info2); 1387 KASSERT(error == 0, ("%s: error %d updating cr2", 1388 __func__, error)); 1389 /* fallthru */ 1390 case IDT_NP: 1391 case IDT_SS: 1392 case IDT_GP: 1393 case IDT_AC: 1394 case IDT_TS: 1395 errcode_valid = 1; 1396 break; 1397 1398 case IDT_DF: 1399 errcode_valid = 1; 1400 info1 = 0; 1401 break; 1402 1403 case IDT_BP: 1404 case IDT_OF: 1405 case IDT_BR: 1406 /* 1407 * The 'nrip' field is populated for INT3, INTO and 1408 * BOUND exceptions and this also implies that 1409 * 'inst_length' is non-zero. 1410 * 1411 * Reset 'inst_length' to zero so the guest %rip at 1412 * event injection is identical to what it was when 1413 * the exception originally happened. 1414 */ 1415 VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d " 1416 "to zero before injecting exception %d", 1417 vmexit->inst_length, idtvec); 1418 vmexit->inst_length = 0; 1419 /* fallthru */ 1420 default: 1421 errcode_valid = 0; 1422 info1 = 0; 1423 break; 1424 } 1425 KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) " 1426 "when reflecting exception %d into guest", 1427 vmexit->inst_length, idtvec)); 1428 1429 if (reflect) { 1430 /* Reflect the exception back into the guest */ 1431 VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception " 1432 "%d/%x into the guest", idtvec, (int)info1); 1433 error = vm_inject_exception(svm_sc->vm, vcpu, idtvec, 1434 errcode_valid, info1, 0); 1435 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 1436 __func__, error)); 1437 } 1438 handled = 1; 1439 break; 1440 case VMCB_EXIT_MSR: 1441 handled = svm_handle_msr(svm_sc, vcpu, vmexit, info1 != 0); 1442 break; 1443 case VMCB_EXIT_IO: 1444 handled = svm_handle_inout(svm_sc, vcpu, vmexit); 1445 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1); 1446 break; 1447 case VMCB_EXIT_SHUTDOWN: 1448 (void) vm_suspend(svm_sc->vm, VM_SUSPEND_TRIPLEFAULT); 1449 handled = 1; 1450 break; 1451 case VMCB_EXIT_INVD: 1452 case VMCB_EXIT_INVLPGA: 1453 /* privileged invalidation instructions */ 1454 vm_inject_ud(svm_sc->vm, vcpu); 1455 handled = 1; 1456 break; 1457 case VMCB_EXIT_VMRUN: 1458 case VMCB_EXIT_VMLOAD: 1459 case VMCB_EXIT_VMSAVE: 1460 case VMCB_EXIT_STGI: 1461 case VMCB_EXIT_CLGI: 1462 case VMCB_EXIT_SKINIT: 1463 /* privileged vmm instructions */ 1464 vm_inject_ud(svm_sc->vm, vcpu); 1465 handled = 1; 1466 break; 1467 case VMCB_EXIT_VMMCALL: 1468 /* No handlers make use of VMMCALL for now */ 1469 vm_inject_ud(svm_sc->vm, vcpu); 1470 handled = 1; 1471 break; 1472 case VMCB_EXIT_CPUID: 1473 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1); 1474 handled = x86_emulate_cpuid(svm_sc->vm, vcpu, &state->rax, 1475 &ctx->sctx_rbx, &ctx->sctx_rcx, &ctx->sctx_rdx); 1476 break; 1477 case VMCB_EXIT_HLT: 1478 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1); 1479 vmexit->exitcode = VM_EXITCODE_HLT; 1480 vmexit->u.hlt.rflags = state->rflags; 1481 break; 1482 case VMCB_EXIT_PAUSE: 1483 vmexit->exitcode = VM_EXITCODE_PAUSE; 1484 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1); 1485 break; 1486 case VMCB_EXIT_NPF: 1487 /* EXITINFO2 contains the faulting guest physical address */ 1488 if (info1 & VMCB_NPF_INFO1_RSV) { 1489 VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with " 1490 "reserved bits set: info1(%lx) info2(%lx)", 1491 info1, info2); 1492 } else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) { 1493 vmexit->exitcode = VM_EXITCODE_PAGING; 1494 
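/*
 * Record the faulting GPA (EXITINFO2) and the access type derived from
 * the EXITINFO1 error code for the paging exit.
 */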
vmexit->u.paging.gpa = info2; 1495 vmexit->u.paging.fault_type = npf_fault_type(info1); 1496 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 1497 VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault " 1498 "on gpa %lx/%lx at rip %lx", 1499 info2, info1, state->rip); 1500 } else if (svm_npf_emul_fault(info1)) { 1501 svm_handle_mmio_emul(svm_sc, vcpu, vmexit, info2); 1502 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_MMIO_EMUL, 1); 1503 VCPU_CTR3(svm_sc->vm, vcpu, "mmio_emul fault " 1504 "for gpa %lx/%lx at rip %lx", 1505 info2, info1, state->rip); 1506 } 1507 break; 1508 case VMCB_EXIT_MONITOR: 1509 vmexit->exitcode = VM_EXITCODE_MONITOR; 1510 break; 1511 case VMCB_EXIT_MWAIT: 1512 vmexit->exitcode = VM_EXITCODE_MWAIT; 1513 break; 1514 default: 1515 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1); 1516 break; 1517 } 1518 1519 DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, vmexit->rip, uint32_t, 1520 code); 1521 1522 if (handled) { 1523 vmexit->rip += vmexit->inst_length; 1524 vmexit->inst_length = 0; 1525 state->rip = vmexit->rip; 1526 } else { 1527 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 1528 /* 1529 * If this VM exit was not claimed by anybody then 1530 * treat it as a generic SVM exit. 1531 */ 1532 vm_exit_svm(vmexit, code, info1, info2); 1533 } else { 1534 /* 1535 * The exitcode and collateral have been populated. 1536 * The VM exit will be processed further in userland. 1537 */ 1538 } 1539 } 1540 return (handled); 1541 } 1542 1543 /* 1544 * Inject exceptions, NMIs, and ExtINTs. 1545 * 1546 * The logic behind these are complicated and may involve mutex contention, so 1547 * the injection is performed without the protection of host CPU interrupts 1548 * being disabled. This means a racing notification could be "lost", 1549 * necessitating a later call to svm_inject_recheck() to close that window 1550 * of opportunity. 1551 */ 1552 static enum event_inject_state 1553 svm_inject_events(struct svm_softc *sc, int vcpu) 1554 { 1555 struct vmcb_ctrl *ctrl; 1556 struct vmcb_state *state; 1557 struct svm_vcpu *vcpustate; 1558 uint64_t intinfo; 1559 enum event_inject_state ev_state; 1560 1561 state = svm_get_vmcb_state(sc, vcpu); 1562 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1563 vcpustate = svm_get_vcpu(sc, vcpu); 1564 ev_state = EIS_CAN_INJECT; 1565 1566 /* Clear any interrupt shadow if guest %rip has changed */ 1567 if (vcpustate->nextrip != state->rip) { 1568 ctrl->intr_shadow = 0; 1569 } 1570 1571 /* 1572 * An event is already pending for injection. This can occur when the 1573 * vCPU exits prior to VM entry (like for an AST). 1574 */ 1575 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { 1576 return (EIS_EV_EXISTING | EIS_REQ_EXIT); 1577 } 1578 1579 /* 1580 * Inject pending events or exceptions for this vcpu. 1581 * 1582 * An event might be pending because the previous #VMEXIT happened 1583 * during event delivery (i.e. ctrl->exitintinfo). 1584 * 1585 * An event might also be pending because an exception was injected 1586 * by the hypervisor (e.g. #PF during instruction emulation). 1587 */ 1588 if (vm_entry_intinfo(sc->vm, vcpu, &intinfo)) { 1589 ASSERT(VMCB_EXITINTINFO_VALID(intinfo)); 1590 1591 svm_inject_event(sc, vcpu, intinfo); 1592 vmm_stat_incr(sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1); 1593 ev_state = EIS_EV_INJECTED; 1594 } 1595 1596 /* NMI event has priority over interrupts. */ 1597 if (vm_nmi_pending(sc->vm, vcpu) && !svm_nmi_blocked(sc, vcpu)) { 1598 if (ev_state == EIS_CAN_INJECT) { 1599 /* Can't inject NMI if vcpu is in an intr_shadow. 
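 *
 * (The shadow may have been set by the guest, e.g. via STI or MOV SS,
 * or by svm_clear_nmi_blocking() to space out back-to-back NMI
 * injections.)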
*/ 1600 if (ctrl->intr_shadow) { 1601 return (EIS_GI_BLOCK); 1602 } 1603 1604 svm_inject_nmi(sc, vcpu); 1605 ev_state = EIS_EV_INJECTED; 1606 } else { 1607 return (ev_state | EIS_REQ_EXIT); 1608 } 1609 } 1610 1611 if (vm_extint_pending(sc->vm, vcpu)) { 1612 int vector; 1613 1614 if (ev_state != EIS_CAN_INJECT) { 1615 return (ev_state | EIS_REQ_EXIT); 1616 } 1617 1618 /* 1619 * If the guest has disabled interrupts or is in an interrupt 1620 * shadow then we cannot inject the pending interrupt. 1621 */ 1622 if ((state->rflags & PSL_I) == 0 || ctrl->intr_shadow) { 1623 return (EIS_GI_BLOCK); 1624 } 1625 1626 /* Ask the legacy pic for a vector to inject */ 1627 vatpic_pending_intr(sc->vm, &vector); 1628 KASSERT(vector >= 0 && vector <= 255, 1629 ("invalid vector %d from INTR", vector)); 1630 1631 svm_inject_irq(sc, vcpu, vector); 1632 vm_extint_clear(sc->vm, vcpu); 1633 vatpic_intr_accepted(sc->vm, vector); 1634 ev_state = EIS_EV_INJECTED; 1635 } 1636 1637 return (ev_state); 1638 } 1639 1640 /* 1641 * Synchronize vLAPIC state and inject any interrupts pending on it. 1642 * 1643 * This is done with host CPU interrupts disabled so notification IPIs will be 1644 * queued on the host APIC and recognized when entering SVM guest context. 1645 */ 1646 static enum event_inject_state 1647 svm_inject_vlapic(struct svm_softc *sc, int vcpu, struct vlapic *vlapic, 1648 enum event_inject_state ev_state) 1649 { 1650 struct vmcb_ctrl *ctrl; 1651 struct vmcb_state *state; 1652 int vector; 1653 uint8_t v_tpr; 1654 1655 state = svm_get_vmcb_state(sc, vcpu); 1656 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1657 1658 /* 1659 * The guest can modify the TPR by writing to %cr8. In guest mode the 1660 * CPU reflects this write to V_TPR without hypervisor intervention. 1661 * 1662 * The guest can also modify the TPR by writing to it via the memory 1663 * mapped APIC page. In this case, the write will be emulated by the 1664 * hypervisor. For this reason V_TPR must be updated before every 1665 * VMRUN. 1666 */ 1667 v_tpr = vlapic_get_cr8(vlapic); 1668 KASSERT(v_tpr <= 15, ("invalid v_tpr %x", v_tpr)); 1669 if (ctrl->v_tpr != v_tpr) { 1670 ctrl->v_tpr = v_tpr; 1671 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); 1672 } 1673 1674 /* If an event cannot otherwise be injected, we are done for now */ 1675 if (ev_state != EIS_CAN_INJECT) { 1676 return (ev_state); 1677 } 1678 1679 if (!vlapic_pending_intr(vlapic, &vector)) { 1680 return (EIS_CAN_INJECT); 1681 } 1682 KASSERT(vector >= 16 && vector <= 255, 1683 ("invalid vector %d from local APIC", vector)); 1684 1685 /* 1686 * If the guest has disabled interrupts or is in an interrupt shadow 1687 * then we cannot inject the pending interrupt. 1688 */ 1689 if ((state->rflags & PSL_I) == 0 || ctrl->intr_shadow) { 1690 return (EIS_GI_BLOCK); 1691 } 1692 1693 svm_inject_irq(sc, vcpu, vector); 1694 vlapic_intr_accepted(vlapic, vector); 1695 return (EIS_EV_INJECTED); 1696 } 1697 1698 /* 1699 * Re-check for events to be injected. 1700 * 1701 * Once host CPU interrupts are disabled, check for the presence of any events 1702 * which require injection processing. If an exit is required upon injection, 1703 * or once the guest becomes interruptable, that will be configured too. 
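 *
 * Returns true when a newly arrived event can be injected, in which case
 * the caller takes another lap through the injection logic before entering
 * the guest; returns false otherwise.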
1704 */ 1705 static bool 1706 svm_inject_recheck(struct svm_softc *sc, int vcpu, 1707 enum event_inject_state ev_state) 1708 { 1709 struct vmcb_ctrl *ctrl; 1710 1711 ctrl = svm_get_vmcb_ctrl(sc, vcpu); 1712 1713 if (ev_state == EIS_CAN_INJECT) { 1714 /* 1715 * An active interrupt shadow would preclude us from injecting 1716 * any events picked up during a re-check. 1717 */ 1718 if (ctrl->intr_shadow != 0) { 1719 return (false); 1720 } 1721 1722 if (vm_nmi_pending(sc->vm, vcpu) && 1723 !svm_nmi_blocked(sc, vcpu)) { 1724 /* queued NMI not blocked by NMI-window-exiting */ 1725 return (true); 1726 } 1727 if (vm_extint_pending(sc->vm, vcpu)) { 1728 /* queued ExtINT not blocked by existing injection */ 1729 return (true); 1730 } 1731 } else { 1732 if ((ev_state & EIS_REQ_EXIT) != 0) { 1733 /* 1734 * Use a self-IPI to force an immediate exit after 1735 * event injection has occurred. 1736 */ 1737 poke_cpu(CPU->cpu_id); 1738 } else { 1739 /* 1740 * If any event is being injected, an exit immediately 1741 * upon becoming interruptable again will allow pending 1742 * or newly queued events to be injected in a timely 1743 * manner. 1744 */ 1745 svm_enable_intr_window_exiting(sc, vcpu); 1746 } 1747 } 1748 return (false); 1749 } 1750 1751 1752 static void 1753 check_asid(struct svm_softc *sc, int vcpuid, uint_t thiscpu, uint64_t nptgen) 1754 { 1755 struct svm_vcpu *vcpustate = svm_get_vcpu(sc, vcpuid); 1756 struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpuid); 1757 uint8_t flush; 1758 1759 flush = hma_svm_asid_update(&vcpustate->hma_asid, flush_by_asid(), 1760 vcpustate->nptgen != nptgen); 1761 1762 if (flush != VMCB_TLB_FLUSH_NOTHING) { 1763 ctrl->asid = vcpustate->hma_asid.hsa_asid; 1764 svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID); 1765 } 1766 ctrl->tlb_ctrl = flush; 1767 vcpustate->nptgen = nptgen; 1768 } 1769 1770 static void 1771 flush_asid(struct svm_softc *sc, int vcpuid) 1772 { 1773 struct svm_vcpu *vcpustate = svm_get_vcpu(sc, vcpuid); 1774 struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpuid); 1775 uint8_t flush; 1776 1777 flush = hma_svm_asid_update(&vcpustate->hma_asid, flush_by_asid(), 1778 true); 1779 1780 ASSERT(flush != VMCB_TLB_FLUSH_NOTHING); 1781 ctrl->asid = vcpustate->hma_asid.hsa_asid; 1782 ctrl->tlb_ctrl = flush; 1783 svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID); 1784 /* 1785 * A potential future optimization: We could choose to update the nptgen 1786 * associated with the vCPU, since any pending nptgen change requiring a 1787 * flush will be satisfied by the one which has just now been queued. 1788 */ 1789 } 1790 1791 static __inline void 1792 disable_gintr(void) 1793 { 1794 __asm __volatile("clgi"); 1795 } 1796 1797 static __inline void 1798 enable_gintr(void) 1799 { 1800 __asm __volatile("stgi"); 1801 } 1802 1803 static __inline void 1804 svm_dr_enter_guest(struct svm_regctx *gctx) 1805 { 1806 1807 /* Save host control debug registers. */ 1808 gctx->host_dr7 = rdr7(); 1809 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 1810 1811 /* 1812 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 1813 * exceptions in the host based on the guest DRx values. The 1814 * guest DR6, DR7, and DEBUGCTL are saved/restored in the 1815 * VMCB. 1816 */ 1817 load_dr7(0); 1818 wrmsr(MSR_DEBUGCTLMSR, 0); 1819 1820 /* Save host debug registers. */ 1821 gctx->host_dr0 = rdr0(); 1822 gctx->host_dr1 = rdr1(); 1823 gctx->host_dr2 = rdr2(); 1824 gctx->host_dr3 = rdr3(); 1825 gctx->host_dr6 = rdr6(); 1826 1827 /* Restore guest debug registers. 
*/ 1828 load_dr0(gctx->sctx_dr0); 1829 load_dr1(gctx->sctx_dr1); 1830 load_dr2(gctx->sctx_dr2); 1831 load_dr3(gctx->sctx_dr3); 1832 } 1833 1834 static __inline void 1835 svm_dr_leave_guest(struct svm_regctx *gctx) 1836 { 1837 1838 /* Save guest debug registers. */ 1839 gctx->sctx_dr0 = rdr0(); 1840 gctx->sctx_dr1 = rdr1(); 1841 gctx->sctx_dr2 = rdr2(); 1842 gctx->sctx_dr3 = rdr3(); 1843 1844 /* 1845 * Restore host debug registers. Restore DR7 and DEBUGCTL 1846 * last. 1847 */ 1848 load_dr0(gctx->host_dr0); 1849 load_dr1(gctx->host_dr1); 1850 load_dr2(gctx->host_dr2); 1851 load_dr3(gctx->host_dr3); 1852 load_dr6(gctx->host_dr6); 1853 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl); 1854 load_dr7(gctx->host_dr7); 1855 } 1856 1857 static void 1858 svm_apply_tsc_adjust(struct svm_softc *svm_sc, int vcpuid) 1859 { 1860 const uint64_t offset = vcpu_tsc_offset(svm_sc->vm, vcpuid, true); 1861 struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(svm_sc, vcpuid); 1862 1863 if (ctrl->tsc_offset != offset) { 1864 ctrl->tsc_offset = offset; 1865 svm_set_dirty(svm_sc, vcpuid, VMCB_CACHE_I); 1866 } 1867 } 1868 1869 1870 /* 1871 * Start vcpu with specified RIP. 1872 */ 1873 static int 1874 svm_vmrun(void *arg, int vcpu, uint64_t rip) 1875 { 1876 struct svm_regctx *gctx; 1877 struct svm_softc *svm_sc; 1878 struct svm_vcpu *vcpustate; 1879 struct vmcb_state *state; 1880 struct vmcb_ctrl *ctrl; 1881 struct vm_exit *vmexit; 1882 struct vlapic *vlapic; 1883 vm_client_t *vmc; 1884 struct vm *vm; 1885 uint64_t vmcb_pa; 1886 int handled; 1887 uint16_t ldt_sel; 1888 1889 svm_sc = arg; 1890 vm = svm_sc->vm; 1891 1892 vcpustate = svm_get_vcpu(svm_sc, vcpu); 1893 state = svm_get_vmcb_state(svm_sc, vcpu); 1894 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); 1895 vmexit = vm_exitinfo(vm, vcpu); 1896 vlapic = vm_lapic(vm, vcpu); 1897 vmc = vm_get_vmclient(vm, vcpu); 1898 1899 gctx = svm_get_guest_regctx(svm_sc, vcpu); 1900 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa; 1901 1902 if (vcpustate->lastcpu != curcpu) { 1903 /* 1904 * Force new ASID allocation by invalidating the generation. 1905 */ 1906 vcpustate->hma_asid.hsa_gen = 0; 1907 1908 /* 1909 * Invalidate the VMCB state cache by marking all fields dirty. 1910 */ 1911 svm_set_dirty(svm_sc, vcpu, 0xffffffff); 1912 1913 /* 1914 * XXX 1915 * Setting 'vcpustate->lastcpu' here is bit premature because 1916 * we may return from this function without actually executing 1917 * the VMRUN instruction. This could happen if an AST or yield 1918 * condition is pending on the first time through the loop. 1919 * 1920 * This works for now but any new side-effects of vcpu 1921 * migration should take this case into account. 1922 */ 1923 vcpustate->lastcpu = curcpu; 1924 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1); 1925 } 1926 1927 svm_apply_tsc_adjust(svm_sc, vcpu); 1928 1929 svm_msr_guest_enter(svm_sc, vcpu); 1930 1931 VERIFY(!vcpustate->loaded && curthread->t_preempt != 0); 1932 vcpustate->loaded = B_TRUE; 1933 1934 /* Update Guest RIP */ 1935 state->rip = rip; 1936 1937 do { 1938 enum event_inject_state inject_state; 1939 uint64_t nptgen; 1940 1941 /* 1942 * Initial event injection is complex and may involve mutex 1943 * contention, so it must be performed with global interrupts 1944 * still enabled. 1945 */ 1946 inject_state = svm_inject_events(svm_sc, vcpu); 1947 handled = 0; 1948 1949 /* 1950 * Disable global interrupts to guarantee atomicity during 1951 * loading of guest state. 
static __inline void
disable_gintr(void)
{
	__asm __volatile("clgi");
}

static __inline void
enable_gintr(void)
{
	__asm __volatile("stgi");
}

static __inline void
svm_dr_enter_guest(struct svm_regctx *gctx)
{

	/* Save host control debug registers. */
	gctx->host_dr7 = rdr7();
	gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);

	/*
	 * Disable debugging in DR7 and DEBUGCTL to avoid triggering
	 * exceptions in the host based on the guest DRx values. The
	 * guest DR6, DR7, and DEBUGCTL are saved/restored in the
	 * VMCB.
	 */
	load_dr7(0);
	wrmsr(MSR_DEBUGCTLMSR, 0);

	/* Save host debug registers. */
	gctx->host_dr0 = rdr0();
	gctx->host_dr1 = rdr1();
	gctx->host_dr2 = rdr2();
	gctx->host_dr3 = rdr3();
	gctx->host_dr6 = rdr6();

	/* Restore guest debug registers. */
	load_dr0(gctx->sctx_dr0);
	load_dr1(gctx->sctx_dr1);
	load_dr2(gctx->sctx_dr2);
	load_dr3(gctx->sctx_dr3);
}

static __inline void
svm_dr_leave_guest(struct svm_regctx *gctx)
{

	/* Save guest debug registers. */
	gctx->sctx_dr0 = rdr0();
	gctx->sctx_dr1 = rdr1();
	gctx->sctx_dr2 = rdr2();
	gctx->sctx_dr3 = rdr3();

	/*
	 * Restore host debug registers. Restore DR7 and DEBUGCTL
	 * last.
	 */
	load_dr0(gctx->host_dr0);
	load_dr1(gctx->host_dr1);
	load_dr2(gctx->host_dr2);
	load_dr3(gctx->host_dr3);
	load_dr6(gctx->host_dr6);
	wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl);
	load_dr7(gctx->host_dr7);
}
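
/*
 * Propagate the TSC offset currently tracked for this vCPU into the VMCB
 * control area, marking the VMCB_CACHE_I portion of the VMCB cache dirty
 * only when the value has actually changed.
 */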
static void
svm_apply_tsc_adjust(struct svm_softc *svm_sc, int vcpuid)
{
	const uint64_t offset = vcpu_tsc_offset(svm_sc->vm, vcpuid, true);
	struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(svm_sc, vcpuid);

	if (ctrl->tsc_offset != offset) {
		ctrl->tsc_offset = offset;
		svm_set_dirty(svm_sc, vcpuid, VMCB_CACHE_I);
	}
}

/*
 * Start vcpu with specified RIP.
 */
static int
svm_vmrun(void *arg, int vcpu, uint64_t rip)
{
	struct svm_regctx *gctx;
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpustate;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	vm_client_t *vmc;
	struct vm *vm;
	uint64_t vmcb_pa;
	int handled;
	uint16_t ldt_sel;

	svm_sc = arg;
	vm = svm_sc->vm;

	vcpustate = svm_get_vcpu(svm_sc, vcpu);
	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	vmexit = vm_exitinfo(vm, vcpu);
	vlapic = vm_lapic(vm, vcpu);
	vmc = vm_get_vmclient(vm, vcpu);

	gctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;

	if (vcpustate->lastcpu != curcpu) {
		/*
		 * Force new ASID allocation by invalidating the generation.
		 */
		vcpustate->hma_asid.hsa_gen = 0;

		/*
		 * Invalidate the VMCB state cache by marking all fields dirty.
		 */
		svm_set_dirty(svm_sc, vcpu, 0xffffffff);

		/*
		 * XXX
		 * Setting 'vcpustate->lastcpu' here is a bit premature because
		 * we may return from this function without actually executing
		 * the VMRUN instruction. This could happen if an AST or yield
		 * condition is pending on the first time through the loop.
		 *
		 * This works for now but any new side-effects of vcpu
		 * migration should take this case into account.
		 */
		vcpustate->lastcpu = curcpu;
		vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
	}

	svm_apply_tsc_adjust(svm_sc, vcpu);

	svm_msr_guest_enter(svm_sc, vcpu);

	VERIFY(!vcpustate->loaded && curthread->t_preempt != 0);
	vcpustate->loaded = B_TRUE;

	/* Update Guest RIP */
	state->rip = rip;

	do {
		enum event_inject_state inject_state;
		uint64_t nptgen;

		/*
		 * Initial event injection is complex and may involve mutex
		 * contention, so it must be performed with global interrupts
		 * still enabled.
		 */
		inject_state = svm_inject_events(svm_sc, vcpu);
		handled = 0;

		/*
		 * Disable global interrupts to guarantee atomicity during
		 * loading of guest state. This includes not only the state
		 * loaded by the "vmrun" instruction but also software state
		 * maintained by the hypervisor: suspended and rendezvous
		 * state, NPT generation number, vlapic interrupts, etc.
		 */
		disable_gintr();

		/*
		 * Synchronizing and injecting vlapic state is lock-free and is
		 * safe (and prudent) to perform with interrupts disabled.
		 */
		inject_state = svm_inject_vlapic(svm_sc, vcpu, vlapic,
		    inject_state);

		/*
		 * Check for vCPU bail-out conditions. This must be done after
		 * svm_inject_events() to detect a triple-fault condition.
		 */
		if (vcpu_entry_bailout_checks(vm, vcpu, state->rip)) {
			enable_gintr();
			break;
		}

		if (vcpu_run_state_pending(vm, vcpu)) {
			enable_gintr();
			vm_exit_run_state(vm, vcpu, state->rip);
			break;
		}

		/*
		 * If subsequent activity queued events which require injection
		 * handling, take another lap to handle them.
		 */
		if (svm_inject_recheck(svm_sc, vcpu, inject_state)) {
			enable_gintr();
			handled = 1;
			continue;
		}

		/*
		 * #VMEXIT resumes the host with the guest LDTR, so
		 * save the current LDT selector so it can be restored
		 * after an exit. The userspace hypervisor probably
		 * doesn't use an LDT, but save and restore it to be
		 * safe.
		 */
		ldt_sel = sldt();

		/*
		 * Check the vmspace and ASID generations to ensure that the
		 * vcpu does not use stale TLB mappings.
		 */
		nptgen = vmc_table_enter(vmc);
		check_asid(svm_sc, vcpu, curcpu, nptgen);

		ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
		vcpustate->dirty = 0;
		VCPU_CTR1(vm, vcpu, "vmcb clean %x", ctrl->vmcb_clean);

		/* Launch Virtual Machine. */
		vcpu_ustate_change(vm, vcpu, VU_RUN);
		VCPU_CTR1(vm, vcpu, "Resume execution at %lx", state->rip);
		svm_dr_enter_guest(gctx);
		svm_launch(vmcb_pa, gctx, get_pcpu());
		svm_dr_leave_guest(gctx);
		vcpu_ustate_change(vm, vcpu, VU_EMU_KERN);

		/* Restore host LDTR. */
		lldt(ldt_sel);

		/* #VMEXIT disables interrupts so re-enable them here. */
		enable_gintr();

		vmc_table_exit(vmc);

		/* Update 'nextrip' */
		vcpustate->nextrip = state->rip;

		/* Handle #VMEXIT and if required return to user space. */
		handled = svm_vmexit(svm_sc, vcpu, vmexit);
	} while (handled);

	svm_msr_guest_exit(svm_sc, vcpu);

	VERIFY(vcpustate->loaded && curthread->t_preempt != 0);
	vcpustate->loaded = B_FALSE;

	return (0);
}

static void
svm_vmcleanup(void *arg)
{
	struct svm_softc *sc = arg;

	contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
	contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
	free(sc, M_SVM);
}
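
/*
 * Map a VM_REG_GUEST_* identifier to its slot in the software-maintained
 * register context.  Only state which VMRUN/#VMEXIT does not carry in the
 * VMCB lives here: the general-purpose registers other than %rax and %rsp,
 * which svm_launch() swaps by hand, and DR0-3, which are handled by
 * svm_dr_enter_guest()/svm_dr_leave_guest().  Everything else is accessed
 * through the VMCB in svm_getreg()/svm_setreg().
 */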
static uint64_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{
	switch (reg) {
	case VM_REG_GUEST_RBX:
		return (&regctx->sctx_rbx);
	case VM_REG_GUEST_RCX:
		return (&regctx->sctx_rcx);
	case VM_REG_GUEST_RDX:
		return (&regctx->sctx_rdx);
	case VM_REG_GUEST_RDI:
		return (&regctx->sctx_rdi);
	case VM_REG_GUEST_RSI:
		return (&regctx->sctx_rsi);
	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
	case VM_REG_GUEST_R8:
		return (&regctx->sctx_r8);
	case VM_REG_GUEST_R9:
		return (&regctx->sctx_r9);
	case VM_REG_GUEST_R10:
		return (&regctx->sctx_r10);
	case VM_REG_GUEST_R11:
		return (&regctx->sctx_r11);
	case VM_REG_GUEST_R12:
		return (&regctx->sctx_r12);
	case VM_REG_GUEST_R13:
		return (&regctx->sctx_r13);
	case VM_REG_GUEST_R14:
		return (&regctx->sctx_r14);
	case VM_REG_GUEST_R15:
		return (&regctx->sctx_r15);
	case VM_REG_GUEST_DR0:
		return (&regctx->sctx_dr0);
	case VM_REG_GUEST_DR1:
		return (&regctx->sctx_dr1);
	case VM_REG_GUEST_DR2:
		return (&regctx->sctx_dr2);
	case VM_REG_GUEST_DR3:
		return (&regctx->sctx_dr3);
	default:
		return (NULL);
	}
}

static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
	struct svm_softc *sc;
	struct vmcb *vmcb;
	uint64_t *regp;
	uint64_t *fieldp;
	struct vmcb_segment *seg;

	sc = arg;
	vmcb = svm_get_vmcb(sc, vcpu);

	regp = swctx_regptr(svm_get_guest_regctx(sc, vcpu), ident);
	if (regp != NULL) {
		*val = *regp;
		return (0);
	}

	switch (ident) {
	case VM_REG_GUEST_INTR_SHADOW:
		*val = (vmcb->ctrl.intr_shadow != 0) ? 1 : 0;
		break;

	case VM_REG_GUEST_CR0:
		svm_get_cr0(sc, vcpu, val);
		break;
	case VM_REG_GUEST_CR2:
	case VM_REG_GUEST_CR3:
	case VM_REG_GUEST_CR4:
	case VM_REG_GUEST_DR6:
	case VM_REG_GUEST_DR7:
	case VM_REG_GUEST_EFER:
	case VM_REG_GUEST_RAX:
	case VM_REG_GUEST_RFLAGS:
	case VM_REG_GUEST_RIP:
	case VM_REG_GUEST_RSP:
		fieldp = vmcb_regptr(vmcb, ident, NULL);
		*val = *fieldp;
		break;

	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_LDTR:
	case VM_REG_GUEST_TR:
		seg = vmcb_segptr(vmcb, ident);
		*val = seg->selector;
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		/* GDTR and IDTR don't have segment selectors */
		return (EINVAL);

	default:
		return (EINVAL);
	}

	return (0);
}

static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
	struct svm_softc *sc;
	struct vmcb *vmcb;
	uint64_t *regp;
	uint64_t *fieldp;
	uint32_t dirty;
	struct vmcb_segment *seg;

	sc = arg;
	vmcb = svm_get_vmcb(sc, vcpu);

	regp = swctx_regptr(svm_get_guest_regctx(sc, vcpu), ident);
	if (regp != NULL) {
		*regp = val;
		return (0);
	}

	dirty = VMCB_CACHE_NONE;
	switch (ident) {
	case VM_REG_GUEST_INTR_SHADOW:
		vmcb->ctrl.intr_shadow = (val != 0) ? 1 : 0;
		break;

	case VM_REG_GUEST_EFER:
		fieldp = vmcb_regptr(vmcb, ident, &dirty);
		/* EFER_SVM must always be set when the guest is executing */
		*fieldp = val | EFER_SVM;
		dirty |= VMCB_CACHE_CR;
		break;

	case VM_REG_GUEST_CR0:
		svm_set_cr0(sc, vcpu, val, false);
		break;
	case VM_REG_GUEST_CR2:
	case VM_REG_GUEST_CR3:
	case VM_REG_GUEST_CR4:
	case VM_REG_GUEST_DR6:
	case VM_REG_GUEST_DR7:
	case VM_REG_GUEST_RAX:
	case VM_REG_GUEST_RFLAGS:
	case VM_REG_GUEST_RIP:
	case VM_REG_GUEST_RSP:
		fieldp = vmcb_regptr(vmcb, ident, &dirty);
		*fieldp = val;
		break;

	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_LDTR:
	case VM_REG_GUEST_TR:
		dirty |= VMCB_CACHE_SEG;
		seg = vmcb_segptr(vmcb, ident);
		seg->selector = (uint16_t)val;
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		/* GDTR and IDTR don't have segment selectors */
		return (EINVAL);

	default:
		return (EINVAL);
	}

	if (dirty != VMCB_CACHE_NONE) {
		svm_set_dirty(sc, vcpu, dirty);
	}

	/*
	 * XXX deal with CR3 and invalidate TLB entries tagged with the
	 * vcpu's ASID. This needs to be treated differently depending on
	 * whether 'running' is true/false.
	 */

	return (0);
}
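
/*
 * Segment descriptor accessors.
 *
 * The generic seg_desc 'access' field follows the VT-x access-rights
 * layout used by the processor-independent code, while the VMCB stores a
 * packed SVM attribute encoding.  VMCB_ACCESS2ATTR()/VMCB_ATTR2ACCESS()
 * convert between the two, with the unusable-segment distinction (VT-x
 * bit 16 versus the SVM 'P' bit) handled explicitly below.
 */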
static int
svm_setdesc(void *arg, int vcpu, int reg, const struct seg_desc *desc)
{
	struct vmcb *vmcb;
	struct svm_softc *sc;
	struct vmcb_segment *seg;

	sc = arg;
	vmcb = svm_get_vmcb(sc, vcpu);

	switch (reg) {
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_LDTR:
	case VM_REG_GUEST_TR:
		svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
		seg = vmcb_segptr(vmcb, reg);
		/*
		 * Map seg_desc access to VMCB attribute format.
		 *
		 * SVM uses the 'P' bit in the segment attributes to indicate a
		 * NULL segment so clear it if the segment is marked unusable.
		 */
		seg->attrib = VMCB_ACCESS2ATTR(desc->access);
		if (SEG_DESC_UNUSABLE(desc->access)) {
			seg->attrib &= ~0x80;
		}
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		svm_set_dirty(sc, vcpu, VMCB_CACHE_DT);
		seg = vmcb_segptr(vmcb, reg);
		break;

	default:
		return (EINVAL);
	}

	ASSERT(seg != NULL);
	seg->base = desc->base;
	seg->limit = desc->limit;

	return (0);
}

static int
svm_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
	struct vmcb *vmcb;
	struct svm_softc *sc;
	struct vmcb_segment *seg;

	sc = arg;
	vmcb = svm_get_vmcb(sc, vcpu);

	switch (reg) {
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_LDTR:
		seg = vmcb_segptr(vmcb, reg);
		desc->access = VMCB_ATTR2ACCESS(seg->attrib);
		/*
		 * VT-x uses bit 16 to indicate a segment that has been loaded
		 * with a NULL selector (aka unusable). The 'desc->access'
		 * field is interpreted in the VT-x format by the
		 * processor-independent code.
		 *
		 * SVM uses the 'P' bit to convey the same information so
		 * convert it into the VT-x format. For more details refer to
		 * section "Segment State in the VMCB" in APMv2.
		 */
		if ((desc->access & 0x80) == 0) {
			/* Unusable segment */
			desc->access |= 0x10000;
		}
		break;

	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_TR:
		seg = vmcb_segptr(vmcb, reg);
		desc->access = VMCB_ATTR2ACCESS(seg->attrib);
		break;

	case VM_REG_GUEST_GDTR:
	case VM_REG_GUEST_IDTR:
		seg = vmcb_segptr(vmcb, reg);
		/*
		 * Since there are no access bits associated with the GDTR or
		 * the IDTR, zero out the field to ensure it does not contain
		 * garbage which might confuse the consumer.
		 */
		desc->access = 0;
		break;

	default:
		return (EINVAL);
	}

	ASSERT(seg != NULL);
	desc->base = seg->base;
	desc->limit = seg->limit;
	return (0);
}
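
/*
 * The HLT and PAUSE exit capabilities map directly onto the corresponding
 * instruction intercepts in the VMCB, so getting or setting them amounts
 * to querying or toggling those intercept bits.
 */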
static int
svm_setcap(void *arg, int vcpu, int type, int val)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;
	switch (type) {
	case VM_CAP_HALT_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT, val);
		break;
	case VM_CAP_PAUSE_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE, val);
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static int
svm_getcap(void *arg, int vcpu, int type, int *retval)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT);
		break;
	case VM_CAP_PAUSE_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE);
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}

static struct vlapic *
svm_vlapic_init(void *arg, int vcpuid)
{
	struct svm_softc *svm_sc;
	struct vlapic *vlapic;

	svm_sc = arg;
	vlapic = malloc(sizeof (struct vlapic), M_SVM_VLAPIC,
	    M_WAITOK | M_ZERO);
	vlapic->vm = svm_sc->vm;
	vlapic->vcpuid = vcpuid;
	vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];

	vlapic_init(vlapic);

	return (vlapic);
}

static void
svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{
	vlapic_cleanup(vlapic);
	free(vlapic, M_SVM_VLAPIC);
}

static void
svm_savectx(void *arg, int vcpu)
{
	struct svm_softc *sc = arg;

	if (sc->vcpu[vcpu].loaded) {
		svm_msr_guest_exit(sc, vcpu);
	}
}

static void
svm_restorectx(void *arg, int vcpu)
{
	struct svm_softc *sc = arg;

	if (sc->vcpu[vcpu].loaded) {
		svm_msr_guest_enter(sc, vcpu);
	}
}

struct vmm_ops vmm_ops_amd = {
	.init		= svm_init,
	.cleanup	= svm_cleanup,
	.resume		= svm_restore,

	.vminit		= svm_vminit,
	.vmrun		= svm_vmrun,
	.vmcleanup	= svm_vmcleanup,
	.vmgetreg	= svm_getreg,
	.vmsetreg	= svm_setreg,
	.vmgetdesc	= svm_getdesc,
	.vmsetdesc	= svm_setdesc,
	.vmgetcap	= svm_getcap,
	.vmsetcap	= svm_setcap,
	.vlapic_init	= svm_vlapic_init,
	.vlapic_cleanup	= svm_vlapic_cleanup,

	.vmsavectx	= svm_savectx,
	.vmrestorectx	= svm_restorectx,
};