Lines Matching +full:ctx +full:- +full:asid

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
88 #define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */
110 static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");
125 /* Current ASID generation for each host cpu */
126 static struct asid asid[MAXCPU]; variable
265  * The next ASID allocation will roll over both 'gen' and 'num' in svm_modinit()
268 asid[cpu].gen = ~0UL; in svm_modinit()
269 asid[cpu].num = nasid - 1; in svm_modinit()
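These two matches come from svm_modinit(): each host cpu's ASID state is seeded with its highest valid values so that the very first allocation rolls both counters over and starts the sequence at {1, 1}. A minimal sketch of that initialization loop, with the loop header (not matched by this search) filled in by assumption:

    for (cpu = 0; cpu < MAXCPU; cpu++) {
            /* Force the first allocation on this cpu to roll over. */
            asid[cpu].gen = ~0UL;
            asid[cpu].num = nasid - 1;
    }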
301 ctrl->tsc_offset = offset; in svm_set_tsc_offset()
306 vm_set_tsc_offset(vcpu->vcpu, offset); in svm_set_tsc_offset()
329 *index = -1; in svm_msr_index()
338 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); in svm_msr_index()
340 off = (msr - MSR_AMD6TH_START); in svm_msr_index()
345 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); in svm_msr_index()
347 off = (msr - MSR_AMD7TH_START); in svm_msr_index()
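The matched lines above are from svm_msr_index(), which maps an MSR number onto the AMD MSR permission bitmap: two intercept bits (read and write) per MSR, packed across the three architecturally defined MSR ranges. A sketch of the whole helper, reconstructed from these fragments with the unmatched lines filled in by assumption:

    static int
    svm_msr_index(uint64_t msr, int *index, int *bit)
    {
            uint32_t base, off;

            *index = -1;
            *bit = (msr % 4) * 2;   /* two permission bits per MSR */
            base = 0;

            if (msr <= MSR_PENTIUM_END) {
                    *index = msr / 4;
                    return (0);
            }

            base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
            if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
                    off = (msr - MSR_AMD6TH_START);
                    *index = (base + off) / 4;
                    return (0);
            }

            base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
            if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
                    off = (msr - MSR_AMD7TH_START);
                    *index = (base + off) / 4;
                    return (0);
            }

            return (EINVAL);        /* MSR not covered by the bitmap */
    }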
399 return (ctrl->intercept[idx] & bitmask ? 1 : 0); in svm_get_intercept()
411 oldval = ctrl->intercept[idx]; in svm_set_intercept()
414 ctrl->intercept[idx] |= bitmask; in svm_set_intercept()
416 ctrl->intercept[idx] &= ~bitmask; in svm_set_intercept()
418 if (ctrl->intercept[idx] != oldval) { in svm_set_intercept()
421 oldval, ctrl->intercept[idx]); in svm_set_intercept()
451 ctrl->iopm_base_pa = iopm_base_pa; in vmcb_init()
452 ctrl->msrpm_base_pa = msrpm_base_pa; in vmcb_init()
455 ctrl->np_enable = 1; in vmcb_init()
456 ctrl->n_cr3 = np_pml4; in vmcb_init()
460 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. in vmcb_init()
474 if (vcpu_trace_exceptions(vcpu->vcpu)) { in vmcb_init()
506 * Non-intercepted VMMCALL causes #UD, skip it. in vmcb_init()
514 if (vcpu_trap_wbinvd(vcpu->vcpu)) { in vmcb_init()
526 * The ASID will be set to a non-zero value just before VMRUN. in vmcb_init()
528 ctrl->asid = 0; in vmcb_init()
536 ctrl->v_intr_masking = 1; in vmcb_init()
539 ctrl->lbr_virt_en = 1; in vmcb_init()
540 state->dbgctl = BIT(0); in vmcb_init()
543 state->efer = EFER_SVM; in vmcb_init()
545 /* Set up the PAT to power-on state */ in vmcb_init()
546 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | in vmcb_init()
555 /* Set up DR6/7 to power-on state */ in vmcb_init()
556 state->dr6 = DBREG_DR6_RESERVED1; in vmcb_init()
557 state->dr7 = DBREG_DR7_RESERVED1; in vmcb_init()
570 svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM, in svm_init()
572 if (svm_sc->msr_bitmap == NULL) in svm_init()
574 svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM, in svm_init()
576 if (svm_sc->iopm_bitmap == NULL) in svm_init()
579 svm_sc->vm = vm; in svm_init()
580 svm_sc->nptp = vtophys(pmap->pm_pmltop); in svm_init()
585 memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE); in svm_init()
592 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); in svm_init()
593 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); in svm_init()
594 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); in svm_init()
596 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); in svm_init()
597 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); in svm_init()
598 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); in svm_init()
599 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); in svm_init()
600 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); in svm_init()
601 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); in svm_init()
602 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); in svm_init()
603 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); in svm_init()
605 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); in svm_init()
610 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); in svm_init()
613 memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE); in svm_init()
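svm_init() first marks every MSR and I/O port as intercepted (the 0xFF memsets above) and then punches pass-through holes for the specific MSRs listed. The svm_msr_rw_ok()/svm_msr_rd_ok() helpers presumably clear the read and/or write intercept bit located by svm_msr_index(); a hedged sketch of that assumed shape:

    static void
    svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
    {
            int index, bit;

            if (svm_msr_index(msr, &index, &bit) != 0)
                    return;                                 /* not in the bitmap */
            if (read)
                    perm_bitmap[index] &= ~(1UL << bit);    /* allow RDMSR */
            if (write)
                    perm_bitmap[index] &= ~(2UL << bit);    /* allow WRMSR */
    }

    static void
    svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
    {
            svm_msr_perm(perm_bitmap, msr, true, true);
    }

    static void
    svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
    {
            svm_msr_perm(perm_bitmap, msr, true, false);
    }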
625 vcpu->sc = sc; in svm_vcpu_init()
626 vcpu->vcpu = vcpu1; in svm_vcpu_init()
627 vcpu->vcpuid = vcpuid; in svm_vcpu_init()
628 vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE, M_SVM, in svm_vcpu_init()
630 vcpu->nextrip = ~0; in svm_vcpu_init()
631 vcpu->lastcpu = NOCPU; in svm_vcpu_init()
632 vcpu->vmcb_pa = vtophys(vcpu->vmcb); in svm_vcpu_init()
633 vmcb_init(sc, vcpu, vtophys(sc->iopm_bitmap), vtophys(sc->msr_bitmap), in svm_vcpu_init()
634 sc->nptp); in svm_vcpu_init()
640 * Collateral for a generic SVM VM-exit.
646 vme->exitcode = VM_EXITCODE_SVM; in vm_exit_svm()
647 vme->u.svm.exitcode = code; in vm_exit_svm()
648 vme->u.svm.exitinfo1 = info1; in vm_exit_svm()
649 vme->u.svm.exitinfo2 = info2; in vm_exit_svm()
661 return (state->cpl); in svm_cpl()
671 state = &vmcb->state; in svm_vcpu_mode()
673 if (state->efer & EFER_LMA) { in svm_vcpu_mode()
686 } else if (state->cr0 & CR0_PE) { in svm_vcpu_mode()
715 val = in ? regs->sctx_rdi : regs->sctx_rsi; in svm_inout_str_index()
725 val = rep ? regs->sctx_rcx : 1; in svm_inout_str_count()
737 vis->seg_name = VM_REG_GUEST_ES; in svm_inout_str_seginfo()
741 vis->seg_name = vm_segment_name(s); in svm_inout_str_seginfo()
744 error = svm_getdesc(vcpu, vis->seg_name, &vis->seg_desc); in svm_inout_str_seginfo()
771 state = &vmcb->state; in svm_paging_info()
772 paging->cr3 = state->cr3; in svm_paging_info()
773 paging->cpl = svm_cpl(state); in svm_paging_info()
774 paging->cpu_mode = svm_vcpu_mode(vmcb); in svm_paging_info()
775 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, in svm_paging_info()
776 state->efer); in svm_paging_info()
798 info1 = ctrl->exitinfo1; in svm_handle_io()
811 vmexit->exitcode = VM_EXITCODE_INOUT; in svm_handle_io()
812 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; in svm_handle_io()
813 vmexit->u.inout.string = inout_string; in svm_handle_io()
814 vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0; in svm_handle_io()
815 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; in svm_handle_io()
816 vmexit->u.inout.port = (uint16_t)(info1 >> 16); in svm_handle_io()
817 vmexit->u.inout.eax = (uint32_t)(state->rax); in svm_handle_io()
820 vmexit->exitcode = VM_EXITCODE_INOUT_STR; in svm_handle_io()
821 vis = &vmexit->u.inout_str; in svm_handle_io()
822 svm_paging_info(svm_get_vmcb(vcpu), &vis->paging); in svm_handle_io()
823 vis->rflags = state->rflags; in svm_handle_io()
824 vis->cr0 = state->cr0; in svm_handle_io()
825 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); in svm_handle_io()
826 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); in svm_handle_io()
827 vis->addrsize = svm_inout_str_addrsize(info1); in svm_handle_io()
828 svm_inout_str_seginfo(vcpu, info1, vmexit->u.inout.in, vis); in svm_handle_io()
874 ctrl = &vmcb->ctrl; in svm_handle_inst_emul()
875 paging = &vmexit->u.inst_emul.paging; in svm_handle_inst_emul()
877 vmexit->exitcode = VM_EXITCODE_INST_EMUL; in svm_handle_inst_emul()
878 vmexit->u.inst_emul.gpa = gpa; in svm_handle_inst_emul()
879 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; in svm_handle_inst_emul()
885 switch(paging->cpu_mode) { in svm_handle_inst_emul()
887 vmexit->u.inst_emul.cs_base = seg.base; in svm_handle_inst_emul()
888 vmexit->u.inst_emul.cs_d = 0; in svm_handle_inst_emul()
892 vmexit->u.inst_emul.cs_base = seg.base; in svm_handle_inst_emul()
897 vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ? in svm_handle_inst_emul()
901 vmexit->u.inst_emul.cs_base = 0; in svm_handle_inst_emul()
902 vmexit->u.inst_emul.cs_d = 0; in svm_handle_inst_emul()
910 inst_len = ctrl->inst_len; in svm_handle_inst_emul()
911 inst_bytes = ctrl->inst_bytes; in svm_handle_inst_emul()
916 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); in svm_handle_inst_emul()
949 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, in svm_eventinject()
950 ("%s: event already pending %#lx", __func__, ctrl->eventinj)); in svm_eventinject()
968 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; in svm_eventinject()
970 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; in svm_eventinject()
971 ctrl->eventinj |= (uint64_t)error << 32; in svm_eventinject()
986 vlapic = vm_lapic(vcpu->vcpu); in svm_update_virqinfo()
990 vlapic_set_cr8(vlapic, ctrl->v_tpr); in svm_update_virqinfo()
993 KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid " in svm_update_virqinfo()
994 "v_intr_vector %d", __func__, ctrl->v_intr_vector)); in svm_update_virqinfo()
1004 intinfo = ctrl->exitintinfo; in svm_save_intinfo()
1016 vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1); in svm_save_intinfo()
1017 vm_exit_intinfo(vcpu->vcpu, intinfo); in svm_save_intinfo()
1036 if (ctrl->v_irq && ctrl->v_intr_vector == 0) { in enable_intr_window_exiting()
1037 KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); in enable_intr_window_exiting()
1044 ctrl->v_irq = 1; in enable_intr_window_exiting()
1045 ctrl->v_ign_tpr = 1; in enable_intr_window_exiting()
1046 ctrl->v_intr_vector = 0; in enable_intr_window_exiting()
1058 if (!ctrl->v_irq && ctrl->v_intr_vector == 0) { in disable_intr_window_exiting()
1065 ctrl->v_irq = 0; in disable_intr_window_exiting()
1066 ctrl->v_intr_vector = 0; in disable_intr_window_exiting()
1078 oldval = ctrl->intr_shadow; in svm_modify_intr_shadow()
1081 ctrl->intr_shadow = newval; in svm_modify_intr_shadow()
1093 *val = ctrl->intr_shadow; in svm_get_intr_shadow()
1161 oldval = state->efer; in svm_write_efer()
1164 newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */ in svm_write_efer()
1170 /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */ in svm_write_efer()
1172 if (state->cr0 & CR0_PG) in svm_write_efer()
1177 if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) in svm_write_efer()
1186 if (!vm_cpuid_capability(vcpu->vcpu, VCC_NO_EXECUTE)) in svm_write_efer()
1191 * XXX bhyve does not enforce segment limits in 64-bit mode. Until in svm_write_efer()
1195 vme = vm_exitinfo(vcpu->vcpu); in svm_write_efer()
1202 if (!vm_cpuid_capability(vcpu->vcpu, VCC_FFXSR)) in svm_write_efer()
1207 if (!vm_cpuid_capability(vcpu->vcpu, VCC_TCE)) in svm_write_efer()
1215 vm_inject_gp(vcpu->vcpu); in svm_write_efer()
1226 error = lapic_wrmsr(vcpu->vcpu, num, val, retu); in emulate_wrmsr()
1239 struct svm_regctx *ctx; in emulate_rdmsr() local
1244 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu); in emulate_rdmsr()
1250 ctx = svm_get_guest_regctx(vcpu); in emulate_rdmsr()
1251 state->rax = result & 0xffffffff; in emulate_rdmsr()
1252 ctx->sctx_rdx = result >> 32; in emulate_rdmsr()
1339 struct svm_regctx *ctx; in svm_vmexit() local
1345 ctx = svm_get_guest_regctx(vcpu); in svm_vmexit()
1347 state = &vmcb->state; in svm_vmexit()
1348 ctrl = &vmcb->ctrl; in svm_vmexit()
1351 code = ctrl->exitcode; in svm_vmexit()
1352 info1 = ctrl->exitinfo1; in svm_vmexit()
1353 info2 = ctrl->exitinfo2; in svm_vmexit()
1355 vmexit->exitcode = VM_EXITCODE_BOGUS; in svm_vmexit()
1356 vmexit->rip = state->rip; in svm_vmexit()
1357 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0; in svm_vmexit()
1359 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1); in svm_vmexit()
1371 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " in svm_vmexit()
1372 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); in svm_vmexit()
1374 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, in svm_vmexit()
1376 vmexit->inst_length, code, info1, info2)); in svm_vmexit()
1386 vmexit->inst_length = 0; in svm_vmexit()
1391 vmm_stat_incr(vcpu->vcpu, VMEXIT_VINTR, 1); in svm_vmexit()
1395 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1); in svm_vmexit()
1402 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1); in svm_vmexit()
1404 idtvec = code - 0x40; in svm_vmexit()
1442 if (stepped && (vcpu->caps & (1 << VM_CAP_RFLAGS_TF))) { in svm_vmexit()
1443 vmexit->exitcode = VM_EXITCODE_DB; in svm_vmexit()
1444 vmexit->u.dbg.trace_trap = 1; in svm_vmexit()
1445 vmexit->u.dbg.pushf_intercept = 0; in svm_vmexit()
1447 if (vcpu->dbg.popf_sstep) { in svm_vmexit()
1454 vcpu->dbg.popf_sstep = 0; in svm_vmexit()
1463 vcpu->dbg.rflags_tf = rflags & PSL_T; in svm_vmexit()
1464 } else if (vcpu->dbg.pushf_sstep) { in svm_vmexit()
1469 vcpu->dbg.pushf_sstep = 0; in svm_vmexit()
1477 vmexit->u.dbg.pushf_intercept = 1; in svm_vmexit()
1478 vmexit->u.dbg.tf_shadow_val = in svm_vmexit()
1479 vcpu->dbg.rflags_tf; in svm_vmexit()
1481 &vmexit->u.dbg.paging); in svm_vmexit()
1484 /* Clear DR6 "single-step" bit. */ in svm_vmexit()
1496 vmexit->exitcode = VM_EXITCODE_BPT; in svm_vmexit()
1497 vmexit->u.bpt.inst_length = vmexit->inst_length; in svm_vmexit()
1498 vmexit->inst_length = 0; in svm_vmexit()
1507 * 'inst_length' is non-zero. in svm_vmexit()
1515 vmexit->inst_length, idtvec); in svm_vmexit()
1516 vmexit->inst_length = 0; in svm_vmexit()
1525 KASSERT(vmexit->inst_length == 0, in svm_vmexit()
1528 vmexit->inst_length, idtvec)); in svm_vmexit()
1532 error = vm_inject_exception(vcpu->vcpu, idtvec, in svm_vmexit()
1540 eax = state->rax; in svm_vmexit()
1541 ecx = ctx->sctx_rcx; in svm_vmexit()
1542 edx = ctx->sctx_rdx; in svm_vmexit()
1546 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1); in svm_vmexit()
1550 vmexit->exitcode = VM_EXITCODE_WRMSR; in svm_vmexit()
1551 vmexit->u.msr.code = ecx; in svm_vmexit()
1552 vmexit->u.msr.wval = val; in svm_vmexit()
1556 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, in svm_vmexit()
1561 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1); in svm_vmexit()
1563 vmexit->exitcode = VM_EXITCODE_RDMSR; in svm_vmexit()
1564 vmexit->u.msr.code = ecx; in svm_vmexit()
1568 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, in svm_vmexit()
1575 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1); in svm_vmexit()
1578 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1); in svm_vmexit()
1579 handled = x86_emulate_cpuid(vcpu->vcpu, in svm_vmexit()
1580 &state->rax, &ctx->sctx_rbx, &ctx->sctx_rcx, in svm_vmexit()
1581 &ctx->sctx_rdx); in svm_vmexit()
1584 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1); in svm_vmexit()
1585 vmexit->exitcode = VM_EXITCODE_HLT; in svm_vmexit()
1586 vmexit->u.hlt.rflags = state->rflags; in svm_vmexit()
1589 vmexit->exitcode = VM_EXITCODE_PAUSE; in svm_vmexit()
1590 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1); in svm_vmexit()
1598 } else if (vm_mem_allocated(vcpu->vcpu, info2) || in svm_vmexit()
1599 ppt_is_mmio(svm_sc->vm, info2)) { in svm_vmexit()
1600 vmexit->exitcode = VM_EXITCODE_PAGING; in svm_vmexit()
1601 vmexit->u.paging.gpa = info2; in svm_vmexit()
1602 vmexit->u.paging.fault_type = npf_fault_type(info1); in svm_vmexit()
1603 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1); in svm_vmexit()
1606 info2, info1, state->rip); in svm_vmexit()
1609 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1); in svm_vmexit()
1612 info2, info1, state->rip); in svm_vmexit()
1616 vmexit->exitcode = VM_EXITCODE_MONITOR; in svm_vmexit()
1619 vmexit->exitcode = VM_EXITCODE_MWAIT; in svm_vmexit()
1622 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { in svm_vmexit()
1627 vmexit->inst_length = 0; in svm_vmexit()
1628 /* Disable PUSHF intercepts - avoid a loop. */ in svm_vmexit()
1635 vcpu->dbg.pushf_sstep = 1; in svm_vmexit()
1641 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { in svm_vmexit()
1646 vmexit->inst_length = 0; in svm_vmexit()
1647 			/* Disable POPF intercepts - avoid a loop. */ in svm_vmexit()
1652 vcpu->dbg.popf_sstep = 1; in svm_vmexit()
1667 vm_inject_ud(vcpu->vcpu); in svm_vmexit()
1676 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1); in svm_vmexit()
1682 vmexit->rip, vmexit->inst_length); in svm_vmexit()
1685 vmexit->rip += vmexit->inst_length; in svm_vmexit()
1686 vmexit->inst_length = 0; in svm_vmexit()
1687 state->rip = vmexit->rip; in svm_vmexit()
1689 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { in svm_vmexit()
1710 if (!vm_entry_intinfo(vcpu->vcpu, &intinfo)) in svm_inj_intinfo()
1720 vmm_stat_incr(vcpu->vcpu, VCPU_INTINFO_INJECTED, 1); in svm_inj_intinfo()
1737 if (vcpu->caps & (1 << VM_CAP_MASK_HWINTR)) { in svm_inj_interrupts()
1746 if (vcpu->nextrip != state->rip) { in svm_inj_interrupts()
1747 ctrl->intr_shadow = 0; in svm_inj_interrupts()
1750 vcpu->nextrip, state->rip); in svm_inj_interrupts()
1757 * during event delivery (i.e. ctrl->exitintinfo). in svm_inj_interrupts()
1765 if (vm_nmi_pending(vcpu->vcpu)) { in svm_inj_interrupts()
1772 "to NMI-blocking"); in svm_inj_interrupts()
1773 } else if (ctrl->intr_shadow) { in svm_inj_interrupts()
1781 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { in svm_inj_interrupts()
1787 "eventinj %#lx", ctrl->eventinj); in svm_inj_interrupts()
1790 * Use self-IPI to trigger a VM-exit as soon as in svm_inj_interrupts()
1801 vm_nmi_clear(vcpu->vcpu); in svm_inj_interrupts()
1814 extint_pending = vm_extint_pending(vcpu->vcpu); in svm_inj_interrupts()
1822 vatpic_pending_intr(sc->vm, &vector); in svm_inj_interrupts()
1831 if ((state->rflags & PSL_I) == 0) { in svm_inj_interrupts()
1833 "rflags %#lx", vector, state->rflags); in svm_inj_interrupts()
1838 if (ctrl->intr_shadow) { in svm_inj_interrupts()
1845 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { in svm_inj_interrupts()
1847 "eventinj %#lx", vector, ctrl->eventinj); in svm_inj_interrupts()
1857 vm_extint_clear(vcpu->vcpu); in svm_inj_interrupts()
1858 vatpic_intr_accepted(sc->vm, vector); in svm_inj_interrupts()
1862 * Force a VM-exit as soon as the vcpu is ready to accept another in svm_inj_interrupts()
1882 if (ctrl->v_tpr != v_tpr) { in svm_inj_interrupts()
1884 ctrl->v_tpr, v_tpr); in svm_inj_interrupts()
1885 ctrl->v_tpr = v_tpr; in svm_inj_interrupts()
1899 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || in svm_inj_interrupts()
1900 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, in svm_inj_interrupts()
1903 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); in svm_inj_interrupts()
1920 * type to "64-bit available TSS". in restore_host_tss()
1923 tss_sd->sd_type = SDT_SYSTSS; in restore_host_tss()
1936 CPU_SET_ATOMIC(cpu, &pmap->pm_active); in svm_pmap_activate()
1937 smr_enter(pmap->pm_eptsmr); in svm_pmap_activate()
1942 * The TLB entries associated with the vcpu's ASID are not valid in svm_pmap_activate()
1945 * 1. The vcpu's ASID generation is different than the host cpu's in svm_pmap_activate()
1946 * ASID generation. This happens when the vcpu migrates to a new in svm_pmap_activate()
1961 * (a) There is no mismatch in eptgen or ASID generation and therefore in svm_pmap_activate()
1964 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is in svm_pmap_activate()
1965 * retained and the TLB entries associated with this ASID in svm_pmap_activate()
1968 * (b2) If the cpu does not support FlushByAsid then a new ASID is in svm_pmap_activate()
1971 * (c) A new ASID is allocated. in svm_pmap_activate()
1973 * (d) A new ASID is allocated. in svm_pmap_activate()
1977 eptgen = atomic_load_long(&pmap->pm_eptgen); in svm_pmap_activate()
1978 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; in svm_pmap_activate()
1980 if (vcpu->asid.gen != asid[cpu].gen) { in svm_pmap_activate()
1982 } else if (vcpu->eptgen != eptgen) { in svm_pmap_activate()
1984 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ in svm_pmap_activate()
1991 KASSERT(!alloc_asid, ("ASID allocation not necessary")); in svm_pmap_activate()
1992 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, in svm_pmap_activate()
1993 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); in svm_pmap_activate()
1997 if (++asid[cpu].num >= nasid) { in svm_pmap_activate()
1998 asid[cpu].num = 1; in svm_pmap_activate()
1999 if (++asid[cpu].gen == 0) in svm_pmap_activate()
2000 asid[cpu].gen = 1; in svm_pmap_activate()
2002 * If this cpu does not support "flush-by-asid" in svm_pmap_activate()
2004 * bump. Subsequent ASID allocation in this in svm_pmap_activate()
2008 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; in svm_pmap_activate()
2010 vcpu->asid.gen = asid[cpu].gen; in svm_pmap_activate()
2011 vcpu->asid.num = asid[cpu].num; in svm_pmap_activate()
2013 ctrl->asid = vcpu->asid.num; in svm_pmap_activate()
2016 * If this cpu supports "flush-by-asid" then the TLB in svm_pmap_activate()
2018 * is flushed selectively after every new ASID allocation. in svm_pmap_activate()
2021 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; in svm_pmap_activate()
2023 vcpu->eptgen = eptgen; in svm_pmap_activate()
2025 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); in svm_pmap_activate()
2026 KASSERT(ctrl->asid == vcpu->asid.num, in svm_pmap_activate()
2027 ("ASID mismatch: %u/%u", ctrl->asid, vcpu->asid.num)); in svm_pmap_activate()
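The comment fragments and assertions above belong to svm_pmap_activate(), which decides on every guest entry whether the vcpu may keep its ASID and whether the TLB must be flushed. A condensed sketch of that decision flow, with the branches this search did not match filled in by assumption (flush_by_asid() is taken to report the FlushByAsid capability shown near the top of the listing):

    alloc_asid = false;
    eptgen = atomic_load_long(&pmap->pm_eptgen);
    ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;

    if (vcpu->asid.gen != asid[cpu].gen) {
            /* (c)/(d): stale ASID generation, e.g. after migrating cpus. */
            alloc_asid = true;
    } else if (vcpu->eptgen != eptgen) {
            /* Nested page tables changed since the last VMRUN. */
            if (flush_by_asid())
                    ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;  /* (b1) */
            else
                    alloc_asid = true;                      /* (b2) */
    } else {
            /* (a): nothing changed; keep the ASID, skip the flush. */
    }

    if (alloc_asid) {
            if (++asid[cpu].num >= nasid) {
                    /* ASID space exhausted: start a new generation. */
                    asid[cpu].num = 1;
                    if (++asid[cpu].gen == 0)
                            asid[cpu].gen = 1;
                    if (!flush_by_asid())
                            ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
            }
            vcpu->asid.gen = asid[cpu].gen;
            vcpu->asid.num = asid[cpu].num;
            ctrl->asid = vcpu->asid.num;
            if (flush_by_asid())
                    ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
    }
    vcpu->eptgen = eptgen;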
2033 smr_exit(pmap->pm_eptsmr); in svm_pmap_deactivate()
2034 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); in svm_pmap_deactivate()
2056 gctx->host_dr7 = rdr7(); in svm_dr_enter_guest()
2057 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); in svm_dr_enter_guest()
2069 gctx->host_dr0 = rdr0(); in svm_dr_enter_guest()
2070 gctx->host_dr1 = rdr1(); in svm_dr_enter_guest()
2071 gctx->host_dr2 = rdr2(); in svm_dr_enter_guest()
2072 gctx->host_dr3 = rdr3(); in svm_dr_enter_guest()
2073 gctx->host_dr6 = rdr6(); in svm_dr_enter_guest()
2076 load_dr0(gctx->sctx_dr0); in svm_dr_enter_guest()
2077 load_dr1(gctx->sctx_dr1); in svm_dr_enter_guest()
2078 load_dr2(gctx->sctx_dr2); in svm_dr_enter_guest()
2079 load_dr3(gctx->sctx_dr3); in svm_dr_enter_guest()
2087 gctx->sctx_dr0 = rdr0(); in svm_dr_leave_guest()
2088 gctx->sctx_dr1 = rdr1(); in svm_dr_leave_guest()
2089 gctx->sctx_dr2 = rdr2(); in svm_dr_leave_guest()
2090 gctx->sctx_dr3 = rdr3(); in svm_dr_leave_guest()
2096 load_dr0(gctx->host_dr0); in svm_dr_leave_guest()
2097 load_dr1(gctx->host_dr1); in svm_dr_leave_guest()
2098 load_dr2(gctx->host_dr2); in svm_dr_leave_guest()
2099 load_dr3(gctx->host_dr3); in svm_dr_leave_guest()
2100 load_dr6(gctx->host_dr6); in svm_dr_leave_guest()
2101 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl); in svm_dr_leave_guest()
2102 load_dr7(gctx->host_dr7); in svm_dr_leave_guest()
2123 svm_sc = vcpu->sc; in svm_run()
2126 vmexit = vm_exitinfo(vcpu->vcpu); in svm_run()
2127 vlapic = vm_lapic(vcpu->vcpu); in svm_run()
2130 vmcb_pa = vcpu->vmcb_pa; in svm_run()
2132 if (vcpu->lastcpu != curcpu) { in svm_run()
2134 * Force new ASID allocation by invalidating the generation. in svm_run()
2136 vcpu->asid.gen = 0; in svm_run()
2145 	 * Setting 'vcpu->lastcpu' here is a bit premature because in svm_run()
2150 * This works for now but any new side-effects of vcpu in svm_run()
2153 vcpu->lastcpu = curcpu; in svm_run()
2154 vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1); in svm_run()
2160 state->rip = rip; in svm_run()
2174 vm_exit_suspended(vcpu->vcpu, state->rip); in svm_run()
2178 if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) { in svm_run()
2180 vm_exit_rendezvous(vcpu->vcpu, state->rip); in svm_run()
2186 vm_exit_reqidle(vcpu->vcpu, state->rip); in svm_run()
2191 if (vcpu_should_yield(vcpu->vcpu)) { in svm_run()
2193 vm_exit_astpending(vcpu->vcpu, state->rip); in svm_run()
2197 if (vcpu_debugged(vcpu->vcpu)) { in svm_run()
2199 vm_exit_debug(vcpu->vcpu, state->rip); in svm_run()
2215 * Check the pmap generation and the ASID generation to in svm_run()
2220 ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty; in svm_run()
2221 vcpu->dirty = 0; in svm_run()
2222 SVM_CTR1(vcpu, "vmcb clean %#x", ctrl->vmcb_clean); in svm_run()
2225 SVM_CTR1(vcpu, "Resume execution at %#lx", state->rip); in svm_run()
2242 /* #VMEXIT disables interrupts so re-enable them here. */ in svm_run()
2246 vcpu->nextrip = state->rip; in svm_run()
2262 free(vcpu->vmcb, M_SVM); in svm_vcpu_cleanup()
2271 free(sc->iopm_bitmap, M_SVM); in svm_cleanup()
2272 free(sc->msr_bitmap, M_SVM); in svm_cleanup()
2282 return (&regctx->sctx_rbx); in swctx_regptr()
2284 return (&regctx->sctx_rcx); in swctx_regptr()
2286 return (&regctx->sctx_rdx); in swctx_regptr()
2288 return (&regctx->sctx_rdi); in swctx_regptr()
2290 return (&regctx->sctx_rsi); in swctx_regptr()
2292 return (&regctx->sctx_rbp); in swctx_regptr()
2294 return (&regctx->sctx_r8); in swctx_regptr()
2296 return (&regctx->sctx_r9); in swctx_regptr()
2298 return (&regctx->sctx_r10); in swctx_regptr()
2300 return (&regctx->sctx_r11); in swctx_regptr()
2302 return (&regctx->sctx_r12); in swctx_regptr()
2304 return (&regctx->sctx_r13); in swctx_regptr()
2306 return (&regctx->sctx_r14); in swctx_regptr()
2308 return (&regctx->sctx_r15); in swctx_regptr()
2310 return (&regctx->sctx_dr0); in swctx_regptr()
2312 return (&regctx->sctx_dr1); in swctx_regptr()
2314 return (&regctx->sctx_dr2); in swctx_regptr()
2316 return (&regctx->sctx_dr3); in swctx_regptr()
2382 * vcpu's ASID. This needs to be treated differently depending on in svm_setreg()
2409 if (meta->op == VM_SNAPSHOT_SAVE) { in svm_snapshot_reg()
2415 } else if (meta->op == VM_SNAPSHOT_RESTORE) { in svm_snapshot_reg()
2459 vlapic = vm_lapic(vcpu->vcpu); in svm_setcap()
2460 vlapic->ipi_exit = val; in svm_setcap()
2463 vcpu->caps &= ~(1 << VM_CAP_MASK_HWINTR); in svm_setcap()
2464 vcpu->caps |= (val << VM_CAP_MASK_HWINTR); in svm_setcap()
2476 vcpu->dbg.rflags_tf = rflags & PSL_T; in svm_setcap()
2483 vcpu->caps |= (1 << VM_CAP_RFLAGS_TF); in svm_setcap()
2489 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { in svm_setcap()
2491 rflags |= vcpu->dbg.rflags_tf; in svm_setcap()
2492 vcpu->dbg.rflags_tf = 0; in svm_setcap()
2499 vcpu->caps &= ~(1 << VM_CAP_RFLAGS_TF); in svm_setcap()
2543 vlapic = vm_lapic(vcpu->vcpu); in svm_getcap()
2544 *retval = vlapic->ipi_exit; in svm_getcap()
2547 *retval = !!(vcpu->caps & (1 << VM_CAP_RFLAGS_TF)); in svm_getcap()
2550 *retval = !!(vcpu->caps & (1 << VM_CAP_MASK_HWINTR)); in svm_getcap()
2579 vlapic->vm = vcpu->sc->vm; in svm_vlapic_init()
2580 vlapic->vcpu = vcpu->vcpu; in svm_vlapic_init()
2581 vlapic->vcpuid = vcpu->vcpuid; in svm_vlapic_init()
2582 vlapic->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_SVM_VLAPIC, in svm_vlapic_init()
2595 free(vlapic->apic_page, M_SVM_VLAPIC); in svm_vlapic_cleanup()
2609 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in svm_vcpu_snapshot()
2611 printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm), in svm_vcpu_snapshot()
2612 vcpu->vcpuid); in svm_vcpu_snapshot()
2759 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, err, done); in svm_vcpu_snapshot()
2760 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, err, done); in svm_vcpu_snapshot()
2761 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, err, done); in svm_vcpu_snapshot()
2762 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, err, done); in svm_vcpu_snapshot()
2763 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, err, done); in svm_vcpu_snapshot()
2764 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, err, done); in svm_vcpu_snapshot()
2765 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, err, done); in svm_vcpu_snapshot()
2766 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, err, done); in svm_vcpu_snapshot()
2767 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, err, done); in svm_vcpu_snapshot()
2768 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, err, done); in svm_vcpu_snapshot()
2769 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, err, done); in svm_vcpu_snapshot()
2770 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, err, done); in svm_vcpu_snapshot()
2771 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, err, done); in svm_vcpu_snapshot()
2772 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, err, done); in svm_vcpu_snapshot()
2773 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, err, done); in svm_vcpu_snapshot()
2774 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, err, done); in svm_vcpu_snapshot()
2775 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, err, done); in svm_vcpu_snapshot()
2776 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, err, done); in svm_vcpu_snapshot()
2781 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, err, done); in svm_vcpu_snapshot()
2784 SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, err, done); in svm_vcpu_snapshot()
2785 SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, err, done); in svm_vcpu_snapshot()
2787 /* Restore EPTGEN field - EPT is Extended Page Table */ in svm_vcpu_snapshot()
2788 SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, err, done); in svm_vcpu_snapshot()
2790 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, err, done); in svm_vcpu_snapshot()
2791 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, err, done); in svm_vcpu_snapshot()
2793 SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, sizeof(vcpu->mtrr), meta, err, done); in svm_vcpu_snapshot()
2796 if (meta->op == VM_SNAPSHOT_RESTORE) in svm_vcpu_snapshot()