Lines matching +full:reserved-ipi-vectors

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
5 * All rights reserved.
110 static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");
269 asid[cpu].num = nasid - 1; in svm_modinit()
301 ctrl->tsc_offset = offset; in svm_set_tsc_offset()
306 vm_set_tsc_offset(vcpu->vcpu, offset); in svm_set_tsc_offset()
330 switch (paging->cpu_mode) { in svm_get_cs_info()
356 *index = -1; in svm_msr_index()
365 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); in svm_msr_index()
367 off = (msr - MSR_AMD6TH_START); in svm_msr_index()
372 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); in svm_msr_index()
374 off = (msr - MSR_AMD7TH_START); in svm_msr_index()
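Note: the svm_msr_index() fragments above accumulate the sizes of the preceding MSR ranges to form a linear index into the SVM MSR permission map, where each MSR owns a two-bit read/write intercept pair. A standalone sketch of the same computation, assuming the architectural range bounds from AMD's APM vol. 2 (the MSR_* values below are assumptions, not copied from the kernel header):

    #include <stdint.h>

    #define MSR_PENTIUM_START 0x00000000u   /* 0x0000_0000 - 0x0000_1FFF */
    #define MSR_PENTIUM_END   0x00001fffu
    #define MSR_AMD6TH_START  0xc0000000u   /* 0xC000_0000 - 0xC000_1FFF */
    #define MSR_AMD6TH_END    0xc0001fffu
    #define MSR_AMD7TH_START  0xc0010000u   /* 0xC001_0000 - 0xC001_1FFF */
    #define MSR_AMD7TH_END    0xc0011fffu

    static int
    msrpm_index(uint32_t msr, int *index)
    {
            uint32_t base = 0, off;

            if (msr <= MSR_PENTIUM_END) {
                    off = msr - MSR_PENTIUM_START;
            } else if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
                    base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
                    off = msr - MSR_AMD6TH_START;
            } else if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
                    base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
                    base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
                    off = msr - MSR_AMD7TH_START;
            } else {
                    *index = -1;        /* MSR not covered by the map */
                    return (-1);
            }
            *index = (int)(base + off); /* the MSR's bit pair starts at 2*index */
            return (0);
    }
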
426 return (ctrl->intercept[idx] & bitmask ? 1 : 0); in svm_get_intercept()
438 oldval = ctrl->intercept[idx]; in svm_set_intercept()
441 ctrl->intercept[idx] |= bitmask; in svm_set_intercept()
443 ctrl->intercept[idx] &= ~bitmask; in svm_set_intercept()
445 if (ctrl->intercept[idx] != oldval) { in svm_set_intercept()
448 oldval, ctrl->intercept[idx]); in svm_set_intercept()
478 ctrl->iopm_base_pa = iopm_base_pa; in vmcb_init()
479 ctrl->msrpm_base_pa = msrpm_base_pa; in vmcb_init()
482 ctrl->np_enable = 1; in vmcb_init()
483 ctrl->n_cr3 = np_pml4; in vmcb_init()
487 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. in vmcb_init()
501 if (vcpu_trace_exceptions(vcpu->vcpu)) { in vmcb_init()
504 * Skip unimplemented vectors in the exception bitmap. in vmcb_init()
533 * Non-intercepted VMMCALL causes #UD, skip it. in vmcb_init()
541 if (vcpu_trap_wbinvd(vcpu->vcpu)) { in vmcb_init()
553 * The ASID will be set to a non-zero value just before VMRUN. in vmcb_init()
555 ctrl->asid = 0; in vmcb_init()
563 ctrl->v_intr_masking = 1; in vmcb_init()
566 ctrl->lbr_virt_en = 1; in vmcb_init()
567 state->dbgctl = BIT(0); in vmcb_init()
570 state->efer = EFER_SVM; in vmcb_init()
572 /* Set up the PAT to power-on state */ in vmcb_init()
573 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | in vmcb_init()
582 /* Set up DR6/7 to power-on state */ in vmcb_init()
583 state->dr6 = DBREG_DR6_RESERVED1; in vmcb_init()
584 state->dr7 = DBREG_DR7_RESERVED1; in vmcb_init()
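Note: line 573 above shows only the first entry of the power-on PAT. For reference, a self-contained sketch of the full architectural reset value (0x0007040600070406); the macro definitions below mirror common x86 headers and are assumptions, not copied from the source:

    #include <stdint.h>

    #define PAT_VALUE(i, m)   ((uint64_t)(m) << (8 * (i)))
    #define PAT_UNCACHEABLE   0x00
    #define PAT_WRITE_THROUGH 0x04
    #define PAT_WRITE_BACK    0x06
    #define PAT_UNCACHED      0x07  /* UC- */

    /* Power-on PAT: WB, WT, UC-, UC in entries 0-3, repeated in 4-7. */
    static const uint64_t poweron_pat =
        PAT_VALUE(0, PAT_WRITE_BACK)  | PAT_VALUE(1, PAT_WRITE_THROUGH) |
        PAT_VALUE(2, PAT_UNCACHED)    | PAT_VALUE(3, PAT_UNCACHEABLE)   |
        PAT_VALUE(4, PAT_WRITE_BACK)  | PAT_VALUE(5, PAT_WRITE_THROUGH) |
        PAT_VALUE(6, PAT_UNCACHED)    | PAT_VALUE(7, PAT_UNCACHEABLE);
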
597 svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM, in svm_init()
599 if (svm_sc->msr_bitmap == NULL) in svm_init()
601 svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM, in svm_init()
603 if (svm_sc->iopm_bitmap == NULL) in svm_init()
606 svm_sc->vm = vm; in svm_init()
607 svm_sc->nptp = vtophys(pmap->pm_pmltop); in svm_init()
612 memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE); in svm_init()
619 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); in svm_init()
620 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); in svm_init()
621 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); in svm_init()
623 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); in svm_init()
624 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); in svm_init()
625 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); in svm_init()
626 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); in svm_init()
627 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); in svm_init()
628 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); in svm_init()
629 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); in svm_init()
630 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); in svm_init()
632 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); in svm_init()
637 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); in svm_init()
640 memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE); in svm_init()
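Note: svm_init() starts from all-ones bitmaps (every MSR access and every I/O port intercepted) and then punches holes for MSRs that are safe to pass through. A sketch of the bit-pair update that svm_msr_rw_ok() implies, reusing the msrpm_index() sketch from earlier (the helper name is a placeholder, not the kernel's):

    static void
    msrpm_allow_rw(uint8_t *msrpm, uint32_t msr)
    {
            int idx;

            if (msrpm_index(msr, &idx) != 0)
                    return;         /* unmapped MSRs stay intercepted */
            /* Even bit of the pair = read intercept, odd bit = write. */
            msrpm[(2 * idx) / 8] &= ~(uint8_t)(3u << ((2 * idx) % 8));
    }
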
652 vcpu->sc = sc; in svm_vcpu_init()
653 vcpu->vcpu = vcpu1; in svm_vcpu_init()
654 vcpu->vcpuid = vcpuid; in svm_vcpu_init()
655 vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE, M_SVM, in svm_vcpu_init()
657 vcpu->nextrip = ~0; in svm_vcpu_init()
658 vcpu->lastcpu = NOCPU; in svm_vcpu_init()
659 vcpu->vmcb_pa = vtophys(vcpu->vmcb); in svm_vcpu_init()
660 vmcb_init(sc, vcpu, vtophys(sc->iopm_bitmap), vtophys(sc->msr_bitmap), in svm_vcpu_init()
661 sc->nptp); in svm_vcpu_init()
667 * Collateral for a generic SVM VM-exit.
673 vme->exitcode = VM_EXITCODE_SVM; in vm_exit_svm()
674 vme->u.svm.exitcode = code; in vm_exit_svm()
675 vme->u.svm.exitinfo1 = info1; in vm_exit_svm()
676 vme->u.svm.exitinfo2 = info2; in vm_exit_svm()
688 return (state->cpl); in svm_cpl()
698 state = &vmcb->state; in svm_vcpu_mode()
700 if (state->efer & EFER_LMA) { in svm_vcpu_mode()
713 } else if (state->cr0 & CR0_PE) { in svm_vcpu_mode()
742 val = in ? regs->sctx_rdi : regs->sctx_rsi; in svm_inout_str_index()
752 val = rep ? regs->sctx_rcx : 1; in svm_inout_str_count()
764 vis->seg_name = VM_REG_GUEST_ES; in svm_inout_str_seginfo()
776 vis->seg_name = vm_segment_name(s); in svm_inout_str_seginfo()
784 vis->seg_name = VM_REG_LAST; in svm_inout_str_seginfo()
785 svm_get_cs_info(vcpu->vmcb, &vis->paging, &vis->cs_d, in svm_inout_str_seginfo()
786 &vis->cs_base); in svm_inout_str_seginfo()
790 error = svm_getdesc(vcpu, vis->seg_name, &vis->seg_desc); in svm_inout_str_seginfo()
817 state = &vmcb->state; in svm_paging_info()
818 paging->cr3 = state->cr3; in svm_paging_info()
819 paging->cpl = svm_cpl(state); in svm_paging_info()
820 paging->cpu_mode = svm_vcpu_mode(vmcb); in svm_paging_info()
821 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, in svm_paging_info()
822 state->efer); in svm_paging_info()
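Note: svm_paging_info() above derives the guest's paging mode from CR0, CR4 and EFER via svm_paging_mode(). The case split is architectural; a minimal standalone version (enum and macro names here are placeholders, not the kernel's):

    #include <stdint.h>

    #define CR0_PG   (1u << 31)
    #define CR4_PAE  (1u << 5)
    #define EFER_LMA (1u << 10)

    enum paging_mode { PM_FLAT, PM_32, PM_PAE, PM_64 };

    static enum paging_mode
    paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
    {
            if ((cr0 & CR0_PG) == 0)
                    return (PM_FLAT);   /* paging disabled */
            if ((cr4 & CR4_PAE) == 0)
                    return (PM_32);     /* legacy 2-level paging */
            if ((efer & EFER_LMA) == 0)
                    return (PM_PAE);    /* PAE paging, not long mode */
            return (PM_64);             /* 4-level long-mode paging */
    }
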
844 info1 = ctrl->exitinfo1; in svm_handle_io()
847 vmexit->exitcode = VM_EXITCODE_INOUT; in svm_handle_io()
848 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; in svm_handle_io()
849 vmexit->u.inout.string = inout_string; in svm_handle_io()
850 vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0; in svm_handle_io()
851 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; in svm_handle_io()
852 vmexit->u.inout.port = (uint16_t)(info1 >> 16); in svm_handle_io()
853 vmexit->u.inout.eax = (uint32_t)(state->rax); in svm_handle_io()
856 vmexit->exitcode = VM_EXITCODE_INOUT_STR; in svm_handle_io()
857 vis = &vmexit->u.inout_str; in svm_handle_io()
858 svm_paging_info(svm_get_vmcb(vcpu), &vis->paging); in svm_handle_io()
859 vis->rflags = state->rflags; in svm_handle_io()
860 vis->cr0 = state->cr0; in svm_handle_io()
861 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); in svm_handle_io()
862 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); in svm_handle_io()
863 vis->addrsize = svm_inout_str_addrsize(info1); in svm_handle_io()
864 vis->cs_d = 0; in svm_handle_io()
865 vis->cs_base = 0; in svm_handle_io()
866 svm_inout_str_seginfo(vcpu, info1, vmexit->u.inout.in, vis); in svm_handle_io()
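Note: the svm_handle_io() fragments decode the IOIO intercept's EXITINFO1 word: bit 0 selects IN vs. OUT, bit 3 flags a REP prefix, bits 6:4 carry the operand size, and bits 31:16 the port. The same decode in isolation (struct and function names are placeholders):

    #include <stdint.h>

    struct ioio_info {
            int      in;    /* 1 = IN, 0 = OUT         (bit 0)      */
            int      rep;   /* REP-prefixed string op  (bit 3)      */
            int      bytes; /* operand size: 1, 2 or 4 (bits 6:4)   */
            uint16_t port;  /* I/O port number         (bits 31:16) */
    };

    static struct ioio_info
    ioio_decode(uint64_t info1)
    {
            struct ioio_info io;

            io.in    = (info1 & 0x1) ? 1 : 0;
            io.rep   = (info1 & 0x8) ? 1 : 0;
            io.bytes = (int)((info1 >> 4) & 0x7);
            io.port  = (uint16_t)(info1 >> 16);
            return (io);
    }
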
911 ctrl = &vmcb->ctrl; in svm_handle_inst_emul()
912 paging = &vmexit->u.inst_emul.paging; in svm_handle_inst_emul()
914 vmexit->exitcode = VM_EXITCODE_INST_EMUL; in svm_handle_inst_emul()
915 vmexit->u.inst_emul.gpa = gpa; in svm_handle_inst_emul()
916 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; in svm_handle_inst_emul()
919 svm_get_cs_info(vmcb, paging, &vmexit->u.inst_emul.cs_d, in svm_handle_inst_emul()
920 &vmexit->u.inst_emul.cs_base); in svm_handle_inst_emul()
926 inst_len = ctrl->inst_len; in svm_handle_inst_emul()
927 inst_bytes = ctrl->inst_bytes; in svm_handle_inst_emul()
932 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); in svm_handle_inst_emul()
965 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, in svm_eventinject()
966 ("%s: event already pending %#lx", __func__, ctrl->eventinj)); in svm_eventinject()
984 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; in svm_eventinject()
986 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; in svm_eventinject()
987 ctrl->eventinj |= (uint64_t)error << 32; in svm_eventinject()
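Note: svm_eventinject() packs the injection descriptor exactly as the VMCB EVENTINJ field expects: vector in bits 7:0, event type in bits 10:8, the error code (when valid) in bits 63:32, plus the two valid bits. A self-contained sketch, with the two bit positions taken from AMD's APM rather than the kernel header:

    #include <stdint.h>

    #define EVENTINJ_VALID    (1ull << 31)  /* bit 31: injection pending */
    #define EVENTINJ_EC_VALID (1ull << 11)  /* bit 11: push error code   */

    static uint64_t
    eventinj_pack(int intr_type, int vector, uint32_t error, int ec_valid)
    {
            uint64_t ev;

            ev = (uint64_t)(vector & 0xff) |    /* bits 7:0   vector */
                ((uint64_t)intr_type << 8) |    /* bits 10:8  type   */
                EVENTINJ_VALID;
            if (ec_valid) {
                    ev |= EVENTINJ_EC_VALID;
                    ev |= (uint64_t)error << 32;    /* bits 63:32 error */
            }
            return (ev);
    }
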
1002 vlapic = vm_lapic(vcpu->vcpu); in svm_update_virqinfo()
1006 vlapic_set_cr8(vlapic, ctrl->v_tpr); in svm_update_virqinfo()
1009 KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid " in svm_update_virqinfo()
1010 "v_intr_vector %d", __func__, ctrl->v_intr_vector)); in svm_update_virqinfo()
1020 intinfo = ctrl->exitintinfo; in svm_save_intinfo()
1032 vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1); in svm_save_intinfo()
1033 vm_exit_intinfo(vcpu->vcpu, intinfo); in svm_save_intinfo()
1052 if (ctrl->v_irq && ctrl->v_intr_vector == 0) { in enable_intr_window_exiting()
1053 KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); in enable_intr_window_exiting()
1060 ctrl->v_irq = 1; in enable_intr_window_exiting()
1061 ctrl->v_ign_tpr = 1; in enable_intr_window_exiting()
1062 ctrl->v_intr_vector = 0; in enable_intr_window_exiting()
1074 if (!ctrl->v_irq && ctrl->v_intr_vector == 0) { in disable_intr_window_exiting()
1081 ctrl->v_irq = 0; in disable_intr_window_exiting()
1082 ctrl->v_intr_vector = 0; in disable_intr_window_exiting()
1094 oldval = ctrl->intr_shadow; in svm_modify_intr_shadow()
1097 ctrl->intr_shadow = newval; in svm_modify_intr_shadow()
1109 *val = ctrl->intr_shadow; in svm_get_intr_shadow()
1177 oldval = state->efer; in svm_write_efer()
1180 newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */ in svm_write_efer()
1186 /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */ in svm_write_efer()
1188 if (state->cr0 & CR0_PG) in svm_write_efer()
1193 if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) in svm_write_efer()
1202 if (!vm_cpuid_capability(vcpu->vcpu, VCC_NO_EXECUTE)) in svm_write_efer()
1207 * XXX bhyve does not enforce segment limits in 64-bit mode. Until in svm_write_efer()
1211 vme = vm_exitinfo(vcpu->vcpu); in svm_write_efer()
1218 if (!vm_cpuid_capability(vcpu->vcpu, VCC_FFXSR)) in svm_write_efer()
1223 if (!vm_cpuid_capability(vcpu->vcpu, VCC_TCE)) in svm_write_efer()
1231 vm_inject_gp(vcpu->vcpu); in svm_write_efer()
1242 error = lapic_wrmsr(vcpu->vcpu, num, val, retu); in emulate_wrmsr()
1260 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu); in emulate_rdmsr()
1267 state->rax = result & 0xffffffff; in emulate_rdmsr()
1268 ctx->sctx_rdx = result >> 32; in emulate_rdmsr()
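Note: emulate_rdmsr() returns the 64-bit MSR value through the EDX:EAX convention, and the WRMSR path reassembles it the same way. The split/join in isolation (helper names are placeholders):

    #include <stdint.h>

    static void
    msr_split(uint64_t result, uint64_t *rax, uint64_t *rdx)
    {
            *rax = result & 0xffffffff;     /* low 32 bits  -> (R)AX */
            *rdx = result >> 32;            /* high 32 bits -> (R)DX */
    }

    static uint64_t
    msr_join(uint64_t rax, uint64_t rdx)
    {
            return ((rdx << 32) | (rax & 0xffffffff));
    }
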
1363 state = &vmcb->state; in svm_vmexit()
1364 ctrl = &vmcb->ctrl; in svm_vmexit()
1367 code = ctrl->exitcode; in svm_vmexit()
1368 info1 = ctrl->exitinfo1; in svm_vmexit()
1369 info2 = ctrl->exitinfo2; in svm_vmexit()
1371 vmexit->exitcode = VM_EXITCODE_BOGUS; in svm_vmexit()
1372 vmexit->rip = state->rip; in svm_vmexit()
1373 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0; in svm_vmexit()
1375 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1); in svm_vmexit()
1387 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " in svm_vmexit()
1388 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); in svm_vmexit()
1390 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, in svm_vmexit()
1392 vmexit->inst_length, code, info1, info2)); in svm_vmexit()
1402 vmexit->inst_length = 0; in svm_vmexit()
1407 vmm_stat_incr(vcpu->vcpu, VMEXIT_VINTR, 1); in svm_vmexit()
1411 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1); in svm_vmexit()
1418 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1); in svm_vmexit()
1420 idtvec = code - 0x40; in svm_vmexit()
1458 if (stepped && (vcpu->caps & (1 << VM_CAP_RFLAGS_TF))) { in svm_vmexit()
1459 vmexit->exitcode = VM_EXITCODE_DB; in svm_vmexit()
1460 vmexit->u.dbg.trace_trap = 1; in svm_vmexit()
1461 vmexit->u.dbg.pushf_intercept = 0; in svm_vmexit()
1463 if (vcpu->dbg.popf_sstep) { in svm_vmexit()
1470 vcpu->dbg.popf_sstep = 0; in svm_vmexit()
1479 vcpu->dbg.rflags_tf = rflags & PSL_T; in svm_vmexit()
1480 } else if (vcpu->dbg.pushf_sstep) { in svm_vmexit()
1485 vcpu->dbg.pushf_sstep = 0; in svm_vmexit()
1493 vmexit->u.dbg.pushf_intercept = 1; in svm_vmexit()
1494 vmexit->u.dbg.tf_shadow_val = in svm_vmexit()
1495 vcpu->dbg.rflags_tf; in svm_vmexit()
1497 &vmexit->u.dbg.paging); in svm_vmexit()
1500 /* Clear DR6 "single-step" bit. */ in svm_vmexit()
1512 vmexit->exitcode = VM_EXITCODE_BPT; in svm_vmexit()
1513 vmexit->u.bpt.inst_length = vmexit->inst_length; in svm_vmexit()
1514 vmexit->inst_length = 0; in svm_vmexit()
1523 * 'inst_length' is non-zero. in svm_vmexit()
1531 vmexit->inst_length, idtvec); in svm_vmexit()
1532 vmexit->inst_length = 0; in svm_vmexit()
1541 KASSERT(vmexit->inst_length == 0, in svm_vmexit()
1544 vmexit->inst_length, idtvec)); in svm_vmexit()
1548 error = vm_inject_exception(vcpu->vcpu, idtvec, in svm_vmexit()
1556 eax = state->rax; in svm_vmexit()
1557 ecx = ctx->sctx_rcx; in svm_vmexit()
1558 edx = ctx->sctx_rdx; in svm_vmexit()
1562 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1); in svm_vmexit()
1566 vmexit->exitcode = VM_EXITCODE_WRMSR; in svm_vmexit()
1567 vmexit->u.msr.code = ecx; in svm_vmexit()
1568 vmexit->u.msr.wval = val; in svm_vmexit()
1572 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, in svm_vmexit()
1577 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1); in svm_vmexit()
1579 vmexit->exitcode = VM_EXITCODE_RDMSR; in svm_vmexit()
1580 vmexit->u.msr.code = ecx; in svm_vmexit()
1584 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, in svm_vmexit()
1591 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1); in svm_vmexit()
1594 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1); in svm_vmexit()
1595 handled = x86_emulate_cpuid(vcpu->vcpu, in svm_vmexit()
1596 &state->rax, &ctx->sctx_rbx, &ctx->sctx_rcx, in svm_vmexit()
1597 &ctx->sctx_rdx); in svm_vmexit()
1600 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1); in svm_vmexit()
1601 vmexit->exitcode = VM_EXITCODE_HLT; in svm_vmexit()
1602 vmexit->u.hlt.rflags = state->rflags; in svm_vmexit()
1605 vmexit->exitcode = VM_EXITCODE_PAUSE; in svm_vmexit()
1606 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1); in svm_vmexit()
1612 "reserved bits set: info1(%#lx) info2(%#lx)", in svm_vmexit()
1614 } else if (vm_mem_allocated(vcpu->vcpu, info2) || in svm_vmexit()
1615 ppt_is_mmio(svm_sc->vm, info2)) { in svm_vmexit()
1616 vmexit->exitcode = VM_EXITCODE_PAGING; in svm_vmexit()
1617 vmexit->u.paging.gpa = info2; in svm_vmexit()
1618 vmexit->u.paging.fault_type = npf_fault_type(info1); in svm_vmexit()
1619 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1); in svm_vmexit()
1622 info2, info1, state->rip); in svm_vmexit()
1625 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1); in svm_vmexit()
1628 info2, info1, state->rip); in svm_vmexit()
1632 vmexit->exitcode = VM_EXITCODE_MONITOR; in svm_vmexit()
1635 vmexit->exitcode = VM_EXITCODE_MWAIT; in svm_vmexit()
1638 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { in svm_vmexit()
1643 vmexit->inst_length = 0; in svm_vmexit()
1644 /* Disable PUSHF intercepts - avoid a loop. */ in svm_vmexit()
1651 vcpu->dbg.pushf_sstep = 1; in svm_vmexit()
1657 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { in svm_vmexit()
1662 vmexit->inst_length = 0; in svm_vmexit()
1663 /* Disable POPF intercepts - avoid a loop. */ in svm_vmexit()
1668 vcpu->dbg.popf_sstep = 1; in svm_vmexit()
1683 vm_inject_ud(vcpu->vcpu); in svm_vmexit()
1692 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1); in svm_vmexit()
1698 vmexit->rip, vmexit->inst_length); in svm_vmexit()
1701 vmexit->rip += vmexit->inst_length; in svm_vmexit()
1702 vmexit->inst_length = 0; in svm_vmexit()
1703 state->rip = vmexit->rip; in svm_vmexit()
1705 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { in svm_vmexit()
1726 if (!vm_entry_intinfo(vcpu->vcpu, &intinfo)) in svm_inj_intinfo()
1736 vmm_stat_incr(vcpu->vcpu, VCPU_INTINFO_INJECTED, 1); in svm_inj_intinfo()
1753 if (vcpu->caps & (1 << VM_CAP_MASK_HWINTR)) { in svm_inj_interrupts()
1762 if (vcpu->nextrip != state->rip) { in svm_inj_interrupts()
1763 ctrl->intr_shadow = 0; in svm_inj_interrupts()
1766 vcpu->nextrip, state->rip); in svm_inj_interrupts()
1773 * during event delivery (i.e. ctrl->exitintinfo). in svm_inj_interrupts()
1781 if (vm_nmi_pending(vcpu->vcpu)) { in svm_inj_interrupts()
1788 "to NMI-blocking"); in svm_inj_interrupts()
1789 } else if (ctrl->intr_shadow) { in svm_inj_interrupts()
1797 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { in svm_inj_interrupts()
1803 "eventinj %#lx", ctrl->eventinj); in svm_inj_interrupts()
1806 * Use self-IPI to trigger a VM-exit as soon as in svm_inj_interrupts()
1817 vm_nmi_clear(vcpu->vcpu); in svm_inj_interrupts()
1830 extint_pending = vm_extint_pending(vcpu->vcpu); in svm_inj_interrupts()
1838 vatpic_pending_intr(sc->vm, &vector); in svm_inj_interrupts()
1847 if ((state->rflags & PSL_I) == 0) { in svm_inj_interrupts()
1849 "rflags %#lx", vector, state->rflags); in svm_inj_interrupts()
1854 if (ctrl->intr_shadow) { in svm_inj_interrupts()
1861 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { in svm_inj_interrupts()
1863 "eventinj %#lx", vector, ctrl->eventinj); in svm_inj_interrupts()
1873 vm_extint_clear(vcpu->vcpu); in svm_inj_interrupts()
1874 vatpic_intr_accepted(sc->vm, vector); in svm_inj_interrupts()
1878 * Force a VM-exit as soon as the vcpu is ready to accept another in svm_inj_interrupts()
1898 if (ctrl->v_tpr != v_tpr) { in svm_inj_interrupts()
1900 ctrl->v_tpr, v_tpr); in svm_inj_interrupts()
1901 ctrl->v_tpr = v_tpr; in svm_inj_interrupts()
1915 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || in svm_inj_interrupts()
1916 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, in svm_inj_interrupts()
1919 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); in svm_inj_interrupts()
1936 * type to "64-bit available TSS". in restore_host_tss()
1939 tss_sd->sd_type = SDT_SYSTSS; in restore_host_tss()
1952 CPU_SET_ATOMIC(cpu, &pmap->pm_active); in svm_pmap_activate()
1953 smr_enter(pmap->pm_eptsmr); in svm_pmap_activate()
1993 eptgen = atomic_load_long(&pmap->pm_eptgen); in svm_pmap_activate()
1994 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; in svm_pmap_activate()
1996 if (vcpu->asid.gen != asid[cpu].gen) { in svm_pmap_activate()
1998 } else if (vcpu->eptgen != eptgen) { in svm_pmap_activate()
2000 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ in svm_pmap_activate()
2008 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, in svm_pmap_activate()
2009 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); in svm_pmap_activate()
2018 * If this cpu does not support "flush-by-asid" in svm_pmap_activate()
2024 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; in svm_pmap_activate()
2026 vcpu->asid.gen = asid[cpu].gen; in svm_pmap_activate()
2027 vcpu->asid.num = asid[cpu].num; in svm_pmap_activate()
2029 ctrl->asid = vcpu->asid.num; in svm_pmap_activate()
2032 * If this cpu supports "flush-by-asid" then the TLB in svm_pmap_activate()
2037 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; in svm_pmap_activate()
2039 vcpu->eptgen = eptgen; in svm_pmap_activate()
2041 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); in svm_pmap_activate()
2042 KASSERT(ctrl->asid == vcpu->asid.num, in svm_pmap_activate()
2043 ("ASID mismatch: %u/%u", ctrl->asid, vcpu->asid.num)); in svm_pmap_activate()
2049 smr_exit(pmap->pm_eptsmr); in svm_pmap_deactivate()
2050 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); in svm_pmap_deactivate()
2072 gctx->host_dr7 = rdr7(); in svm_dr_enter_guest()
2073 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); in svm_dr_enter_guest()
2085 gctx->host_dr0 = rdr0(); in svm_dr_enter_guest()
2086 gctx->host_dr1 = rdr1(); in svm_dr_enter_guest()
2087 gctx->host_dr2 = rdr2(); in svm_dr_enter_guest()
2088 gctx->host_dr3 = rdr3(); in svm_dr_enter_guest()
2089 gctx->host_dr6 = rdr6(); in svm_dr_enter_guest()
2092 load_dr0(gctx->sctx_dr0); in svm_dr_enter_guest()
2093 load_dr1(gctx->sctx_dr1); in svm_dr_enter_guest()
2094 load_dr2(gctx->sctx_dr2); in svm_dr_enter_guest()
2095 load_dr3(gctx->sctx_dr3); in svm_dr_enter_guest()
2103 gctx->sctx_dr0 = rdr0(); in svm_dr_leave_guest()
2104 gctx->sctx_dr1 = rdr1(); in svm_dr_leave_guest()
2105 gctx->sctx_dr2 = rdr2(); in svm_dr_leave_guest()
2106 gctx->sctx_dr3 = rdr3(); in svm_dr_leave_guest()
2112 load_dr0(gctx->host_dr0); in svm_dr_leave_guest()
2113 load_dr1(gctx->host_dr1); in svm_dr_leave_guest()
2114 load_dr2(gctx->host_dr2); in svm_dr_leave_guest()
2115 load_dr3(gctx->host_dr3); in svm_dr_leave_guest()
2116 load_dr6(gctx->host_dr6); in svm_dr_leave_guest()
2117 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl); in svm_dr_leave_guest()
2118 load_dr7(gctx->host_dr7); in svm_dr_leave_guest()
2139 svm_sc = vcpu->sc; in svm_run()
2142 vmexit = vm_exitinfo(vcpu->vcpu); in svm_run()
2143 vlapic = vm_lapic(vcpu->vcpu); in svm_run()
2146 vmcb_pa = vcpu->vmcb_pa; in svm_run()
2148 if (vcpu->lastcpu != curcpu) { in svm_run()
2152 vcpu->asid.gen = 0; in svm_run()
2161 * Setting 'vcpu->lastcpu' here is a bit premature because in svm_run()
2166 * This works for now but any new side-effects of vcpu in svm_run()
2169 vcpu->lastcpu = curcpu; in svm_run()
2170 vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1); in svm_run()
2176 state->rip = rip; in svm_run()
2190 vm_exit_suspended(vcpu->vcpu, state->rip); in svm_run()
2194 if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) { in svm_run()
2196 vm_exit_rendezvous(vcpu->vcpu, state->rip); in svm_run()
2202 vm_exit_reqidle(vcpu->vcpu, state->rip); in svm_run()
2207 if (vcpu_should_yield(vcpu->vcpu)) { in svm_run()
2209 vm_exit_astpending(vcpu->vcpu, state->rip); in svm_run()
2213 if (vcpu_debugged(vcpu->vcpu)) { in svm_run()
2215 vm_exit_debug(vcpu->vcpu, state->rip); in svm_run()
2236 ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty; in svm_run()
2237 vcpu->dirty = 0; in svm_run()
2238 SVM_CTR1(vcpu, "vmcb clean %#x", ctrl->vmcb_clean); in svm_run()
2241 SVM_CTR1(vcpu, "Resume execution at %#lx", state->rip); in svm_run()
2258 /* #VMEXIT disables interrupts so re-enable them here. */ in svm_run()
2262 vcpu->nextrip = state->rip; in svm_run()
2278 free(vcpu->vmcb, M_SVM); in svm_vcpu_cleanup()
2287 free(sc->iopm_bitmap, M_SVM); in svm_cleanup()
2288 free(sc->msr_bitmap, M_SVM); in svm_cleanup()
2298 return (&regctx->sctx_rbx); in swctx_regptr()
2300 return (&regctx->sctx_rcx); in swctx_regptr()
2302 return (&regctx->sctx_rdx); in swctx_regptr()
2304 return (&regctx->sctx_rdi); in swctx_regptr()
2306 return (&regctx->sctx_rsi); in swctx_regptr()
2308 return (&regctx->sctx_rbp); in swctx_regptr()
2310 return (&regctx->sctx_r8); in swctx_regptr()
2312 return (&regctx->sctx_r9); in swctx_regptr()
2314 return (&regctx->sctx_r10); in swctx_regptr()
2316 return (&regctx->sctx_r11); in swctx_regptr()
2318 return (&regctx->sctx_r12); in swctx_regptr()
2320 return (&regctx->sctx_r13); in swctx_regptr()
2322 return (&regctx->sctx_r14); in swctx_regptr()
2324 return (&regctx->sctx_r15); in swctx_regptr()
2326 return (&regctx->sctx_dr0); in swctx_regptr()
2328 return (&regctx->sctx_dr1); in swctx_regptr()
2330 return (&regctx->sctx_dr2); in swctx_regptr()
2332 return (&regctx->sctx_dr3); in swctx_regptr()
2425 if (meta->op == VM_SNAPSHOT_SAVE) { in svm_snapshot_reg()
2431 } else if (meta->op == VM_SNAPSHOT_RESTORE) { in svm_snapshot_reg()
2475 vlapic = vm_lapic(vcpu->vcpu); in svm_setcap()
2476 vlapic->ipi_exit = val; in svm_setcap()
2479 vcpu->caps &= ~(1 << VM_CAP_MASK_HWINTR); in svm_setcap()
2480 vcpu->caps |= (val << VM_CAP_MASK_HWINTR); in svm_setcap()
2492 vcpu->dbg.rflags_tf = rflags & PSL_T; in svm_setcap()
2499 vcpu->caps |= (1 << VM_CAP_RFLAGS_TF); in svm_setcap()
2505 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { in svm_setcap()
2507 rflags |= vcpu->dbg.rflags_tf; in svm_setcap()
2508 vcpu->dbg.rflags_tf = 0; in svm_setcap()
2515 vcpu->caps &= ~(1 << VM_CAP_RFLAGS_TF); in svm_setcap()
2559 vlapic = vm_lapic(vcpu->vcpu); in svm_getcap()
2560 *retval = vlapic->ipi_exit; in svm_getcap()
2563 *retval = !!(vcpu->caps & (1 << VM_CAP_RFLAGS_TF)); in svm_getcap()
2566 *retval = !!(vcpu->caps & (1 << VM_CAP_MASK_HWINTR)); in svm_getcap()
2595 vlapic->vm = vcpu->sc->vm; in svm_vlapic_init()
2596 vlapic->vcpu = vcpu->vcpu; in svm_vlapic_init()
2597 vlapic->vcpuid = vcpu->vcpuid; in svm_vlapic_init()
2598 vlapic->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_SVM_VLAPIC, in svm_vlapic_init()
2611 free(vlapic->apic_page, M_SVM_VLAPIC); in svm_vlapic_cleanup()
2625 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in svm_vcpu_snapshot()
2627 printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm), in svm_vcpu_snapshot()
2628 vcpu->vcpuid); in svm_vcpu_snapshot()
2775 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, err, done); in svm_vcpu_snapshot()
2776 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, err, done); in svm_vcpu_snapshot()
2777 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, err, done); in svm_vcpu_snapshot()
2778 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, err, done); in svm_vcpu_snapshot()
2779 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, err, done); in svm_vcpu_snapshot()
2780 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, err, done); in svm_vcpu_snapshot()
2781 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, err, done); in svm_vcpu_snapshot()
2782 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, err, done); in svm_vcpu_snapshot()
2783 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, err, done); in svm_vcpu_snapshot()
2784 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, err, done); in svm_vcpu_snapshot()
2785 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, err, done); in svm_vcpu_snapshot()
2786 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, err, done); in svm_vcpu_snapshot()
2787 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, err, done); in svm_vcpu_snapshot()
2788 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, err, done); in svm_vcpu_snapshot()
2789 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, err, done); in svm_vcpu_snapshot()
2790 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, err, done); in svm_vcpu_snapshot()
2791 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, err, done); in svm_vcpu_snapshot()
2792 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, err, done); in svm_vcpu_snapshot()
2797 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, err, done); in svm_vcpu_snapshot()
2800 SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, err, done); in svm_vcpu_snapshot()
2801 SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, err, done); in svm_vcpu_snapshot()
2803 /* Restore EPTGEN field - EPT is Extended Page Table */ in svm_vcpu_snapshot()
2804 SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, err, done); in svm_vcpu_snapshot()
2806 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, err, done); in svm_vcpu_snapshot()
2807 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, err, done); in svm_vcpu_snapshot()
2809 SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, sizeof(vcpu->mtrr), meta, err, done); in svm_vcpu_snapshot()
2812 if (meta->op == VM_SNAPSHOT_RESTORE) in svm_vcpu_snapshot()