Lines Matching +full:fault +full:- +full:inject

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
108 static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");
267 asid[cpu].num = nasid - 1; in svm_modinit()
299 ctrl->tsc_offset = offset; in svm_set_tsc_offset()
304 vm_set_tsc_offset(vcpu->vcpu, offset); in svm_set_tsc_offset()
327 *index = -1; in svm_msr_index()
336 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); in svm_msr_index()
338 off = (msr - MSR_AMD6TH_START); in svm_msr_index()
343 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); in svm_msr_index()
345 off = (msr - MSR_AMD7TH_START); in svm_msr_index()
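
The svm_msr_index() fragments above map an MSR number onto its slot in the MSR permission bitmap: each architecturally covered range occupies a contiguous run of slots, and each later range is offset by the combined size of the earlier ones. A minimal user-space sketch of that computation, with illustrative range constants rather than the kernel's own headers:

        #include <stdint.h>

        /* Illustrative range bounds (the real values live in vmm headers). */
        #define MSR_PENTIUM_START 0x00000000u
        #define MSR_PENTIUM_END   0x00001fffu
        #define MSR_AMD6TH_START  0xc0000000u
        #define MSR_AMD6TH_END    0xc0001fffu
        #define MSR_AMD7TH_START  0xc0010000u
        #define MSR_AMD7TH_END    0xc0011fffu

        static int
        msr_index(uint32_t msr, uint64_t *index)
        {
                uint64_t base = 0;

                if (msr <= MSR_PENTIUM_END) {
                        *index = msr;
                        return (0);
                }
                base += MSR_PENTIUM_END - MSR_PENTIUM_START + 1;
                if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
                        *index = base + (msr - MSR_AMD6TH_START);
                        return (0);
                }
                base += MSR_AMD6TH_END - MSR_AMD6TH_START + 1;
                if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
                        *index = base + (msr - MSR_AMD7TH_START);
                        return (0);
                }
                *index = (uint64_t)-1;
                return (-1);    /* MSR not covered by the permission map */
        }
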
397 return (ctrl->intercept[idx] & bitmask ? 1 : 0); in svm_get_intercept()
409 oldval = ctrl->intercept[idx]; in svm_set_intercept()
412 ctrl->intercept[idx] |= bitmask; in svm_set_intercept()
414 ctrl->intercept[idx] &= ~bitmask; in svm_set_intercept()
416 if (ctrl->intercept[idx] != oldval) { in svm_set_intercept()
419 oldval, ctrl->intercept[idx]); in svm_set_intercept()
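
svm_get_intercept()/svm_set_intercept() treat the VMCB intercept controls as an array of 32-bit words, one bit per interceptable event; a write also flags the VMCB cache dirty, but only when a bit actually changed. A hedged sketch, with the dirty tracking reduced to a stub standing in for svm_set_dirty():

        #include <stdint.h>
        #include <assert.h>

        #define NWORDS 5        /* intercept dwords in the VMCB control area */

        struct fake_ctrl {
                uint32_t intercept[NWORDS];
        };

        static void
        mark_dirty(struct fake_ctrl *ctrl)
        {
                (void)ctrl;     /* stand-in for svm_set_dirty(vcpu, VMCB_CACHE_I) */
        }

        static void
        set_intercept(struct fake_ctrl *ctrl, int idx, uint32_t bitmask, int enabled)
        {
                uint32_t oldval;

                assert(idx >= 0 && idx < NWORDS);
                oldval = ctrl->intercept[idx];
                if (enabled)
                        ctrl->intercept[idx] |= bitmask;
                else
                        ctrl->intercept[idx] &= ~bitmask;
                if (ctrl->intercept[idx] != oldval)
                        mark_dirty(ctrl);       /* only flush real changes */
        }
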
449 ctrl->iopm_base_pa = iopm_base_pa; in vmcb_init()
450 ctrl->msrpm_base_pa = msrpm_base_pa; in vmcb_init()
453 ctrl->np_enable = 1; in vmcb_init()
454 ctrl->n_cr3 = np_pml4; in vmcb_init()
458 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. in vmcb_init()
472 if (vcpu_trace_exceptions(vcpu->vcpu)) { in vmcb_init()
504 * Non-intercepted VMMCALL causes #UD, skip it. in vmcb_init()
512 if (vcpu_trap_wbinvd(vcpu->vcpu)) { in vmcb_init()
524 * The ASID will be set to a non-zero value just before VMRUN. in vmcb_init()
526 ctrl->asid = 0; in vmcb_init()
534 ctrl->v_intr_masking = 1; in vmcb_init()
537 ctrl->lbr_virt_en = 1; in vmcb_init()
538 state->dbgctl = BIT(0); in vmcb_init()
541 state->efer = EFER_SVM; in vmcb_init()
543 /* Set up the PAT to power-on state */ in vmcb_init()
544 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | in vmcb_init()
553 /* Set up DR6/7 to power-on state */ in vmcb_init()
554 state->dr6 = DBREG_DR6_RESERVED1; in vmcb_init()
555 state->dr7 = DBREG_DR7_RESERVED1; in vmcb_init()
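
vmcb_init() above initializes g_pat to the architectural power-on PAT: WB, WT, UC- and UC in entries 0-3, mirrored in entries 4-7. PAT_VALUE(i, m) places memory type m in byte i of the 64-bit MSR, so the value is easy to check standalone; a self-contained sketch using the architecturally defined type encodings:

        #include <stdint.h>
        #include <stdio.h>

        /* Memory types as encoded in the PAT (architecturally defined). */
        #define PAT_UNCACHEABLE      0x00
        #define PAT_WRITE_THROUGH    0x04
        #define PAT_WRITE_BACK       0x06
        #define PAT_UNCACHED         0x07      /* UC- */

        #define PAT_VALUE(i, m) ((uint64_t)(m) << (8 * (i)))

        /* Power-on PAT: WB, WT, UC-, UC in entries 0-3, mirrored in 4-7. */
        static const uint64_t pat_reset =
            PAT_VALUE(0, PAT_WRITE_BACK) | PAT_VALUE(1, PAT_WRITE_THROUGH) |
            PAT_VALUE(2, PAT_UNCACHED)   | PAT_VALUE(3, PAT_UNCACHEABLE)   |
            PAT_VALUE(4, PAT_WRITE_BACK) | PAT_VALUE(5, PAT_WRITE_THROUGH) |
            PAT_VALUE(6, PAT_UNCACHED)   | PAT_VALUE(7, PAT_UNCACHEABLE);

        int
        main(void)
        {
                /* Prints 0x0007040600070406, the documented reset value. */
                printf("power-on PAT = %#018llx\n", (unsigned long long)pat_reset);
                return (0);
        }
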
568 svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM, in svm_init()
570 if (svm_sc->msr_bitmap == NULL) in svm_init()
572 svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM, in svm_init()
574 if (svm_sc->iopm_bitmap == NULL) in svm_init()
577 svm_sc->vm = vm; in svm_init()
578 svm_sc->nptp = vtophys(pmap->pm_pmltop); in svm_init()
583 memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE); in svm_init()
590 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); in svm_init()
591 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); in svm_init()
592 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); in svm_init()
594 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); in svm_init()
595 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); in svm_init()
596 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); in svm_init()
597 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); in svm_init()
598 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); in svm_init()
599 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); in svm_init()
600 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); in svm_init()
601 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); in svm_init()
603 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); in svm_init()
608 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); in svm_init()
611 memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE); in svm_init()
623 vcpu->sc = sc; in svm_vcpu_init()
624 vcpu->vcpu = vcpu1; in svm_vcpu_init()
625 vcpu->vcpuid = vcpuid; in svm_vcpu_init()
626 vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE, M_SVM, in svm_vcpu_init()
628 vcpu->nextrip = ~0; in svm_vcpu_init()
629 vcpu->lastcpu = NOCPU; in svm_vcpu_init()
630 vcpu->vmcb_pa = vtophys(vcpu->vmcb); in svm_vcpu_init()
631 vmcb_init(sc, vcpu, vtophys(sc->iopm_bitmap), vtophys(sc->msr_bitmap), in svm_vcpu_init()
632 sc->nptp); in svm_vcpu_init()
638 * Collateral for a generic SVM VM-exit.
644 vme->exitcode = VM_EXITCODE_SVM; in vm_exit_svm()
645 vme->u.svm.exitcode = code; in vm_exit_svm()
646 vme->u.svm.exitinfo1 = info1; in vm_exit_svm()
647 vme->u.svm.exitinfo2 = info2; in vm_exit_svm()
659 return (state->cpl); in svm_cpl()
669 state = &vmcb->state; in svm_vcpu_mode()
671 if (state->efer & EFER_LMA) { in svm_vcpu_mode()
684 } else if (state->cr0 & CR0_PE) { in svm_vcpu_mode()
713 val = in ? regs->sctx_rdi : regs->sctx_rsi; in svm_inout_str_index()
723 val = rep ? regs->sctx_rcx : 1; in svm_inout_str_count()
735 vis->seg_name = VM_REG_GUEST_ES; in svm_inout_str_seginfo()
739 vis->seg_name = vm_segment_name(s); in svm_inout_str_seginfo()
742 error = svm_getdesc(vcpu, vis->seg_name, &vis->seg_desc); in svm_inout_str_seginfo()
769 state = &vmcb->state; in svm_paging_info()
770 paging->cr3 = state->cr3; in svm_paging_info()
771 paging->cpl = svm_cpl(state); in svm_paging_info()
772 paging->cpu_mode = svm_vcpu_mode(vmcb); in svm_paging_info()
773 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, in svm_paging_info()
774 state->efer); in svm_paging_info()
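
svm_vcpu_mode() and svm_paging_info() derive the guest execution state straight from VMCB fields: EFER.LMA selects long mode, with CS.L distinguishing 64-bit from compatibility mode, and CR0.PE separates protected from real mode. A compact sketch of that decision tree (bit positions per the architecture manuals):

        #include <stdint.h>
        #include <stdbool.h>

        #define EFER_LMA (1ull << 10)   /* long mode active */
        #define CR0_PE   (1ull << 0)    /* protection enable */

        enum cpu_mode { MODE_REAL, MODE_PROTECTED, MODE_COMPAT, MODE_64BIT };

        static enum cpu_mode
        guest_cpu_mode(uint64_t efer, uint64_t cr0, bool cs_long_bit)
        {
                if (efer & EFER_LMA)    /* long mode: CS.L picks the sub-mode */
                        return (cs_long_bit ? MODE_64BIT : MODE_COMPAT);
                if (cr0 & CR0_PE)
                        return (MODE_PROTECTED);
                return (MODE_REAL);
        }
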
796 info1 = ctrl->exitinfo1; in svm_handle_io()
809 vmexit->exitcode = VM_EXITCODE_INOUT; in svm_handle_io()
810 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; in svm_handle_io()
811 vmexit->u.inout.string = inout_string; in svm_handle_io()
812 vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0; in svm_handle_io()
813 vmexit->u.inout.bytes = (info1 >> 4) & 0x7; in svm_handle_io()
814 vmexit->u.inout.port = (uint16_t)(info1 >> 16); in svm_handle_io()
815 vmexit->u.inout.eax = (uint32_t)(state->rax); in svm_handle_io()
818 vmexit->exitcode = VM_EXITCODE_INOUT_STR; in svm_handle_io()
819 vis = &vmexit->u.inout_str; in svm_handle_io()
820 svm_paging_info(svm_get_vmcb(vcpu), &vis->paging); in svm_handle_io()
821 vis->rflags = state->rflags; in svm_handle_io()
822 vis->cr0 = state->cr0; in svm_handle_io()
823 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); in svm_handle_io()
824 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); in svm_handle_io()
825 vis->addrsize = svm_inout_str_addrsize(info1); in svm_handle_io()
826 svm_inout_str_seginfo(vcpu, info1, vmexit->u.inout.in, vis); in svm_handle_io()
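
svm_handle_io() unpacks EXITINFO1 from an IOIO intercept: per the APM, bit 0 gives the direction, bit 2 flags a string instruction, bit 3 a REP prefix, bits 6:4 carry the operand size as a one-hot field, and bits 31:16 the port. A standalone decoder with a small usage example:

        #include <stdint.h>
        #include <stdio.h>

        struct inout {
                int      in;            /* 1 = IN, 0 = OUT */
                int      string;        /* INS/OUTS */
                int      rep;
                int      bytes;         /* 1, 2 or 4 */
                uint16_t port;
        };

        static void
        decode_ioio(uint64_t info1, struct inout *io)
        {
                io->in     = (info1 & (1u << 0)) ? 1 : 0;
                io->string = (info1 & (1u << 2)) ? 1 : 0;
                io->rep    = (info1 & (1u << 3)) ? 1 : 0;
                io->bytes  = (info1 >> 4) & 0x7;        /* one-hot: 1, 2 or 4 */
                io->port   = (uint16_t)(info1 >> 16);
        }

        int
        main(void)
        {
                struct inout io;

                /* e.g. "in %al, $0x71": IN, 1 byte, port 0x71 */
                decode_ioio((0x71ull << 16) | (1u << 4) | 1u, &io);
                printf("%s port %#x, %d byte(s)\n", io.in ? "IN" : "OUT",
                    io.port, io.bytes);
                return (0);
        }
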
872 ctrl = &vmcb->ctrl; in svm_handle_inst_emul()
873 paging = &vmexit->u.inst_emul.paging; in svm_handle_inst_emul()
875 vmexit->exitcode = VM_EXITCODE_INST_EMUL; in svm_handle_inst_emul()
876 vmexit->u.inst_emul.gpa = gpa; in svm_handle_inst_emul()
877 vmexit->u.inst_emul.gla = VIE_INVALID_GLA; in svm_handle_inst_emul()
883 switch(paging->cpu_mode) { in svm_handle_inst_emul()
885 vmexit->u.inst_emul.cs_base = seg.base; in svm_handle_inst_emul()
886 vmexit->u.inst_emul.cs_d = 0; in svm_handle_inst_emul()
890 vmexit->u.inst_emul.cs_base = seg.base; in svm_handle_inst_emul()
895 vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ? in svm_handle_inst_emul()
899 vmexit->u.inst_emul.cs_base = 0; in svm_handle_inst_emul()
900 vmexit->u.inst_emul.cs_d = 0; in svm_handle_inst_emul()
908 inst_len = ctrl->inst_len; in svm_handle_inst_emul()
909 inst_bytes = ctrl->inst_bytes; in svm_handle_inst_emul()
914 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); in svm_handle_inst_emul()
937 * Inject an event to vcpu as described in section 15.20, "Event injection".
947 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, in svm_eventinject()
948 ("%s: event already pending %#lx", __func__, ctrl->eventinj)); in svm_eventinject()
966 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; in svm_eventinject()
968 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; in svm_eventinject()
969 ctrl->eventinj |= (uint64_t)error << 32; in svm_eventinject()
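
svm_eventinject() assembles the EVENTINJ field exactly as APM section 15.20 lays it out: vector in bits 7:0, event type in bits 10:8, error-code-valid in bit 11, the valid bit in bit 31, and the error code itself in bits 63:32. A sketch of the packing, with illustrative macro names:

        #include <stdint.h>

        #define EVENTINJ_VALID    (1u << 31)
        #define EVENTINJ_EC_VALID (1u << 11)

        /* Event types from the APM: 0 = external intr, 2 = NMI, 3 = exception. */
        enum { INJ_TYPE_INTR = 0, INJ_TYPE_NMI = 2, INJ_TYPE_EXCEPTION = 3 };

        static uint64_t
        pack_eventinj(int type, int vector, uint32_t error, int ec_valid)
        {
                uint64_t ev;

                ev = (uint8_t)vector | ((uint64_t)type << 8) | EVENTINJ_VALID;
                if (ec_valid) {
                        ev |= EVENTINJ_EC_VALID;
                        ev |= (uint64_t)error << 32;
                }
                return (ev);
        }
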
984 vlapic = vm_lapic(vcpu->vcpu); in svm_update_virqinfo()
988 vlapic_set_cr8(vlapic, ctrl->v_tpr); in svm_update_virqinfo()
991 KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid " in svm_update_virqinfo()
992 "v_intr_vector %d", __func__, ctrl->v_intr_vector)); in svm_update_virqinfo()
1002 intinfo = ctrl->exitintinfo; in svm_save_intinfo()
1014 vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1); in svm_save_intinfo()
1015 vm_exit_intinfo(vcpu->vcpu, intinfo); in svm_save_intinfo()
1034 if (ctrl->v_irq && ctrl->v_intr_vector == 0) { in enable_intr_window_exiting()
1035 KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); in enable_intr_window_exiting()
1042 ctrl->v_irq = 1; in enable_intr_window_exiting()
1043 ctrl->v_ign_tpr = 1; in enable_intr_window_exiting()
1044 ctrl->v_intr_vector = 0; in enable_intr_window_exiting()
1056 if (!ctrl->v_irq && ctrl->v_intr_vector == 0) { in disable_intr_window_exiting()
1063 ctrl->v_irq = 0; in disable_intr_window_exiting()
1064 ctrl->v_intr_vector = 0; in disable_intr_window_exiting()
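
enable_intr_window_exiting()/disable_intr_window_exiting() above rely on a trick: arming a virtual interrupt with vector 0 (and V_IGN_TPR set) makes the CPU raise a VINTR #VMEXIT the moment the guest's interrupt window opens, without actually delivering anything. A minimal sketch of the two operations:

        #include <stdint.h>

        struct virq_state {
                uint8_t v_irq;          /* virtual INTR pending */
                uint8_t v_ign_tpr;      /* deliver regardless of vTPR */
                uint8_t v_intr_vector;
        };

        /*
         * Arm a dummy virtual interrupt (vector 0) so the CPU exits with
         * a VINTR #VMEXIT as soon as the guest can take interrupts again
         * (RFLAGS.IF set and no interrupt shadow).
         */
        static void
        enable_intr_window(struct virq_state *v)
        {
                v->v_irq = 1;
                v->v_ign_tpr = 1;
                v->v_intr_vector = 0;
        }

        static void
        disable_intr_window(struct virq_state *v)
        {
                v->v_irq = 0;
                v->v_intr_vector = 0;
        }
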
1076 oldval = ctrl->intr_shadow; in svm_modify_intr_shadow()
1079 ctrl->intr_shadow = newval; in svm_modify_intr_shadow()
1091 *val = ctrl->intr_shadow; in svm_get_intr_shadow()
1127 * the "iret" when it runs next. However, it is possible to inject in clear_nmi_blocking()
1159 oldval = state->efer; in svm_write_efer()
1162 newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */ in svm_write_efer()
1168 /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */ in svm_write_efer()
1170 if (state->cr0 & CR0_PG) in svm_write_efer()
1175 if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) in svm_write_efer()
1184 if (!vm_cpuid_capability(vcpu->vcpu, VCC_NO_EXECUTE)) in svm_write_efer()
1189 * XXX bhyve does not enforce segment limits in 64-bit mode. Until in svm_write_efer()
1193 vme = vm_exitinfo(vcpu->vcpu); in svm_write_efer()
1200 if (!vm_cpuid_capability(vcpu->vcpu, VCC_FFXSR)) in svm_write_efer()
1205 if (!vm_cpuid_capability(vcpu->vcpu, VCC_TCE)) in svm_write_efer()
1213 vm_inject_gp(vcpu->vcpu); in svm_write_efer()
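
svm_write_efer() vets a guest WRMSR to EFER before committing it: the Read-As-Zero bits are stripped, the long-mode bits may not change while CR0.PG is set (the APMv2 Table 14-5 consistency checks), and feature bits like NXE are honoured only if the matching CPUID capability was exposed to the guest; a failed check injects #GP instead. A condensed, approximate sketch of the shape of those checks (the capability probe is a stand-in for vm_cpuid_capability()):

        #include <stdint.h>
        #include <stdbool.h>

        #define EFER_LME (1ull << 8)
        #define EFER_LMA (1ull << 10)
        #define EFER_NXE (1ull << 11)
        #define CR0_PG   (1ull << 31)

        /* Stand-in for vm_cpuid_capability(vcpu, VCC_NO_EXECUTE) etc. */
        static bool guest_has_nx(void) { return (true); }

        /* Returns 0 on success, -1 if the write must raise #GP instead. */
        static int
        check_efer_write(uint64_t oldval, uint64_t *newval, uint64_t cr0)
        {
                *newval &= ~0xFEull;            /* clear Read-As-Zero bits */

                /* Long-mode enables are immutable while paging is on. */
                if ((cr0 & CR0_PG) != 0 &&
                    ((*newval ^ oldval) & (EFER_LMA | EFER_LME)) != 0)
                        return (-1);

                /* Don't let the guest set bits its CPUID doesn't advertise. */
                if ((*newval & EFER_NXE) != 0 && !guest_has_nx())
                        return (-1);

                return (0);
        }
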
1224 error = lapic_wrmsr(vcpu->vcpu, num, val, retu); in emulate_wrmsr()
1242 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu); in emulate_rdmsr()
1249 state->rax = result & 0xffffffff; in emulate_rdmsr()
1250 ctx->sctx_rdx = result >> 32; in emulate_rdmsr()
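
emulate_rdmsr() returns the 64-bit result the way the RDMSR instruction itself does, split across EAX and EDX. A trivial worked example of the split:

        #include <stdint.h>
        #include <assert.h>

        int
        main(void)
        {
                uint64_t result = 0x1122334455667788ull;
                uint64_t rax = result & 0xffffffff;     /* guest EAX */
                uint64_t rdx = result >> 32;            /* guest EDX */

                assert(((rdx << 32) | rax) == result);
                return (0);
        }
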
1345 state = &vmcb->state; in svm_vmexit()
1346 ctrl = &vmcb->ctrl; in svm_vmexit()
1349 code = ctrl->exitcode; in svm_vmexit()
1350 info1 = ctrl->exitinfo1; in svm_vmexit()
1351 info2 = ctrl->exitinfo2; in svm_vmexit()
1353 vmexit->exitcode = VM_EXITCODE_BOGUS; in svm_vmexit()
1354 vmexit->rip = state->rip; in svm_vmexit()
1355 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0; in svm_vmexit()
1357 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1); in svm_vmexit()
1369 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " in svm_vmexit()
1370 "injection valid bit is set %#lx", __func__, ctrl->eventinj)); in svm_vmexit()
1372 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, in svm_vmexit()
1374 vmexit->inst_length, code, info1, info2)); in svm_vmexit()
1384 vmexit->inst_length = 0; in svm_vmexit()
1389 vmm_stat_incr(vcpu->vcpu, VMEXIT_VINTR, 1); in svm_vmexit()
1393 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1); in svm_vmexit()
1400 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1); in svm_vmexit()
1402 idtvec = code - 0x40; in svm_vmexit()
1440 if (stepped && (vcpu->caps & (1 << VM_CAP_RFLAGS_TF))) { in svm_vmexit()
1441 vmexit->exitcode = VM_EXITCODE_DB; in svm_vmexit()
1442 vmexit->u.dbg.trace_trap = 1; in svm_vmexit()
1443 vmexit->u.dbg.pushf_intercept = 0; in svm_vmexit()
1445 if (vcpu->dbg.popf_sstep) { in svm_vmexit()
1452 vcpu->dbg.popf_sstep = 0; in svm_vmexit()
1461 vcpu->dbg.rflags_tf = rflags & PSL_T; in svm_vmexit()
1462 } else if (vcpu->dbg.pushf_sstep) { in svm_vmexit()
1467 vcpu->dbg.pushf_sstep = 0; in svm_vmexit()
1475 vmexit->u.dbg.pushf_intercept = 1; in svm_vmexit()
1476 vmexit->u.dbg.tf_shadow_val = in svm_vmexit()
1477 vcpu->dbg.rflags_tf; in svm_vmexit()
1479 &vmexit->u.dbg.paging); in svm_vmexit()
1482 /* Clear DR6 "single-step" bit. */ in svm_vmexit()
1494 vmexit->exitcode = VM_EXITCODE_BPT; in svm_vmexit()
1495 vmexit->u.bpt.inst_length = vmexit->inst_length; in svm_vmexit()
1496 vmexit->inst_length = 0; in svm_vmexit()
1505 * 'inst_length' is non-zero. in svm_vmexit()
1513 vmexit->inst_length, idtvec); in svm_vmexit()
1514 vmexit->inst_length = 0; in svm_vmexit()
1523 KASSERT(vmexit->inst_length == 0, in svm_vmexit()
1526 vmexit->inst_length, idtvec)); in svm_vmexit()
1530 error = vm_inject_exception(vcpu->vcpu, idtvec, in svm_vmexit()
1538 eax = state->rax; in svm_vmexit()
1539 ecx = ctx->sctx_rcx; in svm_vmexit()
1540 edx = ctx->sctx_rdx; in svm_vmexit()
1544 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1); in svm_vmexit()
1548 vmexit->exitcode = VM_EXITCODE_WRMSR; in svm_vmexit()
1549 vmexit->u.msr.code = ecx; in svm_vmexit()
1550 vmexit->u.msr.wval = val; in svm_vmexit()
1554 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, in svm_vmexit()
1559 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1); in svm_vmexit()
1561 vmexit->exitcode = VM_EXITCODE_RDMSR; in svm_vmexit()
1562 vmexit->u.msr.code = ecx; in svm_vmexit()
1566 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, in svm_vmexit()
1573 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1); in svm_vmexit()
1576 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1); in svm_vmexit()
1577 handled = x86_emulate_cpuid(vcpu->vcpu, in svm_vmexit()
1578 &state->rax, &ctx->sctx_rbx, &ctx->sctx_rcx, in svm_vmexit()
1579 &ctx->sctx_rdx); in svm_vmexit()
1582 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1); in svm_vmexit()
1583 vmexit->exitcode = VM_EXITCODE_HLT; in svm_vmexit()
1584 vmexit->u.hlt.rflags = state->rflags; in svm_vmexit()
1587 vmexit->exitcode = VM_EXITCODE_PAUSE; in svm_vmexit()
1588 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1); in svm_vmexit()
1593 SVM_CTR2(vcpu, "nested page fault with " in svm_vmexit()
1596 } else if (vm_mem_allocated(vcpu->vcpu, info2)) { in svm_vmexit()
1597 vmexit->exitcode = VM_EXITCODE_PAGING; in svm_vmexit()
1598 vmexit->u.paging.gpa = info2; in svm_vmexit()
1599 vmexit->u.paging.fault_type = npf_fault_type(info1); in svm_vmexit()
1600 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1); in svm_vmexit()
1601 SVM_CTR3(vcpu, "nested page fault " in svm_vmexit()
1603 info2, info1, state->rip); in svm_vmexit()
1606 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1); in svm_vmexit()
1607 SVM_CTR3(vcpu, "inst_emul fault " in svm_vmexit()
1609 info2, info1, state->rip); in svm_vmexit()
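
The nested-page-fault handling above classifies the exit by the faulting GPA: a reserved-bit fault in the nested tables is a host error, a GPA backed by guest memory becomes a VM_EXITCODE_PAGING exit so the host can fault the page in, and anything else (typically MMIO) is routed to instruction emulation. A hedged sketch of that dispatch, with simplified stand-ins for the kernel predicates:

        #include <stdint.h>
        #include <stdbool.h>

        enum exit_kind { EXIT_FATAL_RSVD, EXIT_PAGING, EXIT_INST_EMUL };

        /* Stand-ins for the reserved-bit test and vm_mem_allocated(). */
        static bool npf_rsvd_fault(uint64_t info1) { return ((info1 & (1u << 3)) != 0); }
        static bool gpa_is_ram(uint64_t gpa) { return (gpa < (1ull << 30)); }

        static enum exit_kind
        classify_npf(uint64_t info1, uint64_t gpa)
        {
                if (npf_rsvd_fault(info1))
                        return (EXIT_FATAL_RSVD);       /* bad nested PTE: host bug */
                if (gpa_is_ram(gpa))
                        return (EXIT_PAGING);           /* let the host fault it in */
                return (EXIT_INST_EMUL);                /* MMIO: decode and emulate */
        }
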
1613 vmexit->exitcode = VM_EXITCODE_MONITOR; in svm_vmexit()
1616 vmexit->exitcode = VM_EXITCODE_MWAIT; in svm_vmexit()
1619 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { in svm_vmexit()
1624 vmexit->inst_length = 0; in svm_vmexit()
1625 /* Disable PUSHF intercepts - avoid a loop. */ in svm_vmexit()
1632 vcpu->dbg.pushf_sstep = 1; in svm_vmexit()
1638 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { in svm_vmexit()
1643 vmexit->inst_length = 0; in svm_vmexit()
1644 /* Disable POPF intercepts - avoid a loop. */ in svm_vmexit()
1649 vcpu->dbg.popf_sstep = 1; in svm_vmexit()
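
The VM_CAP_RFLAGS_TF machinery around the PUSHF/POPF intercepts keeps the trap flag virtualised while a debugger single-steps the guest: an intercepted PUSHF or POPF is itself single-stepped (hence pushf_sstep/popf_sstep and the temporarily disabled intercepts, which would otherwise loop), and the #DB that follows lets the hypervisor fix up or harvest the guest-visible TF. A schematic sketch of the state involved, assuming hypothetical handler entry points:

        #include <stdint.h>
        #include <stdbool.h>

        #define PSL_T (1ull << 8)       /* RFLAGS trap flag */

        struct tf_shadow {
                uint64_t rflags_tf;     /* TF value the guest believes it set */
                bool     pushf_sstep;   /* single-stepping an emulated PUSHF */
                bool     popf_sstep;    /* single-stepping an emulated POPF */
        };

        /* PUSHF intercepted: run it one step, then fix the stack image. */
        static void
        on_pushf(struct tf_shadow *s, uint64_t *rflags)
        {
                s->pushf_sstep = true;
                *rflags |= PSL_T;       /* trap right after the pushf executes */
        }

        /* POPF intercepted: run it one step, then re-harvest guest TF. */
        static void
        on_popf(struct tf_shadow *s, uint64_t *rflags)
        {
                s->popf_sstep = true;
                *rflags |= PSL_T;
        }

        /* #DB after a stepped POPF: record what the guest tried to set. */
        static void
        on_db_after_popf(struct tf_shadow *s, uint64_t *rflags)
        {
                s->popf_sstep = false;
                s->rflags_tf = *rflags & PSL_T; /* guest's intended TF */
                *rflags |= PSL_T;               /* keep hypervisor stepping armed */
        }
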
1664 vm_inject_ud(vcpu->vcpu); in svm_vmexit()
1673 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1); in svm_vmexit()
1679 vmexit->rip, vmexit->inst_length); in svm_vmexit()
1682 vmexit->rip += vmexit->inst_length; in svm_vmexit()
1683 vmexit->inst_length = 0; in svm_vmexit()
1684 state->rip = vmexit->rip; in svm_vmexit()
1686 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { in svm_vmexit()
1707 if (!vm_entry_intinfo(vcpu->vcpu, &intinfo)) in svm_inj_intinfo()
1717 vmm_stat_incr(vcpu->vcpu, VCPU_INTINFO_INJECTED, 1); in svm_inj_intinfo()
1722 * Inject event to virtual cpu.
1734 if (vcpu->caps & (1 << VM_CAP_MASK_HWINTR)) { in svm_inj_interrupts()
1743 if (vcpu->nextrip != state->rip) { in svm_inj_interrupts()
1744 ctrl->intr_shadow = 0; in svm_inj_interrupts()
1747 vcpu->nextrip, state->rip); in svm_inj_interrupts()
1751 * Inject pending events or exceptions for this vcpu. in svm_inj_interrupts()
1754 * during event delivery (i.e. ctrl->exitintinfo). in svm_inj_interrupts()
1762 if (vm_nmi_pending(vcpu->vcpu)) { in svm_inj_interrupts()
1765 * Can't inject another NMI if the guest has not in svm_inj_interrupts()
1768 SVM_CTR0(vcpu, "Cannot inject NMI due " in svm_inj_interrupts()
1769 "to NMI-blocking"); in svm_inj_interrupts()
1770 } else if (ctrl->intr_shadow) { in svm_inj_interrupts()
1772 * Can't inject an NMI if the vcpu is in an intr_shadow. in svm_inj_interrupts()
1774 SVM_CTR0(vcpu, "Cannot inject NMI due to " in svm_inj_interrupts()
1778 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { in svm_inj_interrupts()
1783 SVM_CTR1(vcpu, "Cannot inject NMI due to " in svm_inj_interrupts()
1784 "eventinj %#lx", ctrl->eventinj); in svm_inj_interrupts()
1787 * Use self-IPI to trigger a VM-exit as soon as in svm_inj_interrupts()
1798 vm_nmi_clear(vcpu->vcpu); in svm_inj_interrupts()
1800 /* Inject NMI, vector number is not used */ in svm_inj_interrupts()
1811 extint_pending = vm_extint_pending(vcpu->vcpu); in svm_inj_interrupts()
1818 /* Ask the legacy pic for a vector to inject */ in svm_inj_interrupts()
1819 vatpic_pending_intr(sc->vm, &vector); in svm_inj_interrupts()
1826 * then we cannot inject the pending interrupt. in svm_inj_interrupts()
1828 if ((state->rflags & PSL_I) == 0) { in svm_inj_interrupts()
1829 SVM_CTR2(vcpu, "Cannot inject vector %d due to " in svm_inj_interrupts()
1830 "rflags %#lx", vector, state->rflags); in svm_inj_interrupts()
1835 if (ctrl->intr_shadow) { in svm_inj_interrupts()
1836 SVM_CTR1(vcpu, "Cannot inject vector %d due to " in svm_inj_interrupts()
1842 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { in svm_inj_interrupts()
1843 SVM_CTR2(vcpu, "Cannot inject vector %d due to " in svm_inj_interrupts()
1844 "eventinj %#lx", vector, ctrl->eventinj); in svm_inj_interrupts()
1854 vm_extint_clear(vcpu->vcpu); in svm_inj_interrupts()
1855 vatpic_intr_accepted(sc->vm, vector); in svm_inj_interrupts()
1859 * Force a VM-exit as soon as the vcpu is ready to accept another in svm_inj_interrupts()
1861 * that it wants to inject. Also, if the APIC has a pending interrupt in svm_inj_interrupts()
1862 * that was preempted by the ExtInt then it allows us to inject the in svm_inj_interrupts()
1879 if (ctrl->v_tpr != v_tpr) { in svm_inj_interrupts()
1881 ctrl->v_tpr, v_tpr); in svm_inj_interrupts()
1882 ctrl->v_tpr = v_tpr; in svm_inj_interrupts()
1896 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || in svm_inj_interrupts()
1897 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, in svm_inj_interrupts()
1900 ctrl->eventinj, ctrl->intr_shadow, state->rflags)); in svm_inj_interrupts()
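
The closing KASSERT above encodes the injection contract for maskable interrupts, which the earlier checks enforce one by one: RFLAGS.IF must be set, the vcpu must not sit in an interrupt shadow, and no earlier event may still occupy EVENTINJ (the NMI path adds its own iret-interception blocking on top of these). As a predicate:

        #include <stdint.h>
        #include <stdbool.h>

        #define PSL_I          (1ull << 9)      /* RFLAGS interrupt-enable flag */
        #define EVENTINJ_VALID (1ull << 31)

        static bool
        can_inject_intr(uint64_t rflags, uint64_t intr_shadow, uint64_t eventinj)
        {
                if ((rflags & PSL_I) == 0)
                        return (false);         /* interrupts masked */
                if (intr_shadow != 0)
                        return (false);         /* e.g. right after STI or MOV SS */
                if ((eventinj & EVENTINJ_VALID) != 0)
                        return (false);         /* previous injection still pending */
                return (true);
        }
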
1917 * type to "64-bit available TSS". in restore_host_tss()
1920 tss_sd->sd_type = SDT_SYSTSS; in restore_host_tss()
1933 CPU_SET_ATOMIC(cpu, &pmap->pm_active); in svm_pmap_activate()
1934 smr_enter(pmap->pm_eptsmr); in svm_pmap_activate()
1974 eptgen = atomic_load_long(&pmap->pm_eptgen); in svm_pmap_activate()
1975 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; in svm_pmap_activate()
1977 if (vcpu->asid.gen != asid[cpu].gen) { in svm_pmap_activate()
1979 } else if (vcpu->eptgen != eptgen) { in svm_pmap_activate()
1981 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ in svm_pmap_activate()
1989 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, in svm_pmap_activate()
1990 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); in svm_pmap_activate()
1999 * If this cpu does not support "flush-by-asid" in svm_pmap_activate()
2005 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; in svm_pmap_activate()
2007 vcpu->asid.gen = asid[cpu].gen; in svm_pmap_activate()
2008 vcpu->asid.num = asid[cpu].num; in svm_pmap_activate()
2010 ctrl->asid = vcpu->asid.num; in svm_pmap_activate()
2013 * If this cpu supports "flush-by-asid" then the TLB in svm_pmap_activate()
2018 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; in svm_pmap_activate()
2020 vcpu->eptgen = eptgen; in svm_pmap_activate()
2022 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); in svm_pmap_activate()
2023 KASSERT(ctrl->asid == vcpu->asid.num, in svm_pmap_activate()
2024 ("ASID mismatch: %u/%u", ctrl->asid, vcpu->asid.num)); in svm_pmap_activate()
2030 smr_exit(pmap->pm_eptsmr); in svm_pmap_deactivate()
2031 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); in svm_pmap_deactivate()
2053 gctx->host_dr7 = rdr7(); in svm_dr_enter_guest()
2054 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); in svm_dr_enter_guest()
2066 gctx->host_dr0 = rdr0(); in svm_dr_enter_guest()
2067 gctx->host_dr1 = rdr1(); in svm_dr_enter_guest()
2068 gctx->host_dr2 = rdr2(); in svm_dr_enter_guest()
2069 gctx->host_dr3 = rdr3(); in svm_dr_enter_guest()
2070 gctx->host_dr6 = rdr6(); in svm_dr_enter_guest()
2073 load_dr0(gctx->sctx_dr0); in svm_dr_enter_guest()
2074 load_dr1(gctx->sctx_dr1); in svm_dr_enter_guest()
2075 load_dr2(gctx->sctx_dr2); in svm_dr_enter_guest()
2076 load_dr3(gctx->sctx_dr3); in svm_dr_enter_guest()
2084 gctx->sctx_dr0 = rdr0(); in svm_dr_leave_guest()
2085 gctx->sctx_dr1 = rdr1(); in svm_dr_leave_guest()
2086 gctx->sctx_dr2 = rdr2(); in svm_dr_leave_guest()
2087 gctx->sctx_dr3 = rdr3(); in svm_dr_leave_guest()
2093 load_dr0(gctx->host_dr0); in svm_dr_leave_guest()
2094 load_dr1(gctx->host_dr1); in svm_dr_leave_guest()
2095 load_dr2(gctx->host_dr2); in svm_dr_leave_guest()
2096 load_dr3(gctx->host_dr3); in svm_dr_leave_guest()
2097 load_dr6(gctx->host_dr6); in svm_dr_leave_guest()
2098 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl); in svm_dr_leave_guest()
2099 load_dr7(gctx->host_dr7); in svm_dr_leave_guest()
2120 svm_sc = vcpu->sc; in svm_run()
2123 vmexit = vm_exitinfo(vcpu->vcpu); in svm_run()
2124 vlapic = vm_lapic(vcpu->vcpu); in svm_run()
2127 vmcb_pa = vcpu->vmcb_pa; in svm_run()
2129 if (vcpu->lastcpu != curcpu) { in svm_run()
2133 vcpu->asid.gen = 0; in svm_run()
2142 * Setting 'vcpu->lastcpu' here is a bit premature because in svm_run()
2147 * This works for now but any new side-effects of vcpu in svm_run()
2150 vcpu->lastcpu = curcpu; in svm_run()
2151 vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1); in svm_run()
2157 state->rip = rip; in svm_run()
2171 vm_exit_suspended(vcpu->vcpu, state->rip); in svm_run()
2175 if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) { in svm_run()
2177 vm_exit_rendezvous(vcpu->vcpu, state->rip); in svm_run()
2183 vm_exit_reqidle(vcpu->vcpu, state->rip); in svm_run()
2188 if (vcpu_should_yield(vcpu->vcpu)) { in svm_run()
2190 vm_exit_astpending(vcpu->vcpu, state->rip); in svm_run()
2194 if (vcpu_debugged(vcpu->vcpu)) { in svm_run()
2196 vm_exit_debug(vcpu->vcpu, state->rip); in svm_run()
2217 ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty; in svm_run()
2218 vcpu->dirty = 0; in svm_run()
2219 SVM_CTR1(vcpu, "vmcb clean %#x", ctrl->vmcb_clean); in svm_run()
2222 SVM_CTR1(vcpu, "Resume execution at %#lx", state->rip); in svm_run()
2239 /* #VMEXIT disables interrupts so re-enable them here. */ in svm_run()
2243 vcpu->nextrip = state->rip; in svm_run()
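
The svm_run() fragments above follow a fixed entry protocol: interrupts are disabled before the final suspend/rendezvous/reqidle/AST/debug checks so that a host event arriving late still forces a #VMEXIT, and they are re-enabled only after the exit state is captured. A schematic of the loop, with stub functions standing in for the kernel primitives:

        #include <stdbool.h>

        /* Stubs standing in for the kernel primitives used by svm_run(). */
        static void disable_interrupts(void) {}
        static void enable_interrupts(void) {}
        static bool pending_host_event(void) { return (false); }
        static void activate_nested_pmap(void) {}
        static void inject_pending_events(void) {}
        static void vmrun(void) {}
        static bool handle_exit(void) { return (false); }

        static void
        run_loop(void)
        {
                for (;;) {
                        disable_interrupts();
                        if (pending_host_event()) {     /* suspend/rendezvous/AST */
                                enable_interrupts();
                                break;
                        }
                        activate_nested_pmap();         /* ASID + TLB control */
                        inject_pending_events();
                        vmrun();                        /* returns at #VMEXIT, IF=0 */
                        enable_interrupts();
                        if (!handle_exit())
                                break;                  /* userland must act */
                }
        }
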
2259 free(vcpu->vmcb, M_SVM); in svm_vcpu_cleanup()
2268 free(sc->iopm_bitmap, M_SVM); in svm_cleanup()
2269 free(sc->msr_bitmap, M_SVM); in svm_cleanup()
2279 return (&regctx->sctx_rbx); in swctx_regptr()
2281 return (&regctx->sctx_rcx); in swctx_regptr()
2283 return (&regctx->sctx_rdx); in swctx_regptr()
2285 return (&regctx->sctx_rdi); in swctx_regptr()
2287 return (&regctx->sctx_rsi); in swctx_regptr()
2289 return (&regctx->sctx_rbp); in swctx_regptr()
2291 return (&regctx->sctx_r8); in swctx_regptr()
2293 return (&regctx->sctx_r9); in swctx_regptr()
2295 return (&regctx->sctx_r10); in swctx_regptr()
2297 return (&regctx->sctx_r11); in swctx_regptr()
2299 return (&regctx->sctx_r12); in swctx_regptr()
2301 return (&regctx->sctx_r13); in swctx_regptr()
2303 return (&regctx->sctx_r14); in swctx_regptr()
2305 return (&regctx->sctx_r15); in swctx_regptr()
2307 return (&regctx->sctx_dr0); in swctx_regptr()
2309 return (&regctx->sctx_dr1); in swctx_regptr()
2311 return (&regctx->sctx_dr2); in swctx_regptr()
2313 return (&regctx->sctx_dr3); in swctx_regptr()
2406 if (meta->op == VM_SNAPSHOT_SAVE) { in svm_snapshot_reg()
2412 } else if (meta->op == VM_SNAPSHOT_RESTORE) { in svm_snapshot_reg()
2456 vlapic = vm_lapic(vcpu->vcpu); in svm_setcap()
2457 vlapic->ipi_exit = val; in svm_setcap()
2460 vcpu->caps &= ~(1 << VM_CAP_MASK_HWINTR); in svm_setcap()
2461 vcpu->caps |= (val << VM_CAP_MASK_HWINTR); in svm_setcap()
2473 vcpu->dbg.rflags_tf = rflags & PSL_T; in svm_setcap()
2480 vcpu->caps |= (1 << VM_CAP_RFLAGS_TF); in svm_setcap()
2486 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { in svm_setcap()
2488 rflags |= vcpu->dbg.rflags_tf; in svm_setcap()
2489 vcpu->dbg.rflags_tf = 0; in svm_setcap()
2496 vcpu->caps &= ~(1 << VM_CAP_RFLAGS_TF); in svm_setcap()
2540 vlapic = vm_lapic(vcpu->vcpu); in svm_getcap()
2541 *retval = vlapic->ipi_exit; in svm_getcap()
2544 *retval = !!(vcpu->caps & (1 << VM_CAP_RFLAGS_TF)); in svm_getcap()
2547 *retval = !!(vcpu->caps & (1 << VM_CAP_MASK_HWINTR)); in svm_getcap()
2576 vlapic->vm = vcpu->sc->vm; in svm_vlapic_init()
2577 vlapic->vcpu = vcpu->vcpu; in svm_vlapic_init()
2578 vlapic->vcpuid = vcpu->vcpuid; in svm_vlapic_init()
2579 vlapic->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_SVM_VLAPIC, in svm_vlapic_init()
2592 free(vlapic->apic_page, M_SVM_VLAPIC); in svm_vlapic_cleanup()
2606 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in svm_vcpu_snapshot()
2608 printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm), in svm_vcpu_snapshot()
2609 vcpu->vcpuid); in svm_vcpu_snapshot()
2756 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, err, done); in svm_vcpu_snapshot()
2757 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, err, done); in svm_vcpu_snapshot()
2758 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, err, done); in svm_vcpu_snapshot()
2759 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, err, done); in svm_vcpu_snapshot()
2760 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, err, done); in svm_vcpu_snapshot()
2761 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, err, done); in svm_vcpu_snapshot()
2762 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, err, done); in svm_vcpu_snapshot()
2763 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, err, done); in svm_vcpu_snapshot()
2764 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, err, done); in svm_vcpu_snapshot()
2765 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, err, done); in svm_vcpu_snapshot()
2766 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, err, done); in svm_vcpu_snapshot()
2767 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, err, done); in svm_vcpu_snapshot()
2768 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, err, done); in svm_vcpu_snapshot()
2769 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, err, done); in svm_vcpu_snapshot()
2770 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, err, done); in svm_vcpu_snapshot()
2771 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, err, done); in svm_vcpu_snapshot()
2772 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, err, done); in svm_vcpu_snapshot()
2773 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, err, done); in svm_vcpu_snapshot()
2778 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, err, done); in svm_vcpu_snapshot()
2781 SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, err, done); in svm_vcpu_snapshot()
2782 SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, err, done); in svm_vcpu_snapshot()
2784 /* Restore EPTGEN field - EPT is Extended Page Table */ in svm_vcpu_snapshot()
2785 SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, err, done); in svm_vcpu_snapshot()
2787 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, err, done); in svm_vcpu_snapshot()
2788 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, err, done); in svm_vcpu_snapshot()
2790 SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, sizeof(vcpu->mtrr), meta, err, done); in svm_vcpu_snapshot()
2793 if (meta->op == VM_SNAPSHOT_RESTORE) in svm_vcpu_snapshot()