Lines matching full:vcpu

294 svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset)  in svm_set_tsc_offset()  argument
298 ctrl = svm_get_vmcb_ctrl(vcpu); in svm_set_tsc_offset()
301 svm_set_dirty(vcpu, VMCB_CACHE_I); in svm_set_tsc_offset()
302 SVM_CTR1(vcpu, "tsc offset changed to %#lx", offset); in svm_set_tsc_offset()
304 vm_set_tsc_offset(vcpu->vcpu, offset); in svm_set_tsc_offset()
354 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
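The comment at source line 354 refers to the SVM MSR permission map (MSRPM): two intercept bits per MSR (even bit = read, odd bit = write), cleared to let the guest access that MSR without a #VMEXIT. A minimal sketch of the index math, following the MSRPM layout documented in the AMD APM; the helper names and region table here are illustrative, not the driver's actual code:

#include <stdint.h>
#include <stddef.h>

/*
 * Locate the intercept bits for an MSR in the 8KB MSR permission map.
 * Each 2KB region covers 8192 MSRs at two bits apiece.
 */
static int
msrpm_index(uint32_t msr, size_t *byteoff, int *bitpos)
{
	uint32_t base, region;

	if (msr <= 0x1fff) {
		base = 0x0;
		region = 0;
	} else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
		base = 0xc0000000;
		region = 1;
	} else if (msr >= 0xc0010000 && msr <= 0xc0011fff) {
		base = 0xc0010000;
		region = 2;
	} else
		return (-1);		/* MSR not covered by the map */

	*byteoff = region * 2048 + (msr - base) / 4;	/* 4 MSRs per byte */
	*bitpos = ((msr - base) % 4) * 2;
	return (0);
}

/* Clearing both bits allows untrapped reads and writes of the MSR. */
static void
msrpm_allow(uint8_t *msrpm, uint32_t msr)
{
	size_t off;
	int bit;

	if (msrpm_index(msr, &off, &bit) == 0)
		msrpm[off] &= ~(3u << bit);
}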
390 svm_get_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask) in svm_get_intercept() argument
396 ctrl = svm_get_vmcb_ctrl(vcpu); in svm_get_intercept()
401 svm_set_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask, int enabled) in svm_set_intercept() argument
408 ctrl = svm_get_vmcb_ctrl(vcpu); in svm_set_intercept()
417 svm_set_dirty(vcpu, VMCB_CACHE_I); in svm_set_intercept()
418 SVM_CTR3(vcpu, "intercept[%d] modified from %#x to %#x", idx, in svm_set_intercept()
424 svm_disable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask) in svm_disable_intercept() argument
427 svm_set_intercept(vcpu, off, bitmask, 0); in svm_disable_intercept()
431 svm_enable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask) in svm_enable_intercept() argument
434 svm_set_intercept(vcpu, off, bitmask, 1); in svm_enable_intercept()
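svm_set_intercept() above (source lines 401-418) flips one bit in an array of 32-bit intercept words in the VMCB control area and dirties the cached intercept state only when the word actually changed. A compilable model of that pattern, with a mock control struct standing in for the real struct vmcb_ctrl:

#include <assert.h>
#include <stdint.h>

#define	NUM_INTCPT	5	/* assumed count of intercept words */

struct mock_vmcb_ctrl {
	uint32_t intercept[NUM_INTCPT];
};

static int
mock_get_intercept(struct mock_vmcb_ctrl *ctrl, int idx, uint32_t bitmask)
{
	assert(idx >= 0 && idx < NUM_INTCPT);
	return ((ctrl->intercept[idx] & bitmask) != 0);
}

static void
mock_set_intercept(struct mock_vmcb_ctrl *ctrl, int idx, uint32_t bitmask,
    int enabled, uint32_t *dirty)
{
	uint32_t oldval;

	assert(idx >= 0 && idx < NUM_INTCPT);
	oldval = ctrl->intercept[idx];
	if (enabled)
		ctrl->intercept[idx] |= bitmask;
	else
		ctrl->intercept[idx] &= ~bitmask;

	/* Dirty the cached intercept state only on a real change. */
	if (ctrl->intercept[idx] != oldval)
		*dirty |= 0x1;	/* stand-in for VMCB_CACHE_I */
}

svm_enable_intercept() and svm_disable_intercept() (source lines 424-434) are then just thin wrappers passing enabled = 1 or 0.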
438 vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa, in vmcb_init() argument
446 ctrl = svm_get_vmcb_ctrl(vcpu); in vmcb_init()
447 state = svm_get_vmcb_state(vcpu); in vmcb_init()
463 svm_disable_intercept(vcpu, VMCB_CR_INTCPT, mask); in vmcb_init()
465 svm_enable_intercept(vcpu, VMCB_CR_INTCPT, mask); in vmcb_init()
472 if (vcpu_trace_exceptions(vcpu->vcpu)) { in vmcb_init()
480 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(n)); in vmcb_init()
483 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); in vmcb_init()
487 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); in vmcb_init()
488 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); in vmcb_init()
489 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); in vmcb_init()
490 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); in vmcb_init()
491 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); in vmcb_init()
492 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); in vmcb_init()
493 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI); in vmcb_init()
494 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN); in vmcb_init()
495 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_FERR_FREEZE); in vmcb_init()
496 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD); in vmcb_init()
497 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA); in vmcb_init()
499 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR); in vmcb_init()
500 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT); in vmcb_init()
506 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD); in vmcb_init()
507 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE); in vmcb_init()
508 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI); in vmcb_init()
509 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI); in vmcb_init()
510 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT); in vmcb_init()
511 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP); in vmcb_init()
512 if (vcpu_trap_wbinvd(vcpu->vcpu)) { in vmcb_init()
513 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, in vmcb_init()
521 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN); in vmcb_init()
617 svm_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid) in svm_vcpu_init()
620 struct svm_vcpu *vcpu; in svm_vcpu_init() local
622 vcpu = malloc(sizeof(*vcpu), M_SVM, M_WAITOK | M_ZERO); in svm_vcpu_init()
623 vcpu->sc = sc; in svm_vcpu_init()
624 vcpu->vcpu = vcpu1; in svm_vcpu_init()
625 vcpu->vcpuid = vcpuid; in svm_vcpu_init()
626 vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE, M_SVM, in svm_vcpu_init()
628 vcpu->nextrip = ~0; in svm_vcpu_init()
629 vcpu->lastcpu = NOCPU; in svm_vcpu_init()
630 vcpu->vmcb_pa = vtophys(vcpu->vmcb); in svm_vcpu_init()
631 vmcb_init(sc, vcpu, vtophys(sc->iopm_bitmap), vtophys(sc->msr_bitmap), in svm_vcpu_init()
633 svm_msr_guest_init(sc, vcpu); in svm_vcpu_init()
634 return (vcpu); in svm_vcpu_init()
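svm_vcpu_init() (source lines 617-634) allocates the per-vCPU state; the VMCB itself must be a page-aligned 4KB block because VMRUN consumes its physical address. A userspace sketch of the same shape, with C11 allocators standing in for malloc_aligned() and vtophys(); the struct fields mirror the ones visible above but are otherwise assumed:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define	PAGE_SIZE	4096
#define	NOCPU		(-1)

struct mock_vmcb { uint8_t bytes[PAGE_SIZE]; };

struct mock_svm_vcpu {
	struct mock_vmcb *vmcb;		/* must be 4KB-aligned for VMRUN */
	uint64_t	nextrip;
	int		lastcpu;
};

static struct mock_svm_vcpu *
mock_vcpu_init(void)
{
	struct mock_svm_vcpu *v;

	v = calloc(1, sizeof(*v));
	if (v == NULL)
		return (NULL);
	/* aligned_alloc() mirrors malloc_aligned(..., PAGE_SIZE, ...). */
	v->vmcb = aligned_alloc(PAGE_SIZE, sizeof(struct mock_vmcb));
	if (v->vmcb == NULL) {
		free(v);
		return (NULL);
	}
	memset(v->vmcb, 0, sizeof(struct mock_vmcb));
	v->nextrip = ~0ull;	/* guarantees a mismatch on first entry */
	v->lastcpu = NOCPU;	/* forces the "migrated" path on first run */
	return (v);
}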
729 svm_inout_str_seginfo(struct svm_vcpu *vcpu, int64_t info1, int in, in svm_inout_str_seginfo() argument
742 error = svm_getdesc(vcpu, vis->seg_name, &vis->seg_desc); in svm_inout_str_seginfo()
783 svm_handle_io(struct svm_vcpu *vcpu, struct vm_exit *vmexit) in svm_handle_io() argument
792 state = svm_get_vmcb_state(vcpu); in svm_handle_io()
793 ctrl = svm_get_vmcb_ctrl(vcpu); in svm_handle_io()
794 regs = svm_get_guest_regctx(vcpu); in svm_handle_io()
820 svm_paging_info(svm_get_vmcb(vcpu), &vis->paging); in svm_handle_io()
826 svm_inout_str_seginfo(vcpu, info1, vmexit->u.inout.in, vis); in svm_handle_io()
937 * Inject an event to vcpu as described in section 15.20, "Event injection".
940 svm_eventinject(struct svm_vcpu *vcpu, int intr_type, int vector, in svm_eventinject() argument
945 ctrl = svm_get_vmcb_ctrl(vcpu); in svm_eventinject()
970 SVM_CTR3(vcpu, "Injecting %s at vector %d errcode %#x", in svm_eventinject()
973 SVM_CTR2(vcpu, "Injecting %s at vector %d", in svm_eventinject()
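svm_eventinject() (source lines 940-973) fills in the VMCB EVENTINJ field described in APM section 15.20. The encoding, per the APM: bits 7:0 vector, bits 10:8 event type, bit 11 error-code-valid, bit 31 valid, bits 63:32 error code. A self-contained sketch of the composition:

#include <stdbool.h>
#include <stdint.h>

#define	EVENTINJ_TYPE_SHIFT	8
#define	EVENTINJ_EC_VALID	(1ull << 11)	/* error code is pushed */
#define	EVENTINJ_VALID		(1ull << 31)	/* injection is pending */

static uint64_t
eventinj_encode(int type, int vector, uint32_t errcode, bool ec_valid)
{
	uint64_t ev;

	ev = (uint64_t)(vector & 0xff);
	ev |= (uint64_t)(type & 0x7) << EVENTINJ_TYPE_SHIFT;
	ev |= EVENTINJ_VALID;
	if (ec_valid) {
		ev |= EVENTINJ_EC_VALID;
		ev |= (uint64_t)errcode << 32;
	}
	return (ev);
}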
979 svm_update_virqinfo(struct svm_vcpu *vcpu) in svm_update_virqinfo() argument
984 vlapic = vm_lapic(vcpu->vcpu); in svm_update_virqinfo()
985 ctrl = svm_get_vmcb_ctrl(vcpu); in svm_update_virqinfo()
996 svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu) in svm_save_intinfo() argument
1001 ctrl = svm_get_vmcb_ctrl(vcpu); in svm_save_intinfo()
1012 SVM_CTR2(vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", intinfo, in svm_save_intinfo()
1014 vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1); in svm_save_intinfo()
1015 vm_exit_intinfo(vcpu->vcpu, intinfo); in svm_save_intinfo()
1020 vintr_intercept_enabled(struct svm_vcpu *vcpu) in vintr_intercept_enabled() argument
1023 return (svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR)); in vintr_intercept_enabled()
1028 enable_intr_window_exiting(struct svm_vcpu *vcpu) in enable_intr_window_exiting() argument
1032 ctrl = svm_get_vmcb_ctrl(vcpu); in enable_intr_window_exiting()
1036 KASSERT(vintr_intercept_enabled(vcpu), in enable_intr_window_exiting()
1041 SVM_CTR0(vcpu, "Enable intr window exiting"); in enable_intr_window_exiting()
1045 svm_set_dirty(vcpu, VMCB_CACHE_TPR); in enable_intr_window_exiting()
1046 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); in enable_intr_window_exiting()
1050 disable_intr_window_exiting(struct svm_vcpu *vcpu) in disable_intr_window_exiting() argument
1054 ctrl = svm_get_vmcb_ctrl(vcpu); in disable_intr_window_exiting()
1057 KASSERT(!vintr_intercept_enabled(vcpu), in disable_intr_window_exiting()
1062 SVM_CTR0(vcpu, "Disable intr window exiting"); in disable_intr_window_exiting()
1065 svm_set_dirty(vcpu, VMCB_CACHE_TPR); in disable_intr_window_exiting()
1066 svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); in disable_intr_window_exiting()
1070 svm_modify_intr_shadow(struct svm_vcpu *vcpu, uint64_t val) in svm_modify_intr_shadow() argument
1075 ctrl = svm_get_vmcb_ctrl(vcpu); in svm_modify_intr_shadow()
1080 SVM_CTR1(vcpu, "Setting intr_shadow to %d", newval); in svm_modify_intr_shadow()
1086 svm_get_intr_shadow(struct svm_vcpu *vcpu, uint64_t *val) in svm_get_intr_shadow() argument
1090 ctrl = svm_get_vmcb_ctrl(vcpu); in svm_get_intr_shadow()
1098 * to track when the vcpu is done handling the NMI.
1101 nmi_blocked(struct svm_vcpu *vcpu) in nmi_blocked() argument
1105 blocked = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); in nmi_blocked()
1110 enable_nmi_blocking(struct svm_vcpu *vcpu) in enable_nmi_blocking() argument
1113 KASSERT(!nmi_blocked(vcpu), ("vNMI already blocked")); in enable_nmi_blocking()
1114 SVM_CTR0(vcpu, "vNMI blocking enabled"); in enable_nmi_blocking()
1115 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); in enable_nmi_blocking()
1119 clear_nmi_blocking(struct svm_vcpu *vcpu) in clear_nmi_blocking() argument
1123 KASSERT(nmi_blocked(vcpu), ("vNMI already unblocked")); in clear_nmi_blocking()
1124 SVM_CTR0(vcpu, "vNMI blocking cleared"); in clear_nmi_blocking()
1126 * When the IRET intercept is cleared the vcpu will attempt to execute in clear_nmi_blocking()
1128 * another NMI into the vcpu before the "iret" has actually executed. in clear_nmi_blocking()
1132 * the vcpu it will be injected into the guest. in clear_nmi_blocking()
1136 svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); in clear_nmi_blocking()
1142 error = svm_modify_intr_shadow(vcpu, 1); in clear_nmi_blocking()
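Source lines 1101-1142 implement virtual NMI blocking with the IRET intercept: the intercept is armed when an NMI is injected, and "blocked" simply means the intercept is still set. When the guest finally executes iret, clear_nmi_blocking() drops the intercept but raises the interrupt shadow, so the iret retires before the next NMI can be delivered. A compact model of that state machine, with illustrative names:

#include <stdbool.h>

struct mock_nmi_state {
	bool iret_intercepted;	/* stands in for VMCB_INTCPT_IRET */
	bool intr_shadow;	/* stands in for the VMCB intr shadow */
};

/* NMI injection: arm the IRET intercept to mark blocking. */
static void
mock_inject_nmi(struct mock_nmi_state *s)
{
	s->iret_intercepted = true;
}

static bool
mock_nmi_blocked(const struct mock_nmi_state *s)
{
	return (s->iret_intercepted);
}

/*
 * On the IRET #VMEXIT: drop the intercept, but set the interrupt
 * shadow so the vcpu completes the iret before another NMI lands.
 */
static void
mock_clear_nmi_blocking(struct mock_nmi_state *s)
{
	s->iret_intercepted = false;
	s->intr_shadow = true;
}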
1149 svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval, in svm_write_efer() argument
1157 state = svm_get_vmcb_state(vcpu); in svm_write_efer()
1160 SVM_CTR2(vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval); in svm_write_efer()
1184 if (!vm_cpuid_capability(vcpu->vcpu, VCC_NO_EXECUTE)) in svm_write_efer()
1193 vme = vm_exitinfo(vcpu->vcpu); in svm_write_efer()
1200 if (!vm_cpuid_capability(vcpu->vcpu, VCC_FFXSR)) in svm_write_efer()
1205 if (!vm_cpuid_capability(vcpu->vcpu, VCC_TCE)) in svm_write_efer()
1209 error = svm_setreg(vcpu, VM_REG_GUEST_EFER, newval); in svm_write_efer()
1213 vm_inject_gp(vcpu->vcpu); in svm_write_efer()
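svm_write_efer() (source lines 1149-1213) vets guest EFER writes against the features the virtual CPUID actually exposes (NX, FFXSR, TCE in the checks above) and injects #GP on a mismatch. A sketch of that gating with a hypothetical capability bitmask replacing vm_cpuid_capability(); the EFER bit positions are architectural:

#include <stdbool.h>
#include <stdint.h>

#define	EFER_NXE	(1ull << 11)	/* no-execute enable */
#define	EFER_FFXSR	(1ull << 14)	/* fast FXSAVE/FXRSTOR */
#define	EFER_TCE	(1ull << 15)	/* translation cache extension */

enum { CAP_NX, CAP_FFXSR, CAP_TCE };

/* Returns true if the write may proceed; false means "inject #GP". */
static bool
efer_write_ok(uint64_t newval, uint32_t cpuid_caps)
{
	if ((newval & EFER_NXE) && !(cpuid_caps & (1u << CAP_NX)))
		return (false);
	if ((newval & EFER_FFXSR) && !(cpuid_caps & (1u << CAP_FFXSR)))
		return (false);
	if ((newval & EFER_TCE) && !(cpuid_caps & (1u << CAP_TCE)))
		return (false);
	return (true);
}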
1218 emulate_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num, in emulate_wrmsr() argument
1224 error = lapic_wrmsr(vcpu->vcpu, num, val, retu); in emulate_wrmsr()
1226 error = svm_write_efer(sc, vcpu, val, retu); in emulate_wrmsr()
1228 error = svm_wrmsr(vcpu, num, val, retu); in emulate_wrmsr()
1234 emulate_rdmsr(struct svm_vcpu *vcpu, u_int num, bool *retu) in emulate_rdmsr() argument
1242 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu); in emulate_rdmsr()
1244 error = svm_rdmsr(vcpu, num, &result, retu); in emulate_rdmsr()
1247 state = svm_get_vmcb_state(vcpu); in emulate_rdmsr()
1248 ctx = svm_get_guest_regctx(vcpu); in emulate_rdmsr()
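After a successful emulation, emulate_rdmsr() has to scatter the 64-bit result across the architectural EDX:EAX pair: the VMCB state's RAX takes the low half and the software context's RDX the high half, which is why both state and ctx are fetched at source lines 1247-1248. The split in isolation:

#include <stdint.h>

/* rdmsr returns its result in EDX:EAX; model the register split. */
static void
rdmsr_commit(uint64_t result, uint64_t *rax, uint64_t *rdx)
{
	*rax = result & 0xffffffffull;	/* low 32 bits -> EAX */
	*rdx = result >> 32;		/* high 32 bits -> EDX */
}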
1331 svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu, in svm_vmexit() argument
1343 ctx = svm_get_guest_regctx(vcpu); in svm_vmexit()
1344 vmcb = svm_get_vmcb(vcpu); in svm_vmexit()
1357 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1); in svm_vmexit()
1376 svm_update_virqinfo(vcpu); in svm_vmexit()
1377 svm_save_intinfo(svm_sc, vcpu); in svm_vmexit()
1385 clear_nmi_blocking(vcpu); in svm_vmexit()
1389 vmm_stat_incr(vcpu->vcpu, VMEXIT_VINTR, 1); in svm_vmexit()
1393 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1); in svm_vmexit()
1400 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1); in svm_vmexit()
1410 SVM_CTR0(vcpu, "Vectoring to MCE handler"); in svm_vmexit()
1414 error = svm_setreg(vcpu, VM_REG_GUEST_CR2, info2); in svm_vmexit()
1438 svm_getreg(vcpu, VM_REG_GUEST_DR6, &dr6); in svm_vmexit()
1440 if (stepped && (vcpu->caps & (1 << VM_CAP_RFLAGS_TF))) { in svm_vmexit()
1445 if (vcpu->dbg.popf_sstep) { in svm_vmexit()
1452 vcpu->dbg.popf_sstep = 0; in svm_vmexit()
1459 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, in svm_vmexit()
1461 vcpu->dbg.rflags_tf = rflags & PSL_T; in svm_vmexit()
1462 } else if (vcpu->dbg.pushf_sstep) { in svm_vmexit()
1467 vcpu->dbg.pushf_sstep = 0; in svm_vmexit()
1477 vcpu->dbg.rflags_tf; in svm_vmexit()
1478 svm_paging_info(svm_get_vmcb(vcpu), in svm_vmexit()
1484 error = svm_setreg(vcpu, VM_REG_GUEST_DR6, dr6); in svm_vmexit()
1511 SVM_CTR2(vcpu, "Reset inst_length from %d " in svm_vmexit()
1528 SVM_CTR2(vcpu, "Reflecting exception " in svm_vmexit()
1530 error = vm_inject_exception(vcpu->vcpu, idtvec, in svm_vmexit()
1544 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1); in svm_vmexit()
1546 SVM_CTR2(vcpu, "wrmsr %#x val %#lx", ecx, val); in svm_vmexit()
1547 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) { in svm_vmexit()
1558 SVM_CTR1(vcpu, "rdmsr %#x", ecx); in svm_vmexit()
1559 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1); in svm_vmexit()
1560 if (emulate_rdmsr(vcpu, ecx, &retu)) { in svm_vmexit()
1572 handled = svm_handle_io(vcpu, vmexit); in svm_vmexit()
1573 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1); in svm_vmexit()
1576 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1); in svm_vmexit()
1577 handled = x86_emulate_cpuid(vcpu->vcpu, in svm_vmexit()
1582 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1); in svm_vmexit()
1588 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1); in svm_vmexit()
1593 SVM_CTR2(vcpu, "nested page fault with " in svm_vmexit()
1596 } else if (vm_mem_allocated(vcpu->vcpu, info2)) { in svm_vmexit()
1600 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1); in svm_vmexit()
1601 SVM_CTR3(vcpu, "nested page fault " in svm_vmexit()
1606 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1); in svm_vmexit()
1607 SVM_CTR3(vcpu, "inst_emul fault " in svm_vmexit()
1619 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { in svm_vmexit()
1622 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags); in svm_vmexit()
1626 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, in svm_vmexit()
1629 svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T)); in svm_vmexit()
1632 vcpu->dbg.pushf_sstep = 1; in svm_vmexit()
1638 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { in svm_vmexit()
1641 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags); in svm_vmexit()
1645 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, in svm_vmexit()
1648 svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T)); in svm_vmexit()
1649 vcpu->dbg.popf_sstep = 1; in svm_vmexit()
1664 vm_inject_ud(vcpu->vcpu); in svm_vmexit()
1673 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1); in svm_vmexit()
1677 SVM_CTR4(vcpu, "%s %s vmexit at %#lx/%d", in svm_vmexit()
1703 svm_inj_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu) in svm_inj_intinfo() argument
1707 if (!vm_entry_intinfo(vcpu->vcpu, &intinfo)) in svm_inj_intinfo()
1713 svm_eventinject(vcpu, VMCB_EXITINTINFO_TYPE(intinfo), in svm_inj_intinfo()
1717 vmm_stat_incr(vcpu->vcpu, VCPU_INTINFO_INJECTED, 1); in svm_inj_intinfo()
1718 SVM_CTR1(vcpu, "Injected entry intinfo: %#lx", intinfo); in svm_inj_intinfo()
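svm_inj_intinfo() (source lines 1703-1718) replays event information captured in EXITINTINFO at an earlier #VMEXIT back through EVENTINJ on the next entry; the two fields share one layout, so the replay is a straight decompose-and-reinject. A sketch of the decomposition:

#include <stdbool.h>
#include <stdint.h>

struct event {
	int		type;		/* bits 10:8 */
	int		vector;		/* bits 7:0 */
	bool		ec_valid;	/* bit 11 */
	uint32_t	errcode;	/* bits 63:32 */
};

/* EXITINTINFO uses the EVENTINJ encoding; pick a pending event apart. */
static struct event
decode_intinfo(uint64_t intinfo)
{
	struct event ev;

	ev.vector = (int)(intinfo & 0xff);
	ev.type = (int)((intinfo >> 8) & 0x7);
	ev.ec_valid = (intinfo & (1ull << 11)) != 0;
	ev.errcode = (uint32_t)(intinfo >> 32);
	return (ev);
}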
1725 svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu, in svm_inj_interrupts() argument
1734 if (vcpu->caps & (1 << VM_CAP_MASK_HWINTR)) { in svm_inj_interrupts()
1738 state = svm_get_vmcb_state(vcpu); in svm_inj_interrupts()
1739 ctrl = svm_get_vmcb_ctrl(vcpu); in svm_inj_interrupts()
1743 if (vcpu->nextrip != state->rip) { in svm_inj_interrupts()
1745 SVM_CTR2(vcpu, "Guest interrupt blocking " in svm_inj_interrupts()
1747 vcpu->nextrip, state->rip); in svm_inj_interrupts()
1751 * Inject pending events or exceptions for this vcpu. in svm_inj_interrupts()
1759 svm_inj_intinfo(sc, vcpu); in svm_inj_interrupts()
1762 if (vm_nmi_pending(vcpu->vcpu)) { in svm_inj_interrupts()
1763 if (nmi_blocked(vcpu)) { in svm_inj_interrupts()
1768 SVM_CTR0(vcpu, "Cannot inject NMI due " in svm_inj_interrupts()
1772 * Can't inject an NMI if the vcpu is in an intr_shadow. in svm_inj_interrupts()
1774 SVM_CTR0(vcpu, "Cannot inject NMI due to " in svm_inj_interrupts()
1783 SVM_CTR1(vcpu, "Cannot inject NMI due to " in svm_inj_interrupts()
1798 vm_nmi_clear(vcpu->vcpu); in svm_inj_interrupts()
1801 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_NMI, in svm_inj_interrupts()
1805 enable_nmi_blocking(vcpu); in svm_inj_interrupts()
1807 SVM_CTR0(vcpu, "Injecting vNMI"); in svm_inj_interrupts()
1811 extint_pending = vm_extint_pending(vcpu->vcpu); in svm_inj_interrupts()
1829 SVM_CTR2(vcpu, "Cannot inject vector %d due to " in svm_inj_interrupts()
1836 SVM_CTR1(vcpu, "Cannot inject vector %d due to " in svm_inj_interrupts()
1843 SVM_CTR2(vcpu, "Cannot inject vector %d due to " in svm_inj_interrupts()
1849 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); in svm_inj_interrupts()
1854 vm_extint_clear(vcpu->vcpu); in svm_inj_interrupts()
1859 * Force a VM-exit as soon as the vcpu is ready to accept another in svm_inj_interrupts()
1880 SVM_CTR2(vcpu, "VMCB V_TPR changed from %#x to %#x", in svm_inj_interrupts()
1883 svm_set_dirty(vcpu, VMCB_CACHE_TPR); in svm_inj_interrupts()
1901 enable_intr_window_exiting(vcpu); in svm_inj_interrupts()
1903 disable_intr_window_exiting(vcpu); in svm_inj_interrupts()
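The tail of svm_inj_interrupts() (source lines 1859-1903) decides whether to arm the VINTR "interrupt window" intercept: it is needed only when something deliverable is still pending but could not be injected on this entry, so the guest exits the moment it becomes interruptible. A deliberately simplified model of the decision; the real code also weighs V_TPR and NMI blocking:

#include <stdbool.h>

static bool
need_intr_window(bool event_pending, bool intr_shadow, bool rflags_if)
{
	if (!event_pending)
		return (false);
	/* Pending but blocked by a shadow or IF=0: ask for a window. */
	return (intr_shadow || !rflags_if);
}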
1925 svm_pmap_activate(struct svm_vcpu *vcpu, pmap_t pmap) in svm_pmap_activate() argument
1936 ctrl = svm_get_vmcb_ctrl(vcpu); in svm_pmap_activate()
1939 * The TLB entries associated with the vcpu's ASID are not valid in svm_pmap_activate()
1942 * 1. The vcpu's ASID generation is different than the host cpu's in svm_pmap_activate()
1943 * ASID generation. This happens when the vcpu migrates to a new in svm_pmap_activate()
1961 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is in svm_pmap_activate()
1977 if (vcpu->asid.gen != asid[cpu].gen) { in svm_pmap_activate()
1979 } else if (vcpu->eptgen != eptgen) { in svm_pmap_activate()
2007 vcpu->asid.gen = asid[cpu].gen; in svm_pmap_activate()
2008 vcpu->asid.num = asid[cpu].num; in svm_pmap_activate()
2010 ctrl->asid = vcpu->asid.num; in svm_pmap_activate()
2011 svm_set_dirty(vcpu, VMCB_CACHE_ASID); in svm_pmap_activate()
2020 vcpu->eptgen = eptgen; in svm_pmap_activate()
2023 KASSERT(ctrl->asid == vcpu->asid.num, in svm_pmap_activate()
2024 ("ASID mismatch: %u/%u", ctrl->asid, vcpu->asid.num)); in svm_pmap_activate()
2103 * Start vcpu with specified RIP.
2110 struct svm_vcpu *vcpu; in svm_run() local
2119 vcpu = vcpui; in svm_run()
2120 svm_sc = vcpu->sc; in svm_run()
2121 state = svm_get_vmcb_state(vcpu); in svm_run()
2122 ctrl = svm_get_vmcb_ctrl(vcpu); in svm_run()
2123 vmexit = vm_exitinfo(vcpu->vcpu); in svm_run()
2124 vlapic = vm_lapic(vcpu->vcpu); in svm_run()
2126 gctx = svm_get_guest_regctx(vcpu); in svm_run()
2127 vmcb_pa = vcpu->vmcb_pa; in svm_run()
2129 if (vcpu->lastcpu != curcpu) { in svm_run()
2133 vcpu->asid.gen = 0; in svm_run()
2138 svm_set_dirty(vcpu, 0xffffffff); in svm_run()
2142 * Setting 'vcpu->lastcpu' here is bit premature because in svm_run()
2147 * This works for now but any new side-effects of vcpu in svm_run()
2150 vcpu->lastcpu = curcpu; in svm_run()
2151 vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1); in svm_run()
2154 svm_msr_guest_enter(vcpu); in svm_run()
2171 vm_exit_suspended(vcpu->vcpu, state->rip); in svm_run()
2175 if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) { in svm_run()
2177 vm_exit_rendezvous(vcpu->vcpu, state->rip); in svm_run()
2183 vm_exit_reqidle(vcpu->vcpu, state->rip); in svm_run()
2188 if (vcpu_should_yield(vcpu->vcpu)) { in svm_run()
2190 vm_exit_astpending(vcpu->vcpu, state->rip); in svm_run()
2194 if (vcpu_debugged(vcpu->vcpu)) { in svm_run()
2196 vm_exit_debug(vcpu->vcpu, state->rip); in svm_run()
2209 svm_inj_interrupts(svm_sc, vcpu, vlapic); in svm_run()
2213 * ensure that the vcpu does not use stale TLB mappings. in svm_run()
2215 svm_pmap_activate(vcpu, pmap); in svm_run()
2217 ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty; in svm_run()
2218 vcpu->dirty = 0; in svm_run()
2219 SVM_CTR1(vcpu, "vmcb clean %#x", ctrl->vmcb_clean); in svm_run()
2222 SVM_CTR1(vcpu, "Resume execution at %#lx", state->rip); in svm_run()
2243 vcpu->nextrip = state->rip; in svm_run()
2246 handled = svm_vmexit(svm_sc, vcpu, vmexit); in svm_run()
2249 svm_msr_guest_exit(vcpu); in svm_run()
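Inside svm_run(), source lines 2217-2219 compute the VMCB clean bits handed to VMRUN: every cache category dirtied since the last run is removed from the clean mask, telling the processor which cached VMCB state must be reloaded, and the dirty set is then reset for the next iteration. The computation in isolation:

#include <stdint.h>

/*
 * Clean bits for VMRUN: everything the implementation treats as
 * cacheable, minus whatever was dirtied since the last run.
 */
static uint32_t
vmcb_clean_for_run(uint32_t cacheable, uint32_t *dirty)
{
	uint32_t clean;

	clean = cacheable & ~*dirty;
	*dirty = 0;
	return (clean);
}

This also explains the svm_set_dirty(vcpu, 0xffffffff) calls at source lines 2138 and 2794: after a cpu migration or a snapshot restore no cached VMCB state can be trusted, so everything is marked dirty and the next VMRUN reloads it all.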
2257 struct svm_vcpu *vcpu = vcpui; in svm_vcpu_cleanup() local
2259 free(vcpu->vmcb, M_SVM); in svm_vcpu_cleanup()
2260 free(vcpu, M_SVM); in svm_vcpu_cleanup()
2322 struct svm_vcpu *vcpu; in svm_getreg() local
2325 vcpu = vcpui; in svm_getreg()
2328 return (svm_get_intr_shadow(vcpu, val)); in svm_getreg()
2331 if (vmcb_read(vcpu, ident, val) == 0) { in svm_getreg()
2335 reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident); in svm_getreg()
2342 SVM_CTR1(vcpu, "svm_getreg: unknown register %#x", ident); in svm_getreg()
2349 struct svm_vcpu *vcpu; in svm_setreg() local
2352 vcpu = vcpui; in svm_setreg()
2355 return (svm_modify_intr_shadow(vcpu, val)); in svm_setreg()
2360 if (vmcb_write(vcpu, ident, val) == 0) { in svm_setreg()
2365 reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident); in svm_setreg()
2379 * vcpu's ASID. This needs to be treated differently depending on in svm_setreg()
2383 SVM_CTR1(vcpu, "svm_setreg: unknown register %#x", ident); in svm_setreg()
2431 struct svm_vcpu *vcpu; in svm_setcap() local
2435 vcpu = vcpui; in svm_setcap()
2440 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, in svm_setcap()
2444 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, in svm_setcap()
2453 svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP), val); in svm_setcap()
2456 vlapic = vm_lapic(vcpu->vcpu); in svm_setcap()
2460 vcpu->caps &= ~(1 << VM_CAP_MASK_HWINTR); in svm_setcap()
2461 vcpu->caps |= (val << VM_CAP_MASK_HWINTR); in svm_setcap()
2467 if (svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags)) { in svm_setcap()
2473 vcpu->dbg.rflags_tf = rflags & PSL_T; in svm_setcap()
2475 if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, in svm_setcap()
2480 vcpu->caps |= (1 << VM_CAP_RFLAGS_TF); in svm_setcap()
2483 * Restore shadowed RFLAGS.TF only if vCPU was in svm_setcap()
2486 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) { in svm_setcap()
2488 rflags |= vcpu->dbg.rflags_tf; in svm_setcap()
2489 vcpu->dbg.rflags_tf = 0; in svm_setcap()
2491 if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, in svm_setcap()
2496 vcpu->caps &= ~(1 << VM_CAP_RFLAGS_TF); in svm_setcap()
2500 svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_DB), val); in svm_setcap()
2501 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_POPF, in svm_setcap()
2503 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_PUSHF, in svm_setcap()
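The VM_CAP_RFLAGS_TF handling in svm_setcap() (source lines 2467-2503) implements single stepping by borrowing the guest's RFLAGS.TF: the guest's own TF value is shadowed in vcpu->dbg.rflags_tf, TF is forced on, and #DB plus PUSHF/POPF are intercepted so the guest can neither observe nor clobber the borrowed bit. A sketch of the shadow bookkeeping:

#include <stdbool.h>
#include <stdint.h>

#define	PSL_T	0x100ull	/* RFLAGS trap flag */

struct sstep_state {
	uint64_t rflags_tf;	/* guest's own TF, shadowed */
	bool	 active;
};

/* Enable: remember the guest's TF, then run with TF forced on. */
static uint64_t
sstep_enable(struct sstep_state *s, uint64_t rflags)
{
	s->rflags_tf = rflags & PSL_T;
	s->active = true;
	return (rflags | PSL_T);
}

/* Disable: put the guest's original TF back. */
static uint64_t
sstep_disable(struct sstep_state *s, uint64_t rflags)
{
	uint64_t out;

	out = (rflags & ~PSL_T) | s->rflags_tf;
	s->rflags_tf = 0;
	s->active = false;
	return (out);
}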
2517 struct svm_vcpu *vcpu; in svm_getcap() local
2521 vcpu = vcpui; in svm_getcap()
2526 *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, in svm_getcap()
2530 *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, in svm_getcap()
2537 *retval = svm_get_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP)); in svm_getcap()
2540 vlapic = vm_lapic(vcpu->vcpu); in svm_getcap()
2544 *retval = !!(vcpu->caps & (1 << VM_CAP_RFLAGS_TF)); in svm_getcap()
2547 *retval = !!(vcpu->caps & (1 << VM_CAP_MASK_HWINTR)); in svm_getcap()
2571 struct svm_vcpu *vcpu; in svm_vlapic_init() local
2574 vcpu = vcpui; in svm_vlapic_init()
2576 vlapic->vm = vcpu->sc->vm; in svm_vlapic_init()
2577 vlapic->vcpu = vcpu->vcpu; in svm_vlapic_init()
2578 vlapic->vcpuid = vcpu->vcpuid; in svm_vlapic_init()
2600 struct svm_vcpu *vcpu; in svm_vcpu_snapshot() local
2603 vcpu = vcpui; in svm_vcpu_snapshot()
2606 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in svm_vcpu_snapshot()
2608 printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm), in svm_vcpu_snapshot()
2609 vcpu->vcpuid); in svm_vcpu_snapshot()
2613 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR0, meta); in svm_vcpu_snapshot()
2614 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR2, meta); in svm_vcpu_snapshot()
2615 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR3, meta); in svm_vcpu_snapshot()
2616 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR4, meta); in svm_vcpu_snapshot()
2618 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR6, meta); in svm_vcpu_snapshot()
2619 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR7, meta); in svm_vcpu_snapshot()
2621 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RAX, meta); in svm_vcpu_snapshot()
2623 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RSP, meta); in svm_vcpu_snapshot()
2624 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RIP, meta); in svm_vcpu_snapshot()
2625 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RFLAGS, meta); in svm_vcpu_snapshot()
2629 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_ES, meta); in svm_vcpu_snapshot()
2630 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_ES, meta); in svm_vcpu_snapshot()
2633 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CS, meta); in svm_vcpu_snapshot()
2634 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_CS, meta); in svm_vcpu_snapshot()
2637 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_SS, meta); in svm_vcpu_snapshot()
2638 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_SS, meta); in svm_vcpu_snapshot()
2641 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DS, meta); in svm_vcpu_snapshot()
2642 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_DS, meta); in svm_vcpu_snapshot()
2645 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_FS, meta); in svm_vcpu_snapshot()
2646 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_FS, meta); in svm_vcpu_snapshot()
2649 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_GS, meta); in svm_vcpu_snapshot()
2650 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GS, meta); in svm_vcpu_snapshot()
2653 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_TR, meta); in svm_vcpu_snapshot()
2654 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_TR, meta); in svm_vcpu_snapshot()
2657 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_LDTR, meta); in svm_vcpu_snapshot()
2658 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_LDTR, meta); in svm_vcpu_snapshot()
2661 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_EFER, meta); in svm_vcpu_snapshot()
2664 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_IDTR, meta); in svm_vcpu_snapshot()
2665 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GDTR, meta); in svm_vcpu_snapshot()
2668 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_INTR_SHADOW, meta); in svm_vcpu_snapshot()
2670 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2672 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2674 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2676 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2678 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2681 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2683 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2686 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2689 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2692 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2695 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2697 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2699 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2701 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2704 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2707 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2709 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2711 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2713 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2716 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2719 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2721 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2723 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2726 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2729 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2732 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2734 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2736 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2739 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2742 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2744 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2746 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2748 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2750 err += vmcb_snapshot_any(vcpu, in svm_vcpu_snapshot()
2756 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, err, done); in svm_vcpu_snapshot()
2757 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, err, done); in svm_vcpu_snapshot()
2758 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, err, done); in svm_vcpu_snapshot()
2759 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, err, done); in svm_vcpu_snapshot()
2760 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, err, done); in svm_vcpu_snapshot()
2761 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, err, done); in svm_vcpu_snapshot()
2762 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, err, done); in svm_vcpu_snapshot()
2763 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, err, done); in svm_vcpu_snapshot()
2764 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, err, done); in svm_vcpu_snapshot()
2765 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, err, done); in svm_vcpu_snapshot()
2766 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, err, done); in svm_vcpu_snapshot()
2767 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, err, done); in svm_vcpu_snapshot()
2768 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, err, done); in svm_vcpu_snapshot()
2769 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, err, done); in svm_vcpu_snapshot()
2770 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, err, done); in svm_vcpu_snapshot()
2771 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, err, done); in svm_vcpu_snapshot()
2772 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, err, done); in svm_vcpu_snapshot()
2773 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, err, done); in svm_vcpu_snapshot()
2778 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, err, done); in svm_vcpu_snapshot()
2781 SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, err, done); in svm_vcpu_snapshot()
2782 SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, err, done); in svm_vcpu_snapshot()
2785 SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, err, done); in svm_vcpu_snapshot()
2787 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, err, done); in svm_vcpu_snapshot()
2788 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, err, done); in svm_vcpu_snapshot()
2790 SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, sizeof(vcpu->mtrr), meta, err, done); in svm_vcpu_snapshot()
2794 svm_set_dirty(vcpu, 0xffffffff); in svm_vcpu_snapshot()
2803 struct svm_vcpu *vcpu = vcpui; in svm_restore_tsc() local
2805 svm_set_tsc_offset(vcpu, offset); in svm_restore_tsc()