Lines Matching full:vcpu

818 * bitmap is currently per-VM rather than per-vCPU while the in vmx_modinit()
820 * per-vCPU basis). in vmx_modinit()
1132 vmx_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid) in vmx_vcpu_init()
1136 struct vmx_vcpu *vcpu; in vmx_vcpu_init() local
1143 vcpu = malloc(sizeof(*vcpu), M_VMX, M_WAITOK | M_ZERO); in vmx_vcpu_init()
1144 vcpu->vmx = vmx; in vmx_vcpu_init()
1145 vcpu->vcpu = vcpu1; in vmx_vcpu_init()
1146 vcpu->vcpuid = vcpuid; in vmx_vcpu_init()
1147 vcpu->vmcs = malloc_aligned(sizeof(*vmcs), PAGE_SIZE, M_VMX, in vmx_vcpu_init()
1149 vcpu->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_VMX, in vmx_vcpu_init()
1151 vcpu->pir_desc = malloc_aligned(sizeof(*vcpu->pir_desc), 64, M_VMX, in vmx_vcpu_init()
1154 vmcs = vcpu->vmcs; in vmx_vcpu_init()
1158 panic("vmx_init: vmclear error %d on vcpu %d\n", in vmx_vcpu_init()
1162 vmx_msr_guest_init(vmx, vcpu); in vmx_vcpu_init()
1169 error += vmwrite(VMCS_HOST_RSP, (u_long)&vcpu->ctx); in vmx_vcpu_init()
1173 if (vcpu_trap_wbinvd(vcpu->vcpu)) { in vmx_vcpu_init()
1193 if (vcpu_trace_exceptions(vcpu->vcpu)) in vmx_vcpu_init()
1199 vcpu->ctx.guest_dr6 = DBREG_DR6_RESERVED1; in vmx_vcpu_init()
1203 error += vmwrite(VMCS_VIRTUAL_APIC, vtophys(vcpu->apic_page)); in vmx_vcpu_init()
1215 error += vmwrite(VMCS_PIR_DESC, vtophys(vcpu->pir_desc)); in vmx_vcpu_init()
1220 vcpu->cap.set = 0; in vmx_vcpu_init()
1221 vcpu->cap.set |= cap_rdpid != 0 ? 1 << VM_CAP_RDPID : 0; in vmx_vcpu_init()
1222 vcpu->cap.set |= cap_rdtscp != 0 ? 1 << VM_CAP_RDTSCP : 0; in vmx_vcpu_init()
1223 vcpu->cap.proc_ctls = procbased_ctls; in vmx_vcpu_init()
1224 vcpu->cap.proc_ctls2 = procbased_ctls2; in vmx_vcpu_init()
1225 vcpu->cap.exc_bitmap = exc_bitmap; in vmx_vcpu_init()
1227 vcpu->state.nextrip = ~0; in vmx_vcpu_init()
1228 vcpu->state.lastcpu = NOCPU; in vmx_vcpu_init()
1229 vcpu->state.vpid = vpid; in vmx_vcpu_init()
1245 vcpu->ctx.pmap = vmx->pmap; in vmx_vcpu_init()
1247 return (vcpu); in vmx_vcpu_init()
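
The vmx_vcpu_init() matches above show the per-vCPU allocation discipline: the VMCS and the virtual-APIC page must be page-aligned and the posted-interrupt descriptor 64-byte aligned, because the processor consumes all three by physical address (see the vtophys() writes further down). A minimal sketch of that pattern, using stand-in types and a hypothetical malloc tag rather than the real bhyve structures:

/*
 * Sketch only: the stub types mirror the alignment rules of the real
 * allocations in vmx_vcpu_init(); M_VMX_SK is a hypothetical tag.
 */
#include <sys/param.h>
#include <sys/malloc.h>

struct vmcs_sk     { char page[PAGE_SIZE]; };	/* hardware VMCS region */
struct pir_desc_sk { uint64_t pir[4]; uint64_t pending; uint64_t unused[3]; };

struct vmx_vcpu_sk {
	struct vmcs_sk		*vmcs;
	void			*apic_page;	/* virtual-APIC page */
	struct pir_desc_sk	*pir_desc;	/* posted-interrupt descriptor */
};

static MALLOC_DEFINE(M_VMX_SK, "vmx_sk", "vmx sketch allocations");

static struct vmx_vcpu_sk *
vcpu_alloc_sketch(void)
{
	struct vmx_vcpu_sk *vcpu;

	vcpu = malloc(sizeof(*vcpu), M_VMX_SK, M_WAITOK | M_ZERO);
	/* The CPU dereferences these by physical address, so align them. */
	vcpu->vmcs = malloc_aligned(sizeof(*vcpu->vmcs), PAGE_SIZE,
	    M_VMX_SK, M_WAITOK | M_ZERO);
	vcpu->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE,
	    M_VMX_SK, M_WAITOK | M_ZERO);
	/* The SDM requires 64-byte alignment for the PIR descriptor. */
	vcpu->pir_desc = malloc_aligned(sizeof(*vcpu->pir_desc), 64,
	    M_VMX_SK, M_WAITOK | M_ZERO);
	return (vcpu);
}

The matching vmx_vcpu_cleanup() near the end of this listing frees the same allocations after releasing the VPID.
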
1251 vmx_handle_cpuid(struct vmx_vcpu *vcpu, struct vmxctx *vmxctx) in vmx_handle_cpuid() argument
1255 handled = x86_emulate_cpuid(vcpu->vcpu, (uint64_t *)&vmxctx->guest_rax, in vmx_handle_cpuid()
1262 vmx_run_trace(struct vmx_vcpu *vcpu) in vmx_run_trace() argument
1264 VMX_CTR1(vcpu, "Resume execution at %#lx", vmcs_guest_rip()); in vmx_run_trace()
1268 vmx_exit_trace(struct vmx_vcpu *vcpu, uint64_t rip, uint32_t exit_reason, in vmx_exit_trace() argument
1271 VMX_CTR3(vcpu, "%s %s vmexit at 0x%0lx", in vmx_exit_trace()
1277 vmx_astpending_trace(struct vmx_vcpu *vcpu, uint64_t rip) in vmx_astpending_trace() argument
1279 VMX_CTR1(vcpu, "astpending vmexit at 0x%0lx", rip); in vmx_astpending_trace()
1289 vmx_invvpid(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap, int running) in vmx_invvpid() argument
1294 vmxstate = &vcpu->state; in vmx_invvpid()
1302 * This will invalidate TLB entries tagged with the vcpu's in vmx_invvpid()
1309 KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside " in vmx_invvpid()
1310 "critical section", __func__, vcpu->vcpuid)); in vmx_invvpid()
1315 * We do this because this vcpu was executing on a different host in vmx_invvpid()
1322 * move the thread associated with this vcpu between host cpus. in vmx_invvpid()
1333 vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_DONE, 1); in vmx_invvpid()
1341 vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_SAVED, 1); in vmx_invvpid()
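
vmx_invvpid() only pays for a hardware invalidation (counted as VCPU_INVVPID_DONE) when the vCPU's pmap generation shows its TLB tags may be stale, typically after migrating between host CPUs; otherwise the flush is deferred and counted as VCPU_INVVPID_SAVED. A hedged sketch of the underlying INVVPID wrapper, with the descriptor layout taken from the Intel SDM (the real helper lives in the vmx headers; names here are illustrative):

#include <sys/types.h>

/* INVVPID descriptor layout per the Intel SDM, vol. 3. */
struct invvpid_desc_sk {
	uint16_t	vpid;
	uint16_t	_res1;
	uint32_t	_res2;
	uint64_t	linear_addr;
};

#define	INVVPID_SINGLE_CONTEXT	1UL	/* flush all entries for one VPID */

static __inline void
invvpid_sketch(uint64_t type, uint16_t vpid)
{
	struct invvpid_desc_sk desc = { .vpid = vpid };

	/* Faults if the type is unsupported or the descriptor is bad. */
	__asm __volatile("invvpid %0, %1"
	    : : "m" (desc), "r" (type) : "memory");
}
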
1346 vmx_set_pcpu_defaults(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap) in vmx_set_pcpu_defaults() argument
1350 vmxstate = &vcpu->state; in vmx_set_pcpu_defaults()
1356 vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1); in vmx_set_pcpu_defaults()
1361 vmx_invvpid(vmx, vcpu, pmap, 1); in vmx_set_pcpu_defaults()
1370 vmx_set_int_window_exiting(struct vmx_vcpu *vcpu) in vmx_set_int_window_exiting() argument
1373 if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) { in vmx_set_int_window_exiting()
1374 vcpu->cap.proc_ctls |= PROCBASED_INT_WINDOW_EXITING; in vmx_set_int_window_exiting()
1375 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_set_int_window_exiting()
1376 VMX_CTR0(vcpu, "Enabling interrupt window exiting"); in vmx_set_int_window_exiting()
1381 vmx_clear_int_window_exiting(struct vmx_vcpu *vcpu) in vmx_clear_int_window_exiting() argument
1384 KASSERT((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0, in vmx_clear_int_window_exiting()
1385 ("intr_window_exiting not set: %#x", vcpu->cap.proc_ctls)); in vmx_clear_int_window_exiting()
1386 vcpu->cap.proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING; in vmx_clear_int_window_exiting()
1387 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_clear_int_window_exiting()
1388 VMX_CTR0(vcpu, "Disabling interrupt window exiting"); in vmx_clear_int_window_exiting()
1392 vmx_set_nmi_window_exiting(struct vmx_vcpu *vcpu) in vmx_set_nmi_window_exiting() argument
1395 if ((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) { in vmx_set_nmi_window_exiting()
1396 vcpu->cap.proc_ctls |= PROCBASED_NMI_WINDOW_EXITING; in vmx_set_nmi_window_exiting()
1397 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_set_nmi_window_exiting()
1398 VMX_CTR0(vcpu, "Enabling NMI window exiting"); in vmx_set_nmi_window_exiting()
1403 vmx_clear_nmi_window_exiting(struct vmx_vcpu *vcpu) in vmx_clear_nmi_window_exiting() argument
1406 KASSERT((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0, in vmx_clear_nmi_window_exiting()
1407 ("nmi_window_exiting not set %#x", vcpu->cap.proc_ctls)); in vmx_clear_nmi_window_exiting()
1408 vcpu->cap.proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING; in vmx_clear_nmi_window_exiting()
1409 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_clear_nmi_window_exiting()
1410 VMX_CTR0(vcpu, "Disabling NMI window exiting"); in vmx_clear_nmi_window_exiting()
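
All four window-exiting helpers above share one idiom: update the cached primary processor-based controls in vcpu->cap.proc_ctls first, then write the cache back to the VMCS, so software and hardware state never diverge; the set side is a no-op if the bit is already on, while the clear side asserts it was set. A condensed sketch of that idiom (the vmcs_write stand-in is a no-op here; 0x4002 is the SDM encoding of the primary controls field, and interrupt-window exiting is bit 2):

#include <sys/param.h>
#include <sys/systm.h>

#define	PROCBASED_INT_WINDOW_EXITING_SK	(1U << 2)
#define	VMCS_PRI_PROC_BASED_CTLS_SK	0x4002

struct cap_sk { uint32_t proc_ctls; };

/* No-op stand-in for vmcs_write(); the real one does a vmwrite. */
static void
vmcs_write_sk(uint32_t encoding, uint32_t val)
{
	(void)encoding; (void)val;
}

static void
set_int_window_exiting_sketch(struct cap_sk *cap)
{
	if ((cap->proc_ctls & PROCBASED_INT_WINDOW_EXITING_SK) == 0) {
		cap->proc_ctls |= PROCBASED_INT_WINDOW_EXITING_SK;
		vmcs_write_sk(VMCS_PRI_PROC_BASED_CTLS_SK, cap->proc_ctls);
	}
}

static void
clear_int_window_exiting_sketch(struct cap_sk *cap)
{
	KASSERT((cap->proc_ctls & PROCBASED_INT_WINDOW_EXITING_SK) != 0,
	    ("interrupt window exiting not set"));
	cap->proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING_SK;
	vmcs_write_sk(VMCS_PRI_PROC_BASED_CTLS_SK, cap->proc_ctls);
}
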
1414 vmx_set_tsc_offset(struct vmx_vcpu *vcpu, uint64_t offset) in vmx_set_tsc_offset() argument
1418 if ((vcpu->cap.proc_ctls & PROCBASED_TSC_OFFSET) == 0) { in vmx_set_tsc_offset()
1419 vcpu->cap.proc_ctls |= PROCBASED_TSC_OFFSET; in vmx_set_tsc_offset()
1420 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_set_tsc_offset()
1421 VMX_CTR0(vcpu, "Enabling TSC offsetting"); in vmx_set_tsc_offset()
1427 vm_set_tsc_offset(vcpu->vcpu, offset); in vmx_set_tsc_offset()
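
vmx_set_tsc_offset() enables the "use TSC offsetting" control lazily on first use and mirrors the offset into common vmm state with vm_set_tsc_offset() so it can be reapplied on snapshot restore (see vmx_restore_tsc() at the end of this listing). Ignoring TSC scaling, the architectural relation is guest_tsc = host_tsc + offset, so a restore recomputes the offset from the saved guest value; a one-function sketch of that arithmetic:

#include <sys/types.h>

/*
 * Sketch, assuming "use TSC offsetting" without TSC scaling:
 * RDTSC in the guest returns host_tsc + offset.
 */
static inline uint64_t
tsc_offset_for_restore(uint64_t saved_guest_tsc, uint64_t host_tsc_now)
{
	/* Unsigned wraparound is intended; the offset may be "negative". */
	return (saved_guest_tsc - host_tsc_now);
}
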
1438 vmx_inject_nmi(struct vmx_vcpu *vcpu) in vmx_inject_nmi() argument
1457 VMX_CTR0(vcpu, "Injecting vNMI"); in vmx_inject_nmi()
1460 vm_nmi_clear(vcpu->vcpu); in vmx_inject_nmi()
1464 vmx_inject_interrupts(struct vmx_vcpu *vcpu, struct vlapic *vlapic, in vmx_inject_interrupts() argument
1471 if (vcpu->cap.set & (1 << VM_CAP_MASK_HWINTR)) { in vmx_inject_interrupts()
1475 if (vcpu->state.nextrip != guestrip) { in vmx_inject_interrupts()
1478 VMX_CTR2(vcpu, "Guest interrupt blocking " in vmx_inject_interrupts()
1480 vcpu->state.nextrip, guestrip); in vmx_inject_interrupts()
1486 if (vm_entry_intinfo(vcpu->vcpu, &entryinfo)) { in vmx_inject_interrupts()
1511 if (vm_nmi_pending(vcpu->vcpu)) { in vmx_inject_interrupts()
1528 vmx_inject_nmi(vcpu); in vmx_inject_interrupts()
1531 VMX_CTR1(vcpu, "Cannot inject NMI " in vmx_inject_interrupts()
1535 VMX_CTR1(vcpu, "Cannot inject NMI due to " in vmx_inject_interrupts()
1540 vmx_set_nmi_window_exiting(vcpu); in vmx_inject_interrupts()
1543 extint_pending = vm_extint_pending(vcpu->vcpu); in vmx_inject_interrupts()
1555 if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) { in vmx_inject_interrupts()
1556 VMX_CTR0(vcpu, "Skip interrupt injection due to " in vmx_inject_interrupts()
1576 vatpic_pending_intr(vcpu->vmx->vm, &vector); in vmx_inject_interrupts()
1591 VMX_CTR2(vcpu, "Cannot inject vector %d due to " in vmx_inject_interrupts()
1598 VMX_CTR2(vcpu, "Cannot inject vector %d due to " in vmx_inject_interrupts()
1612 VMX_CTR2(vcpu, "Cannot inject vector %d due to " in vmx_inject_interrupts()
1626 vm_extint_clear(vcpu->vcpu); in vmx_inject_interrupts()
1627 vatpic_intr_accepted(vcpu->vmx->vm, vector); in vmx_inject_interrupts()
1640 vmx_set_int_window_exiting(vcpu); in vmx_inject_interrupts()
1643 VMX_CTR1(vcpu, "Injecting hwintr at vector %d", vector); in vmx_inject_interrupts()
1652 vmx_set_int_window_exiting(vcpu); in vmx_inject_interrupts()
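
vmx_inject_interrupts() ultimately encodes an event into the VM-entry interruption-information field; when injection is blocked (interrupt shadow, RFLAGS.IF clear, or an event already pending) it falls back to vmx_set_int_window_exiting()/vmx_set_nmi_window_exiting() so injection is retried at the first interruptible instant. The field layout below is from the Intel SDM; the helpers themselves are illustrative:

#include <sys/types.h>

/* VM-entry interruption-information field (Intel SDM, vol. 3). */
#define	INTINFO_VECTOR(v)	((uint32_t)(v) & 0xff)	/* bits 7:0 */
#define	INTINFO_TYPE_HWINTR	(0U << 8)		/* bits 10:8 */
#define	INTINFO_TYPE_NMI	(2U << 8)
#define	INTINFO_VALID		(1U << 31)

static inline uint32_t
hwintr_entry_info(int vector)
{
	return (INTINFO_VECTOR(vector) | INTINFO_TYPE_HWINTR | INTINFO_VALID);
}

static inline uint32_t
nmi_entry_info(void)
{
	return (INTINFO_VECTOR(2) | INTINFO_TYPE_NMI | INTINFO_VALID);
}
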
1665 vmx_restore_nmi_blocking(struct vmx_vcpu *vcpu) in vmx_restore_nmi_blocking() argument
1669 VMX_CTR0(vcpu, "Restore Virtual-NMI blocking"); in vmx_restore_nmi_blocking()
1676 vmx_clear_nmi_blocking(struct vmx_vcpu *vcpu) in vmx_clear_nmi_blocking() argument
1680 VMX_CTR0(vcpu, "Clear Virtual-NMI blocking"); in vmx_clear_nmi_blocking()
1687 vmx_assert_nmi_blocking(struct vmx_vcpu *vcpu) in vmx_assert_nmi_blocking() argument
1697 vmx_emulate_xsetbv(struct vmx *vmx, struct vmx_vcpu *vcpu, in vmx_emulate_xsetbv() argument
1704 vmxctx = &vcpu->ctx; in vmx_emulate_xsetbv()
1715 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1721 vm_inject_ud(vcpu->vcpu); in vmx_emulate_xsetbv()
1727 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1732 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1739 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1750 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1760 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
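
The cluster of vm_inject_gp()/vm_inject_ud() calls in vmx_emulate_xsetbv() mirrors the architectural checks for XSETBV: #UD when CR4.OSXSAVE is clear, #GP when ECX selects a nonzero XCR, when XCR0.x87 would be cleared, when an unsupported bit would be set, or when AVX would be left enabled without SSE. A compact sketch of the value check alone (the CPL and CR4 checks are omitted):

#include <stdbool.h>
#include <sys/types.h>

#define	XFEATURE_X87_SK	(1ULL << 0)
#define	XFEATURE_SSE_SK	(1ULL << 1)
#define	XFEATURE_AVX_SK	(1ULL << 2)

/* Returns true when 'newval' is a legal XCR0 under 'limits'. */
static bool
xcr0_is_valid_sketch(uint64_t newval, uint64_t limits)
{
	if ((newval & XFEATURE_X87_SK) == 0)
		return (false);		/* x87 state must stay enabled */
	if ((newval & ~limits) != 0)
		return (false);		/* unsupported or reserved bit */
	if ((newval & XFEATURE_AVX_SK) != 0 &&
	    (newval & XFEATURE_SSE_SK) == 0)
		return (false);		/* AVX requires SSE */
	return (true);
}
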
1774 vmx_get_guest_reg(struct vmx_vcpu *vcpu, int ident) in vmx_get_guest_reg() argument
1778 vmxctx = &vcpu->ctx; in vmx_get_guest_reg()
1819 vmx_set_guest_reg(struct vmx_vcpu *vcpu, int ident, uint64_t regval) in vmx_set_guest_reg() argument
1823 vmxctx = &vcpu->ctx; in vmx_set_guest_reg()
1880 vmx_emulate_cr0_access(struct vmx_vcpu *vcpu, uint64_t exitqual) in vmx_emulate_cr0_access() argument
1888 regval = vmx_get_guest_reg(vcpu, (exitqual >> 8) & 0xf); in vmx_emulate_cr0_access()
1918 vmx_emulate_cr4_access(struct vmx_vcpu *vcpu, uint64_t exitqual) in vmx_emulate_cr4_access() argument
1926 regval = vmx_get_guest_reg(vcpu, (exitqual >> 8) & 0xf); in vmx_emulate_cr4_access()
1938 vmx_emulate_cr8_access(struct vmx *vmx, struct vmx_vcpu *vcpu, in vmx_emulate_cr8_access() argument
1950 vlapic = vm_lapic(vcpu->vcpu); in vmx_emulate_cr8_access()
1954 vmx_set_guest_reg(vcpu, regnum, cr8); in vmx_emulate_cr8_access()
1956 cr8 = vmx_get_guest_reg(vcpu, regnum); in vmx_emulate_cr8_access()
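
vmx_emulate_cr8_access() translates between CR8 and the local APIC's Task Priority Register: architecturally, CR8 holds the priority class, i.e. TPR bits 7:4. The whole mapping fits in two helpers:

#include <sys/types.h>

/* CR8 is the high nibble of the APIC TPR (Intel SDM, vol. 3). */
static inline uint64_t
tpr_to_cr8(uint32_t tpr)
{
	return ((tpr >> 4) & 0xf);
}

static inline uint32_t
cr8_to_tpr(uint64_t cr8)
{
	return ((uint32_t)(cr8 & 0xf) << 4);
}
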
2012 inout_str_index(struct vmx_vcpu *vcpu, int in) in inout_str_index() argument
2019 error = vmx_getreg(vcpu, reg, &val); in inout_str_index()
2025 inout_str_count(struct vmx_vcpu *vcpu, int rep) in inout_str_count() argument
2031 error = vmx_getreg(vcpu, VM_REG_GUEST_RCX, &val); in inout_str_count()
2058 inout_str_seginfo(struct vmx_vcpu *vcpu, uint32_t inst_info, int in, in inout_str_seginfo() argument
2070 error = vmx_getdesc(vcpu, vis->seg_name, &vis->seg_desc); in inout_str_seginfo()
2159 apic_access_virtualization(struct vmx_vcpu *vcpu) in apic_access_virtualization() argument
2163 proc_ctls2 = vcpu->cap.proc_ctls2; in apic_access_virtualization()
2168 x2apic_virtualization(struct vmx_vcpu *vcpu) in x2apic_virtualization() argument
2172 proc_ctls2 = vcpu->cap.proc_ctls2; in x2apic_virtualization()
2177 vmx_handle_apic_write(struct vmx_vcpu *vcpu, struct vlapic *vlapic, in vmx_handle_apic_write() argument
2187 if (!apic_access_virtualization(vcpu)) { in vmx_handle_apic_write()
2195 if (x2apic_virtualization(vcpu) && in vmx_handle_apic_write()
2245 apic_access_fault(struct vmx_vcpu *vcpu, uint64_t gpa) in apic_access_fault() argument
2248 if (apic_access_virtualization(vcpu) && in apic_access_fault()
2256 vmx_handle_apic_access(struct vmx_vcpu *vcpu, struct vm_exit *vmexit) in vmx_handle_apic_access() argument
2261 if (!apic_access_virtualization(vcpu)) in vmx_handle_apic_access()
2342 emulate_wrmsr(struct vmx_vcpu *vcpu, u_int num, uint64_t val, bool *retu) in emulate_wrmsr() argument
2347 error = lapic_wrmsr(vcpu->vcpu, num, val, retu); in emulate_wrmsr()
2349 error = vmx_wrmsr(vcpu, num, val, retu); in emulate_wrmsr()
2355 emulate_rdmsr(struct vmx_vcpu *vcpu, u_int num, bool *retu) in emulate_rdmsr() argument
2363 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu); in emulate_rdmsr()
2365 error = vmx_rdmsr(vcpu, num, &result, retu); in emulate_rdmsr()
2369 vmxctx = &vcpu->ctx; in emulate_rdmsr()
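
After lapic_rdmsr()/vmx_rdmsr() produce a 64-bit result, emulate_rdmsr() stores it back through the vmxctx register file; RDMSR architecturally returns the low half in EAX and the high half in EDX, zero-extended into RAX/RDX. A sketch of the writeback:

#include <sys/types.h>

static inline void
rdmsr_store_result(uint64_t *guest_rax, uint64_t *guest_rdx, uint64_t result)
{
	/* RDMSR zero-extends EAX/EDX into RAX/RDX in 64-bit mode. */
	*guest_rax = (uint32_t)result;
	*guest_rdx = (uint32_t)(result >> 32);
}
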
2382 vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit) in vmx_exit_process() argument
2401 vmxctx = &vcpu->ctx; in vmx_exit_process()
2403 vcpuid = vcpu->vcpuid; in vmx_exit_process()
2410 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1); in vmx_exit_process()
2420 VMX_CTR0(vcpu, "Handling MCE during VM-entry"); in vmx_exit_process()
2441 error = vm_exit_intinfo(vcpu->vcpu, exitintinfo); in vmx_exit_process()
2459 vmx_clear_nmi_blocking(vcpu); in vmx_exit_process()
2461 vmx_assert_nmi_blocking(vcpu); in vmx_exit_process()
2515 VMX_CTR4(vcpu, "task switch reason %d, tss 0x%04x, " in vmx_exit_process()
2521 vmm_stat_incr(vcpu->vcpu, VMEXIT_CR_ACCESS, 1); in vmx_exit_process()
2525 handled = vmx_emulate_cr0_access(vcpu, qual); in vmx_exit_process()
2528 handled = vmx_emulate_cr4_access(vcpu, qual); in vmx_exit_process()
2531 handled = vmx_emulate_cr8_access(vmx, vcpu, qual); in vmx_exit_process()
2536 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1); in vmx_exit_process()
2539 VMX_CTR1(vcpu, "rdmsr 0x%08x", ecx); in vmx_exit_process()
2541 error = emulate_rdmsr(vcpu, ecx, &retu); in vmx_exit_process()
2554 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1); in vmx_exit_process()
2559 VMX_CTR2(vcpu, "wrmsr 0x%08x value 0x%016lx", in vmx_exit_process()
2563 error = emulate_wrmsr(vcpu, ecx, (uint64_t)edx << 32 | eax, in vmx_exit_process()
2578 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1); in vmx_exit_process()
2589 vmm_stat_incr(vcpu->vcpu, VMEXIT_MTRAP, 1); in vmx_exit_process()
2595 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1); in vmx_exit_process()
2600 vmm_stat_incr(vcpu->vcpu, VMEXIT_INTR_WINDOW, 1); in vmx_exit_process()
2602 vmx_clear_int_window_exiting(vcpu); in vmx_exit_process()
2633 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1); in vmx_exit_process()
2638 if (vm_nmi_pending(vcpu->vcpu)) in vmx_exit_process()
2639 vmx_inject_nmi(vcpu); in vmx_exit_process()
2640 vmx_clear_nmi_window_exiting(vcpu); in vmx_exit_process()
2641 vmm_stat_incr(vcpu->vcpu, VMEXIT_NMI_WINDOW, 1); in vmx_exit_process()
2644 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1); in vmx_exit_process()
2659 vis->index = inout_str_index(vcpu, in); in vmx_exit_process()
2660 vis->count = inout_str_count(vcpu, vis->inout.rep); in vmx_exit_process()
2662 inout_str_seginfo(vcpu, inst_info, in, vis); in vmx_exit_process()
2667 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1); in vmx_exit_process()
2669 handled = vmx_handle_cpuid(vcpu, vmxctx); in vmx_exit_process()
2672 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1); in vmx_exit_process()
2692 vmx_restore_nmi_blocking(vcpu); in vmx_exit_process()
2705 VMX_CTR0(vcpu, "Vectoring to MCE handler"); in vmx_exit_process()
2715 (vcpu->cap.set & (1 << VM_CAP_BPT_EXIT))) { in vmx_exit_process()
2743 VMX_CTR2(vcpu, "Reflecting exception %d/%#x into " in vmx_exit_process()
2747 error = vm_inject_exception(vcpu->vcpu, intr_vec, in vmx_exit_process()
2760 if (vm_mem_allocated(vcpu->vcpu, gpa) || in vmx_exit_process()
2761 ppt_is_mmio(vmx->vm, gpa) || apic_access_fault(vcpu, gpa)) { in vmx_exit_process()
2766 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1); in vmx_exit_process()
2771 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1); in vmx_exit_process()
2785 vmx_restore_nmi_blocking(vcpu); in vmx_exit_process()
2795 handled = vmx_handle_apic_access(vcpu, vmexit); in vmx_exit_process()
2803 vlapic = vm_lapic(vcpu->vcpu); in vmx_exit_process()
2806 handled = vmx_handle_apic_write(vcpu, vlapic, qual); in vmx_exit_process()
2810 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); in vmx_exit_process()
2821 vlapic = vm_lapic(vcpu->vcpu); in vmx_exit_process()
2847 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1); in vmx_exit_process()
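
The vmx_exit_process() matches show its overall shape: bump VMEXIT_COUNT, capture any pending IDT-vectoring information with vm_exit_intinfo(), then dispatch on the exit reason, either completing the exit in the kernel (handled = 1) or filling in the vm_exit structure for userland. A skeletal sketch of that dispatch; the exit-reason numbers are the SDM basic exit reasons, everything else is a stand-in:

#include <sys/types.h>

#define	EXIT_REASON_EXT_INTR_SK	1	/* external interrupt */
#define	EXIT_REASON_CPUID_SK	10
#define	EXIT_REASON_HLT_SK	12

enum exitcode_sk { EC_NONE, EC_HLT, EC_UNKNOWN };	/* illustrative */
struct vm_exit_sk { enum exitcode_sk exitcode; };

static int
exit_process_sketch(uint32_t exit_reason, struct vm_exit_sk *vmexit)
{
	int handled = 0;		/* 0: userland must handle it */

	switch (exit_reason) {
	case EXIT_REASON_CPUID_SK:
		/* cheap to emulate in-kernel; no round trip */
		handled = 1;
		break;
	case EXIT_REASON_EXT_INTR_SK:
		/* host interrupt; the host already serviced it */
		handled = 1;
		break;
	case EXIT_REASON_HLT_SK:
		vmexit->exitcode = EC_HLT;	/* let userland idle the vCPU */
		break;
	default:
		vmexit->exitcode = EC_UNKNOWN;
		break;
	}
	return (handled);
}
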
2922 vmx_exit_handle_nmi(struct vmx_vcpu *vcpu, struct vm_exit *vmexit) in vmx_exit_handle_nmi() argument
2938 VMX_CTR0(vcpu, "Vectoring to NMI handler"); in vmx_exit_handle_nmi()
3040 struct vmx_vcpu *vcpu; in vmx_run() local
3049 vcpu = vcpui; in vmx_run()
3050 vmx = vcpu->vmx; in vmx_run()
3051 vmcs = vcpu->vmcs; in vmx_run()
3052 vmxctx = &vcpu->ctx; in vmx_run()
3053 vlapic = vm_lapic(vcpu->vcpu); in vmx_run()
3054 vmexit = vm_exitinfo(vcpu->vcpu); in vmx_run()
3060 vmx_msr_guest_enter(vcpu); in vmx_run()
3075 vmx_set_pcpu_defaults(vmx, vcpu, pmap); in vmx_run()
3100 vmx_inject_interrupts(vcpu, vlapic, rip); in vmx_run()
3103 * Check for vcpu suspension after injecting events because in vmx_run()
3104 * vmx_inject_interrupts() can suspend the vcpu due to a in vmx_run()
3109 vm_exit_suspended(vcpu->vcpu, rip); in vmx_run()
3113 if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) { in vmx_run()
3115 vm_exit_rendezvous(vcpu->vcpu, rip); in vmx_run()
3121 vm_exit_reqidle(vcpu->vcpu, rip); in vmx_run()
3125 if (vcpu_should_yield(vcpu->vcpu)) { in vmx_run()
3127 vm_exit_astpending(vcpu->vcpu, rip); in vmx_run()
3128 vmx_astpending_trace(vcpu, rip); in vmx_run()
3133 if (vcpu_debugged(vcpu->vcpu)) { in vmx_run()
3135 vm_exit_debug(vcpu->vcpu, rip); in vmx_run()
3144 if ((vcpu->cap.proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0) { in vmx_run()
3180 vmx_msr_guest_enter_tsc_aux(vmx, vcpu); in vmx_run()
3190 vmx_run_trace(vcpu); in vmx_run()
3195 vmx_msr_guest_exit_tsc_aux(vmx, vcpu); in vmx_run()
3208 vcpu->state.nextrip = rip; in vmx_run()
3211 vmx_exit_handle_nmi(vcpu, vmexit); in vmx_run()
3213 handled = vmx_exit_process(vmx, vcpu, vmexit); in vmx_run()
3219 vmx_exit_trace(vcpu, rip, exit_reason, handled); in vmx_run()
3233 VMX_CTR1(vcpu, "returning from vmx_run: exitcode %d", in vmx_run()
3237 vmx_msr_guest_exit(vcpu); in vmx_run()
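
The vmx_run() matches trace the shape of the run loop: enter guest MSR state, set per-pCPU defaults, then per iteration inject events, check (in order) suspension, rendezvous, reqidle, ASTs, and debug requests before launching or resuming the guest with interrupts disabled; after the exit, record nextrip, handle host NMIs, and process the exit, looping while the exit was handled in-kernel. A control-flow skeleton under those assumptions; every helper below is a stand-in prototype, not the bhyve API:

#include <sys/types.h>
#include <stdbool.h>

struct vcpu_run_sk { uint64_t nextrip; };

void	guest_msrs_enter(struct vcpu_run_sk *);
void	guest_msrs_exit(struct vcpu_run_sk *);
void	inject_pending_events(struct vcpu_run_sk *, uint64_t rip);
bool	exit_to_userland_requested(struct vcpu_run_sk *, uint64_t rip);
void	enter_guest(struct vcpu_run_sk *);	/* VMLAUNCH/VMRESUME */
uint64_t guest_rip_now(void);
int	process_exit(struct vcpu_run_sk *);	/* 1 = handled in kernel */
void	intr_disable_sk(void);
void	intr_enable_sk(void);

static void
run_loop_sketch(struct vcpu_run_sk *vcpu, uint64_t rip)
{
	int handled;

	guest_msrs_enter(vcpu);
	do {
		intr_disable_sk();	/* window stays closed until entry */
		inject_pending_events(vcpu, rip);
		/*
		 * Suspension/rendezvous/AST checks run after injection,
		 * because injection itself can suspend the vCPU.
		 */
		if (exit_to_userland_requested(vcpu, rip)) {
			intr_enable_sk();
			break;
		}
		enter_guest(vcpu);
		intr_enable_sk();

		rip = guest_rip_now();
		vcpu->nextrip = rip;	/* interrupt-shadow bookkeeping */
		handled = process_exit(vcpu);
	} while (handled);
	guest_msrs_exit(vcpu);
}

The nextrip bookkeeping feeds the check near the top of vmx_inject_interrupts(): if the guest RIP moved since the last exit, any interrupt shadow from the previous instruction no longer applies.
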
3245 struct vmx_vcpu *vcpu = vcpui; in vmx_vcpu_cleanup() local
3247 vpid_free(vcpu->state.vpid); in vmx_vcpu_cleanup()
3248 free(vcpu->pir_desc, M_VMX); in vmx_vcpu_cleanup()
3249 free(vcpu->apic_page, M_VMX); in vmx_vcpu_cleanup()
3250 free(vcpu->vmcs, M_VMX); in vmx_vcpu_cleanup()
3251 free(vcpu, M_VMX); in vmx_vcpu_cleanup()
3346 vmx_get_intr_shadow(struct vmx_vcpu *vcpu, int running, uint64_t *retval) in vmx_get_intr_shadow() argument
3351 error = vmcs_getreg(vcpu->vmcs, running, in vmx_get_intr_shadow()
3358 vmx_modify_intr_shadow(struct vmx_vcpu *vcpu, int running, uint64_t val) in vmx_modify_intr_shadow() argument
3365 * Forcing the vcpu into an interrupt shadow is not supported. in vmx_modify_intr_shadow()
3372 vmcs = vcpu->vmcs; in vmx_modify_intr_shadow()
3380 VMX_CTR2(vcpu, "Setting intr_shadow to %#lx %s", val, in vmx_modify_intr_shadow()
3410 struct vmx_vcpu *vcpu = vcpui; in vmx_getreg() local
3411 struct vmx *vmx = vcpu->vmx; in vmx_getreg()
3413 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_getreg()
3416 vcpu->vcpuid); in vmx_getreg()
3420 return (vmx_get_intr_shadow(vcpu, running, retval)); in vmx_getreg()
3422 *retval = vcpu->guest_msrs[IDX_MSR_KGSBASE]; in vmx_getreg()
3425 *retval = vlapic_get_cr8(vm_lapic(vcpu->vcpu)); in vmx_getreg()
3429 if (vmxctx_getreg(&vcpu->ctx, reg, retval) == 0) in vmx_getreg()
3432 return (vmcs_getreg(vcpu->vmcs, running, reg, retval)); in vmx_getreg()
3441 struct vmx_vcpu *vcpu = vcpui; in vmx_setreg() local
3442 struct vmx *vmx = vcpu->vmx; in vmx_setreg()
3444 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_setreg()
3447 vcpu->vcpuid); in vmx_setreg()
3450 return (vmx_modify_intr_shadow(vcpu, running, val)); in vmx_setreg()
3452 if (vmxctx_setreg(&vcpu->ctx, reg, val) == 0) in vmx_setreg()
3459 error = vmcs_setreg(vcpu->vmcs, running, reg, val); in vmx_setreg()
3469 vmcs_getreg(vcpu->vmcs, running, in vmx_setreg()
3475 vmcs_setreg(vcpu->vmcs, running, in vmx_setreg()
3484 error = vmcs_setreg(vcpu->vmcs, running, in vmx_setreg()
3490 * Invalidate the guest vcpu's TLB mappings to emulate in vmx_setreg()
3496 pmap = vcpu->ctx.pmap; in vmx_setreg()
3497 vmx_invvpid(vmx, vcpu, pmap, running); in vmx_setreg()
3508 struct vmx_vcpu *vcpu = vcpui; in vmx_getdesc() local
3509 struct vmx *vmx = vcpu->vmx; in vmx_getdesc()
3511 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_getdesc()
3514 vcpu->vcpuid); in vmx_getdesc()
3516 return (vmcs_getdesc(vcpu->vmcs, running, reg, desc)); in vmx_getdesc()
3523 struct vmx_vcpu *vcpu = vcpui; in vmx_setdesc() local
3524 struct vmx *vmx = vcpu->vmx; in vmx_setdesc()
3526 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_setdesc()
3529 vcpu->vcpuid); in vmx_setdesc()
3531 return (vmcs_setdesc(vcpu->vmcs, running, reg, desc)); in vmx_setdesc()
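
vmx_getreg(), vmx_setreg(), vmx_getdesc(), and vmx_setdesc() all begin with the same guard visible above: a loaded VMCS can only be accessed with vmread/vmwrite on the CPU where it is current, so if the vCPU is running, the caller must already be on that host CPU. A sketch of the guard (vcpu_is_running_sk is a stand-in for the real vcpu_is_running()):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pcpu.h>

/* Stand-in: reports whether the vCPU is running and on which CPU. */
bool	vcpu_is_running_sk(void *vcpu, int *hostcpu);

static void
assert_vmcs_local(void *vcpu, int vcpuid)
{
	int hostcpu;

	if (vcpu_is_running_sk(vcpu, &hostcpu) && hostcpu != curcpu)
		panic("vmcs access: vcpu %d running on cpu %d",
		    vcpuid, hostcpu);
}
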
3537 struct vmx_vcpu *vcpu = vcpui; in vmx_getcap() local
3543 vcap = vcpu->cap.set; in vmx_getcap()
3591 struct vmx_vcpu *vcpu = vcpui; in vmx_setcap() local
3592 struct vmcs *vmcs = vcpu->vmcs; in vmx_setcap()
3608 pptr = &vcpu->cap.proc_ctls; in vmx_setcap()
3617 pptr = &vcpu->cap.proc_ctls; in vmx_setcap()
3626 pptr = &vcpu->cap.proc_ctls; in vmx_setcap()
3646 pptr = &vcpu->cap.proc_ctls2; in vmx_setcap()
3655 pptr = &vcpu->cap.proc_ctls2; in vmx_setcap()
3665 if (vcpu->cap.exc_bitmap != 0xffffffff) { in vmx_setcap()
3666 pptr = &vcpu->cap.exc_bitmap; in vmx_setcap()
3675 vlapic = vm_lapic(vcpu->vcpu); in vmx_setcap()
3709 vcpu->cap.set |= (1 << type); in vmx_setcap()
3711 vcpu->cap.set &= ~(1 << type); in vmx_setcap()
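
vmx_setcap() reduces every capability to the same triple: a pointer into the cached controls (cap.proc_ctls, cap.proc_ctls2, or cap.exc_bitmap), a flag bit, and the VMCS field to write back; once the hardware is updated, cap.set records the capability for vmx_getcap(). A sketch of that final bookkeeping step, with a no-op stand-in for vmcs_write():

#include <sys/types.h>
#include <stdbool.h>

static void
vmcs_write_cap_sk(uint32_t field, uint32_t val)
{
	(void)field; (void)val;
}

static void
setcap_sketch(uint32_t *pptr, uint32_t flag, uint32_t vmcs_field,
    uint32_t *cap_set, int type, bool enable)
{
	if (enable)
		*pptr |= flag;
	else
		*pptr &= ~flag;
	vmcs_write_cap_sk(vmcs_field, *pptr);	/* keep hardware in sync */

	if (enable)
		*cap_set |= (1U << type);	/* reported by vmx_getcap() */
	else
		*cap_set &= ~(1U << type);
}
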
3732 struct vmx_vcpu *vcpu; member
3767 * modified if the vcpu is running. in vmx_set_intr_ready()
3779 * vCPU is HLTed with a high PPR, a low priority interrupt would cause in vmx_set_intr_ready()
3780 * the 0->1 'pending' transition with a notification, but the vCPU in vmx_set_intr_ready()
3781 * would ignore the interrupt for the time being. The same vCPU would in vmx_set_intr_ready()
3837 vmexit = vm_exitinfo(vlapic->vcpu); in vmx_pending_intr()
3872 * processor priority of this vCPU, ensure that 'pending_prio' does not in vmx_pending_intr()
3903 KASSERT(!vcpu_is_running(vlapic->vcpu, NULL), in vmx_set_tmr()
3904 ("vmx_set_tmr: vcpu cannot be running")); in vmx_set_tmr()
3907 vmcs = vlapic_vtx->vcpu->vmcs; in vmx_set_tmr()
3924 struct vmx_vcpu *vcpu; in vmx_enable_x2apic_mode_ts() local
3929 vcpu = vlapic_vtx->vcpu; in vmx_enable_x2apic_mode_ts()
3930 vmcs = vcpu->vmcs; in vmx_enable_x2apic_mode_ts()
3932 proc_ctls = vcpu->cap.proc_ctls; in vmx_enable_x2apic_mode_ts()
3936 vcpu->cap.proc_ctls = proc_ctls; in vmx_enable_x2apic_mode_ts()
3948 struct vmx_vcpu *vcpu; in vmx_enable_x2apic_mode_vid() local
3954 vcpu = vlapic_vtx->vcpu; in vmx_enable_x2apic_mode_vid()
3955 vmx = vcpu->vmx; in vmx_enable_x2apic_mode_vid()
3956 vmcs = vcpu->vmcs; in vmx_enable_x2apic_mode_vid()
3958 proc_ctls2 = vcpu->cap.proc_ctls2; in vmx_enable_x2apic_mode_vid()
3964 vcpu->cap.proc_ctls2 = proc_ctls2; in vmx_enable_x2apic_mode_vid()
3981 * once in the context of vcpu 0. in vmx_enable_x2apic_mode_vid()
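
The vmx_set_intr_ready()/vmx_pending_intr() matches concern posted interrupts: a sender sets the vector's bit in the PIR, and only the 0->1 transition of the descriptor's 'pending' word warrants a notification IPI; the comments above note the corner case where a HLTed vCPU with a high PPR takes the notification yet rightly ignores a low-priority vector. A sketch of the send side using FreeBSD atomics; the descriptor layout matches the 64-byte-aligned pir_desc allocated in vmx_vcpu_init():

#include <sys/types.h>
#include <machine/atomic.h>
#include <stdbool.h>

struct pir_desc_post_sk {
	uint64_t	pir[4];		/* one bit per vector, 0..255 */
	uint64_t	pending;	/* notification outstanding? */
	uint64_t	unused[3];
} __aligned(64);

/* Returns true if the caller should send the notification IPI. */
static bool
pir_post_vector(struct pir_desc_post_sk *pir, int vector)
{
	uint64_t bit = 1UL << (vector % 64);

	atomic_set_64(&pir->pir[vector / 64], bit);
	/* Only the 0 -> 1 transition of 'pending' needs an IPI. */
	return (atomic_cmpset_64(&pir->pending, 0, 1) != 0);
}
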
4094 struct vmx_vcpu *vcpu; in vmx_vlapic_init() local
4098 vcpu = vcpui; in vmx_vlapic_init()
4099 vmx = vcpu->vmx; in vmx_vlapic_init()
4103 vlapic->vcpu = vcpu->vcpu; in vmx_vlapic_init()
4104 vlapic->vcpuid = vcpu->vcpuid; in vmx_vlapic_init()
4105 vlapic->apic_page = (struct LAPIC *)vcpu->apic_page; in vmx_vlapic_init()
4108 vlapic_vtx->pir_desc = vcpu->pir_desc; in vmx_vlapic_init()
4109 vlapic_vtx->vcpu = vcpu; in vmx_vlapic_init()
4145 struct vmx_vcpu *vcpu; in vmx_vcpu_snapshot() local
4150 vcpu = vcpui; in vmx_vcpu_snapshot()
4151 vmx = vcpu->vmx; in vmx_vcpu_snapshot()
4152 vmcs = vcpu->vmcs; in vmx_vcpu_snapshot()
4154 run = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_vcpu_snapshot()
4157 vcpu->vcpuid); in vmx_vcpu_snapshot()
4216 SNAPSHOT_BUF_OR_LEAVE(vcpu->guest_msrs, in vmx_vcpu_snapshot()
4217 sizeof(vcpu->guest_msrs), meta, err, done); in vmx_vcpu_snapshot()
4219 SNAPSHOT_BUF_OR_LEAVE(vcpu->pir_desc, in vmx_vcpu_snapshot()
4220 sizeof(*vcpu->pir_desc), meta, err, done); in vmx_vcpu_snapshot()
4222 SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, in vmx_vcpu_snapshot()
4223 sizeof(vcpu->mtrr), meta, err, done); in vmx_vcpu_snapshot()
4225 vmxctx = &vcpu->ctx; in vmx_vcpu_snapshot()
4255 struct vmx_vcpu *vcpu = vcpui; in vmx_restore_tsc() local
4260 vmx = vcpu->vmx; in vmx_restore_tsc()
4261 vmcs = vcpu->vmcs; in vmx_restore_tsc()
4263 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_restore_tsc()
4266 vcpu->vcpuid); in vmx_restore_tsc()
4273 error = vmx_set_tsc_offset(vcpu, offset); in vmx_restore_tsc()