Lines Matching full:vcpu in sys/amd64/vmm/intel/vmx.c (FreeBSD vmm, Intel VT-x backend)
816 * bitmap is currently per-VM rather than per-vCPU while the in vmx_modinit()
818 * per-vCPU basis). in vmx_modinit()
1130 vmx_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid) in vmx_vcpu_init()
1134 struct vmx_vcpu *vcpu; in vmx_vcpu_init() local
1141 vcpu = malloc(sizeof(*vcpu), M_VMX, M_WAITOK | M_ZERO); in vmx_vcpu_init()
1142 vcpu->vmx = vmx; in vmx_vcpu_init()
1143 vcpu->vcpu = vcpu1; in vmx_vcpu_init()
1144 vcpu->vcpuid = vcpuid; in vmx_vcpu_init()
1145 vcpu->vmcs = malloc_aligned(sizeof(*vmcs), PAGE_SIZE, M_VMX, in vmx_vcpu_init()
1147 vcpu->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_VMX, in vmx_vcpu_init()
1149 vcpu->pir_desc = malloc_aligned(sizeof(*vcpu->pir_desc), 64, M_VMX, in vmx_vcpu_init()
1152 vmcs = vcpu->vmcs; in vmx_vcpu_init()
1156 panic("vmx_init: vmclear error %d on vcpu %d\n", in vmx_vcpu_init()
1160 vmx_msr_guest_init(vmx, vcpu); in vmx_vcpu_init()
1167 error += vmwrite(VMCS_HOST_RSP, (u_long)&vcpu->ctx); in vmx_vcpu_init()
1171 if (vcpu_trap_wbinvd(vcpu->vcpu)) { in vmx_vcpu_init()
1191 if (vcpu_trace_exceptions(vcpu->vcpu)) in vmx_vcpu_init()
1197 vcpu->ctx.guest_dr6 = DBREG_DR6_RESERVED1; in vmx_vcpu_init()
1201 error += vmwrite(VMCS_VIRTUAL_APIC, vtophys(vcpu->apic_page)); in vmx_vcpu_init()
1213 error += vmwrite(VMCS_PIR_DESC, vtophys(vcpu->pir_desc)); in vmx_vcpu_init()
1218 vcpu->cap.set = 0; in vmx_vcpu_init()
1219 vcpu->cap.set |= cap_rdpid != 0 ? 1 << VM_CAP_RDPID : 0; in vmx_vcpu_init()
1220 vcpu->cap.set |= cap_rdtscp != 0 ? 1 << VM_CAP_RDTSCP : 0; in vmx_vcpu_init()
1221 vcpu->cap.proc_ctls = procbased_ctls; in vmx_vcpu_init()
1222 vcpu->cap.proc_ctls2 = procbased_ctls2; in vmx_vcpu_init()
1223 vcpu->cap.exc_bitmap = exc_bitmap; in vmx_vcpu_init()
1225 vcpu->state.nextrip = ~0; in vmx_vcpu_init()
1226 vcpu->state.lastcpu = NOCPU; in vmx_vcpu_init()
1227 vcpu->state.vpid = vpid; in vmx_vcpu_init()
1243 vcpu->ctx.pmap = vmx->pmap; in vmx_vcpu_init()
1245 return (vcpu); in vmx_vcpu_init()
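
Note: the vmx_vcpu_init() matches above show three per-vCPU allocations with hardware-imposed alignment: the VMCS and the virtual-APIC page are naturally aligned 4 KiB pages, and the posted-interrupt descriptor is 64-byte aligned, per the Intel SDM. A minimal userland model of just that alignment contract, with C11 aligned_alloc() standing in for the kernel's malloc_aligned()/M_VMX and an illustrative (not vmm's) pir_desc layout:

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096

    /* 256-bit posted-interrupt request bitmap plus control word,
     * padded to 64 bytes; layout here is illustrative only. */
    struct pir_desc {
            uint64_t pir[4];
            uint64_t pending;
            uint64_t pad[3];
    };

    int
    main(void)
    {
            void *vmcs = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
            void *apic_page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
            struct pir_desc *pir = aligned_alloc(64, sizeof(*pir));

            assert(((uintptr_t)vmcs & (PAGE_SIZE - 1)) == 0);
            assert(((uintptr_t)apic_page & (PAGE_SIZE - 1)) == 0);
            assert(((uintptr_t)pir & 63) == 0);

            free(pir);
            free(apic_page);
            free(vmcs);
            return (0);
    }
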
1249 vmx_handle_cpuid(struct vmx_vcpu *vcpu, struct vmxctx *vmxctx) in vmx_handle_cpuid() argument
1253 handled = x86_emulate_cpuid(vcpu->vcpu, (uint64_t *)&vmxctx->guest_rax, in vmx_handle_cpuid()
1260 vmx_run_trace(struct vmx_vcpu *vcpu) in vmx_run_trace() argument
1262 VMX_CTR1(vcpu, "Resume execution at %#lx", vmcs_guest_rip()); in vmx_run_trace()
1266 vmx_exit_trace(struct vmx_vcpu *vcpu, uint64_t rip, uint32_t exit_reason, in vmx_exit_trace() argument
1269 VMX_CTR3(vcpu, "%s %s vmexit at 0x%0lx", in vmx_exit_trace()
1275 vmx_astpending_trace(struct vmx_vcpu *vcpu, uint64_t rip) in vmx_astpending_trace() argument
1277 VMX_CTR1(vcpu, "astpending vmexit at 0x%0lx", rip); in vmx_astpending_trace()
1287 vmx_invvpid(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap, int running) in vmx_invvpid() argument
1292 vmxstate = &vcpu->state; in vmx_invvpid()
1300 * This will invalidate TLB entries tagged with the vcpu's in vmx_invvpid()
1307 KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside " in vmx_invvpid()
1308 "critical section", __func__, vcpu->vcpuid)); in vmx_invvpid()
1313 * We do this because this vcpu was executing on a different host in vmx_invvpid()
1320 * move the thread associated with this vcpu between host cpus. in vmx_invvpid()
1331 vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_DONE, 1); in vmx_invvpid()
1339 vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_SAVED, 1); in vmx_invvpid()
1344 vmx_set_pcpu_defaults(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap) in vmx_set_pcpu_defaults() argument
1348 vmxstate = &vcpu->state; in vmx_set_pcpu_defaults()
1354 vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1); in vmx_set_pcpu_defaults()
1359 vmx_invvpid(vmx, vcpu, pmap, 1); in vmx_set_pcpu_defaults()
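
Note: vmx_invvpid() and vmx_set_pcpu_defaults() above work together: when a vcpu resumes on a host CPU other than state.lastcpu, VCPU_MIGRATIONS is bumped and TLB entries tagged with the vcpu's VPID are invalidated, because another context with the same VPID may have run on that CPU in the interim (hence the KASSERT that the caller holds a critical section, which keeps the scheduler from moving the thread between host cpus mid-operation). A reduced userland model of just the lastcpu bookkeeping; the INVVPID itself is stubbed to a comment:

    #include <stdint.h>
    #include <stdio.h>

    #define NOCPU (-1)

    struct vmxstate {
            int lastcpu;
            uint16_t vpid;
    };

    static int migrations;

    /* Model of vmx_set_pcpu_defaults(): on a host-cpu change, count a
     * migration and (in the kernel) invalidate this vpid's TLB entries. */
    static void
    set_pcpu_defaults(struct vmxstate *st, int curcpu)
    {
            if (st->lastcpu != curcpu) {
                    if (st->lastcpu != NOCPU)
                            migrations++;
                    st->lastcpu = curcpu;
                    /* vmx_invvpid(..., running=1) would run here */
            }
    }

    int
    main(void)
    {
            struct vmxstate st = { .lastcpu = NOCPU, .vpid = 1 };

            set_pcpu_defaults(&st, 0);
            set_pcpu_defaults(&st, 2);
            printf("migrations: %d\n", migrations);     /* 1 */
            return (0);
    }
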
1368 vmx_set_int_window_exiting(struct vmx_vcpu *vcpu) in vmx_set_int_window_exiting() argument
1371 if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) { in vmx_set_int_window_exiting()
1372 vcpu->cap.proc_ctls |= PROCBASED_INT_WINDOW_EXITING; in vmx_set_int_window_exiting()
1373 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_set_int_window_exiting()
1374 VMX_CTR0(vcpu, "Enabling interrupt window exiting"); in vmx_set_int_window_exiting()
1379 vmx_clear_int_window_exiting(struct vmx_vcpu *vcpu) in vmx_clear_int_window_exiting() argument
1382 KASSERT((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0, in vmx_clear_int_window_exiting()
1383 ("intr_window_exiting not set: %#x", vcpu->cap.proc_ctls)); in vmx_clear_int_window_exiting()
1384 vcpu->cap.proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING; in vmx_clear_int_window_exiting()
1385 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_clear_int_window_exiting()
1386 VMX_CTR0(vcpu, "Disabling interrupt window exiting"); in vmx_clear_int_window_exiting()
1390 vmx_set_nmi_window_exiting(struct vmx_vcpu *vcpu) in vmx_set_nmi_window_exiting() argument
1393 if ((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) { in vmx_set_nmi_window_exiting()
1394 vcpu->cap.proc_ctls |= PROCBASED_NMI_WINDOW_EXITING; in vmx_set_nmi_window_exiting()
1395 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_set_nmi_window_exiting()
1396 VMX_CTR0(vcpu, "Enabling NMI window exiting"); in vmx_set_nmi_window_exiting()
1401 vmx_clear_nmi_window_exiting(struct vmx_vcpu *vcpu) in vmx_clear_nmi_window_exiting() argument
1404 KASSERT((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0, in vmx_clear_nmi_window_exiting()
1405 ("nmi_window_exiting not set %#x", vcpu->cap.proc_ctls)); in vmx_clear_nmi_window_exiting()
1406 vcpu->cap.proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING; in vmx_clear_nmi_window_exiting()
1407 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_clear_nmi_window_exiting()
1408 VMX_CTR0(vcpu, "Disabling NMI window exiting"); in vmx_clear_nmi_window_exiting()
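
Note: the four set/clear window-exiting helpers above share one pattern: flip a bit in the software-cached copy of the primary processor-based VM-execution controls (vcpu->cap.proc_ctls), push the cache back with vmcs_write(), skip the write when the bit already has the desired value, and KASSERT on the clear path. A stand-alone sketch with the VMCS write stubbed out; the bit positions follow the Intel SDM, and the stub is not the kernel API:

    #include <assert.h>
    #include <stdint.h>

    #define PROCBASED_INT_WINDOW_EXITING    (1u << 2)   /* SDM bit 2 */
    #define PROCBASED_NMI_WINDOW_EXITING    (1u << 22)  /* SDM bit 22 */

    static uint32_t vmcs_proc_ctls;     /* stands in for the VMCS field */

    static void
    vmcs_write_stub(uint32_t v)
    {
            vmcs_proc_ctls = v;
    }

    static void
    set_window_exiting(uint32_t *cache, uint32_t bit)
    {
            if ((*cache & bit) == 0) {      /* avoid a redundant VMCS write */
                    *cache |= bit;
                    vmcs_write_stub(*cache);
            }
    }

    static void
    clear_window_exiting(uint32_t *cache, uint32_t bit)
    {
            assert((*cache & bit) != 0);    /* KASSERT in the kernel */
            *cache &= ~bit;
            vmcs_write_stub(*cache);
    }

    int
    main(void)
    {
            uint32_t cache = 0;

            set_window_exiting(&cache, PROCBASED_INT_WINDOW_EXITING);
            clear_window_exiting(&cache, PROCBASED_INT_WINDOW_EXITING);
            assert(cache == 0 && vmcs_proc_ctls == 0);
            return (0);
    }
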
1412 vmx_set_tsc_offset(struct vmx_vcpu *vcpu, uint64_t offset) in vmx_set_tsc_offset() argument
1416 if ((vcpu->cap.proc_ctls & PROCBASED_TSC_OFFSET) == 0) { in vmx_set_tsc_offset()
1417 vcpu->cap.proc_ctls |= PROCBASED_TSC_OFFSET; in vmx_set_tsc_offset()
1418 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_set_tsc_offset()
1419 VMX_CTR0(vcpu, "Enabling TSC offsetting"); in vmx_set_tsc_offset()
1425 vm_set_tsc_offset(vcpu->vcpu, offset); in vmx_set_tsc_offset()
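
Note: vmx_set_tsc_offset() lazily enables PROCBASED_TSC_OFFSET the first time an offset is installed; with offsetting enabled and RDTSC not exiting, the guest observes host_tsc + offset, with ordinary two's-complement wraparound. The arithmetic as a sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Guest-visible TSC under PROCBASED_TSC_OFFSET (no RDTSC exiting). */
    static uint64_t
    guest_rdtsc(uint64_t host_tsc, uint64_t tsc_offset)
    {
            return (host_tsc + tsc_offset);     /* wraps mod 2^64 */
    }

    int
    main(void)
    {
            /* A negative offset makes the guest TSC lag the host. */
            uint64_t off = (uint64_t)-1000;

            printf("%llu\n",
                (unsigned long long)guest_rdtsc(5000, off));    /* 4000 */
            return (0);
    }
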
1436 vmx_inject_nmi(struct vmx_vcpu *vcpu) in vmx_inject_nmi() argument
1455 VMX_CTR0(vcpu, "Injecting vNMI"); in vmx_inject_nmi()
1458 vm_nmi_clear(vcpu->vcpu); in vmx_inject_nmi()
1462 vmx_inject_interrupts(struct vmx_vcpu *vcpu, struct vlapic *vlapic, in vmx_inject_interrupts() argument
1469 if (vcpu->cap.set & (1 << VM_CAP_MASK_HWINTR)) { in vmx_inject_interrupts()
1473 if (vcpu->state.nextrip != guestrip) { in vmx_inject_interrupts()
1476 VMX_CTR2(vcpu, "Guest interrupt blocking " in vmx_inject_interrupts()
1478 vcpu->state.nextrip, guestrip); in vmx_inject_interrupts()
1484 if (vm_entry_intinfo(vcpu->vcpu, &entryinfo)) { in vmx_inject_interrupts()
1509 if (vm_nmi_pending(vcpu->vcpu)) { in vmx_inject_interrupts()
1526 vmx_inject_nmi(vcpu); in vmx_inject_interrupts()
1529 VMX_CTR1(vcpu, "Cannot inject NMI " in vmx_inject_interrupts()
1533 VMX_CTR1(vcpu, "Cannot inject NMI due to " in vmx_inject_interrupts()
1538 vmx_set_nmi_window_exiting(vcpu); in vmx_inject_interrupts()
1541 extint_pending = vm_extint_pending(vcpu->vcpu); in vmx_inject_interrupts()
1553 if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) { in vmx_inject_interrupts()
1554 VMX_CTR0(vcpu, "Skip interrupt injection due to " in vmx_inject_interrupts()
1574 vatpic_pending_intr(vcpu->vmx->vm, &vector); in vmx_inject_interrupts()
1589 VMX_CTR2(vcpu, "Cannot inject vector %d due to " in vmx_inject_interrupts()
1596 VMX_CTR2(vcpu, "Cannot inject vector %d due to " in vmx_inject_interrupts()
1610 VMX_CTR2(vcpu, "Cannot inject vector %d due to " in vmx_inject_interrupts()
1624 vm_extint_clear(vcpu->vcpu); in vmx_inject_interrupts()
1625 vatpic_intr_accepted(vcpu->vmx->vm, vector); in vmx_inject_interrupts()
1638 vmx_set_int_window_exiting(vcpu); in vmx_inject_interrupts()
1641 VMX_CTR1(vcpu, "Injecting hwintr at vector %d", vector); in vmx_inject_interrupts()
1650 vmx_set_int_window_exiting(vcpu); in vmx_inject_interrupts()
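
Note: vmx_inject_interrupts() encodes a priority and fallback policy: previously latched entry info is injected first, then a pending NMI, then an ExtINT or APIC vector; whenever blocking (interrupt shadow, virtual-NMI blocking, RFLAGS.IF clear) prevents injection, the matching window exit is armed so the guest exits again the moment injection becomes possible. A deliberately simplified gating model: field names are this model's own, and real VM entry injects at most one event per entry, which this model ignores:

    #include <stdbool.h>

    struct gate {
            bool nmi_pending, intr_pending;
            bool nmi_blocked;       /* virtual-NMI blocking */
            bool if_clear;          /* RFLAGS.IF == 0 */
            bool intr_shadow;       /* STI / MOV-SS shadow */
            bool nmi_window, int_window;
    };

    static void
    inject(struct gate *g)
    {
            if (g->nmi_pending) {
                    if (!g->nmi_blocked && !g->intr_shadow)
                            g->nmi_pending = false; /* injected */
                    else
                            g->nmi_window = true;   /* retry on window exit */
            }
            if (g->intr_pending) {
                    if (!g->if_clear && !g->intr_shadow)
                            g->intr_pending = false; /* injected */
                    else
                            g->int_window = true;
            }
    }

    int
    main(void)
    {
            struct gate g = { .nmi_pending = true, .intr_shadow = true };

            inject(&g);
            return (g.nmi_window ? 0 : 1);  /* shadow blocks; window armed */
    }
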
1663 vmx_restore_nmi_blocking(struct vmx_vcpu *vcpu) in vmx_restore_nmi_blocking() argument
1667 VMX_CTR0(vcpu, "Restore Virtual-NMI blocking"); in vmx_restore_nmi_blocking()
1674 vmx_clear_nmi_blocking(struct vmx_vcpu *vcpu) in vmx_clear_nmi_blocking() argument
1678 VMX_CTR0(vcpu, "Clear Virtual-NMI blocking"); in vmx_clear_nmi_blocking()
1685 vmx_assert_nmi_blocking(struct vmx_vcpu *vcpu) in vmx_assert_nmi_blocking() argument
1695 vmx_emulate_xsetbv(struct vmx *vmx, struct vmx_vcpu *vcpu, in vmx_emulate_xsetbv() argument
1702 vmxctx = &vcpu->ctx; in vmx_emulate_xsetbv()
1713 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1719 vm_inject_ud(vcpu->vcpu); in vmx_emulate_xsetbv()
1725 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1730 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1737 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1748 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1758 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
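
Note: vmx_emulate_xsetbv() validates the new %xcr0 before loading it, injecting #UD or #GP on failure (the vm_inject_gp()/vm_inject_ud() matches above). A sketch of the value checks, paraphrased from the SDM; the kernel additionally checks CPL and CR4.OSXSAVE, which are omitted here:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define XFEATURE_ENABLED_X87    (1ULL << 0)
    #define XFEATURE_ENABLED_SSE    (1ULL << 1)
    #define XFEATURE_ENABLED_AVX    (1ULL << 2)

    /* Value checks only; 'limits' is the host-supported feature mask. */
    static bool
    xcr0_valid(uint64_t newval, uint64_t limits)
    {
            if (newval & ~limits)                       /* unsupported bit: #GP */
                    return (false);
            if ((newval & XFEATURE_ENABLED_X87) == 0)   /* x87 must stay set: #GP */
                    return (false);
            if ((newval & XFEATURE_ENABLED_AVX) != 0 &&
                (newval & XFEATURE_ENABLED_SSE) == 0)   /* AVX requires SSE: #GP */
                    return (false);
            return (true);
    }

    int
    main(void)
    {
            uint64_t lim = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE |
                XFEATURE_ENABLED_AVX;

            assert(xcr0_valid(lim, lim));
            assert(!xcr0_valid(XFEATURE_ENABLED_SSE, lim));
            assert(!xcr0_valid(XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_AVX, lim));
            return (0);
    }
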
1772 vmx_get_guest_reg(struct vmx_vcpu *vcpu, int ident) in vmx_get_guest_reg() argument
1776 vmxctx = &vcpu->ctx; in vmx_get_guest_reg()
1817 vmx_set_guest_reg(struct vmx_vcpu *vcpu, int ident, uint64_t regval) in vmx_set_guest_reg() argument
1821 vmxctx = &vcpu->ctx; in vmx_set_guest_reg()
1878 vmx_emulate_cr0_access(struct vmx_vcpu *vcpu, uint64_t exitqual) in vmx_emulate_cr0_access() argument
1886 regval = vmx_get_guest_reg(vcpu, (exitqual >> 8) & 0xf); in vmx_emulate_cr0_access()
1916 vmx_emulate_cr4_access(struct vmx_vcpu *vcpu, uint64_t exitqual) in vmx_emulate_cr4_access() argument
1924 regval = vmx_get_guest_reg(vcpu, (exitqual >> 8) & 0xf); in vmx_emulate_cr4_access()
1936 vmx_emulate_cr8_access(struct vmx *vmx, struct vmx_vcpu *vcpu, in vmx_emulate_cr8_access() argument
1948 vlapic = vm_lapic(vcpu->vcpu); in vmx_emulate_cr8_access()
1952 vmx_set_guest_reg(vcpu, regnum, cr8); in vmx_emulate_cr8_access()
1954 cr8 = vmx_get_guest_reg(vcpu, regnum); in vmx_emulate_cr8_access()
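
Note: vmx_emulate_cr8_access() bridges CR8 and the virtual local APIC via vm_lapic(): architecturally, CR8 holds the upper four bits of the APIC TPR. A sketch of that mapping:

    #include <assert.h>
    #include <stdint.h>

    /* CR8 holds TPR bits 7:4; the low nibble reads as zero through
     * CR8 and is cleared when the TPR is written through CR8. */
    static uint64_t
    tpr_to_cr8(uint8_t tpr)
    {
            return (tpr >> 4);
    }

    static uint8_t
    cr8_to_tpr(uint64_t cr8)
    {
            return ((uint8_t)(cr8 << 4));
    }

    int
    main(void)
    {
            assert(tpr_to_cr8(0xa5) == 0xa);
            assert(cr8_to_tpr(0xa) == 0xa0);
            return (0);
    }
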
2010 inout_str_index(struct vmx_vcpu *vcpu, int in) in inout_str_index() argument
2017 error = vmx_getreg(vcpu, reg, &val); in inout_str_index()
2023 inout_str_count(struct vmx_vcpu *vcpu, int rep) in inout_str_count() argument
2029 error = vmx_getreg(vcpu, VM_REG_GUEST_RCX, &val); in inout_str_count()
2056 inout_str_seginfo(struct vmx_vcpu *vcpu, uint32_t inst_info, int in, in inout_str_seginfo() argument
2068 error = vmx_getdesc(vcpu, vis->seg_name, &vis->seg_desc); in inout_str_seginfo()
2157 apic_access_virtualization(struct vmx_vcpu *vcpu) in apic_access_virtualization() argument
2161 proc_ctls2 = vcpu->cap.proc_ctls2; in apic_access_virtualization()
2166 x2apic_virtualization(struct vmx_vcpu *vcpu) in x2apic_virtualization() argument
2170 proc_ctls2 = vcpu->cap.proc_ctls2; in x2apic_virtualization()
2175 vmx_handle_apic_write(struct vmx_vcpu *vcpu, struct vlapic *vlapic, in vmx_handle_apic_write() argument
2185 if (!apic_access_virtualization(vcpu)) { in vmx_handle_apic_write()
2193 if (x2apic_virtualization(vcpu) && in vmx_handle_apic_write()
2243 apic_access_fault(struct vmx_vcpu *vcpu, uint64_t gpa) in apic_access_fault() argument
2246 if (apic_access_virtualization(vcpu) && in apic_access_fault()
2254 vmx_handle_apic_access(struct vmx_vcpu *vcpu, struct vm_exit *vmexit) in vmx_handle_apic_access() argument
2259 if (!apic_access_virtualization(vcpu)) in vmx_handle_apic_access()
2340 emulate_wrmsr(struct vmx_vcpu *vcpu, u_int num, uint64_t val, bool *retu) in emulate_wrmsr() argument
2345 error = lapic_wrmsr(vcpu->vcpu, num, val, retu); in emulate_wrmsr()
2347 error = vmx_wrmsr(vcpu, num, val, retu); in emulate_wrmsr()
2353 emulate_rdmsr(struct vmx_vcpu *vcpu, u_int num, bool *retu) in emulate_rdmsr() argument
2361 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu); in emulate_rdmsr()
2363 error = vmx_rdmsr(vcpu, num, &result, retu); in emulate_rdmsr()
2367 vmxctx = &vcpu->ctx; in emulate_rdmsr()
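
Note: the RDMSR/WRMSR paths above move the 64-bit MSR value split across %edx:%eax; the listing's own expression, (uint64_t)edx << 32 | eax, performs the join on the WRMSR side, and emulate_rdmsr() does the reverse split into the context registers. Verified in isolation:

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
            uint32_t eax = 0x89abcdef, edx = 0x01234567;
            uint64_t val = (uint64_t)edx << 32 | eax;   /* WRMSR join */

            assert(val == 0x0123456789abcdefULL);
            /* RDMSR split back into the context's rax/rdx */
            assert((uint32_t)val == eax);
            assert((uint32_t)(val >> 32) == edx);
            return (0);
    }
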
2380 vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit) in vmx_exit_process() argument
2399 vmxctx = &vcpu->ctx; in vmx_exit_process()
2401 vcpuid = vcpu->vcpuid; in vmx_exit_process()
2408 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1); in vmx_exit_process()
2418 VMX_CTR0(vcpu, "Handling MCE during VM-entry"); in vmx_exit_process()
2439 error = vm_exit_intinfo(vcpu->vcpu, exitintinfo); in vmx_exit_process()
2457 vmx_clear_nmi_blocking(vcpu); in vmx_exit_process()
2459 vmx_assert_nmi_blocking(vcpu); in vmx_exit_process()
2513 VMX_CTR4(vcpu, "task switch reason %d, tss 0x%04x, " in vmx_exit_process()
2519 vmm_stat_incr(vcpu->vcpu, VMEXIT_CR_ACCESS, 1); in vmx_exit_process()
2523 handled = vmx_emulate_cr0_access(vcpu, qual); in vmx_exit_process()
2526 handled = vmx_emulate_cr4_access(vcpu, qual); in vmx_exit_process()
2529 handled = vmx_emulate_cr8_access(vmx, vcpu, qual); in vmx_exit_process()
2534 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1); in vmx_exit_process()
2537 VMX_CTR1(vcpu, "rdmsr 0x%08x", ecx); in vmx_exit_process()
2539 error = emulate_rdmsr(vcpu, ecx, &retu); in vmx_exit_process()
2552 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1); in vmx_exit_process()
2557 VMX_CTR2(vcpu, "wrmsr 0x%08x value 0x%016lx", in vmx_exit_process()
2561 error = emulate_wrmsr(vcpu, ecx, (uint64_t)edx << 32 | eax, in vmx_exit_process()
2576 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1); in vmx_exit_process()
2587 vmm_stat_incr(vcpu->vcpu, VMEXIT_MTRAP, 1); in vmx_exit_process()
2593 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1); in vmx_exit_process()
2598 vmm_stat_incr(vcpu->vcpu, VMEXIT_INTR_WINDOW, 1); in vmx_exit_process()
2600 vmx_clear_int_window_exiting(vcpu); in vmx_exit_process()
2631 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1); in vmx_exit_process()
2636 if (vm_nmi_pending(vcpu->vcpu)) in vmx_exit_process()
2637 vmx_inject_nmi(vcpu); in vmx_exit_process()
2638 vmx_clear_nmi_window_exiting(vcpu); in vmx_exit_process()
2639 vmm_stat_incr(vcpu->vcpu, VMEXIT_NMI_WINDOW, 1); in vmx_exit_process()
2642 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1); in vmx_exit_process()
2657 vis->index = inout_str_index(vcpu, in); in vmx_exit_process()
2658 vis->count = inout_str_count(vcpu, vis->inout.rep); in vmx_exit_process()
2660 inout_str_seginfo(vcpu, inst_info, in, vis); in vmx_exit_process()
2665 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1); in vmx_exit_process()
2667 handled = vmx_handle_cpuid(vcpu, vmxctx); in vmx_exit_process()
2670 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1); in vmx_exit_process()
2690 vmx_restore_nmi_blocking(vcpu); in vmx_exit_process()
2703 VMX_CTR0(vcpu, "Vectoring to MCE handler"); in vmx_exit_process()
2713 (vcpu->cap.set & (1 << VM_CAP_BPT_EXIT))) { in vmx_exit_process()
2741 VMX_CTR2(vcpu, "Reflecting exception %d/%#x into " in vmx_exit_process()
2745 error = vm_inject_exception(vcpu->vcpu, intr_vec, in vmx_exit_process()
2758 if (vm_mem_allocated(vcpu->vcpu, gpa) || in vmx_exit_process()
2759 apic_access_fault(vcpu, gpa)) { in vmx_exit_process()
2764 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1); in vmx_exit_process()
2769 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1); in vmx_exit_process()
2783 vmx_restore_nmi_blocking(vcpu); in vmx_exit_process()
2793 handled = vmx_handle_apic_access(vcpu, vmexit); in vmx_exit_process()
2801 vlapic = vm_lapic(vcpu->vcpu); in vmx_exit_process()
2804 handled = vmx_handle_apic_write(vcpu, vlapic, qual); in vmx_exit_process()
2808 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); in vmx_exit_process()
2819 vlapic = vm_lapic(vcpu->vcpu); in vmx_exit_process()
2845 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1); in vmx_exit_process()
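
Note: vmx_exit_process() is one large switch on the VM-exit reason: each arm bumps a vmm_stat counter (VMEXIT_CPUID, VMEXIT_HLT, ...) and decides whether the exit was consumed in the kernel (handled) or must bubble up to the generic vmm layer and possibly to userland. Its shape, reduced to a compilable skeleton with made-up reason names:

    #include <stdint.h>

    /* Made-up reason names; the real table is the SDM's exit reasons. */
    enum { REASON_CPUID, REASON_HLT, REASON_INOUT, NREASONS };

    static uint64_t stats[NREASONS];    /* vmm_stat_incr() stand-in */

    static int
    exit_process(int reason)
    {
            int handled = 0;

            switch (reason) {
            case REASON_CPUID:
                    stats[REASON_CPUID]++;
                    handled = 1;    /* emulated right here in the kernel */
                    break;
            case REASON_HLT:
                    stats[REASON_HLT]++;
                    handled = 0;    /* bubbles up to the generic vmm layer */
                    break;
            case REASON_INOUT:
                    stats[REASON_INOUT]++;
                    handled = 0;    /* device emulation decides the rest */
                    break;
            }
            return (handled);
    }

    int
    main(void)
    {
            int handled = exit_process(REASON_CPUID);

            exit_process(REASON_HLT);
            return (handled == 1 && stats[REASON_HLT] == 1 ? 0 : 1);
    }
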
2920 vmx_exit_handle_nmi(struct vmx_vcpu *vcpu, struct vm_exit *vmexit) in vmx_exit_handle_nmi() argument
2936 VMX_CTR0(vcpu, "Vectoring to NMI handler"); in vmx_exit_handle_nmi()
3038 struct vmx_vcpu *vcpu; in vmx_run() local
3047 vcpu = vcpui; in vmx_run()
3048 vmx = vcpu->vmx; in vmx_run()
3049 vmcs = vcpu->vmcs; in vmx_run()
3050 vmxctx = &vcpu->ctx; in vmx_run()
3051 vlapic = vm_lapic(vcpu->vcpu); in vmx_run()
3052 vmexit = vm_exitinfo(vcpu->vcpu); in vmx_run()
3058 vmx_msr_guest_enter(vcpu); in vmx_run()
3073 vmx_set_pcpu_defaults(vmx, vcpu, pmap); in vmx_run()
3098 vmx_inject_interrupts(vcpu, vlapic, rip); in vmx_run()
3101 * Check for vcpu suspension after injecting events because in vmx_run()
3102 * vmx_inject_interrupts() can suspend the vcpu due to a in vmx_run()
3107 vm_exit_suspended(vcpu->vcpu, rip); in vmx_run()
3111 if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) { in vmx_run()
3113 vm_exit_rendezvous(vcpu->vcpu, rip); in vmx_run()
3119 vm_exit_reqidle(vcpu->vcpu, rip); in vmx_run()
3123 if (vcpu_should_yield(vcpu->vcpu)) { in vmx_run()
3125 vm_exit_astpending(vcpu->vcpu, rip); in vmx_run()
3126 vmx_astpending_trace(vcpu, rip); in vmx_run()
3131 if (vcpu_debugged(vcpu->vcpu)) { in vmx_run()
3133 vm_exit_debug(vcpu->vcpu, rip); in vmx_run()
3142 if ((vcpu->cap.proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0) { in vmx_run()
3178 vmx_msr_guest_enter_tsc_aux(vmx, vcpu); in vmx_run()
3188 vmx_run_trace(vcpu); in vmx_run()
3193 vmx_msr_guest_exit_tsc_aux(vmx, vcpu); in vmx_run()
3206 vcpu->state.nextrip = rip; in vmx_run()
3209 vmx_exit_handle_nmi(vcpu, vmexit); in vmx_run()
3211 handled = vmx_exit_process(vmx, vcpu, vmexit); in vmx_run()
3217 vmx_exit_trace(vcpu, rip, exit_reason, handled); in vmx_run()
3231 VMX_CTR1(vcpu, "returning from vmx_run: exitcode %d", in vmx_run()
3235 vmx_msr_guest_exit(vcpu); in vmx_run()
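
Note: vmx_run() wraps guest execution in a loop: load guest MSR state, then repeatedly inject pending events, check for suspend/rendezvous/reqidle/AST/debug conditions, enter the guest, and process the resulting exit, leaving the loop only when an exit is not handled in the kernel. The control flow, reduced to a runnable stand-in where run_guest_once() fakes VMLAUNCH/VMRESUME plus exit processing:

    #include <stdbool.h>
    #include <stdio.h>

    enum exitkind { EXIT_HANDLED, EXIT_TO_USER };

    /* Fakes guest entry plus vmx_exit_process(). */
    static enum exitkind
    run_guest_once(int iter)
    {
            return (iter < 3 ? EXIT_HANDLED : EXIT_TO_USER);
    }

    int
    main(void)
    {
            int iter = 0;
            bool handled;

            do {
                    /* inject events; check suspend/rendezvous/AST here */
                    handled = (run_guest_once(iter++) == EXIT_HANDLED);
            } while (handled);
            printf("left the guest after %d exits\n", iter);    /* 4 */
            return (0);
    }
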
3243 struct vmx_vcpu *vcpu = vcpui; in vmx_vcpu_cleanup() local
3245 vpid_free(vcpu->state.vpid); in vmx_vcpu_cleanup()
3246 free(vcpu->pir_desc, M_VMX); in vmx_vcpu_cleanup()
3247 free(vcpu->apic_page, M_VMX); in vmx_vcpu_cleanup()
3248 free(vcpu->vmcs, M_VMX); in vmx_vcpu_cleanup()
3249 free(vcpu, M_VMX); in vmx_vcpu_cleanup()
3344 vmx_get_intr_shadow(struct vmx_vcpu *vcpu, int running, uint64_t *retval) in vmx_get_intr_shadow() argument
3349 error = vmcs_getreg(vcpu->vmcs, running, in vmx_get_intr_shadow()
3356 vmx_modify_intr_shadow(struct vmx_vcpu *vcpu, int running, uint64_t val) in vmx_modify_intr_shadow() argument
3363 * Forcing the vcpu into an interrupt shadow is not supported. in vmx_modify_intr_shadow()
3370 vmcs = vcpu->vmcs; in vmx_modify_intr_shadow()
3378 VMX_CTR2(vcpu, "Setting intr_shadow to %#lx %s", val, in vmx_modify_intr_shadow()
3408 struct vmx_vcpu *vcpu = vcpui; in vmx_getreg() local
3409 struct vmx *vmx = vcpu->vmx; in vmx_getreg()
3411 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_getreg()
3414 vcpu->vcpuid); in vmx_getreg()
3418 return (vmx_get_intr_shadow(vcpu, running, retval)); in vmx_getreg()
3420 *retval = vcpu->guest_msrs[IDX_MSR_KGSBASE]; in vmx_getreg()
3423 *retval = vlapic_get_cr8(vm_lapic(vcpu->vcpu)); in vmx_getreg()
3427 if (vmxctx_getreg(&vcpu->ctx, reg, retval) == 0) in vmx_getreg()
3430 return (vmcs_getreg(vcpu->vmcs, running, reg, retval)); in vmx_getreg()
3439 struct vmx_vcpu *vcpu = vcpui; in vmx_setreg() local
3440 struct vmx *vmx = vcpu->vmx; in vmx_setreg()
3442 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_setreg()
3445 vcpu->vcpuid); in vmx_setreg()
3448 return (vmx_modify_intr_shadow(vcpu, running, val)); in vmx_setreg()
3450 if (vmxctx_setreg(&vcpu->ctx, reg, val) == 0) in vmx_setreg()
3457 error = vmcs_setreg(vcpu->vmcs, running, reg, val); in vmx_setreg()
3467 vmcs_getreg(vcpu->vmcs, running, in vmx_setreg()
3473 vmcs_setreg(vcpu->vmcs, running, in vmx_setreg()
3482 error = vmcs_setreg(vcpu->vmcs, running, in vmx_setreg()
3488 * Invalidate the guest vcpu's TLB mappings to emulate in vmx_setreg()
3494 pmap = vcpu->ctx.pmap; in vmx_setreg()
3495 vmx_invvpid(vmx, vcpu, pmap, running); in vmx_setreg()
3506 struct vmx_vcpu *vcpu = vcpui; in vmx_getdesc() local
3507 struct vmx *vmx = vcpu->vmx; in vmx_getdesc()
3509 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_getdesc()
3512 vcpu->vcpuid); in vmx_getdesc()
3514 return (vmcs_getdesc(vcpu->vmcs, running, reg, desc)); in vmx_getdesc()
3521 struct vmx_vcpu *vcpu = vcpui; in vmx_setdesc() local
3522 struct vmx *vmx = vcpu->vmx; in vmx_setdesc()
3524 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_setdesc()
3527 vcpu->vcpuid); in vmx_setdesc()
3529 return (vmcs_setdesc(vcpu->vmcs, running, reg, desc)); in vmx_setdesc()
3535 struct vmx_vcpu *vcpu = vcpui; in vmx_getcap() local
3541 vcap = vcpu->cap.set; in vmx_getcap()
3589 struct vmx_vcpu *vcpu = vcpui; in vmx_setcap() local
3590 struct vmcs *vmcs = vcpu->vmcs; in vmx_setcap()
3606 pptr = &vcpu->cap.proc_ctls; in vmx_setcap()
3615 pptr = &vcpu->cap.proc_ctls; in vmx_setcap()
3624 pptr = &vcpu->cap.proc_ctls; in vmx_setcap()
3644 pptr = &vcpu->cap.proc_ctls2; in vmx_setcap()
3653 pptr = &vcpu->cap.proc_ctls2; in vmx_setcap()
3663 if (vcpu->cap.exc_bitmap != 0xffffffff) { in vmx_setcap()
3664 pptr = &vcpu->cap.exc_bitmap; in vmx_setcap()
3673 vlapic = vm_lapic(vcpu->vcpu); in vmx_setcap()
3707 vcpu->cap.set |= (1 << type); in vmx_setcap()
3709 vcpu->cap.set &= ~(1 << type); in vmx_setcap()
3730 struct vmx_vcpu *vcpu; member
3765 * modified if the vcpu is running. in vmx_set_intr_ready()
3777 * vCPU is HLTed with a high PPR, a low priority interrupt would cause in vmx_set_intr_ready()
3778 * the 0->1 'pending' transition with a notification, but the vCPU in vmx_set_intr_ready()
3779 * would ignore the interrupt for the time being. The same vCPU would in vmx_set_intr_ready()
3835 vmexit = vm_exitinfo(vlapic->vcpu); in vmx_pending_intr()
3870 * processor priority of this vCPU, ensure that 'pending_prio' does not in vmx_pending_intr()
3901 KASSERT(!vcpu_is_running(vlapic->vcpu, NULL), in vmx_set_tmr()
3902 ("vmx_set_tmr: vcpu cannot be running")); in vmx_set_tmr()
3905 vmcs = vlapic_vtx->vcpu->vmcs; in vmx_set_tmr()
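
Note: the posted-interrupt matches above (the pir_desc member, vmx_set_intr_ready, vmx_pending_intr) implement two-step delivery: set the vector's bit in the 256-bit PIR, then perform a 0->1 transition on the outstanding-notification bit, and send the notification IPI only on that transition; the comments at source lines 3777-3779 discuss a subtlety of exactly this 'pending' transition. A userland model of the atomics, where the layout and names are the model's own, not vmm's:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct pir_desc {
            _Atomic uint64_t pir[4];        /* 256 vector bits */
            _Atomic uint64_t pending;       /* outstanding notification */
    };

    /* Returns true when a notification IPI must be sent, i.e. on the
     * 0->1 transition of 'pending'. */
    static bool
    pir_set_intr_ready(struct pir_desc *p, int vector)
    {
            atomic_fetch_or(&p->pir[vector / 64], 1ULL << (vector % 64));
            return (atomic_exchange(&p->pending, 1) == 0);
    }

    int
    main(void)
    {
            struct pir_desc pd = { 0 };

            /* First vector triggers the IPI; the second piggybacks. */
            return (pir_set_intr_ready(&pd, 0x30) &&
                !pir_set_intr_ready(&pd, 0x31) ? 0 : 1);
    }
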
3922 struct vmx_vcpu *vcpu; in vmx_enable_x2apic_mode_ts() local
3927 vcpu = vlapic_vtx->vcpu; in vmx_enable_x2apic_mode_ts()
3928 vmcs = vcpu->vmcs; in vmx_enable_x2apic_mode_ts()
3930 proc_ctls = vcpu->cap.proc_ctls; in vmx_enable_x2apic_mode_ts()
3934 vcpu->cap.proc_ctls = proc_ctls; in vmx_enable_x2apic_mode_ts()
3946 struct vmx_vcpu *vcpu; in vmx_enable_x2apic_mode_vid() local
3952 vcpu = vlapic_vtx->vcpu; in vmx_enable_x2apic_mode_vid()
3953 vmx = vcpu->vmx; in vmx_enable_x2apic_mode_vid()
3954 vmcs = vcpu->vmcs; in vmx_enable_x2apic_mode_vid()
3956 proc_ctls2 = vcpu->cap.proc_ctls2; in vmx_enable_x2apic_mode_vid()
3962 vcpu->cap.proc_ctls2 = proc_ctls2; in vmx_enable_x2apic_mode_vid()
3979 * once in the context of vcpu 0. in vmx_enable_x2apic_mode_vid()
4092 struct vmx_vcpu *vcpu; in vmx_vlapic_init() local
4096 vcpu = vcpui; in vmx_vlapic_init()
4097 vmx = vcpu->vmx; in vmx_vlapic_init()
4101 vlapic->vcpu = vcpu->vcpu; in vmx_vlapic_init()
4102 vlapic->vcpuid = vcpu->vcpuid; in vmx_vlapic_init()
4103 vlapic->apic_page = (struct LAPIC *)vcpu->apic_page; in vmx_vlapic_init()
4106 vlapic_vtx->pir_desc = vcpu->pir_desc; in vmx_vlapic_init()
4107 vlapic_vtx->vcpu = vcpu; in vmx_vlapic_init()
4143 struct vmx_vcpu *vcpu; in vmx_vcpu_snapshot() local
4148 vcpu = vcpui; in vmx_vcpu_snapshot()
4149 vmx = vcpu->vmx; in vmx_vcpu_snapshot()
4150 vmcs = vcpu->vmcs; in vmx_vcpu_snapshot()
4152 run = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_vcpu_snapshot()
4155 vcpu->vcpuid); in vmx_vcpu_snapshot()
4214 SNAPSHOT_BUF_OR_LEAVE(vcpu->guest_msrs, in vmx_vcpu_snapshot()
4215 sizeof(vcpu->guest_msrs), meta, err, done); in vmx_vcpu_snapshot()
4217 SNAPSHOT_BUF_OR_LEAVE(vcpu->pir_desc, in vmx_vcpu_snapshot()
4218 sizeof(*vcpu->pir_desc), meta, err, done); in vmx_vcpu_snapshot()
4220 SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, in vmx_vcpu_snapshot()
4221 sizeof(vcpu->mtrr), meta, err, done); in vmx_vcpu_snapshot()
4223 vmxctx = &vcpu->ctx; in vmx_vcpu_snapshot()
4253 struct vmx_vcpu *vcpu = vcpui; in vmx_restore_tsc() local
4258 vmx = vcpu->vmx; in vmx_restore_tsc()
4259 vmcs = vcpu->vmcs; in vmx_restore_tsc()
4261 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_restore_tsc()
4264 vcpu->vcpuid); in vmx_restore_tsc()
4271 error = vmx_set_tsc_offset(vcpu, offset); in vmx_restore_tsc()