Lines Matching +full:fault +full:- +full:inject
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
165 "HLT triggers a VM-exit");
169 0, "PAUSE triggers a VM-exit");
173 0, "WBINVD triggers a VM-exit");
210 static int pirvec = -1;
420 return "mce-during-entry"; in exit_reason_to_str()
424 return "apic-access"; in exit_reason_to_str()
446 return "apic-write"; in exit_reason_to_str()
493 * "Virtualizing MSR-Based APIC Accesses". in vmx_allow_x2apic_msrs()
549 if (x == -1) { in vpid_alloc()
560 * It is still sub-optimal because the invvpid will invalidate in vpid_alloc()
694 * - bit 54 indicates support for INS/OUTS decoding in vmx_modinit()
703 /* Check support for primary processor-based VM-execution controls */ in vmx_modinit()
710 "primary processor-based controls\n"); in vmx_modinit()
714 /* Clear the processor-based ctl bits that are set on demand */ in vmx_modinit()
717 /* Check support for secondary processor-based VM-execution controls */ in vmx_modinit()
724 "secondary processor-based controls\n"); in vmx_modinit()
734 /* Check support for pin-based VM-execution controls */ in vmx_modinit()
741 "pin-based controls\n"); in vmx_modinit()
745 /* Check support for VM-exit controls */ in vmx_modinit()
756 /* Check support for VM-entry controls */ in vmx_modinit()
794 * Support a pass-through-based implementation of these via the in vmx_modinit()
795 * "enable RDTSCP" VM-execution control and the "RDTSC exiting" in vmx_modinit()
796 * VM-execution control. in vmx_modinit()
798 * The "enable RDTSCP" VM-execution control applies to both RDPID in vmx_modinit()
800 * Instruction Behavior in VMX Non-root operation"); this is why in vmx_modinit()
801 * only this VM-execution control needs to be enabled in order to in vmx_modinit()
805 * The "RDTSC exiting" VM-execution control applies to both RDTSC in vmx_modinit()
807 * already set up for RDTSC and RDTSCP pass-through by the current in vmx_modinit()
816 * bitmap is currently per-VM rather than per-vCPU while the in vmx_modinit()
818 * per-vCPU basis). in vmx_modinit()
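The probe this comment describes boils down to reading the secondary-controls capability MSR and checking its allowed-1 half for the "enable RDTSCP" bit. A simplified sketch of that check (bhyve's real probe goes through vmx_set_ctlreg(); the direct rdmsr() here is only an illustration, using the constant names from specialreg.h/vmx_controls.h):

    /* Sketch: may the "enable RDTSCP" secondary control be set to 1? */
    static bool
    rdtscp_ctl_available(void)
    {
            uint64_t caps = rdmsr(MSR_VMX_PROCBASED_CTLS2);

            /* The high 32 bits report which control bits may be set. */
            return (((caps >> 32) & PROCBASED2_ENABLE_RDTSCP) != 0);
    }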
966 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation in vmx_modinit()
1006 KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present", in vmx_trigger_hostintr()
1008 KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d " in vmx_trigger_hostintr()
1009 "has invalid type %d", vector, gd->gd_type)); in vmx_trigger_hostintr()
1010 KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d " in vmx_trigger_hostintr()
1011 "has invalid dpl %d", vector, gd->gd_dpl)); in vmx_trigger_hostintr()
1012 KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor " in vmx_trigger_hostintr()
1013 "for vector %d has invalid selector %d", vector, gd->gd_selector)); in vmx_trigger_hostintr()
1014 KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid " in vmx_trigger_hostintr()
1015 "IST %d", vector, gd->gd_ist)); in vmx_trigger_hostintr()
1017 func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset); in vmx_trigger_hostintr()
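The line above rebuilds the 64-bit handler address from the split fields of an amd64 interrupt gate: gd_looffset holds bits 15:0 and gd_hioffset the remaining 48 bits. A tiny standalone round-trip illustration (the bitfield struct below is a stand-in for FreeBSD's struct gate_descriptor):

    #include <assert.h>
    #include <stdint.h>

    struct fake_gate {
            uint64_t looffset : 16;     /* handler address bits 15:0 */
            uint64_t hioffset : 48;     /* handler address bits 63:16 */
    };

    int
    main(void)
    {
            uint64_t func = 0xffffffff80401234ULL;
            struct fake_gate gd = {
                    .looffset = func & 0xffff,
                    .hioffset = func >> 16,
            };

            /* Same reconstruction as vmx_trigger_hostintr(). */
            assert(((uint64_t)gd.hioffset << 16 | gd.looffset) == func);
            return (0);
    }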
1060 vmx->vm = vm; in vmx_init()
1062 vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pmltop)); in vmx_init()
1065 * Clean up EPTP-tagged guest physical and combined mappings in vmx_init()
1073 ept_invalidate_mappings(vmx->eptp); in vmx_init()
1075 vmx->msr_bitmap = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_VMX, in vmx_init()
1077 msr_bitmap_initialize(vmx->msr_bitmap); in vmx_init()
1082 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are in vmx_init()
1083 * always restored from the vmcs host state area on vm-exit. in vmx_init()
1093 * The TSC MSR is exposed read-only. Writes are disallowed as in vmx_init()
1100 * guest RDTSCP support are enabled (since, as per Table 2-2 in SDM in vmx_init()
1103 * exposed read-only so that the VMM can do one fewer MSR read per in vmx_init()
1104 * exit than if this register were exposed read-write; the guest in vmx_init()
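Exposing an MSR read-only, as this comment describes for the TSC, means clearing its read-intercept bit in the 4 KB MSR bitmap while leaving the write-intercept bit set. A sketch for MSRs in the low range 0x0-0x1fff (offsets per the SDM's bitmap layout; the helper name is hypothetical, bhyve goes through msr_bitmap_change_access()):

    /* Sketch: guest reads pass through, guest writes still VM-exit. */
    static void
    msr_expose_read_only(uint8_t *bitmap, u_int msr)
    {
            bitmap[msr / 8] &= ~(1 << (msr % 8));        /* reads: bytes 0-1023 */
            bitmap[2048 + msr / 8] |= 1 << (msr % 8);    /* writes: bytes 2048-3071 */
    }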
1125 vmx->pmap = pmap; in vmx_init()
1142 vcpu->vmx = vmx; in vmx_vcpu_init()
1143 vcpu->vcpu = vcpu1; in vmx_vcpu_init()
1144 vcpu->vcpuid = vcpuid; in vmx_vcpu_init()
1145 vcpu->vmcs = malloc_aligned(sizeof(*vmcs), PAGE_SIZE, M_VMX, in vmx_vcpu_init()
1147 vcpu->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_VMX, in vmx_vcpu_init()
1149 vcpu->pir_desc = malloc_aligned(sizeof(*vcpu->pir_desc), 64, M_VMX, in vmx_vcpu_init()
1152 vmcs = vcpu->vmcs; in vmx_vcpu_init()
1153 vmcs->identifier = vmx_revision(); in vmx_vcpu_init()
1167 error += vmwrite(VMCS_HOST_RSP, (u_long)&vcpu->ctx); in vmx_vcpu_init()
1168 error += vmwrite(VMCS_EPTP, vmx->eptp); in vmx_vcpu_init()
1171 if (vcpu_trap_wbinvd(vcpu->vcpu)) { in vmx_vcpu_init()
1178 error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap)); in vmx_vcpu_init()
1191 if (vcpu_trace_exceptions(vcpu->vcpu)) in vmx_vcpu_init()
1197 vcpu->ctx.guest_dr6 = DBREG_DR6_RESERVED1; in vmx_vcpu_init()
1201 error += vmwrite(VMCS_VIRTUAL_APIC, vtophys(vcpu->apic_page)); in vmx_vcpu_init()
1213 error += vmwrite(VMCS_PIR_DESC, vtophys(vcpu->pir_desc)); in vmx_vcpu_init()
1218 vcpu->cap.set = 0; in vmx_vcpu_init()
1219 vcpu->cap.set |= cap_rdpid != 0 ? 1 << VM_CAP_RDPID : 0; in vmx_vcpu_init()
1220 vcpu->cap.set |= cap_rdtscp != 0 ? 1 << VM_CAP_RDTSCP : 0; in vmx_vcpu_init()
1221 vcpu->cap.proc_ctls = procbased_ctls; in vmx_vcpu_init()
1222 vcpu->cap.proc_ctls2 = procbased_ctls2; in vmx_vcpu_init()
1223 vcpu->cap.exc_bitmap = exc_bitmap; in vmx_vcpu_init()
1225 vcpu->state.nextrip = ~0; in vmx_vcpu_init()
1226 vcpu->state.lastcpu = NOCPU; in vmx_vcpu_init()
1227 vcpu->state.vpid = vpid; in vmx_vcpu_init()
1231 * to the power-on register value from the Intel Sys Arch. in vmx_vcpu_init()
1232 * CR0 - 0x60000010 in vmx_vcpu_init()
1233 * CR4 - 0 in vmx_vcpu_init()
1243 vcpu->ctx.pmap = vmx->pmap; in vmx_vcpu_init()
1253 handled = x86_emulate_cpuid(vcpu->vcpu, (uint64_t *)&vmxctx->guest_rax, in vmx_handle_cpuid()
1254 (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx, in vmx_handle_cpuid()
1255 (uint64_t *)&vmxctx->guest_rdx); in vmx_handle_cpuid()
1292 vmxstate = &vcpu->state; in vmx_invvpid()
1293 if (vmxstate->vpid == 0) in vmx_invvpid()
1303 vmxstate->lastcpu = NOCPU; in vmx_invvpid()
1307 KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside " in vmx_invvpid()
1308 "critical section", __func__, vcpu->vcpuid)); in vmx_invvpid()
1325 if (atomic_load_long(&pmap->pm_eptgen) == vmx->eptgen[curcpu]) { in vmx_invvpid()
1328 invvpid_desc.vpid = vmxstate->vpid; in vmx_invvpid()
1331 vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_DONE, 1); in vmx_invvpid()
1337 * 'vmx->eptp' for all vpids. in vmx_invvpid()
1339 vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_SAVED, 1); in vmx_invvpid()
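The branch above can be restated compactly: a matching EPT generation means no invept is pending on this CPU, so the vCPU's stale VPID-tagged TLB entries must be flushed explicitly; otherwise the pending invept will flush every VPID anyway. A condensed sketch using the descriptor layout from bhyve's vmx.h:

    struct invvpid_desc {
            uint16_t        vpid;
            uint16_t        _res1;
            uint32_t        _res2;
            uint64_t        linear_addr;
    };

    if (atomic_load_long(&pmap->pm_eptgen) == vmx->eptgen[curcpu]) {
            struct invvpid_desc d = { .vpid = vmxstate->vpid };

            /* Flush combined mappings tagged with this vCPU's VPID. */
            invvpid(INVVPID_TYPE_SINGLE_CONTEXT, d);
    } else {
            /* The pending invept will invalidate mappings for all VPIDs. */
    }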
1348 vmxstate = &vcpu->state; in vmx_set_pcpu_defaults()
1349 if (vmxstate->lastcpu == curcpu) in vmx_set_pcpu_defaults()
1352 vmxstate->lastcpu = curcpu; in vmx_set_pcpu_defaults()
1354 vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1); in vmx_set_pcpu_defaults()
1371 if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) { in vmx_set_int_window_exiting()
1372 vcpu->cap.proc_ctls |= PROCBASED_INT_WINDOW_EXITING; in vmx_set_int_window_exiting()
1373 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_set_int_window_exiting()
1382 KASSERT((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0, in vmx_clear_int_window_exiting()
1383 ("intr_window_exiting not set: %#x", vcpu->cap.proc_ctls)); in vmx_clear_int_window_exiting()
1384 vcpu->cap.proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING; in vmx_clear_int_window_exiting()
1385 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_clear_int_window_exiting()
1393 if ((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) { in vmx_set_nmi_window_exiting()
1394 vcpu->cap.proc_ctls |= PROCBASED_NMI_WINDOW_EXITING; in vmx_set_nmi_window_exiting()
1395 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_set_nmi_window_exiting()
1404 KASSERT((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0, in vmx_clear_nmi_window_exiting()
1405 ("nmi_window_exiting not set %#x", vcpu->cap.proc_ctls)); in vmx_clear_nmi_window_exiting()
1406 vcpu->cap.proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING; in vmx_clear_nmi_window_exiting()
1407 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_clear_nmi_window_exiting()
1416 if ((vcpu->cap.proc_ctls & PROCBASED_TSC_OFFSET) == 0) { in vmx_set_tsc_offset()
1417 vcpu->cap.proc_ctls |= PROCBASED_TSC_OFFSET; in vmx_set_tsc_offset()
1418 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_set_tsc_offset()
1425 vm_set_tsc_offset(vcpu->vcpu, offset); in vmx_set_tsc_offset()
1442 "interruptibility-state %#x", gi)); in vmx_inject_nmi()
1446 "VM-entry interruption information %#x", info)); in vmx_inject_nmi()
1449 * Inject the virtual NMI. The vector must be the NMI IDT entry in vmx_inject_nmi()
1458 vm_nmi_clear(vcpu->vcpu); in vmx_inject_nmi()
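Injecting the virtual NMI comes down to one VMCS write: the VM-entry interruption-information field packs the vector (2 for NMI), the event type, and a valid bit. A sketch of the value composed here, with the field encodings from vmcs.h:

    uint32_t info;

    info = IDT_NMI                  /* vector 2 */
        | VMCS_INTR_T_NMI           /* event type: NMI */
        | VMCS_INTR_VALID;          /* bit 31: deliver on next VM-entry */
    vmcs_write(VMCS_ENTRY_INTR_INFO, info);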
1469 if (vcpu->cap.set & (1 << VM_CAP_MASK_HWINTR)) { in vmx_inject_interrupts()
1473 if (vcpu->state.nextrip != guestrip) { in vmx_inject_interrupts()
1478 vcpu->state.nextrip, guestrip); in vmx_inject_interrupts()
1484 if (vm_entry_intinfo(vcpu->vcpu, &entryinfo)) { in vmx_inject_interrupts()
1489 KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject " in vmx_inject_interrupts()
1496 * VT-x requires #BP and #OF to be injected as software in vmx_inject_interrupts()
1509 if (vm_nmi_pending(vcpu->vcpu)) { in vmx_inject_interrupts()
1512		 * inject it directly here; otherwise enable "NMI window	  in vmx_inject_interrupts()
1513 * exiting" to inject it as soon as we can. in vmx_inject_interrupts()
1529 VMX_CTR1(vcpu, "Cannot inject NMI " in vmx_inject_interrupts()
1530 "due to VM-entry intr info %#x", info); in vmx_inject_interrupts()
1533 VMX_CTR1(vcpu, "Cannot inject NMI due to " in vmx_inject_interrupts()
1534 "Guest Interruptibility-state %#x", gi); in vmx_inject_interrupts()
1541 extint_pending = vm_extint_pending(vcpu->vcpu); in vmx_inject_interrupts()
1549 * If interrupt-window exiting is already in effect then don't bother in vmx_inject_interrupts()
1553 if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) { in vmx_inject_interrupts()
1560 /* Ask the local apic for a vector to inject */ in vmx_inject_interrupts()
1567 * - maskable interrupt vectors [16,255] can be delivered in vmx_inject_interrupts()
1573 /* Ask the legacy pic for a vector to inject */ in vmx_inject_interrupts()
1574 vatpic_pending_intr(vcpu->vmx->vm, &vector); in vmx_inject_interrupts()
1579 * - maskable interrupt vectors [0,255] can be delivered in vmx_inject_interrupts()
1589 VMX_CTR2(vcpu, "Cannot inject vector %d due to " in vmx_inject_interrupts()
1596 VMX_CTR2(vcpu, "Cannot inject vector %d due to " in vmx_inject_interrupts()
1597 "Guest Interruptibility-state %#x", vector, gi); in vmx_inject_interrupts()
1605 * - A vectoring VM-entry was aborted due to astpending in vmx_inject_interrupts()
1606 * - A VM-exit happened during event injection. in vmx_inject_interrupts()
1607 * - An exception was injected above. in vmx_inject_interrupts()
1608 * - An NMI was injected above or after "NMI window exiting" in vmx_inject_interrupts()
1610 VMX_CTR2(vcpu, "Cannot inject vector %d due to " in vmx_inject_interrupts()
1611 "VM-entry intr info %#x", vector, info); in vmx_inject_interrupts()
1615 /* Inject the interrupt */ in vmx_inject_interrupts()
1624 vm_extint_clear(vcpu->vcpu); in vmx_inject_interrupts()
1625 vatpic_intr_accepted(vcpu->vmx->vm, vector); in vmx_inject_interrupts()
1631 * we can inject that one too. in vmx_inject_interrupts()
1633 * Also, interrupt window exiting allows us to inject any in vmx_inject_interrupts()
1647 * Set the Interrupt Window Exiting execution control so we can inject in vmx_inject_interrupts()
1655 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
1656 * the VMCS. An IRET instruction in VMX non-root operation will remove any
1657 * virtual-NMI blocking.
1659 * This unblocking occurs even if the IRET causes a fault. In this case the
1660 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
1667 VMX_CTR0(vcpu, "Restore Virtual-NMI blocking"); in vmx_restore_nmi_blocking()
1678 VMX_CTR0(vcpu, "Clear Virtual-NMI blocking"); in vmx_clear_nmi_blocking()
1702 vmxctx = &vcpu->ctx; in vmx_emulate_xsetbv()
1706		 * Note that the processor raises a #GP fault on its own if	  in vmx_emulate_xsetbv()
1708 * emulate that fault here. in vmx_emulate_xsetbv()
1712 if (vmxctx->guest_rcx != 0) { in vmx_emulate_xsetbv()
1713 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1718 if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) { in vmx_emulate_xsetbv()
1719 vm_inject_ud(vcpu->vcpu); in vmx_emulate_xsetbv()
1723 xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff); in vmx_emulate_xsetbv()
1724 if ((xcrval & ~limits->xcr0_allowed) != 0) { in vmx_emulate_xsetbv()
1725 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1730 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1737 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1748 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1758 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
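Pulled together, the XSETBV checks visible above validate the operand in a fixed order before touching %xcr0. A condensed restatement (the real code returns after each injection; the error paths are merged here purely for brevity):

    xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);

    if (vmxctx->guest_rcx != 0 ||                  /* only XCR0 is defined */
        (xcrval & ~limits->xcr0_allowed) != 0 ||   /* unsupported feature bits */
        (xcrval & XFEATURE_ENABLED_X87) == 0)      /* x87 state may not be cleared */
            vm_inject_gp(vcpu->vcpu);              /* #GP(0), as on real hardware */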
1776 vmxctx = &vcpu->ctx; in vmx_get_guest_reg()
1780 return (vmxctx->guest_rax); in vmx_get_guest_reg()
1782 return (vmxctx->guest_rcx); in vmx_get_guest_reg()
1784 return (vmxctx->guest_rdx); in vmx_get_guest_reg()
1786 return (vmxctx->guest_rbx); in vmx_get_guest_reg()
1790 return (vmxctx->guest_rbp); in vmx_get_guest_reg()
1792 return (vmxctx->guest_rsi); in vmx_get_guest_reg()
1794 return (vmxctx->guest_rdi); in vmx_get_guest_reg()
1796 return (vmxctx->guest_r8); in vmx_get_guest_reg()
1798 return (vmxctx->guest_r9); in vmx_get_guest_reg()
1800 return (vmxctx->guest_r10); in vmx_get_guest_reg()
1802 return (vmxctx->guest_r11); in vmx_get_guest_reg()
1804 return (vmxctx->guest_r12); in vmx_get_guest_reg()
1806 return (vmxctx->guest_r13); in vmx_get_guest_reg()
1808 return (vmxctx->guest_r14); in vmx_get_guest_reg()
1810 return (vmxctx->guest_r15); in vmx_get_guest_reg()
1821 vmxctx = &vcpu->ctx; in vmx_set_guest_reg()
1825 vmxctx->guest_rax = regval; in vmx_set_guest_reg()
1828 vmxctx->guest_rcx = regval; in vmx_set_guest_reg()
1831 vmxctx->guest_rdx = regval; in vmx_set_guest_reg()
1834 vmxctx->guest_rbx = regval; in vmx_set_guest_reg()
1840 vmxctx->guest_rbp = regval; in vmx_set_guest_reg()
1843 vmxctx->guest_rsi = regval; in vmx_set_guest_reg()
1846 vmxctx->guest_rdi = regval; in vmx_set_guest_reg()
1849 vmxctx->guest_r8 = regval; in vmx_set_guest_reg()
1852 vmxctx->guest_r9 = regval; in vmx_set_guest_reg()
1855 vmxctx->guest_r10 = regval; in vmx_set_guest_reg()
1858 vmxctx->guest_r11 = regval; in vmx_set_guest_reg()
1861 vmxctx->guest_r12 = regval; in vmx_set_guest_reg()
1864 vmxctx->guest_r13 = regval; in vmx_set_guest_reg()
1867 vmxctx->guest_r14 = regval; in vmx_set_guest_reg()
1870 vmxctx->guest_r15 = regval; in vmx_set_guest_reg()
1899 * the "IA-32e mode guest" bit in VM-entry control must be in vmx_emulate_cr0_access()
1948 vlapic = vm_lapic(vcpu->vcpu); in vmx_emulate_cr8_access()
2062 vis->seg_name = VM_REG_GUEST_ES; in inout_str_seginfo()
2065 vis->seg_name = vm_segment_name(s); in inout_str_seginfo()
2068 error = vmx_getdesc(vcpu, vis->seg_name, &vis->seg_desc); in inout_str_seginfo()
2075 paging->cr3 = vmcs_guest_cr3(); in vmx_paging_info()
2076 paging->cpl = vmx_cpl(); in vmx_paging_info()
2077 paging->cpu_mode = vmx_cpu_mode(); in vmx_paging_info()
2078 paging->paging_mode = vmx_paging_mode(); in vmx_paging_info()
2087 paging = &vmexit->u.inst_emul.paging; in vmexit_inst_emul()
2089 vmexit->exitcode = VM_EXITCODE_INST_EMUL; in vmexit_inst_emul()
2090 vmexit->inst_length = 0; in vmexit_inst_emul()
2091 vmexit->u.inst_emul.gpa = gpa; in vmexit_inst_emul()
2092 vmexit->u.inst_emul.gla = gla; in vmexit_inst_emul()
2094 switch (paging->cpu_mode) { in vmexit_inst_emul()
2096 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); in vmexit_inst_emul()
2097 vmexit->u.inst_emul.cs_d = 0; in vmexit_inst_emul()
2101 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); in vmexit_inst_emul()
2103 vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar); in vmexit_inst_emul()
2106 vmexit->u.inst_emul.cs_base = 0; in vmexit_inst_emul()
2107 vmexit->u.inst_emul.cs_d = 0; in vmexit_inst_emul()
2110 vie_init(&vmexit->u.inst_emul.vie, NULL, 0); in vmexit_inst_emul()
2133 /* EPT fault on an instruction fetch doesn't make sense here */ in ept_emulation_fault()
2137 /* EPT fault must be a read fault or a write fault */ in ept_emulation_fault()
2145 * guest-physical address that is a translation of a guest-linear in ept_emulation_fault()
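The tests this function performs decode the EPT-violation exit qualification, whose low bits identify the access type. A sketch of the predicate with the bit positions from the SDM (the real code also consults the guest-linear-address-valid bits mentioned in the comment above):

    static bool
    ept_fault_is_emulatable(uint64_t qual)
    {
            if (qual & (1UL << 2))          /* bit 2: instruction fetch */
                    return (false);
            if ((qual & 0x3) == 0)          /* bits 0-1: data read/write */
                    return (false);
            return (true);
    }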
2161 proc_ctls2 = vcpu->cap.proc_ctls2; in apic_access_virtualization()
2170 proc_ctls2 = vcpu->cap.proc_ctls2; in x2apic_virtualization()
2187 * In general there should not be any APIC write VM-exits in vmx_handle_apic_write()
2188 * unless APIC-access virtualization is enabled. in vmx_handle_apic_write()
2190 * However self-IPI virtualization can legitimately trigger in vmx_handle_apic_write()
2191 * an APIC-write VM-exit so treat it specially. in vmx_handle_apic_write()
2195 apic_regs = (uint32_t *)(vlapic->apic_page); in vmx_handle_apic_write()
2262 qual = vmexit->u.vmx.exit_qualification; in vmx_handle_apic_access()
2309 * Regardless of whether the APIC-access is allowed this handler in vmx_handle_apic_access()
2311 * - if the access is allowed then it is handled by emulating the in vmx_handle_apic_access()
2312 * instruction that caused the VM-exit (outside the critical section) in vmx_handle_apic_access()
2313 * - if the access is not allowed then it will be converted to an in vmx_handle_apic_access()
2345 error = lapic_wrmsr(vcpu->vcpu, num, val, retu); in emulate_wrmsr()
2361 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu); in emulate_rdmsr()
2367 vmxctx = &vcpu->ctx; in emulate_rdmsr()
2399 vmxctx = &vcpu->ctx; in vmx_exit_process()
2401 vcpuid = vcpu->vcpuid; in vmx_exit_process()
2404 qual = vmexit->u.vmx.exit_qualification; in vmx_exit_process()
2405 reason = vmexit->u.vmx.exit_reason; in vmx_exit_process()
2406 vmexit->exitcode = VM_EXITCODE_BOGUS; in vmx_exit_process()
2408 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1); in vmx_exit_process()
2412 * VM-entry failures during or after loading guest state. in vmx_exit_process()
2414 * These VM-exits are uncommon but must be handled specially in vmx_exit_process()
2415 * as most VM-exit fields are not populated as usual. in vmx_exit_process()
2418 VMX_CTR0(vcpu, "Handling MCE during VM-entry"); in vmx_exit_process()
2425 * be handled specially by re-injecting the event if the IDT in vmx_exit_process()
2439 error = vm_exit_intinfo(vcpu->vcpu, exitintinfo); in vmx_exit_process()
2444 * If 'virtual NMIs' are being used and the VM-exit in vmx_exit_process()
2446 * VM-entry, then clear "blocking by NMI" in the in vmx_exit_process()
2447 * Guest Interruptibility-State so the NMI can be in vmx_exit_process()
2448 * reinjected on the subsequent VM-entry. in vmx_exit_process()
2463 * Update VM-entry instruction length if the event being in vmx_exit_process()
2469 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); in vmx_exit_process()
2475 ts = &vmexit->u.task_switch; in vmx_exit_process()
2476 ts->tsssel = qual & 0xffff; in vmx_exit_process()
2477 ts->reason = vmx_task_switch_reason(qual); in vmx_exit_process()
2478 ts->ext = 0; in vmx_exit_process()
2479 ts->errcode_valid = 0; in vmx_exit_process()
2480 vmx_paging_info(&ts->paging); in vmx_exit_process()
2494 if (ts->reason == TSR_IDT_GATE) { in vmx_exit_process()
2503 ts->ext = 1; in vmx_exit_process()
2504 vmexit->inst_length = 0; in vmx_exit_process()
2506 ts->errcode_valid = 1; in vmx_exit_process()
2507 ts->errcode = vmcs_idt_vectoring_err(); in vmx_exit_process()
2511 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; in vmx_exit_process()
2514 "%s errcode 0x%016lx", ts->reason, ts->tsssel, in vmx_exit_process()
2515 ts->ext ? "external" : "internal", in vmx_exit_process()
2516 ((uint64_t)ts->errcode << 32) | ts->errcode_valid); in vmx_exit_process()
2519 vmm_stat_incr(vcpu->vcpu, VMEXIT_CR_ACCESS, 1); in vmx_exit_process()
2534 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1); in vmx_exit_process()
2536 ecx = vmxctx->guest_rcx; in vmx_exit_process()
2541 vmexit->exitcode = VM_EXITCODE_RDMSR; in vmx_exit_process()
2542 vmexit->u.msr.code = ecx; in vmx_exit_process()
2547 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, in vmx_exit_process()
2552 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1); in vmx_exit_process()
2554 eax = vmxctx->guest_rax; in vmx_exit_process()
2555 ecx = vmxctx->guest_rcx; in vmx_exit_process()
2556 edx = vmxctx->guest_rdx; in vmx_exit_process()
2564 vmexit->exitcode = VM_EXITCODE_WRMSR; in vmx_exit_process()
2565 vmexit->u.msr.code = ecx; in vmx_exit_process()
2566 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax; in vmx_exit_process()
2571 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, in vmx_exit_process()
2576 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1); in vmx_exit_process()
2578 vmexit->exitcode = VM_EXITCODE_HLT; in vmx_exit_process()
2579 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); in vmx_exit_process()
2581 vmexit->u.hlt.intr_status = in vmx_exit_process()
2584 vmexit->u.hlt.intr_status = 0; in vmx_exit_process()
2587 vmm_stat_incr(vcpu->vcpu, VMEXIT_MTRAP, 1); in vmx_exit_process()
2589 vmexit->exitcode = VM_EXITCODE_MTRAP; in vmx_exit_process()
2590 vmexit->inst_length = 0; in vmx_exit_process()
2593 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1); in vmx_exit_process()
2595 vmexit->exitcode = VM_EXITCODE_PAUSE; in vmx_exit_process()
2598 vmm_stat_incr(vcpu->vcpu, VMEXIT_INTR_WINDOW, 1); in vmx_exit_process()
2609 * host interrupt handler in the VM's softc. We will inject in vmx_exit_process()
2629 * VM-exit but not increment the instruction pointer. in vmx_exit_process()
2631 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1); in vmx_exit_process()
2636 if (vm_nmi_pending(vcpu->vcpu)) in vmx_exit_process()
2639 vmm_stat_incr(vcpu->vcpu, VMEXIT_NMI_WINDOW, 1); in vmx_exit_process()
2642 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1); in vmx_exit_process()
2643 vmexit->exitcode = VM_EXITCODE_INOUT; in vmx_exit_process()
2644 vmexit->u.inout.bytes = (qual & 0x7) + 1; in vmx_exit_process()
2645 vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0; in vmx_exit_process()
2646 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0; in vmx_exit_process()
2647 vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0; in vmx_exit_process()
2648 vmexit->u.inout.port = (uint16_t)(qual >> 16); in vmx_exit_process()
2649 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax); in vmx_exit_process()
2650 if (vmexit->u.inout.string) { in vmx_exit_process()
2652 vmexit->exitcode = VM_EXITCODE_INOUT_STR; in vmx_exit_process()
2653 vis = &vmexit->u.inout_str; in vmx_exit_process()
2654 vmx_paging_info(&vis->paging); in vmx_exit_process()
2655 vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS); in vmx_exit_process()
2656 vis->cr0 = vmcs_read(VMCS_GUEST_CR0); in vmx_exit_process()
2657 vis->index = inout_str_index(vcpu, in); in vmx_exit_process()
2658 vis->count = inout_str_count(vcpu, vis->inout.rep); in vmx_exit_process()
2659 vis->addrsize = inout_str_addrsize(inst_info); in vmx_exit_process()
2665 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1); in vmx_exit_process()
2670 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1); in vmx_exit_process()
2679 * If Virtual NMIs control is 1 and the VM-exit is due to a in vmx_exit_process()
2680 * fault encountered during the execution of IRET then we must in vmx_exit_process()
2681 * restore the state of "virtual-NMI blocking" before resuming in vmx_exit_process()
2713 (vcpu->cap.set & (1 << VM_CAP_BPT_EXIT))) { in vmx_exit_process()
2714 vmexit->exitcode = VM_EXITCODE_BPT; in vmx_exit_process()
2715 vmexit->u.bpt.inst_length = vmexit->inst_length; in vmx_exit_process()
2716 vmexit->inst_length = 0; in vmx_exit_process()
2727 * Software exceptions exhibit trap-like behavior. This in in vmx_exit_process()
2728 * turn requires populating the VM-entry instruction length in vmx_exit_process()
2733 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); in vmx_exit_process()
2745 error = vm_inject_exception(vcpu->vcpu, intr_vec, in vmx_exit_process()
2754 * memory then this must be a nested page fault otherwise in vmx_exit_process()
2758 if (vm_mem_allocated(vcpu->vcpu, gpa) || in vmx_exit_process()
2760 vmexit->exitcode = VM_EXITCODE_PAGING; in vmx_exit_process()
2761 vmexit->inst_length = 0; in vmx_exit_process()
2762 vmexit->u.paging.gpa = gpa; in vmx_exit_process()
2763 vmexit->u.paging.fault_type = ept_fault_type(qual); in vmx_exit_process()
2764 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1); in vmx_exit_process()
2769 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1); in vmx_exit_process()
2774 * If Virtual NMIs control is 1 and the VM-exit is due to an in vmx_exit_process()
2775 * EPT fault during the execution of IRET then we must restore in vmx_exit_process()
2776 * the state of "virtual-NMI blocking" before resuming. in vmx_exit_process()
2786 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; in vmx_exit_process()
2787 vmexit->u.ioapic_eoi.vector = qual & 0xFF; in vmx_exit_process()
2789 vmexit->inst_length = 0; /* trap-like */ in vmx_exit_process()
2797 * APIC-write VM exit is trap-like so the %rip is already in vmx_exit_process()
2800 vmexit->inst_length = 0; in vmx_exit_process()
2801 vlapic = vm_lapic(vcpu->vcpu); in vmx_exit_process()
2812 vmexit->exitcode = VM_EXITCODE_MONITOR; in vmx_exit_process()
2816 vmexit->exitcode = VM_EXITCODE_MWAIT; in vmx_exit_process()
2819 vlapic = vm_lapic(vcpu->vcpu); in vmx_exit_process()
2821 vmexit->inst_length = 0; in vmx_exit_process()
2835 vmexit->exitcode = VM_EXITCODE_VMINSN; in vmx_exit_process()
2845 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1); in vmx_exit_process()
2860 vmexit->rip += vmexit->inst_length; in vmx_exit_process()
2861 vmexit->inst_length = 0; in vmx_exit_process()
2862 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); in vmx_exit_process()
2864 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { in vmx_exit_process()
2869 vmexit->exitcode = VM_EXITCODE_VMX; in vmx_exit_process()
2870 vmexit->u.vmx.status = VM_SUCCESS; in vmx_exit_process()
2871 vmexit->u.vmx.inst_type = 0; in vmx_exit_process()
2872 vmexit->u.vmx.inst_error = 0; in vmx_exit_process()
2890 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, in vmx_exit_inst_error()
2892 vmxctx->inst_fail_status)); in vmx_exit_inst_error()
2894 vmexit->inst_length = 0; in vmx_exit_inst_error()
2895 vmexit->exitcode = VM_EXITCODE_VMX; in vmx_exit_inst_error()
2896 vmexit->u.vmx.status = vmxctx->inst_fail_status; in vmx_exit_inst_error()
2897 vmexit->u.vmx.inst_error = vmcs_instruction_error(); in vmx_exit_inst_error()
2898 vmexit->u.vmx.exit_reason = ~0; in vmx_exit_inst_error()
2899 vmexit->u.vmx.exit_qualification = ~0; in vmx_exit_inst_error()
2904 vmexit->u.vmx.inst_type = rc; in vmx_exit_inst_error()
2912 * If the NMI-exiting VM execution control is set to '1' then an NMI in
2913 * non-root operation causes a VM-exit. NMI blocking is in effect so it is
2926 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION) in vmx_exit_handle_nmi()
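When the exit really was caused by a host NMI, bhyve re-raises it so the normal host IDT handler runs while hardware NMI blocking is still in effect. A condensed sketch of the tail of this function:

    intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
    if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
            /* Re-deliver the NMI through the host IDT entry. */
            __asm __volatile("int $2");
    }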
2947 vmxctx->host_dr7 = rdr7(); in vmx_dr_enter_guest()
2948 vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); in vmx_dr_enter_guest()
2965 vmxctx->host_tf = rflags & PSL_T; in vmx_dr_enter_guest()
2969 vmxctx->host_dr0 = rdr0(); in vmx_dr_enter_guest()
2970 vmxctx->host_dr1 = rdr1(); in vmx_dr_enter_guest()
2971 vmxctx->host_dr2 = rdr2(); in vmx_dr_enter_guest()
2972 vmxctx->host_dr3 = rdr3(); in vmx_dr_enter_guest()
2973 vmxctx->host_dr6 = rdr6(); in vmx_dr_enter_guest()
2976 load_dr0(vmxctx->guest_dr0); in vmx_dr_enter_guest()
2977 load_dr1(vmxctx->guest_dr1); in vmx_dr_enter_guest()
2978 load_dr2(vmxctx->guest_dr2); in vmx_dr_enter_guest()
2979 load_dr3(vmxctx->guest_dr3); in vmx_dr_enter_guest()
2980 load_dr6(vmxctx->guest_dr6); in vmx_dr_enter_guest()
2988 vmxctx->guest_dr0 = rdr0(); in vmx_dr_leave_guest()
2989 vmxctx->guest_dr1 = rdr1(); in vmx_dr_leave_guest()
2990 vmxctx->guest_dr2 = rdr2(); in vmx_dr_leave_guest()
2991 vmxctx->guest_dr3 = rdr3(); in vmx_dr_leave_guest()
2992 vmxctx->guest_dr6 = rdr6(); in vmx_dr_leave_guest()
2998 load_dr0(vmxctx->host_dr0); in vmx_dr_leave_guest()
2999 load_dr1(vmxctx->host_dr1); in vmx_dr_leave_guest()
3000 load_dr2(vmxctx->host_dr2); in vmx_dr_leave_guest()
3001 load_dr3(vmxctx->host_dr3); in vmx_dr_leave_guest()
3002 load_dr6(vmxctx->host_dr6); in vmx_dr_leave_guest()
3003 wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl); in vmx_dr_leave_guest()
3004 load_dr7(vmxctx->host_dr7); in vmx_dr_leave_guest()
3005 write_rflags(read_rflags() | vmxctx->host_tf); in vmx_dr_leave_guest()
3016 CPU_SET_ATOMIC(cpu, &pmap->pm_active); in vmx_pmap_activate()
3017 smr_enter(pmap->pm_eptsmr); in vmx_pmap_activate()
3018 eptgen = atomic_load_long(&pmap->pm_eptgen); in vmx_pmap_activate()
3019 if (eptgen != vmx->eptgen[cpu]) { in vmx_pmap_activate()
3020 vmx->eptgen[cpu] = eptgen; in vmx_pmap_activate()
3022 (struct invept_desc){ .eptp = vmx->eptp, ._res = 0 }); in vmx_pmap_activate()
3029 smr_exit(pmap->pm_eptsmr); in vmx_pmap_deactivate()
3030 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); in vmx_pmap_deactivate()
3048 vmx = vcpu->vmx; in vmx_run()
3049 vmcs = vcpu->vmcs; in vmx_run()
3050 vmxctx = &vcpu->ctx; in vmx_run()
3051 vlapic = vm_lapic(vcpu->vcpu); in vmx_run()
3052 vmexit = vm_exitinfo(vcpu->vcpu); in vmx_run()
3055 KASSERT(vmxctx->pmap == pmap, in vmx_run()
3056 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap)); in vmx_run()
3103 * triple fault. in vmx_run()
3107 vm_exit_suspended(vcpu->vcpu, rip); in vmx_run()
3111 if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) { in vmx_run()
3113 vm_exit_rendezvous(vcpu->vcpu, rip); in vmx_run()
3119 vm_exit_reqidle(vcpu->vcpu, rip); in vmx_run()
3123 if (vcpu_should_yield(vcpu->vcpu)) { in vmx_run()
3125 vm_exit_astpending(vcpu->vcpu, rip); in vmx_run()
3131 if (vcpu_debugged(vcpu->vcpu)) { in vmx_run()
3133 vm_exit_debug(vcpu->vcpu, rip); in vmx_run()
3142 if ((vcpu->cap.proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0) { in vmx_run()
3184 * EPTP-tagged TLB entries if required. in vmx_run()
3200 vmexit->rip = rip = vmcs_guest_rip(); in vmx_run()
3201 vmexit->inst_length = vmexit_instruction_length(); in vmx_run()
3202 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason(); in vmx_run()
3203 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification(); in vmx_run()
3206 vcpu->state.nextrip = rip; in vmx_run()
3218 rip = vmexit->rip; in vmx_run()
3225 if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) || in vmx_run()
3226 (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) { in vmx_run()
3228 handled, vmexit->exitcode); in vmx_run()
3232 vmexit->exitcode); in vmx_run()
3245 vpid_free(vcpu->state.vpid); in vmx_vcpu_cleanup()
3246 free(vcpu->pir_desc, M_VMX); in vmx_vcpu_cleanup()
3247 free(vcpu->apic_page, M_VMX); in vmx_vcpu_cleanup()
3248 free(vcpu->vmcs, M_VMX); in vmx_vcpu_cleanup()
3258 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); in vmx_cleanup()
3260 free(vmx->msr_bitmap, M_VMX); in vmx_cleanup()
3272 return (&vmxctx->guest_rax); in vmxctx_regptr()
3274 return (&vmxctx->guest_rbx); in vmxctx_regptr()
3276 return (&vmxctx->guest_rcx); in vmxctx_regptr()
3278 return (&vmxctx->guest_rdx); in vmxctx_regptr()
3280 return (&vmxctx->guest_rsi); in vmxctx_regptr()
3282 return (&vmxctx->guest_rdi); in vmxctx_regptr()
3284 return (&vmxctx->guest_rbp); in vmxctx_regptr()
3286 return (&vmxctx->guest_r8); in vmxctx_regptr()
3288 return (&vmxctx->guest_r9); in vmxctx_regptr()
3290 return (&vmxctx->guest_r10); in vmxctx_regptr()
3292 return (&vmxctx->guest_r11); in vmxctx_regptr()
3294 return (&vmxctx->guest_r12); in vmxctx_regptr()
3296 return (&vmxctx->guest_r13); in vmxctx_regptr()
3298 return (&vmxctx->guest_r14); in vmxctx_regptr()
3300 return (&vmxctx->guest_r15); in vmxctx_regptr()
3302 return (&vmxctx->guest_cr2); in vmxctx_regptr()
3304 return (&vmxctx->guest_dr0); in vmxctx_regptr()
3306 return (&vmxctx->guest_dr1); in vmxctx_regptr()
3308 return (&vmxctx->guest_dr2); in vmxctx_regptr()
3310 return (&vmxctx->guest_dr3); in vmxctx_regptr()
3312 return (&vmxctx->guest_dr6); in vmxctx_regptr()
3349 error = vmcs_getreg(vcpu->vmcs, running, in vmx_get_intr_shadow()
3370 vmcs = vcpu->vmcs; in vmx_modify_intr_shadow()
3388 shreg = -1; in vmx_shadow_reg()
3409 struct vmx *vmx = vcpu->vmx; in vmx_getreg()
3411 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_getreg()
3413 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), in vmx_getreg()
3414 vcpu->vcpuid); in vmx_getreg()
3420 *retval = vcpu->guest_msrs[IDX_MSR_KGSBASE]; in vmx_getreg()
3423 *retval = vlapic_get_cr8(vm_lapic(vcpu->vcpu)); in vmx_getreg()
3427 if (vmxctx_getreg(&vcpu->ctx, reg, retval) == 0) in vmx_getreg()
3430 return (vmcs_getreg(vcpu->vmcs, running, reg, retval)); in vmx_getreg()
3440 struct vmx *vmx = vcpu->vmx; in vmx_setreg()
3442 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_setreg()
3444 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), in vmx_setreg()
3445 vcpu->vcpuid); in vmx_setreg()
3450 if (vmxctx_setreg(&vcpu->ctx, reg, val) == 0) in vmx_setreg()
3457 error = vmcs_setreg(vcpu->vmcs, running, reg, val); in vmx_setreg()
3461 * If the "load EFER" VM-entry control is 1 then the in vmx_setreg()
3462 * value of EFER.LMA must be identical to "IA-32e mode guest" in vmx_setreg()
3463 * bit in the VM-entry control. in vmx_setreg()
3467 vmcs_getreg(vcpu->vmcs, running, in vmx_setreg()
3473 vmcs_setreg(vcpu->vmcs, running, in vmx_setreg()
3482 error = vmcs_setreg(vcpu->vmcs, running, in vmx_setreg()
3494 pmap = vcpu->ctx.pmap; in vmx_setreg()
3507 struct vmx *vmx = vcpu->vmx; in vmx_getdesc()
3509 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_getdesc()
3511 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), in vmx_getdesc()
3512 vcpu->vcpuid); in vmx_getdesc()
3514 return (vmcs_getdesc(vcpu->vmcs, running, reg, desc)); in vmx_getdesc()
3522 struct vmx *vmx = vcpu->vmx; in vmx_setdesc()
3524 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_setdesc()
3526 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), in vmx_setdesc()
3527 vcpu->vcpuid); in vmx_setdesc()
3529 return (vmcs_setdesc(vcpu->vmcs, running, reg, desc)); in vmx_setdesc()
3541 vcap = vcpu->cap.set; in vmx_getcap()
3590 struct vmcs *vmcs = vcpu->vmcs; in vmx_setcap()
3606 pptr = &vcpu->cap.proc_ctls; in vmx_setcap()
3615 pptr = &vcpu->cap.proc_ctls; in vmx_setcap()
3624 pptr = &vcpu->cap.proc_ctls; in vmx_setcap()
3644 pptr = &vcpu->cap.proc_ctls2; in vmx_setcap()
3653 pptr = &vcpu->cap.proc_ctls2; in vmx_setcap()
3663 if (vcpu->cap.exc_bitmap != 0xffffffff) { in vmx_setcap()
3664 pptr = &vcpu->cap.exc_bitmap; in vmx_setcap()
3673 vlapic = vm_lapic(vcpu->vcpu); in vmx_setcap()
3674 vlapic->ipi_exit = val; in vmx_setcap()
3707 vcpu->cap.set |= (1 << type); in vmx_setcap()
3709 vcpu->cap.set &= ~(1 << type); in vmx_setcap()
3738 VLAPIC_CTR2(vlapic, msg " assert %s-triggered vector %d", \
3740 VLAPIC_CTR1(vlapic, msg " pir0 0x%016lx", pir_desc->pir[0]); \
3741 VLAPIC_CTR1(vlapic, msg " pir1 0x%016lx", pir_desc->pir[1]); \
3742 VLAPIC_CTR1(vlapic, msg " pir2 0x%016lx", pir_desc->pir[2]); \
3743 VLAPIC_CTR1(vlapic, msg " pir3 0x%016lx", pir_desc->pir[3]); \
3748 * vlapic->ops handlers that utilize the APICv hardware assist described in
3760 pir_desc = vlapic_vtx->pir_desc; in vmx_set_intr_ready()
3769 atomic_set_long(&pir_desc->pir[idx], mask); in vmx_set_intr_ready()
3773 * transition from 0->1. in vmx_set_intr_ready()
3778 * the 0->1 'pending' transition with a notification, but the vCPU in vmx_set_intr_ready()
3780 * need to then be notified if a high-priority interrupt arrived which in vmx_set_intr_ready()
3785 * to-be-injected interrupt exceed the priorities already present, the in vmx_set_intr_ready()
3787 * cleared whenever the 'pending' bit makes another 0->1 transition. in vmx_set_intr_ready()
3789 if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) { in vmx_set_intr_ready()
3791 vlapic_vtx->pending_prio = 0; in vmx_set_intr_ready()
3793 const u_int old_prio = vlapic_vtx->pending_prio; in vmx_set_intr_ready()
3797 atomic_set_int(&vlapic_vtx->pending_prio, prio_bit); in vmx_set_intr_ready()
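The policy described in this comment compresses to two cases: always notify on the 0->1 'pending' transition, and otherwise notify only when the new vector introduces a priority class not yet recorded. A condensed sketch stitching the matched fragments back together (VPR_PRIO_BIT maps a vector to its priority-class bit):

    notify = false;
    if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) {
            notify = true;                  /* first pending bit since last scan */
            vlapic_vtx->pending_prio = 0;
    } else {
            const u_int prio_bit = VPR_PRIO_BIT(vector);

            if ((vlapic_vtx->pending_prio & prio_bit) == 0) {
                    notify = true;          /* new priority class arrived */
                    atomic_set_int(&vlapic_vtx->pending_prio, prio_bit);
            }
    }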
3825 pir_desc = vlapic_vtx->pir_desc; in vmx_pending_intr()
3826 lapic = vlapic->apic_page; in vmx_pending_intr()
3835 vmexit = vm_exitinfo(vlapic->vcpu); in vmx_pending_intr()
3836 KASSERT(vmexit->exitcode == VM_EXITCODE_HLT, in vmx_pending_intr()
3838 rvi = vmexit->u.hlt.intr_status & APIC_TPR_INT; in vmx_pending_intr()
3839 ppr = lapic->ppr & APIC_TPR_INT; in vmx_pending_intr()
3843 pending = atomic_load_acq_long(&pir_desc->pending); in vmx_pending_intr()
3857 VLAPIC_CTR1(vlapic, "HLT with non-zero PPR %d", lapic->ppr); in vmx_pending_intr()
3860 for (i = 3; i >= 0; i--) { in vmx_pending_intr()
3861 pirval = pir_desc->pir[i]; in vmx_pending_intr()
3863 vpr = (i * 64 + flsl(pirval) - 1) & APIC_TPR_INT; in vmx_pending_intr()
3869 * If the highest-priority pending interrupt falls short of the in vmx_pending_intr()
3871 * have any stale bits which would preclude a higher-priority interrupt in vmx_pending_intr()
3876 const u_int old = vlapic_vtx->pending_prio; in vmx_pending_intr()
3879 vlapic_vtx->pending_prio = prio_bit; in vmx_pending_intr()
3901 KASSERT(!vcpu_is_running(vlapic->vcpu, NULL), in vmx_set_tmr()
3905 vmcs = vlapic_vtx->vcpu->vmcs; in vmx_set_tmr()
3927 vcpu = vlapic_vtx->vcpu; in vmx_enable_x2apic_mode_ts()
3928 vmcs = vcpu->vmcs; in vmx_enable_x2apic_mode_ts()
3930 proc_ctls = vcpu->cap.proc_ctls; in vmx_enable_x2apic_mode_ts()
3934 vcpu->cap.proc_ctls = proc_ctls; in vmx_enable_x2apic_mode_ts()
3952 vcpu = vlapic_vtx->vcpu; in vmx_enable_x2apic_mode_vid()
3953 vmx = vcpu->vmx; in vmx_enable_x2apic_mode_vid()
3954 vmcs = vcpu->vmcs; in vmx_enable_x2apic_mode_vid()
3956 proc_ctls2 = vcpu->cap.proc_ctls2; in vmx_enable_x2apic_mode_vid()
3962 vcpu->cap.proc_ctls2 = proc_ctls2; in vmx_enable_x2apic_mode_vid()
3968 if (vlapic->vcpuid == 0) { in vmx_enable_x2apic_mode_vid()
3973 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); in vmx_enable_x2apic_mode_vid()
4005 int rvi, pirbase = -1; in vmx_inject_pir()
4009 pir_desc = vlapic_vtx->pir_desc; in vmx_inject_pir()
4010 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { in vmx_inject_pir()
4017 pirbase = -1; in vmx_inject_pir()
4018 lapic = vlapic->apic_page; in vmx_inject_pir()
4020 val = atomic_readandclear_long(&pir_desc->pir[0]); in vmx_inject_pir()
4022 lapic->irr0 |= val; in vmx_inject_pir()
4023 lapic->irr1 |= val >> 32; in vmx_inject_pir()
4028 val = atomic_readandclear_long(&pir_desc->pir[1]); in vmx_inject_pir()
4030 lapic->irr2 |= val; in vmx_inject_pir()
4031 lapic->irr3 |= val >> 32; in vmx_inject_pir()
4036 val = atomic_readandclear_long(&pir_desc->pir[2]); in vmx_inject_pir()
4038 lapic->irr4 |= val; in vmx_inject_pir()
4039 lapic->irr5 |= val >> 32; in vmx_inject_pir()
4044 val = atomic_readandclear_long(&pir_desc->pir[3]); in vmx_inject_pir()
4046 lapic->irr6 |= val; in vmx_inject_pir()
4047 lapic->irr7 |= val >> 32; in vmx_inject_pir()
4056 * interrupts on VM-entry. in vmx_inject_pir()
4060 * CPU-Y is sending a posted interrupt to CPU-X, which in vmx_inject_pir()
4062 * CPU-X will eventually exit and the state seen in s/w is in vmx_inject_pir()
4065 * CPU-X CPU-Y in vmx_inject_pir()
4076 rvi = pirbase + flsl(pirval) - 1; in vmx_inject_pir()
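Closing the race described above means folding the highest pending PIR vector into the guest interrupt status so the CPU re-evaluates it at VM-entry. A condensed sketch of what follows the line above (VMCS_GUEST_INTR_STATUS keeps RVI in its low byte):

    if (pirval != 0) {
            rvi = pirbase + flsl(pirval) - 1;   /* highest pending vector */
            intr_status_old = (uint16_t)vmcs_read(VMCS_GUEST_INTR_STATUS);
            intr_status_new = (intr_status_old & 0xFF00) | rvi;
            if (intr_status_new > intr_status_old)
                    vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
    }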
4097 vmx = vcpu->vmx; in vmx_vlapic_init()
4100 vlapic->vm = vmx->vm; in vmx_vlapic_init()
4101 vlapic->vcpu = vcpu->vcpu; in vmx_vlapic_init()
4102 vlapic->vcpuid = vcpu->vcpuid; in vmx_vlapic_init()
4103 vlapic->apic_page = (struct LAPIC *)vcpu->apic_page; in vmx_vlapic_init()
4106 vlapic_vtx->pir_desc = vcpu->pir_desc; in vmx_vlapic_init()
4107 vlapic_vtx->vcpu = vcpu; in vmx_vlapic_init()
4110 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts; in vmx_vlapic_init()
4114 vlapic->ops.set_intr_ready = vmx_set_intr_ready; in vmx_vlapic_init()
4115 vlapic->ops.pending_intr = vmx_pending_intr; in vmx_vlapic_init()
4116 vlapic->ops.intr_accepted = vmx_intr_accepted; in vmx_vlapic_init()
4117 vlapic->ops.set_tmr = vmx_set_tmr; in vmx_vlapic_init()
4118 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid; in vmx_vlapic_init()
4122 vlapic->ops.post_intr = vmx_post_intr; in vmx_vlapic_init()
4149 vmx = vcpu->vmx; in vmx_vcpu_snapshot()
4150 vmcs = vcpu->vmcs; in vmx_vcpu_snapshot()
4152 run = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_vcpu_snapshot()
4154 printf("%s: %s%d is running", __func__, vm_name(vmx->vm), in vmx_vcpu_snapshot()
4155 vcpu->vcpuid); in vmx_vcpu_snapshot()
4214 SNAPSHOT_BUF_OR_LEAVE(vcpu->guest_msrs, in vmx_vcpu_snapshot()
4215 sizeof(vcpu->guest_msrs), meta, err, done); in vmx_vcpu_snapshot()
4217 SNAPSHOT_BUF_OR_LEAVE(vcpu->pir_desc, in vmx_vcpu_snapshot()
4218 sizeof(*vcpu->pir_desc), meta, err, done); in vmx_vcpu_snapshot()
4220 SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, in vmx_vcpu_snapshot()
4221 sizeof(vcpu->mtrr), meta, err, done); in vmx_vcpu_snapshot()
4223 vmxctx = &vcpu->ctx; in vmx_vcpu_snapshot()
4224 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rdi, meta, err, done); in vmx_vcpu_snapshot()
4225 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rsi, meta, err, done); in vmx_vcpu_snapshot()
4226 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rdx, meta, err, done); in vmx_vcpu_snapshot()
4227 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rcx, meta, err, done); in vmx_vcpu_snapshot()
4228 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r8, meta, err, done); in vmx_vcpu_snapshot()
4229 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r9, meta, err, done); in vmx_vcpu_snapshot()
4230 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rax, meta, err, done); in vmx_vcpu_snapshot()
4231 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rbx, meta, err, done); in vmx_vcpu_snapshot()
4232 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rbp, meta, err, done); in vmx_vcpu_snapshot()
4233 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r10, meta, err, done); in vmx_vcpu_snapshot()
4234 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r11, meta, err, done); in vmx_vcpu_snapshot()
4235 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r12, meta, err, done); in vmx_vcpu_snapshot()
4236 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r13, meta, err, done); in vmx_vcpu_snapshot()
4237 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r14, meta, err, done); in vmx_vcpu_snapshot()
4238 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r15, meta, err, done); in vmx_vcpu_snapshot()
4239 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_cr2, meta, err, done); in vmx_vcpu_snapshot()
4240 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr0, meta, err, done); in vmx_vcpu_snapshot()
4241 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr1, meta, err, done); in vmx_vcpu_snapshot()
4242 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr2, meta, err, done); in vmx_vcpu_snapshot()
4243 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr3, meta, err, done); in vmx_vcpu_snapshot()
4244 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr6, meta, err, done); in vmx_vcpu_snapshot()
4258 vmx = vcpu->vmx; in vmx_restore_tsc()
4259 vmcs = vcpu->vmcs; in vmx_restore_tsc()
4261 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_restore_tsc()
4263 printf("%s: %s%d is running", __func__, vm_name(vmx->vm), in vmx_restore_tsc()
4264 vcpu->vcpuid); in vmx_restore_tsc()