Lines Matching +full:reserved-ipi-vectors
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
5 * All rights reserved.
167 "HLT triggers a VM-exit");
171 0, "PAUSE triggers a VM-exit");
175 0, "WBINVD triggers a VM-exit");
212 static int pirvec = -1;
422 return "mce-during-entry"; in exit_reason_to_str()
426 return "apic-access"; in exit_reason_to_str()
448 return "apic-write"; in exit_reason_to_str()
495 * "Virtualizing MSR-Based APIC Accesses". in vmx_allow_x2apic_msrs()
551 if (x == -1) { in vpid_alloc()
562 * It is still sub-optimal because the invvpid will invalidate in vpid_alloc()
696 * - bit 54 indicates support for INS/OUTS decoding in vmx_modinit()
705 /* Check support for primary processor-based VM-execution controls */ in vmx_modinit()
712 "primary processor-based controls\n"); in vmx_modinit()
716 /* Clear the processor-based ctl bits that are set on demand */ in vmx_modinit()
719 /* Check support for secondary processor-based VM-execution controls */ in vmx_modinit()
726 "secondary processor-based controls\n"); in vmx_modinit()
736 /* Check support for pin-based VM-execution controls */ in vmx_modinit()
743 "pin-based controls\n"); in vmx_modinit()
747 /* Check support for VM-exit controls */ in vmx_modinit()
758 /* Check support for VM-entry controls */ in vmx_modinit()
796 * Support a pass-through-based implementation of these via the in vmx_modinit()
797 * "enable RDTSCP" VM-execution control and the "RDTSC exiting" in vmx_modinit()
798 * VM-execution control. in vmx_modinit()
800 * The "enable RDTSCP" VM-execution control applies to both RDPID in vmx_modinit()
802 * Instruction Behavior in VMX Non-root operation"); this is why in vmx_modinit()
803 * only this VM-execution control needs to be enabled in order to in vmx_modinit()
807 * The "RDTSC exiting" VM-execution control applies to both RDTSC in vmx_modinit()
809 * already set up for RDTSC and RDTSCP pass-through by the current in vmx_modinit()
818 * bitmap is currently per-VM rather than per-vCPU while the in vmx_modinit()
820 * per-vCPU basis). in vmx_modinit()
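/*
 * Editorial sketch (not part of this file): one way the "enable
 * RDTSCP" secondary control discussed above could be probed.  This
 * assumes the SDM layout of IA32_VMX_PROCBASED_CTLS2 (MSR 0x48b),
 * where the allowed-1 settings live in bits 63:32 and "enable
 * RDTSCP" is bit 3 of the control; the names below are illustrative.
 */
#define	MSR_VMX_PROCBASED_CTLS2		0x48b
#define	PROCBASED2_ENABLE_RDTSCP	(1u << 3)

static int
rdtscp_ctl_supported(void)
{
	uint64_t val = rdmsr(MSR_VMX_PROCBASED_CTLS2);	/* kernel helper */

	return (((val >> 32) & PROCBASED2_ENABLE_RDTSCP) != 0);
}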
968 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation in vmx_modinit()
1008 KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present", in vmx_trigger_hostintr()
1010 KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d " in vmx_trigger_hostintr()
1011 "has invalid type %d", vector, gd->gd_type)); in vmx_trigger_hostintr()
1012 KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d " in vmx_trigger_hostintr()
1013 "has invalid dpl %d", vector, gd->gd_dpl)); in vmx_trigger_hostintr()
1014 KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor " in vmx_trigger_hostintr()
1015 "for vector %d has invalid selector %d", vector, gd->gd_selector)); in vmx_trigger_hostintr()
1016 KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid " in vmx_trigger_hostintr()
1017 "IST %d", vector, gd->gd_ist)); in vmx_trigger_hostintr()
1019 func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset); in vmx_trigger_hostintr()
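/*
 * (gd_looffset carries bits 15:0 of the handler address and
 * gd_hioffset bits 63:16, so the shift-and-or above reassembles the
 * full 64-bit entry point of the vector's interrupt gate.)
 */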
1062 vmx->vm = vm; in vmx_init()
1064 vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pmltop)); in vmx_init()
1067 * Clean up EPTP-tagged guest physical and combined mappings in vmx_init()
1075 ept_invalidate_mappings(vmx->eptp); in vmx_init()
1077 vmx->msr_bitmap = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_VMX, in vmx_init()
1079 msr_bitmap_initialize(vmx->msr_bitmap); in vmx_init()
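/*
 * Editorial sketch of the MSR-bitmap page initialized above, per the
 * SDM: one 4KB page split into four 1KB quadrants -- read-low (0x000),
 * read-high (0x400), write-low (0x800), write-high (0xc00) -- where a
 * clear bit lets the access through without a VM-exit.  The helper
 * name is illustrative, not bhyve's.
 */
static void
msr_bitmap_allow_read(uint8_t *bitmap, uint32_t msr)
{
	uint32_t base;

	if (msr <= 0x1fff)
		base = 0x000;		/* reads of MSRs 0x0-0x1fff */
	else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
		base = 0x400;		/* reads of 0xc0000000-0xc0001fff */
		msr -= 0xc0000000;
	} else
		return;			/* MSR not covered by the bitmap */
	bitmap[base + msr / 8] &= ~(1u << (msr % 8));
}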
1084 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are in vmx_init()
1085 * always restored from the vmcs host state area on vm-exit. in vmx_init()
1095 * The TSC MSR is exposed read-only. Writes are disallowed as in vmx_init()
1102 * guest RDTSCP support are enabled (since, as per Table 2-2 in SDM in vmx_init()
1105 * exposed read-only so that the VMM can do one fewer MSR read per in vmx_init()
1106 * exit than if this register were exposed read-write; the guest in vmx_init()
1127 vmx->pmap = pmap; in vmx_init()
1144 vcpu->vmx = vmx; in vmx_vcpu_init()
1145 vcpu->vcpu = vcpu1; in vmx_vcpu_init()
1146 vcpu->vcpuid = vcpuid; in vmx_vcpu_init()
1147 vcpu->vmcs = malloc_aligned(sizeof(*vmcs), PAGE_SIZE, M_VMX, in vmx_vcpu_init()
1149 vcpu->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_VMX, in vmx_vcpu_init()
1151 vcpu->pir_desc = malloc_aligned(sizeof(*vcpu->pir_desc), 64, M_VMX, in vmx_vcpu_init()
1154 vmcs = vcpu->vmcs; in vmx_vcpu_init()
1155 vmcs->identifier = vmx_revision(); in vmx_vcpu_init()
1169 error += vmwrite(VMCS_HOST_RSP, (u_long)&vcpu->ctx); in vmx_vcpu_init()
1170 error += vmwrite(VMCS_EPTP, vmx->eptp); in vmx_vcpu_init()
1173 if (vcpu_trap_wbinvd(vcpu->vcpu)) { in vmx_vcpu_init()
1180 error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap)); in vmx_vcpu_init()
1193 if (vcpu_trace_exceptions(vcpu->vcpu)) in vmx_vcpu_init()
1199 vcpu->ctx.guest_dr6 = DBREG_DR6_RESERVED1; in vmx_vcpu_init()
1203 error += vmwrite(VMCS_VIRTUAL_APIC, vtophys(vcpu->apic_page)); in vmx_vcpu_init()
1215 error += vmwrite(VMCS_PIR_DESC, vtophys(vcpu->pir_desc)); in vmx_vcpu_init()
1220 vcpu->cap.set = 0; in vmx_vcpu_init()
1221 vcpu->cap.set |= cap_rdpid != 0 ? 1 << VM_CAP_RDPID : 0; in vmx_vcpu_init()
1222 vcpu->cap.set |= cap_rdtscp != 0 ? 1 << VM_CAP_RDTSCP : 0; in vmx_vcpu_init()
1223 vcpu->cap.proc_ctls = procbased_ctls; in vmx_vcpu_init()
1224 vcpu->cap.proc_ctls2 = procbased_ctls2; in vmx_vcpu_init()
1225 vcpu->cap.exc_bitmap = exc_bitmap; in vmx_vcpu_init()
1227 vcpu->state.nextrip = ~0; in vmx_vcpu_init()
1228 vcpu->state.lastcpu = NOCPU; in vmx_vcpu_init()
1229 vcpu->state.vpid = vpid; in vmx_vcpu_init()
1233 * to the power-on register value from the Intel Sys Arch. in vmx_vcpu_init()
1234 * CR0 - 0x60000010 in vmx_vcpu_init()
1235 * CR4 - 0 in vmx_vcpu_init()
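/*
 * (0x60000010 is CR0 with CD, NW and ET set -- the architectural
 * power-on value; PE and PG start out clear.)
 */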
1245 vcpu->ctx.pmap = vmx->pmap; in vmx_vcpu_init()
1255 handled = x86_emulate_cpuid(vcpu->vcpu, (uint64_t *)&vmxctx->guest_rax, in vmx_handle_cpuid()
1256 (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx, in vmx_handle_cpuid()
1257 (uint64_t *)&vmxctx->guest_rdx); in vmx_handle_cpuid()
1294 vmxstate = &vcpu->state; in vmx_invvpid()
1295 if (vmxstate->vpid == 0) in vmx_invvpid()
1305 vmxstate->lastcpu = NOCPU; in vmx_invvpid()
1309 KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside " in vmx_invvpid()
1310 "critical section", __func__, vcpu->vcpuid)); in vmx_invvpid()
1327 if (atomic_load_long(&pmap->pm_eptgen) == vmx->eptgen[curcpu]) { in vmx_invvpid()
1330 invvpid_desc.vpid = vmxstate->vpid; in vmx_invvpid()
1333 vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_DONE, 1); in vmx_invvpid()
1339 * 'vmx->eptp' for all vpids. in vmx_invvpid()
1341 vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_SAVED, 1); in vmx_invvpid()
1350 vmxstate = &vcpu->state; in vmx_set_pcpu_defaults()
1351 if (vmxstate->lastcpu == curcpu) in vmx_set_pcpu_defaults()
1354 vmxstate->lastcpu = curcpu; in vmx_set_pcpu_defaults()
1356 vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1); in vmx_set_pcpu_defaults()
1373 if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) { in vmx_set_int_window_exiting()
1374 vcpu->cap.proc_ctls |= PROCBASED_INT_WINDOW_EXITING; in vmx_set_int_window_exiting()
1375 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_set_int_window_exiting()
1384 KASSERT((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0, in vmx_clear_int_window_exiting()
1385 ("intr_window_exiting not set: %#x", vcpu->cap.proc_ctls)); in vmx_clear_int_window_exiting()
1386 vcpu->cap.proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING; in vmx_clear_int_window_exiting()
1387 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_clear_int_window_exiting()
1395 if ((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) { in vmx_set_nmi_window_exiting()
1396 vcpu->cap.proc_ctls |= PROCBASED_NMI_WINDOW_EXITING; in vmx_set_nmi_window_exiting()
1397 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_set_nmi_window_exiting()
1406 KASSERT((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0, in vmx_clear_nmi_window_exiting()
1407 ("nmi_window_exiting not set %#x", vcpu->cap.proc_ctls)); in vmx_clear_nmi_window_exiting()
1408 vcpu->cap.proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING; in vmx_clear_nmi_window_exiting()
1409 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_clear_nmi_window_exiting()
1418 if ((vcpu->cap.proc_ctls & PROCBASED_TSC_OFFSET) == 0) { in vmx_set_tsc_offset()
1419 vcpu->cap.proc_ctls |= PROCBASED_TSC_OFFSET; in vmx_set_tsc_offset()
1420 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); in vmx_set_tsc_offset()
1427 vm_set_tsc_offset(vcpu->vcpu, offset); in vmx_set_tsc_offset()
1444 "interruptibility-state %#x", gi)); in vmx_inject_nmi()
1448 "VM-entry interruption information %#x", info)); in vmx_inject_nmi()
1460 vm_nmi_clear(vcpu->vcpu); in vmx_inject_nmi()
1471 if (vcpu->cap.set & (1 << VM_CAP_MASK_HWINTR)) { in vmx_inject_interrupts()
1475 if (vcpu->state.nextrip != guestrip) { in vmx_inject_interrupts()
1480 vcpu->state.nextrip, guestrip); in vmx_inject_interrupts()
1486 if (vm_entry_intinfo(vcpu->vcpu, &entryinfo)) { in vmx_inject_interrupts()
1498 * VT-x requires #BP and #OF to be injected as software in vmx_inject_interrupts()
1511 if (vm_nmi_pending(vcpu->vcpu)) { in vmx_inject_interrupts()
1532 "due to VM-entry intr info %#x", info); in vmx_inject_interrupts()
1536 "Guest Interruptibility-state %#x", gi); in vmx_inject_interrupts()
1543 extint_pending = vm_extint_pending(vcpu->vcpu); in vmx_inject_interrupts()
1551 * If interrupt-window exiting is already in effect then don't bother in vmx_inject_interrupts()
1555 if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) { in vmx_inject_interrupts()
1569 * - maskable interrupt vectors [16,255] can be delivered in vmx_inject_interrupts()
1576 vatpic_pending_intr(vcpu->vmx->vm, &vector); in vmx_inject_interrupts()
1581 * - maskable interrupt vectors [0,255] can be delivered in vmx_inject_interrupts()
1599 "Guest Interruptibility-state %#x", vector, gi); in vmx_inject_interrupts()
1607 * - A vectoring VM-entry was aborted due to astpending in vmx_inject_interrupts()
1608 * - A VM-exit happened during event injection. in vmx_inject_interrupts()
1609 * - An exception was injected above. in vmx_inject_interrupts()
1610 * - An NMI was injected above or after "NMI window exiting" in vmx_inject_interrupts()
1613 "VM-entry intr info %#x", vector, info); in vmx_inject_interrupts()
1626 vm_extint_clear(vcpu->vcpu); in vmx_inject_interrupts()
1627 vatpic_intr_accepted(vcpu->vmx->vm, vector); in vmx_inject_interrupts()
1657 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
1658 * the VMCS. An IRET instruction in VMX non-root operation will remove any
1659 * virtual-NMI blocking.
1662 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
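/*
 * Editorial note: "blocking by NMI" is bit 3 of that field, so the
 * two helpers below reduce to a read-modify-write of one bit
 * (constant name assumed):
 *
 *	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
 *	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;	// restore
 *	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;	// clear
 *	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
 */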
1669 VMX_CTR0(vcpu, "Restore Virtual-NMI blocking"); in vmx_restore_nmi_blocking()
1680 VMX_CTR0(vcpu, "Clear Virtual-NMI blocking"); in vmx_clear_nmi_blocking()
1704 vmxctx = &vcpu->ctx; in vmx_emulate_xsetbv()
1714 if (vmxctx->guest_rcx != 0) { in vmx_emulate_xsetbv()
1715 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1720 if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) { in vmx_emulate_xsetbv()
1721 vm_inject_ud(vcpu->vcpu); in vmx_emulate_xsetbv()
1725 xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff); in vmx_emulate_xsetbv()
1726 if ((xcrval & ~limits->xcr0_allowed) != 0) { in vmx_emulate_xsetbv()
1727 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1732 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1739 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1750 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
1760 vm_inject_gp(vcpu->vcpu); in vmx_emulate_xsetbv()
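/*
 * (Taken together, the checks above implement the architectural
 * XSETBV rules: %ecx must be 0, no bit outside the permitted xcr0
 * mask may be set, the x87 bit must always be 1, and extended states
 * may only be enabled in valid combinations -- e.g. AVX requires
 * SSE.  Any violation injects #GP into the guest.)
 */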
1778 vmxctx = &vcpu->ctx; in vmx_get_guest_reg()
1782 return (vmxctx->guest_rax); in vmx_get_guest_reg()
1784 return (vmxctx->guest_rcx); in vmx_get_guest_reg()
1786 return (vmxctx->guest_rdx); in vmx_get_guest_reg()
1788 return (vmxctx->guest_rbx); in vmx_get_guest_reg()
1792 return (vmxctx->guest_rbp); in vmx_get_guest_reg()
1794 return (vmxctx->guest_rsi); in vmx_get_guest_reg()
1796 return (vmxctx->guest_rdi); in vmx_get_guest_reg()
1798 return (vmxctx->guest_r8); in vmx_get_guest_reg()
1800 return (vmxctx->guest_r9); in vmx_get_guest_reg()
1802 return (vmxctx->guest_r10); in vmx_get_guest_reg()
1804 return (vmxctx->guest_r11); in vmx_get_guest_reg()
1806 return (vmxctx->guest_r12); in vmx_get_guest_reg()
1808 return (vmxctx->guest_r13); in vmx_get_guest_reg()
1810 return (vmxctx->guest_r14); in vmx_get_guest_reg()
1812 return (vmxctx->guest_r15); in vmx_get_guest_reg()
1823 vmxctx = &vcpu->ctx; in vmx_set_guest_reg()
1827 vmxctx->guest_rax = regval; in vmx_set_guest_reg()
1830 vmxctx->guest_rcx = regval; in vmx_set_guest_reg()
1833 vmxctx->guest_rdx = regval; in vmx_set_guest_reg()
1836 vmxctx->guest_rbx = regval; in vmx_set_guest_reg()
1842 vmxctx->guest_rbp = regval; in vmx_set_guest_reg()
1845 vmxctx->guest_rsi = regval; in vmx_set_guest_reg()
1848 vmxctx->guest_rdi = regval; in vmx_set_guest_reg()
1851 vmxctx->guest_r8 = regval; in vmx_set_guest_reg()
1854 vmxctx->guest_r9 = regval; in vmx_set_guest_reg()
1857 vmxctx->guest_r10 = regval; in vmx_set_guest_reg()
1860 vmxctx->guest_r11 = regval; in vmx_set_guest_reg()
1863 vmxctx->guest_r12 = regval; in vmx_set_guest_reg()
1866 vmxctx->guest_r13 = regval; in vmx_set_guest_reg()
1869 vmxctx->guest_r14 = regval; in vmx_set_guest_reg()
1872 vmxctx->guest_r15 = regval; in vmx_set_guest_reg()
1901 * the "IA-32e mode guest" bit in VM-entry control must be in vmx_emulate_cr0_access()
1950 vlapic = vm_lapic(vcpu->vcpu); in vmx_emulate_cr8_access()
2064 vis->seg_name = VM_REG_GUEST_ES; in inout_str_seginfo()
2067 vis->seg_name = vm_segment_name(s); in inout_str_seginfo()
2070 error = vmx_getdesc(vcpu, vis->seg_name, &vis->seg_desc); in inout_str_seginfo()
2077 paging->cr3 = vmcs_guest_cr3(); in vmx_paging_info()
2078 paging->cpl = vmx_cpl(); in vmx_paging_info()
2079 paging->cpu_mode = vmx_cpu_mode(); in vmx_paging_info()
2080 paging->paging_mode = vmx_paging_mode(); in vmx_paging_info()
2089 paging = &vmexit->u.inst_emul.paging; in vmexit_inst_emul()
2091 vmexit->exitcode = VM_EXITCODE_INST_EMUL; in vmexit_inst_emul()
2092 vmexit->inst_length = 0; in vmexit_inst_emul()
2093 vmexit->u.inst_emul.gpa = gpa; in vmexit_inst_emul()
2094 vmexit->u.inst_emul.gla = gla; in vmexit_inst_emul()
2096 switch (paging->cpu_mode) { in vmexit_inst_emul()
2098 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); in vmexit_inst_emul()
2099 vmexit->u.inst_emul.cs_d = 0; in vmexit_inst_emul()
2103 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); in vmexit_inst_emul()
2105 vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar); in vmexit_inst_emul()
2108 vmexit->u.inst_emul.cs_base = 0; in vmexit_inst_emul()
2109 vmexit->u.inst_emul.cs_d = 0; in vmexit_inst_emul()
2112 vie_init(&vmexit->u.inst_emul.vie, NULL, 0); in vmexit_inst_emul()
2147 * guest-physical address that is a translation of a guest-linear in ept_emulation_fault()
2163 proc_ctls2 = vcpu->cap.proc_ctls2; in apic_access_virtualization()
2172 proc_ctls2 = vcpu->cap.proc_ctls2; in x2apic_virtualization()
2189 * In general there should not be any APIC write VM-exits in vmx_handle_apic_write()
2190 * unless APIC-access virtualization is enabled. in vmx_handle_apic_write()
2192 * However self-IPI virtualization can legitimately trigger in vmx_handle_apic_write()
2193 * an APIC-write VM-exit so treat it specially. in vmx_handle_apic_write()
2197 apic_regs = (uint32_t *)(vlapic->apic_page); in vmx_handle_apic_write()
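/*
 * (The special case is a write to the SELF_IPI register at APIC-page
 * offset 0x3f0 -- the one write that virtual-interrupt delivery does
 * not complete entirely in hardware.)
 */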
2264 qual = vmexit->u.vmx.exit_qualification; in vmx_handle_apic_access()
2311 * Regardless of whether the APIC-access is allowed this handler in vmx_handle_apic_access()
2313 * - if the access is allowed then it is handled by emulating the in vmx_handle_apic_access()
2314 * instruction that caused the VM-exit (outside the critical section) in vmx_handle_apic_access()
2315 * - if the access is not allowed then it will be converted to an in vmx_handle_apic_access()
2347 error = lapic_wrmsr(vcpu->vcpu, num, val, retu); in emulate_wrmsr()
2363 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu); in emulate_rdmsr()
2369 vmxctx = &vcpu->ctx; in emulate_rdmsr()
2401 vmxctx = &vcpu->ctx; in vmx_exit_process()
2403 vcpuid = vcpu->vcpuid; in vmx_exit_process()
2406 qual = vmexit->u.vmx.exit_qualification; in vmx_exit_process()
2407 reason = vmexit->u.vmx.exit_reason; in vmx_exit_process()
2408 vmexit->exitcode = VM_EXITCODE_BOGUS; in vmx_exit_process()
2410 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1); in vmx_exit_process()
2414 * VM-entry failures during or after loading guest state. in vmx_exit_process()
2416 * These VM-exits are uncommon but must be handled specially in vmx_exit_process()
2417 * as most VM-exit fields are not populated as usual. in vmx_exit_process()
2420 VMX_CTR0(vcpu, "Handling MCE during VM-entry"); in vmx_exit_process()
2427 * be handled specially by re-injecting the event if the IDT in vmx_exit_process()
2441 error = vm_exit_intinfo(vcpu->vcpu, exitintinfo); in vmx_exit_process()
2446 * If 'virtual NMIs' are being used and the VM-exit in vmx_exit_process()
2448 * VM-entry, then clear "blocking by NMI" in the in vmx_exit_process()
2449 * Guest Interruptibility-State so the NMI can be in vmx_exit_process()
2450 * reinjected on the subsequent VM-entry. in vmx_exit_process()
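/*
 * Editorial sketch of the IDT-vectoring information layout decoded
 * above, per the SDM; the macro names are illustrative:
 */
#define	IDT_VEC_VECTOR(x)	((x) & 0xff)		/* bits 7:0 */
#define	IDT_VEC_TYPE(x)		(((x) >> 8) & 0x7)	/* 2 == NMI */
#define	IDT_VEC_ERRCODE_VALID	(1u << 11)
#define	IDT_VEC_VALID		(1u << 31)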
2465 * Update VM-entry instruction length if the event being in vmx_exit_process()
2471 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); in vmx_exit_process()
2477 ts = &vmexit->u.task_switch; in vmx_exit_process()
2478 ts->tsssel = qual & 0xffff; in vmx_exit_process()
2479 ts->reason = vmx_task_switch_reason(qual); in vmx_exit_process()
2480 ts->ext = 0; in vmx_exit_process()
2481 ts->errcode_valid = 0; in vmx_exit_process()
2482 vmx_paging_info(&ts->paging); in vmx_exit_process()
2496 if (ts->reason == TSR_IDT_GATE) { in vmx_exit_process()
2505 ts->ext = 1; in vmx_exit_process()
2506 vmexit->inst_length = 0; in vmx_exit_process()
2508 ts->errcode_valid = 1; in vmx_exit_process()
2509 ts->errcode = vmcs_idt_vectoring_err(); in vmx_exit_process()
2513 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; in vmx_exit_process()
2516 "%s errcode 0x%016lx", ts->reason, ts->tsssel, in vmx_exit_process()
2517 ts->ext ? "external" : "internal", in vmx_exit_process()
2518 ((uint64_t)ts->errcode << 32) | ts->errcode_valid); in vmx_exit_process()
2521 vmm_stat_incr(vcpu->vcpu, VMEXIT_CR_ACCESS, 1); in vmx_exit_process()
2536 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1); in vmx_exit_process()
2538 ecx = vmxctx->guest_rcx; in vmx_exit_process()
2543 vmexit->exitcode = VM_EXITCODE_RDMSR; in vmx_exit_process()
2544 vmexit->u.msr.code = ecx; in vmx_exit_process()
2549 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, in vmx_exit_process()
2554 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1); in vmx_exit_process()
2556 eax = vmxctx->guest_rax; in vmx_exit_process()
2557 ecx = vmxctx->guest_rcx; in vmx_exit_process()
2558 edx = vmxctx->guest_rdx; in vmx_exit_process()
2566 vmexit->exitcode = VM_EXITCODE_WRMSR; in vmx_exit_process()
2567 vmexit->u.msr.code = ecx; in vmx_exit_process()
2568 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax; in vmx_exit_process()
2573 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, in vmx_exit_process()
2578 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1); in vmx_exit_process()
2580 vmexit->exitcode = VM_EXITCODE_HLT; in vmx_exit_process()
2581 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); in vmx_exit_process()
2583 vmexit->u.hlt.intr_status = in vmx_exit_process()
2586 vmexit->u.hlt.intr_status = 0; in vmx_exit_process()
2589 vmm_stat_incr(vcpu->vcpu, VMEXIT_MTRAP, 1); in vmx_exit_process()
2591 vmexit->exitcode = VM_EXITCODE_MTRAP; in vmx_exit_process()
2592 vmexit->inst_length = 0; in vmx_exit_process()
2595 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1); in vmx_exit_process()
2597 vmexit->exitcode = VM_EXITCODE_PAUSE; in vmx_exit_process()
2600 vmm_stat_incr(vcpu->vcpu, VMEXIT_INTR_WINDOW, 1); in vmx_exit_process()
2631 * VM-exit but not increment the instruction pointer. in vmx_exit_process()
2633 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1); in vmx_exit_process()
2638 if (vm_nmi_pending(vcpu->vcpu)) in vmx_exit_process()
2641 vmm_stat_incr(vcpu->vcpu, VMEXIT_NMI_WINDOW, 1); in vmx_exit_process()
2644 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1); in vmx_exit_process()
2645 vmexit->exitcode = VM_EXITCODE_INOUT; in vmx_exit_process()
2646 vmexit->u.inout.bytes = (qual & 0x7) + 1; in vmx_exit_process()
2647 vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0; in vmx_exit_process()
2648 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0; in vmx_exit_process()
2649 vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0; in vmx_exit_process()
2650 vmexit->u.inout.port = (uint16_t)(qual >> 16); in vmx_exit_process()
2651 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax); in vmx_exit_process()
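/*
 * Worked decode: for "out %al, $0x80" the exit qualification is
 * 0x00800000 -- size field 0 (0 + 1 = 1 byte), direction bit 3 clear
 * (an OUT), string and rep clear, and port 0x80 in bits 31:16.
 */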
2652 if (vmexit->u.inout.string) { in vmx_exit_process()
2654 vmexit->exitcode = VM_EXITCODE_INOUT_STR; in vmx_exit_process()
2655 vis = &vmexit->u.inout_str; in vmx_exit_process()
2656 vmx_paging_info(&vis->paging); in vmx_exit_process()
2657 vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS); in vmx_exit_process()
2658 vis->cr0 = vmcs_read(VMCS_GUEST_CR0); in vmx_exit_process()
2659 vis->index = inout_str_index(vcpu, in); in vmx_exit_process()
2660 vis->count = inout_str_count(vcpu, vis->inout.rep); in vmx_exit_process()
2661 vis->addrsize = inout_str_addrsize(inst_info); in vmx_exit_process()
2662 vis->cs_d = 0; in vmx_exit_process()
2663 vis->cs_base = 0; in vmx_exit_process()
2669 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1); in vmx_exit_process()
2674 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1); in vmx_exit_process()
2683 * If Virtual NMIs control is 1 and the VM-exit is due to a in vmx_exit_process()
2685 * restore the state of "virtual-NMI blocking" before resuming in vmx_exit_process()
2717 (vcpu->cap.set & (1 << VM_CAP_BPT_EXIT))) { in vmx_exit_process()
2718 vmexit->exitcode = VM_EXITCODE_BPT; in vmx_exit_process()
2719 vmexit->u.bpt.inst_length = vmexit->inst_length; in vmx_exit_process()
2720 vmexit->inst_length = 0; in vmx_exit_process()
2731 * Software exceptions exhibit trap-like behavior. This in in vmx_exit_process()
2732 * turn requires populating the VM-entry instruction length in vmx_exit_process()
2737 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); in vmx_exit_process()
2749 error = vm_inject_exception(vcpu->vcpu, intr_vec, in vmx_exit_process()
2762 if (vm_mem_allocated(vcpu->vcpu, gpa) || in vmx_exit_process()
2763 ppt_is_mmio(vmx->vm, gpa) || apic_access_fault(vcpu, gpa)) { in vmx_exit_process()
2764 vmexit->exitcode = VM_EXITCODE_PAGING; in vmx_exit_process()
2765 vmexit->inst_length = 0; in vmx_exit_process()
2766 vmexit->u.paging.gpa = gpa; in vmx_exit_process()
2767 vmexit->u.paging.fault_type = ept_fault_type(qual); in vmx_exit_process()
2768 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1); in vmx_exit_process()
2773 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1); in vmx_exit_process()
2778 * If Virtual NMIs control is 1 and the VM-exit is due to an in vmx_exit_process()
2780 * the state of "virtual-NMI blocking" before resuming. in vmx_exit_process()
2790 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; in vmx_exit_process()
2791 vmexit->u.ioapic_eoi.vector = qual & 0xFF; in vmx_exit_process()
2793 vmexit->inst_length = 0; /* trap-like */ in vmx_exit_process()
2801 * APIC-write VM exit is trap-like so the %rip is already in vmx_exit_process()
2804 vmexit->inst_length = 0; in vmx_exit_process()
2805 vlapic = vm_lapic(vcpu->vcpu); in vmx_exit_process()
2816 vmexit->exitcode = VM_EXITCODE_MONITOR; in vmx_exit_process()
2820 vmexit->exitcode = VM_EXITCODE_MWAIT; in vmx_exit_process()
2823 vlapic = vm_lapic(vcpu->vcpu); in vmx_exit_process()
2825 vmexit->inst_length = 0; in vmx_exit_process()
2839 vmexit->exitcode = VM_EXITCODE_VMINSN; in vmx_exit_process()
2849 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1); in vmx_exit_process()
2864 vmexit->rip += vmexit->inst_length; in vmx_exit_process()
2865 vmexit->inst_length = 0; in vmx_exit_process()
2866 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); in vmx_exit_process()
2868 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { in vmx_exit_process()
2873 vmexit->exitcode = VM_EXITCODE_VMX; in vmx_exit_process()
2874 vmexit->u.vmx.status = VM_SUCCESS; in vmx_exit_process()
2875 vmexit->u.vmx.inst_type = 0; in vmx_exit_process()
2876 vmexit->u.vmx.inst_error = 0; in vmx_exit_process()
2894 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, in vmx_exit_inst_error()
2896 vmxctx->inst_fail_status)); in vmx_exit_inst_error()
2898 vmexit->inst_length = 0; in vmx_exit_inst_error()
2899 vmexit->exitcode = VM_EXITCODE_VMX; in vmx_exit_inst_error()
2900 vmexit->u.vmx.status = vmxctx->inst_fail_status; in vmx_exit_inst_error()
2901 vmexit->u.vmx.inst_error = vmcs_instruction_error(); in vmx_exit_inst_error()
2902 vmexit->u.vmx.exit_reason = ~0; in vmx_exit_inst_error()
2903 vmexit->u.vmx.exit_qualification = ~0; in vmx_exit_inst_error()
2908 vmexit->u.vmx.inst_type = rc; in vmx_exit_inst_error()
2916 * If the NMI-exiting VM execution control is set to '1' then an NMI in
2917 * non-root operation causes a VM-exit. NMI blocking is in effect so it is
2930 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION) in vmx_exit_handle_nmi()
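/*
 * Editorial sketch of the rest of this check (VMCS constants as in
 * the FreeBSD headers; the exact hand-off varies by version): the
 * exit interruption information must be valid and of type NMI before
 * the host NMI handler is run, while NMI blocking is still in effect.
 *
 *	intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
 *	if ((intr_info & VMCS_INTR_VALID) != 0 &&
 *	    (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI)
 *		__asm __volatile("int $2");	// run host NMI handler
 */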
2951 vmxctx->host_dr7 = rdr7(); in vmx_dr_enter_guest()
2952 vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); in vmx_dr_enter_guest()
2969 vmxctx->host_tf = rflags & PSL_T; in vmx_dr_enter_guest()
2973 vmxctx->host_dr0 = rdr0(); in vmx_dr_enter_guest()
2974 vmxctx->host_dr1 = rdr1(); in vmx_dr_enter_guest()
2975 vmxctx->host_dr2 = rdr2(); in vmx_dr_enter_guest()
2976 vmxctx->host_dr3 = rdr3(); in vmx_dr_enter_guest()
2977 vmxctx->host_dr6 = rdr6(); in vmx_dr_enter_guest()
2980 load_dr0(vmxctx->guest_dr0); in vmx_dr_enter_guest()
2981 load_dr1(vmxctx->guest_dr1); in vmx_dr_enter_guest()
2982 load_dr2(vmxctx->guest_dr2); in vmx_dr_enter_guest()
2983 load_dr3(vmxctx->guest_dr3); in vmx_dr_enter_guest()
2984 load_dr6(vmxctx->guest_dr6); in vmx_dr_enter_guest()
2992 vmxctx->guest_dr0 = rdr0(); in vmx_dr_leave_guest()
2993 vmxctx->guest_dr1 = rdr1(); in vmx_dr_leave_guest()
2994 vmxctx->guest_dr2 = rdr2(); in vmx_dr_leave_guest()
2995 vmxctx->guest_dr3 = rdr3(); in vmx_dr_leave_guest()
2996 vmxctx->guest_dr6 = rdr6(); in vmx_dr_leave_guest()
3002 load_dr0(vmxctx->host_dr0); in vmx_dr_leave_guest()
3003 load_dr1(vmxctx->host_dr1); in vmx_dr_leave_guest()
3004 load_dr2(vmxctx->host_dr2); in vmx_dr_leave_guest()
3005 load_dr3(vmxctx->host_dr3); in vmx_dr_leave_guest()
3006 load_dr6(vmxctx->host_dr6); in vmx_dr_leave_guest()
3007 wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl); in vmx_dr_leave_guest()
3008 load_dr7(vmxctx->host_dr7); in vmx_dr_leave_guest()
3009 write_rflags(read_rflags() | vmxctx->host_tf); in vmx_dr_leave_guest()
3020 CPU_SET_ATOMIC(cpu, &pmap->pm_active); in vmx_pmap_activate()
3021 smr_enter(pmap->pm_eptsmr); in vmx_pmap_activate()
3022 eptgen = atomic_load_long(&pmap->pm_eptgen); in vmx_pmap_activate()
3023 if (eptgen != vmx->eptgen[cpu]) { in vmx_pmap_activate()
3024 vmx->eptgen[cpu] = eptgen; in vmx_pmap_activate()
3026 (struct invept_desc){ .eptp = vmx->eptp, ._res = 0 }); in vmx_pmap_activate()
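/*
 * (The per-CPU generation snapshot makes the INVEPT above lazy:
 * pm_eptgen advances whenever the EPT paging structures change, and
 * each CPU flushes its EPTP-tagged TLB entries only when it observes
 * a generation newer than the one it last loaded.)
 */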
3033 smr_exit(pmap->pm_eptsmr); in vmx_pmap_deactivate()
3034 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); in vmx_pmap_deactivate()
3052 vmx = vcpu->vmx; in vmx_run()
3053 vmcs = vcpu->vmcs; in vmx_run()
3054 vmxctx = &vcpu->ctx; in vmx_run()
3055 vlapic = vm_lapic(vcpu->vcpu); in vmx_run()
3056 vmexit = vm_exitinfo(vcpu->vcpu); in vmx_run()
3059 KASSERT(vmxctx->pmap == pmap, in vmx_run()
3060 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap)); in vmx_run()
3098 * The same reasoning applies to the IPI generated by in vmx_run()
3111 vm_exit_suspended(vcpu->vcpu, rip); in vmx_run()
3115 if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) { in vmx_run()
3117 vm_exit_rendezvous(vcpu->vcpu, rip); in vmx_run()
3123 vm_exit_reqidle(vcpu->vcpu, rip); in vmx_run()
3127 if (vcpu_should_yield(vcpu->vcpu)) { in vmx_run()
3129 vm_exit_astpending(vcpu->vcpu, rip); in vmx_run()
3135 if (vcpu_debugged(vcpu->vcpu)) { in vmx_run()
3137 vm_exit_debug(vcpu->vcpu, rip); in vmx_run()
3146 if ((vcpu->cap.proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0) { in vmx_run()
3188 * EPTP-tagged TLB entries if required. in vmx_run()
3204 vmexit->rip = rip = vmcs_guest_rip(); in vmx_run()
3205 vmexit->inst_length = vmexit_instruction_length(); in vmx_run()
3206 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason(); in vmx_run()
3207 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification(); in vmx_run()
3210 vcpu->state.nextrip = rip; in vmx_run()
3222 rip = vmexit->rip; in vmx_run()
3229 if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) || in vmx_run()
3230 (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) { in vmx_run()
3232 handled, vmexit->exitcode); in vmx_run()
3236 vmexit->exitcode); in vmx_run()
3249 vpid_free(vcpu->state.vpid); in vmx_vcpu_cleanup()
3250 free(vcpu->pir_desc, M_VMX); in vmx_vcpu_cleanup()
3251 free(vcpu->apic_page, M_VMX); in vmx_vcpu_cleanup()
3252 free(vcpu->vmcs, M_VMX); in vmx_vcpu_cleanup()
3262 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); in vmx_cleanup()
3264 free(vmx->msr_bitmap, M_VMX); in vmx_cleanup()
3276 return (&vmxctx->guest_rax); in vmxctx_regptr()
3278 return (&vmxctx->guest_rbx); in vmxctx_regptr()
3280 return (&vmxctx->guest_rcx); in vmxctx_regptr()
3282 return (&vmxctx->guest_rdx); in vmxctx_regptr()
3284 return (&vmxctx->guest_rsi); in vmxctx_regptr()
3286 return (&vmxctx->guest_rdi); in vmxctx_regptr()
3288 return (&vmxctx->guest_rbp); in vmxctx_regptr()
3290 return (&vmxctx->guest_r8); in vmxctx_regptr()
3292 return (&vmxctx->guest_r9); in vmxctx_regptr()
3294 return (&vmxctx->guest_r10); in vmxctx_regptr()
3296 return (&vmxctx->guest_r11); in vmxctx_regptr()
3298 return (&vmxctx->guest_r12); in vmxctx_regptr()
3300 return (&vmxctx->guest_r13); in vmxctx_regptr()
3302 return (&vmxctx->guest_r14); in vmxctx_regptr()
3304 return (&vmxctx->guest_r15); in vmxctx_regptr()
3306 return (&vmxctx->guest_cr2); in vmxctx_regptr()
3308 return (&vmxctx->guest_dr0); in vmxctx_regptr()
3310 return (&vmxctx->guest_dr1); in vmxctx_regptr()
3312 return (&vmxctx->guest_dr2); in vmxctx_regptr()
3314 return (&vmxctx->guest_dr3); in vmxctx_regptr()
3316 return (&vmxctx->guest_dr6); in vmxctx_regptr()
3353 error = vmcs_getreg(vcpu->vmcs, running, in vmx_get_intr_shadow()
3374 vmcs = vcpu->vmcs; in vmx_modify_intr_shadow()
3392 shreg = -1; in vmx_shadow_reg()
3413 struct vmx *vmx = vcpu->vmx; in vmx_getreg()
3415 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_getreg()
3417 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), in vmx_getreg()
3418 vcpu->vcpuid); in vmx_getreg()
3424 *retval = vcpu->guest_msrs[IDX_MSR_KGSBASE]; in vmx_getreg()
3427 *retval = vlapic_get_cr8(vm_lapic(vcpu->vcpu)); in vmx_getreg()
3431 if (vmxctx_getreg(&vcpu->ctx, reg, retval) == 0) in vmx_getreg()
3434 return (vmcs_getreg(vcpu->vmcs, running, reg, retval)); in vmx_getreg()
3444 struct vmx *vmx = vcpu->vmx; in vmx_setreg()
3446 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_setreg()
3448 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), in vmx_setreg()
3449 vcpu->vcpuid); in vmx_setreg()
3454 if (vmxctx_setreg(&vcpu->ctx, reg, val) == 0) in vmx_setreg()
3461 error = vmcs_setreg(vcpu->vmcs, running, reg, val); in vmx_setreg()
3465 * If the "load EFER" VM-entry control is 1 then the in vmx_setreg()
3466 * value of EFER.LMA must be identical to "IA-32e mode guest" in vmx_setreg()
3467 * bit in the VM-entry control. in vmx_setreg()
3471 vmcs_getreg(vcpu->vmcs, running, in vmx_setreg()
3477 vmcs_setreg(vcpu->vmcs, running, in vmx_setreg()
3486 error = vmcs_setreg(vcpu->vmcs, running, in vmx_setreg()
3498 pmap = vcpu->ctx.pmap; in vmx_setreg()
3511 struct vmx *vmx = vcpu->vmx; in vmx_getdesc()
3513 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_getdesc()
3515 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), in vmx_getdesc()
3516 vcpu->vcpuid); in vmx_getdesc()
3518 return (vmcs_getdesc(vcpu->vmcs, running, reg, desc)); in vmx_getdesc()
3526 struct vmx *vmx = vcpu->vmx; in vmx_setdesc()
3528 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_setdesc()
3530 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), in vmx_setdesc()
3531 vcpu->vcpuid); in vmx_setdesc()
3533 return (vmcs_setdesc(vcpu->vmcs, running, reg, desc)); in vmx_setdesc()
3545 vcap = vcpu->cap.set; in vmx_getcap()
3594 struct vmcs *vmcs = vcpu->vmcs; in vmx_setcap()
3610 pptr = &vcpu->cap.proc_ctls; in vmx_setcap()
3619 pptr = &vcpu->cap.proc_ctls; in vmx_setcap()
3628 pptr = &vcpu->cap.proc_ctls; in vmx_setcap()
3648 pptr = &vcpu->cap.proc_ctls2; in vmx_setcap()
3657 pptr = &vcpu->cap.proc_ctls2; in vmx_setcap()
3667 if (vcpu->cap.exc_bitmap != 0xffffffff) { in vmx_setcap()
3668 pptr = &vcpu->cap.exc_bitmap; in vmx_setcap()
3677 vlapic = vm_lapic(vcpu->vcpu); in vmx_setcap()
3678 vlapic->ipi_exit = val; in vmx_setcap()
3711 vcpu->cap.set |= (1 << type); in vmx_setcap()
3713 vcpu->cap.set &= ~(1 << type); in vmx_setcap()
3742 VLAPIC_CTR2(vlapic, msg " assert %s-triggered vector %d", \
3744 VLAPIC_CTR1(vlapic, msg " pir0 0x%016lx", pir_desc->pir[0]); \
3745 VLAPIC_CTR1(vlapic, msg " pir1 0x%016lx", pir_desc->pir[1]); \
3746 VLAPIC_CTR1(vlapic, msg " pir2 0x%016lx", pir_desc->pir[2]); \
3747 VLAPIC_CTR1(vlapic, msg " pir3 0x%016lx", pir_desc->pir[3]); \
3752 * vlapic->ops handlers that utilize the APICv hardware assist described in
3764 pir_desc = vlapic_vtx->pir_desc; in vmx_set_intr_ready()
3773 atomic_set_long(&pir_desc->pir[idx], mask); in vmx_set_intr_ready()
3777 * transition from 0->1. in vmx_set_intr_ready()
3782 * the 0->1 'pending' transition with a notification, but the vCPU in vmx_set_intr_ready()
3784 * need to then be notified if a high-priority interrupt arrived which in vmx_set_intr_ready()
3789 * to-be-injected interrupt exceed the priorities already present, the in vmx_set_intr_ready()
3791 * cleared whenever the 'pending' bit makes another 0->1 transition. in vmx_set_intr_ready()
3793 if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) { in vmx_set_intr_ready()
3795 vlapic_vtx->pending_prio = 0; in vmx_set_intr_ready()
3797 const u_int old_prio = vlapic_vtx->pending_prio; in vmx_set_intr_ready()
3801 atomic_set_int(&vlapic_vtx->pending_prio, prio_bit); in vmx_set_intr_ready()
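/*
 * Editorial sketch of the posted-interrupt descriptor as used above
 * (the real definition lives elsewhere in the source): 256 PIR bits
 * covering vectors 0-255 plus a 'pending' word serving as the
 * outstanding-notification flag, padded to the 64-byte alignment the
 * hardware requires.
 */
struct pir_desc_sketch {
	uint64_t	pir[4];		/* one bit per vector 0..255 */
	uint64_t	pending;	/* outstanding notification */
	uint64_t	unused[3];	/* pad to 64 bytes */
} __aligned(64);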
3829 pir_desc = vlapic_vtx->pir_desc; in vmx_pending_intr()
3830 lapic = vlapic->apic_page; in vmx_pending_intr()
3839 vmexit = vm_exitinfo(vlapic->vcpu); in vmx_pending_intr()
3840 KASSERT(vmexit->exitcode == VM_EXITCODE_HLT, in vmx_pending_intr()
3842 rvi = vmexit->u.hlt.intr_status & APIC_TPR_INT; in vmx_pending_intr()
3843 ppr = lapic->ppr & APIC_TPR_INT; in vmx_pending_intr()
3847 pending = atomic_load_acq_long(&pir_desc->pending); in vmx_pending_intr()
3861 VLAPIC_CTR1(vlapic, "HLT with non-zero PPR %d", lapic->ppr); in vmx_pending_intr()
3864 for (i = 3; i >= 0; i--) { in vmx_pending_intr()
3865 pirval = pir_desc->pir[i]; in vmx_pending_intr()
3867 vpr = (i * 64 + flsl(pirval) - 1) & APIC_TPR_INT; in vmx_pending_intr()
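/*
 * Worked example: i == 2 with pirval == (1UL << 9) makes the highest
 * pending vector 2*64 + 10 - 1 = 137 (0x89); masking with the
 * priority-class bits (0xf0) yields priority class 8.
 */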
3873 * If the highest-priority pending interrupt falls short of the in vmx_pending_intr()
3875 * have any stale bits which would preclude a higher-priority interrupt in vmx_pending_intr()
3880 const u_int old = vlapic_vtx->pending_prio; in vmx_pending_intr()
3883 vlapic_vtx->pending_prio = prio_bit; in vmx_pending_intr()
3905 KASSERT(!vcpu_is_running(vlapic->vcpu, NULL), in vmx_set_tmr()
3909 vmcs = vlapic_vtx->vcpu->vmcs; in vmx_set_tmr()
3931 vcpu = vlapic_vtx->vcpu; in vmx_enable_x2apic_mode_ts()
3932 vmcs = vcpu->vmcs; in vmx_enable_x2apic_mode_ts()
3934 proc_ctls = vcpu->cap.proc_ctls; in vmx_enable_x2apic_mode_ts()
3938 vcpu->cap.proc_ctls = proc_ctls; in vmx_enable_x2apic_mode_ts()
3956 vcpu = vlapic_vtx->vcpu; in vmx_enable_x2apic_mode_vid()
3957 vmx = vcpu->vmx; in vmx_enable_x2apic_mode_vid()
3958 vmcs = vcpu->vmcs; in vmx_enable_x2apic_mode_vid()
3960 proc_ctls2 = vcpu->cap.proc_ctls2; in vmx_enable_x2apic_mode_vid()
3966 vcpu->cap.proc_ctls2 = proc_ctls2; in vmx_enable_x2apic_mode_vid()
3972 if (vlapic->vcpuid == 0) { in vmx_enable_x2apic_mode_vid()
3977 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); in vmx_enable_x2apic_mode_vid()
4009 int rvi, pirbase = -1; in vmx_inject_pir()
4013 pir_desc = vlapic_vtx->pir_desc; in vmx_inject_pir()
4014 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { in vmx_inject_pir()
4021 pirbase = -1; in vmx_inject_pir()
4022 lapic = vlapic->apic_page; in vmx_inject_pir()
4024 val = atomic_readandclear_long(&pir_desc->pir[0]); in vmx_inject_pir()
4026 lapic->irr0 |= val; in vmx_inject_pir()
4027 lapic->irr1 |= val >> 32; in vmx_inject_pir()
4032 val = atomic_readandclear_long(&pir_desc->pir[1]); in vmx_inject_pir()
4034 lapic->irr2 |= val; in vmx_inject_pir()
4035 lapic->irr3 |= val >> 32; in vmx_inject_pir()
4040 val = atomic_readandclear_long(&pir_desc->pir[2]); in vmx_inject_pir()
4042 lapic->irr4 |= val; in vmx_inject_pir()
4043 lapic->irr5 |= val >> 32; in vmx_inject_pir()
4048 val = atomic_readandclear_long(&pir_desc->pir[3]); in vmx_inject_pir()
4050 lapic->irr6 |= val; in vmx_inject_pir()
4051 lapic->irr7 |= val >> 32; in vmx_inject_pir()
4060 * interrupts on VM-entry. in vmx_inject_pir()
4064 * CPU-Y is sending a posted interrupt to CPU-X, which in vmx_inject_pir()
4066 * CPU-X will eventually exit and the state seen in s/w is in vmx_inject_pir()
4069 * CPU-X CPU-Y in vmx_inject_pir()
4080 rvi = pirbase + flsl(pirval) - 1; in vmx_inject_pir()
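/*
 * (Folding the highest merged vector into RVI -- the low byte of the
 * guest interrupt-status field -- makes the processor re-evaluate
 * virtual-interrupt delivery on VM-entry, closing the window
 * described above.)
 */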
4101 vmx = vcpu->vmx; in vmx_vlapic_init()
4104 vlapic->vm = vmx->vm; in vmx_vlapic_init()
4105 vlapic->vcpu = vcpu->vcpu; in vmx_vlapic_init()
4106 vlapic->vcpuid = vcpu->vcpuid; in vmx_vlapic_init()
4107 vlapic->apic_page = (struct LAPIC *)vcpu->apic_page; in vmx_vlapic_init()
4110 vlapic_vtx->pir_desc = vcpu->pir_desc; in vmx_vlapic_init()
4111 vlapic_vtx->vcpu = vcpu; in vmx_vlapic_init()
4114 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts; in vmx_vlapic_init()
4118 vlapic->ops.set_intr_ready = vmx_set_intr_ready; in vmx_vlapic_init()
4119 vlapic->ops.pending_intr = vmx_pending_intr; in vmx_vlapic_init()
4120 vlapic->ops.intr_accepted = vmx_intr_accepted; in vmx_vlapic_init()
4121 vlapic->ops.set_tmr = vmx_set_tmr; in vmx_vlapic_init()
4122 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid; in vmx_vlapic_init()
4126 vlapic->ops.post_intr = vmx_post_intr; in vmx_vlapic_init()
4153 vmx = vcpu->vmx; in vmx_vcpu_snapshot()
4154 vmcs = vcpu->vmcs; in vmx_vcpu_snapshot()
4156 run = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_vcpu_snapshot()
4158 printf("%s: %s%d is running", __func__, vm_name(vmx->vm), in vmx_vcpu_snapshot()
4159 vcpu->vcpuid); in vmx_vcpu_snapshot()
4218 SNAPSHOT_BUF_OR_LEAVE(vcpu->guest_msrs, in vmx_vcpu_snapshot()
4219 sizeof(vcpu->guest_msrs), meta, err, done); in vmx_vcpu_snapshot()
4221 SNAPSHOT_BUF_OR_LEAVE(vcpu->pir_desc, in vmx_vcpu_snapshot()
4222 sizeof(*vcpu->pir_desc), meta, err, done); in vmx_vcpu_snapshot()
4224 SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, in vmx_vcpu_snapshot()
4225 sizeof(vcpu->mtrr), meta, err, done); in vmx_vcpu_snapshot()
4227 vmxctx = &vcpu->ctx; in vmx_vcpu_snapshot()
4228 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rdi, meta, err, done); in vmx_vcpu_snapshot()
4229 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rsi, meta, err, done); in vmx_vcpu_snapshot()
4230 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rdx, meta, err, done); in vmx_vcpu_snapshot()
4231 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rcx, meta, err, done); in vmx_vcpu_snapshot()
4232 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r8, meta, err, done); in vmx_vcpu_snapshot()
4233 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r9, meta, err, done); in vmx_vcpu_snapshot()
4234 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rax, meta, err, done); in vmx_vcpu_snapshot()
4235 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rbx, meta, err, done); in vmx_vcpu_snapshot()
4236 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rbp, meta, err, done); in vmx_vcpu_snapshot()
4237 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r10, meta, err, done); in vmx_vcpu_snapshot()
4238 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r11, meta, err, done); in vmx_vcpu_snapshot()
4239 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r12, meta, err, done); in vmx_vcpu_snapshot()
4240 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r13, meta, err, done); in vmx_vcpu_snapshot()
4241 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r14, meta, err, done); in vmx_vcpu_snapshot()
4242 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r15, meta, err, done); in vmx_vcpu_snapshot()
4243 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_cr2, meta, err, done); in vmx_vcpu_snapshot()
4244 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr0, meta, err, done); in vmx_vcpu_snapshot()
4245 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr1, meta, err, done); in vmx_vcpu_snapshot()
4246 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr2, meta, err, done); in vmx_vcpu_snapshot()
4247 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr3, meta, err, done); in vmx_vcpu_snapshot()
4248 SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr6, meta, err, done); in vmx_vcpu_snapshot()
4262 vmx = vcpu->vmx; in vmx_restore_tsc()
4263 vmcs = vcpu->vmcs; in vmx_restore_tsc()
4265 running = vcpu_is_running(vcpu->vcpu, &hostcpu); in vmx_restore_tsc()
4267 printf("%s: %s%d is running", __func__, vm_name(vmx->vm), in vmx_restore_tsc()
4268 vcpu->vcpuid); in vmx_restore_tsc()