Lines Matching refs:vcpuid
314 static bool vcpu_sleep_bailout_checks(struct vm *vm, int vcpuid);
315 static int vcpu_vector_sipi(struct vm *vm, int vcpuid, uint8_t vector);
432 vcpu_trace_exceptions(struct vm *vm, int vcpuid) in vcpu_trace_exceptions() argument
438 vcpu_trap_wbinvd(struct vm *vm, int vcpuid) in vcpu_trap_wbinvd() argument
831 vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa) in vm_mem_allocated() argument
838 state = vcpu_get_state(vm, vcpuid, &hostcpu); in vm_mem_allocated()
1177 vm_get_register(struct vm *vm, int vcpuid, int reg, uint64_t *retval) in vm_get_register() argument
1179 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vm_get_register()
1185 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vm_get_register()
1191 return (VMGETREG(vm->cookie, vcpuid, reg, retval)); in vm_get_register()
1196 vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val) in vm_set_register() argument
1198 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vm_set_register()
1205 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vm_set_register()
1208 error = VMSETREG(vm->cookie, vcpuid, reg, val); in vm_set_register()
1220 return (VMSETREG(vm->cookie, vcpuid, reg, val)); in vm_set_register()
1298 vm_get_fpu(struct vm *vm, int vcpuid, void *buf, size_t len) in vm_get_fpu() argument
1300 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vm_get_fpu()
1303 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vm_get_fpu()
1311 vm_set_fpu(struct vm *vm, int vcpuid, void *buf, size_t len) in vm_set_fpu() argument
1313 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vm_set_fpu()
1316 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vm_set_fpu()
1324 vm_get_run_state(struct vm *vm, int vcpuid, uint32_t *state, uint8_t *sipi_vec) in vm_get_run_state() argument
1328 if (vcpuid < 0 || vcpuid >= vm->maxcpus) { in vm_get_run_state()
1332 vcpu = &vm->vcpu[vcpuid]; in vm_get_run_state()
1343 vm_set_run_state(struct vm *vm, int vcpuid, uint32_t state, uint8_t sipi_vec) in vm_set_run_state() argument
1347 if (vcpuid < 0 || vcpuid >= vm->maxcpus) { in vm_set_run_state()
1354 vcpu = &vm->vcpu[vcpuid]; in vm_set_run_state()
1468 vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate, in vcpu_set_state_locked() argument
1474 vcpu = &vm->vcpu[vcpuid]; in vcpu_set_state_locked()
1539 vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate) in vcpu_require_state() argument
1543 if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0) in vcpu_require_state()
1548 vcpu_require_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate) in vcpu_require_state_locked() argument
1552 if ((error = vcpu_set_state_locked(vm, vcpuid, newstate, false)) != 0) in vcpu_require_state_locked()
1560 vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled) in vm_handle_hlt() argument
1566 KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted")); in vm_handle_hlt()
1568 vcpu = &vm->vcpu[vcpuid]; in vm_handle_hlt()
1578 if (vm_nmi_pending(vm, vcpuid)) in vm_handle_hlt()
1580 if (vcpu_run_state_pending(vm, vcpuid)) in vm_handle_hlt()
1583 if (vm_extint_pending(vm, vcpuid) || in vm_handle_hlt()
1594 if (vcpu_sleep_bailout_checks(vm, vcpuid)) { in vm_handle_hlt()
1608 CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus); in vm_handle_hlt()
1616 vcpu_ustate_change(vm, vcpuid, VU_IDLE); in vm_handle_hlt()
1617 vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING); in vm_handle_hlt()
1619 vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN); in vm_handle_hlt()
1620 vcpu_ustate_change(vm, vcpuid, VU_EMU_KERN); in vm_handle_hlt()
1624 CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus); in vm_handle_hlt()
1636 vm_handle_paging(struct vm *vm, int vcpuid) in vm_handle_paging() argument
1638 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vm_handle_paging()
1696 vm_handle_mmio_emul(struct vm *vm, int vcpuid) in vm_handle_mmio_emul() argument
1704 vcpu = &vm->vcpu[vcpuid]; in vm_handle_mmio_emul()
1716 error = vie_fetch_instruction(vie, vm, vcpuid, inst_addr, in vm_handle_mmio_emul()
1731 if (vie_decode_instruction(vie, vm, vcpuid, cs_d) != 0) { in vm_handle_mmio_emul()
1737 vie_verify_gla(vie, vm, vcpuid, vme->u.mmio_emul.gla) != 0) { in vm_handle_mmio_emul()
1744 error = vie_emulate_mmio(vie, vm, vcpuid); in vm_handle_mmio_emul()
1760 if (!vcpu_should_yield(vm, vcpuid)) { in vm_handle_mmio_emul()
1779 vm_handle_inout(struct vm *vm, int vcpuid, struct vm_exit *vme) in vm_handle_inout() argument
1785 vcpu = &vm->vcpu[vcpuid]; in vm_handle_inout()
1789 err = vie_emulate_inout(vie, vm, vcpuid); in vm_handle_inout()
1807 if (!vcpu_should_yield(vm, vcpuid)) { in vm_handle_inout()
1830 vm_handle_inst_emul(struct vm *vm, int vcpuid) in vm_handle_inst_emul() argument
1838 vcpu = &vm->vcpu[vcpuid]; in vm_handle_inst_emul()
1842 vie_cs_info(vie, vm, vcpuid, &cs_base, &cs_d); in vm_handle_inst_emul()
1846 error = vie_fetch_instruction(vie, vm, vcpuid, vme->rip + cs_base, in vm_handle_inst_emul()
1859 if (vie_decode_instruction(vie, vm, vcpuid, cs_d) != 0) { in vm_handle_inst_emul()
1865 error = vie_emulate_other(vie, vm, vcpuid); in vm_handle_inst_emul()
1880 vm_handle_run_state(struct vm *vm, int vcpuid) in vm_handle_run_state() argument
1882 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vm_handle_run_state()
1889 VERIFY0(vcpu_arch_reset(vm, vcpuid, true)); in vm_handle_run_state()
1901 VERIFY0(vcpu_vector_sipi(vm, vcpuid, vector)); in vm_handle_run_state()
1922 if (vcpu_sleep_bailout_checks(vm, vcpuid)) { in vm_handle_run_state()
1926 vcpu_ustate_change(vm, vcpuid, VU_IDLE); in vm_handle_run_state()
1927 vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING); in vm_handle_run_state()
1929 vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN); in vm_handle_run_state()
1930 vcpu_ustate_change(vm, vcpuid, VU_EMU_KERN); in vm_handle_run_state()
2036 vm_handle_rdmsr(struct vm *vm, int vcpuid, struct vm_exit *vme) in vm_handle_rdmsr() argument
2038 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vm_handle_rdmsr()
2055 vm_inject_gp(vm, vcpuid); in vm_handle_rdmsr()
2074 vcpu_tsc_offset(vm, vcpuid, false)); in vm_handle_rdmsr()
2085 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RAX, in vm_handle_rdmsr()
2087 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RDX, in vm_handle_rdmsr()
2093 vm_handle_wrmsr(struct vm *vm, int vcpuid, struct vm_exit *vme) in vm_handle_wrmsr() argument
2095 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vm_handle_wrmsr()
2112 vm_inject_gp(vm, vcpuid); in vm_handle_wrmsr()
2250 vm_exit_run_state(struct vm *vm, int vcpuid, uint64_t rip) in vm_exit_run_state() argument
2254 vmexit = vm_exitinfo(vm, vcpuid); in vm_exit_run_state()
2258 vmm_stat_incr(vm, vcpuid, VMEXIT_RUN_STATE, 1); in vm_exit_run_state()
2303 const int vcpuid = vtc->vtc_vcpuid; in vmm_savectx() local
2306 ops->vmsavectx(vm->cookie, vcpuid); in vmm_savectx()
2313 if (vm->vcpu[vcpuid].ustate != VU_IDLE) { in vmm_savectx()
2314 vtc->vtc_ustate = vm->vcpu[vcpuid].ustate; in vmm_savectx()
2315 vcpu_ustate_change(vm, vcpuid, VU_SCHED); in vmm_savectx()
2323 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vmm_savectx()
2335 const int vcpuid = vtc->vtc_vcpuid; in vmm_restorectx() local
2338 if (vm->vcpu[vcpuid].ustate != VU_IDLE) { in vmm_restorectx()
2339 vcpu_ustate_change(vm, vcpuid, vtc->vtc_ustate); in vmm_restorectx()
2354 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vmm_restorectx()
2361 ops->vmrestorectx(vm->cookie, vcpuid); in vmm_restorectx()
2371 vm_entry_actions(struct vm *vm, int vcpuid, const struct vm_entry *entry, in vm_entry_actions() argument
2374 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vm_entry_actions()
2390 err = vie_emulate_mmio(vie, vm, vcpuid); in vm_entry_actions()
2409 err = vie_emulate_inout(vie, vm, vcpuid); in vm_entry_actions()
2442 vm_loop_checks(struct vm *vm, int vcpuid, struct vm_exit *vme) in vm_loop_checks() argument
2446 vie = vm->vcpu[vcpuid].vie_ctx; in vm_loop_checks()
2461 vm_run(struct vm *vm, int vcpuid, const struct vm_entry *entry) in vm_run() argument
2469 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vm_run()
2471 if (!CPU_ISSET(vcpuid, &vm->active_cpus)) in vm_run()
2477 vcpu = &vm->vcpu[vcpuid]; in vm_run()
2480 vcpu_ustate_change(vm, vcpuid, VU_EMU_KERN); in vm_run()
2485 error = vm_entry_actions(vm, vcpuid, entry, vme); in vm_run()
2491 error = vm_loop_checks(vm, vcpuid, vme); in vm_run()
2518 vcpu_require_state(vm, vcpuid, VCPU_RUNNING); in vm_run()
2519 error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip); in vm_run()
2520 vcpu_require_state(vm, vcpuid, VCPU_FROZEN); in vm_run()
2538 error = vm_handle_run_state(vm, vcpuid); in vm_run()
2541 vioapic_process_eoi(vm, vcpuid, in vm_run()
2546 error = vm_handle_hlt(vm, vcpuid, intr_disabled); in vm_run()
2549 error = vm_handle_paging(vm, vcpuid); in vm_run()
2552 error = vm_handle_mmio_emul(vm, vcpuid); in vm_run()
2555 error = vm_handle_inout(vm, vcpuid, vme); in vm_run()
2558 error = vm_handle_inst_emul(vm, vcpuid); in vm_run()
2563 vm_inject_ud(vm, vcpuid); in vm_run()
2566 error = vm_handle_rdmsr(vm, vcpuid, vme); in vm_run()
2569 error = vm_handle_wrmsr(vm, vcpuid, vme); in vm_run()
2575 VERIFY0(vm_suspend_cpu(vm, vcpuid)); in vm_run()
2600 vcpu_ustate_change(vm, vcpuid, in vm_run()
2607 vm_restart_instruction(void *arg, int vcpuid) in vm_restart_instruction() argument
2616 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vm_restart_instruction()
2619 vcpu = &vm->vcpu[vcpuid]; in vm_restart_instruction()
2620 state = vcpu_get_state(vm, vcpuid, NULL); in vm_restart_instruction()
2636 error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip); in vm_restart_instruction()
2646 vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info) in vm_exit_intinfo() argument
2650 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vm_exit_intinfo()
2653 vcpu = &vm->vcpu[vcpuid]; in vm_exit_intinfo()
2728 vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo) in vm_entry_intinfo() argument
2730 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vm_entry_intinfo()
2744 (void) vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT, vcpuid); in vm_entry_intinfo()
2780 vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2) in vm_get_intinfo() argument
2784 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vm_get_intinfo()
2787 vcpu = &vm->vcpu[vcpuid]; in vm_get_intinfo()
2794 vm_inject_exception(struct vm *vm, int vcpuid, uint8_t vector, in vm_inject_exception() argument
2801 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vm_inject_exception()
2824 vcpu = &vm->vcpu[vcpuid]; in vm_inject_exception()
2835 error = vm_get_register(vm, vcpuid, VM_REG_GUEST_CR0, &regval); in vm_inject_exception()
2848 error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0); in vm_inject_exception()
2852 VERIFY0(vm_restart_instruction(vm, vcpuid)); in vm_inject_exception()
2865 vm_inject_ud(struct vm *vm, int vcpuid) in vm_inject_ud() argument
2867 VERIFY0(vm_inject_exception(vm, vcpuid, IDT_UD, false, 0, true)); in vm_inject_ud()
2871 vm_inject_gp(struct vm *vm, int vcpuid) in vm_inject_gp() argument
2873 VERIFY0(vm_inject_exception(vm, vcpuid, IDT_GP, true, 0, true)); in vm_inject_gp()
2877 vm_inject_ac(struct vm *vm, int vcpuid, uint32_t errcode) in vm_inject_ac() argument
2879 VERIFY0(vm_inject_exception(vm, vcpuid, IDT_AC, true, errcode, true)); in vm_inject_ac()
2883 vm_inject_ss(struct vm *vm, int vcpuid, uint32_t errcode) in vm_inject_ss() argument
2885 VERIFY0(vm_inject_exception(vm, vcpuid, IDT_SS, true, errcode, true)); in vm_inject_ss()
2889 vm_inject_pf(struct vm *vm, int vcpuid, uint32_t errcode, uint64_t cr2) in vm_inject_pf() argument
2891 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2)); in vm_inject_pf()
2892 VERIFY0(vm_inject_exception(vm, vcpuid, IDT_PF, true, errcode, true)); in vm_inject_pf()
2898 vm_inject_nmi(struct vm *vm, int vcpuid) in vm_inject_nmi() argument
2902 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vm_inject_nmi()
2905 vcpu = &vm->vcpu[vcpuid]; in vm_inject_nmi()
2908 vcpu_notify_event(vm, vcpuid); in vm_inject_nmi()
2913 vm_nmi_pending(struct vm *vm, int vcpuid) in vm_nmi_pending() argument
2915 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vm_nmi_pending()
2921 vm_nmi_clear(struct vm *vm, int vcpuid) in vm_nmi_clear() argument
2923 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vm_nmi_clear()
2928 vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1); in vm_nmi_clear()
2934 vm_inject_extint(struct vm *vm, int vcpuid) in vm_inject_extint() argument
2938 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vm_inject_extint()
2941 vcpu = &vm->vcpu[vcpuid]; in vm_inject_extint()
2944 vcpu_notify_event(vm, vcpuid); in vm_inject_extint()
2949 vm_extint_pending(struct vm *vm, int vcpuid) in vm_extint_pending() argument
2951 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vm_extint_pending()
2957 vm_extint_clear(struct vm *vm, int vcpuid) in vm_extint_clear() argument
2959 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vm_extint_clear()
2964 vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1); in vm_extint_clear()
2968 vm_inject_init(struct vm *vm, int vcpuid) in vm_inject_init() argument
2972 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vm_inject_init()
2975 vcpu = &vm->vcpu[vcpuid]; in vm_inject_init()
2992 vm_inject_sipi(struct vm *vm, int vcpuid, uint8_t vector) in vm_inject_sipi() argument
2996 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vm_inject_sipi()
2999 vcpu = &vm->vcpu[vcpuid]; in vm_inject_sipi()
3012 vcpu_run_state_pending(struct vm *vm, int vcpuid) in vcpu_run_state_pending() argument
3016 ASSERT(vcpuid >= 0 && vcpuid < vm->maxcpus); in vcpu_run_state_pending()
3017 vcpu = &vm->vcpu[vcpuid]; in vcpu_run_state_pending()
3024 vcpu_arch_reset(struct vm *vm, int vcpuid, bool init_only) in vcpu_arch_reset() argument
3059 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vcpu_arch_reset()
3061 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vcpu_arch_reset()
3065 VERIFY0(vm_set_register(vm, vcpuid, clear_regs[i], 0)); in vcpu_arch_reset()
3068 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, 2)); in vcpu_arch_reset()
3069 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RIP, 0xfff0)); in vcpu_arch_reset()
3070 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_CR0, 0x60000010)); in vcpu_arch_reset()
3080 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RDX, 0x600)); in vcpu_arch_reset()
3082 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_DR6, 0xffff0ff0)); in vcpu_arch_reset()
3083 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_DR7, 0x400)); in vcpu_arch_reset()
3089 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_CS, &desc)); in vcpu_arch_reset()
3090 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_CS, 0xf000)); in vcpu_arch_reset()
3097 VERIFY0(vm_set_seg_desc(vm, vcpuid, data_segs[i], &desc)); in vcpu_arch_reset()
3098 VERIFY0(vm_set_register(vm, vcpuid, data_segs[i], 0)); in vcpu_arch_reset()
3104 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_GDTR, &desc)); in vcpu_arch_reset()
3105 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_IDTR, &desc)); in vcpu_arch_reset()
3111 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_LDTR, &desc)); in vcpu_arch_reset()
3112 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_LDTR, 0)); in vcpu_arch_reset()
3118 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_TR, &desc)); in vcpu_arch_reset()
3119 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_TR, 0)); in vcpu_arch_reset()
3121 vlapic_reset(vm_lapic(vm, vcpuid)); in vcpu_arch_reset()
3123 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0)); in vcpu_arch_reset()
3146 vcpu_vector_sipi(struct vm *vm, int vcpuid, uint8_t vector) in vcpu_vector_sipi() argument
3150 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vcpu_vector_sipi()
3157 VERIFY0(vm_set_seg_desc(vm, vcpuid, VM_REG_GUEST_CS, &desc)); in vcpu_vector_sipi()
3158 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_CS, in vcpu_vector_sipi()
3161 VERIFY0(vm_set_register(vm, vcpuid, VM_REG_GUEST_RIP, 0)); in vcpu_vector_sipi()
3191 vm_cpuid_config(struct vm *vm, int vcpuid) in vm_cpuid_config() argument
3193 ASSERT3S(vcpuid, >=, 0); in vm_cpuid_config()
3194 ASSERT3S(vcpuid, <, VM_MAXCPU); in vm_cpuid_config()
3196 return (&vm->vcpu[vcpuid].cpuid_cfg); in vm_cpuid_config()
3230 vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate, in vcpu_set_state() argument
3236 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vcpu_set_state()
3237 panic("vcpu_set_state: invalid vcpuid %d", vcpuid); in vcpu_set_state()
3239 vcpu = &vm->vcpu[vcpuid]; in vcpu_set_state()
3242 error = vcpu_set_state_locked(vm, vcpuid, newstate, from_idle); in vcpu_set_state()
3249 vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu) in vcpu_get_state() argument
3254 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vcpu_get_state()
3255 panic("vcpu_get_state: invalid vcpuid %d", vcpuid); in vcpu_get_state()
3257 vcpu = &vm->vcpu[vcpuid]; in vcpu_get_state()
3273 vcpu_tsc_offset(struct vm *vm, int vcpuid, bool phys_adj) in vcpu_tsc_offset() argument
3275 ASSERT(vcpuid >= 0 && vcpuid < vm->maxcpus); in vcpu_tsc_offset()
3277 uint64_t vcpu_off = vm->tsc_offset + vm->vcpu[vcpuid].tsc_offset; in vcpu_tsc_offset()
3310 vm_activate_cpu(struct vm *vm, int vcpuid) in vm_activate_cpu() argument
3313 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vm_activate_cpu()
3316 if (CPU_ISSET(vcpuid, &vm->active_cpus)) in vm_activate_cpu()
3323 CPU_SET_ATOMIC(vcpuid, &vm->active_cpus); in vm_activate_cpu()
3337 vm_suspend_cpu(struct vm *vm, int vcpuid) in vm_suspend_cpu() argument
3341 if (vcpuid < -1 || vcpuid >= vm->maxcpus) in vm_suspend_cpu()
3344 if (vcpuid == -1) { in vm_suspend_cpu()
3351 if (!CPU_ISSET(vcpuid, &vm->active_cpus)) in vm_suspend_cpu()
3354 CPU_SET_ATOMIC(vcpuid, &vm->debug_cpus); in vm_suspend_cpu()
3355 vcpu_notify_event(vm, vcpuid); in vm_suspend_cpu()
3361 vm_resume_cpu(struct vm *vm, int vcpuid) in vm_resume_cpu() argument
3364 if (vcpuid < -1 || vcpuid >= vm->maxcpus) in vm_resume_cpu()
3367 if (vcpuid == -1) { in vm_resume_cpu()
3370 if (!CPU_ISSET(vcpuid, &vm->debug_cpus)) in vm_resume_cpu()
3373 CPU_CLR_ATOMIC(vcpuid, &vm->debug_cpus); in vm_resume_cpu()
3379 vcpu_bailout_checks(struct vm *vm, int vcpuid) in vcpu_bailout_checks() argument
3381 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vcpu_bailout_checks()
3384 ASSERT(vcpuid >= 0 && vcpuid < vm->maxcpus); in vcpu_bailout_checks()
3405 vmm_stat_incr(vm, vcpuid, VMEXIT_REQIDLE, 1); in vcpu_bailout_checks()
3429 if (vcpu_should_yield(vm, vcpuid)) { in vcpu_bailout_checks()
3431 vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1); in vcpu_bailout_checks()
3434 if (CPU_ISSET(vcpuid, &vm->debug_cpus)) { in vcpu_bailout_checks()
3443 vcpu_sleep_bailout_checks(struct vm *vm, int vcpuid) in vcpu_sleep_bailout_checks() argument
3445 if (vcpu_bailout_checks(vm, vcpuid)) { in vcpu_sleep_bailout_checks()
3446 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vcpu_sleep_bailout_checks()
3462 vcpu_entry_bailout_checks(struct vm *vm, int vcpuid, uint64_t rip) in vcpu_entry_bailout_checks() argument
3464 if (vcpu_bailout_checks(vm, vcpuid)) { in vcpu_entry_bailout_checks()
3465 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vcpu_entry_bailout_checks()
3481 vm_vcpu_barrier(struct vm *vm, int vcpuid) in vm_vcpu_barrier() argument
3483 if (vcpuid >= 0 && vcpuid < vm->maxcpus) { in vm_vcpu_barrier()
3484 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vm_vcpu_barrier()
3488 if (CPU_ISSET(vcpuid, &vm->active_cpus)) { in vm_vcpu_barrier()
3495 } else if (vcpuid == -1) { in vm_vcpu_barrier()
3501 if (CPU_ISSET(vcpuid, &vm->active_cpus)) { in vm_vcpu_barrier()
3528 vcpu_stats(struct vm *vm, int vcpuid) in vcpu_stats() argument
3531 return (vm->vcpu[vcpuid].stats); in vcpu_stats()
3535 vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state) in vm_get_x2apic_state() argument
3537 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vm_get_x2apic_state()
3540 *state = vm->vcpu[vcpuid].x2apic_state; in vm_get_x2apic_state()
3546 vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state) in vm_set_x2apic_state() argument
3548 if (vcpuid < 0 || vcpuid >= vm->maxcpus) in vm_set_x2apic_state()
3554 vm->vcpu[vcpuid].x2apic_state = state; in vm_set_x2apic_state()
3556 vlapic_set_x2apic_state(vm, vcpuid, state); in vm_set_x2apic_state()
3602 vcpu_notify_event(struct vm *vm, int vcpuid) in vcpu_notify_event() argument
3604 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vcpu_notify_event()
3612 vcpu_notify_event_type(struct vm *vm, int vcpuid, vcpu_notify_t ntype) in vcpu_notify_event_type() argument
3614 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vcpu_notify_event_type()
3626 vcpu_ustate_change(struct vm *vm, int vcpuid, enum vcpu_ustate ustate) in vcpu_ustate_change() argument
3628 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vcpu_ustate_change()
3661 vm_get_vmclient(struct vm *vm, int vcpuid) in vm_get_vmclient() argument
3663 return (vm->vcpu[vcpuid].vmclient); in vm_get_vmclient()
3719 vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, in vm_copy_teardown() argument
3731 vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging, in vm_copy_setup() argument
3737 vm_client_t *vmc = vm_get_vmclient(vm, vcpuid); in vm_copy_setup()
3749 error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa, fault); in vm_copy_setup()
3780 vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo); in vm_copy_setup()
3789 vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr, in vm_copyin() argument
3808 vm_copyout(struct vm *vm, int vcpuid, const void *kaddr, in vm_copyout() argument
3844 vm_ioport_access(struct vm *vm, int vcpuid, bool in, uint16_t port, in vm_ioport_access() argument
3921 const int vcpuid = vvk->vvk_vcpu.value.ui32; in vmm_kstat_update_vcpu() local
3922 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vmm_kstat_update_vcpu()
3924 ASSERT3U(vcpuid, <, VM_MAXCPU); in vmm_kstat_update_vcpu()
4082 vmm_data_read_msr(struct vm *vm, int vcpuid, uint32_t msr, uint64_t *value) in vmm_data_read_msr() argument
4095 *value = vm->vcpu[vcpuid].tsc_offset; in vmm_data_read_msr()
4100 err = vm_rdmtrr(&vm->vcpu[vcpuid].mtrr, msr, value); in vmm_data_read_msr()
4102 err = ops->vmgetmsr(vm->cookie, vcpuid, msr, value); in vmm_data_read_msr()
4111 vmm_data_write_msr(struct vm *vm, int vcpuid, uint32_t msr, uint64_t value) in vmm_data_write_msr() argument
4118 vm->vcpu[vcpuid].tsc_offset = value; in vmm_data_write_msr()
4126 err = vm_rdmtrr(&vm->vcpu[vcpuid].mtrr, msr, &comp); in vmm_data_write_msr()
4137 err = vm_wrmtrr(&vm->vcpu[vcpuid].mtrr, msr, value); in vmm_data_write_msr()
4139 err = ops->vmsetmsr(vm->cookie, vcpuid, msr, value); in vmm_data_write_msr()
4148 vmm_data_read_msrs(struct vm *vm, int vcpuid, const vmm_data_req_t *req) in vmm_data_read_msrs() argument
4161 int err = vmm_data_read_msr(vm, vcpuid, in vmm_data_read_msrs()
4193 VERIFY0(vmm_data_read_msr(vm, vcpuid, entryp->vfe_ident, in vmm_data_read_msrs()
4202 VERIFY0(vmm_data_read_msr(vm, vcpuid, entryp->vfe_ident, in vmm_data_read_msrs()
4209 vmm_data_write_msrs(struct vm *vm, int vcpuid, const vmm_data_req_t *req) in vmm_data_write_msrs() argument
4226 if (vmm_data_read_msr(vm, vcpuid, msr, &val) != 0) { in vmm_data_write_msrs()
4237 int err = vmm_data_write_msr(vm, vcpuid, entryp->vfe_ident, in vmm_data_write_msrs()
4270 vmm_read_arch_field(struct vm *vm, int vcpuid, uint32_t ident, uint64_t *valp) in vmm_read_arch_field() argument
4274 if (vcpuid == -1) { in vmm_read_arch_field()
4283 VERIFY(vcpuid >= 0 && vcpuid <= VM_MAXCPU); in vmm_read_arch_field()
4285 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vmm_read_arch_field()
4307 vmm_data_read_varch(struct vm *vm, int vcpuid, const vmm_data_req_t *req) in vmm_data_read_varch() argument
4313 if (vcpuid != -1 && (vcpuid < 0 || vcpuid >= VM_MAXCPU)) { in vmm_data_read_varch()
4325 if (!vmm_read_arch_field(vm, vcpuid, entryp->vfe_ident, in vmm_data_read_varch()
4339 if (vcpuid == -1) { in vmm_data_read_varch()
4357 VERIFY(vmm_read_arch_field(vm, vcpuid, entryp->vfe_ident, in vmm_data_read_varch()
4364 vmm_data_write_varch_vcpu(struct vm *vm, int vcpuid, const vmm_data_req_t *req) in vmm_data_write_varch_vcpu() argument
4369 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) { in vmm_data_write_varch_vcpu()
4376 struct vcpu *vcpu = &vm->vcpu[vcpuid]; in vmm_data_write_varch_vcpu()
4400 if (vm_exit_intinfo(vm, vcpuid, val) != 0) { in vmm_data_write_varch_vcpu()
4414 vmm_data_write_varch(struct vm *vm, int vcpuid, const vmm_data_req_t *req) in vmm_data_write_varch() argument
4420 if (vcpuid != -1) { in vmm_data_write_varch()
4421 return (vmm_data_write_varch_vcpu(vm, vcpuid, req)); in vmm_data_write_varch()