Lines Matching full:vcpu
97 * (a) allocated when vcpu is created
98 * (i) initialized when vcpu is created and when it is reinitialized
99 * (o) initialized the first time the vcpu is created
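A concrete reading of that legend against the two-phase construction shown further down (vcpu_alloc() and vcpu_init()), inferred from the matching lines only:

	/*
	 * guestfpu  (a): FPU save area allocated once in vcpu_alloc()
	 * state     (o): set once in vcpu_alloc(), survives reinitialization
	 * reqidle   (i): reset in vcpu_init() on every (re)initialization
	 */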
102 struct vcpu { struct
104 enum vcpu_state state; /* (o) vcpu state */ argument
106 int hostcpu; /* (o) vcpu's host cpu */ argument
107 int reqidle; /* (i) request vcpu to idle */ argument
128 #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN) argument
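Only the init macro matches the query; its companions are presumably the spin-mutex wrappers behind the vcpu_lock()/vcpu_unlock()/vcpu_assert_locked() calls seen throughout this file. A sketch consistent with MTX_SPIN above and the msleep_spin() calls below, not verbatim source:

	#define vcpu_lock(v)          mtx_lock_spin(&((v)->mtx))
	#define vcpu_unlock(v)        mtx_unlock_spin(&((v)->mtx))
	#define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED)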
160 * [v] reads require one frozen vcpu, writes require freezing all vcpus
187 struct vcpu **vcpu; /* (o) guest vcpus */ member
197 #define VMM_CTR0(vcpu, format) \ argument
198 VCPU_CTR0((vcpu)->vm, (vcpu)->vcpuid, format)
200 #define VMM_CTR1(vcpu, format, p1) \ argument
201 VCPU_CTR1((vcpu)->vm, (vcpu)->vcpuid, format, p1)
203 #define VMM_CTR2(vcpu, format, p1, p2) \ argument
204 VCPU_CTR2((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2)
206 #define VMM_CTR3(vcpu, format, p1, p2, p3) \ argument
207 VCPU_CTR3((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3)
209 #define VMM_CTR4(vcpu, format, p1, p2, p3, p4) \ argument
210 VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4)
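Given the definitions above, a call such as the one later in vm_set_register() simply pastes in the vm pointer and vcpuid:

	VMM_CTR1(vcpu, "Setting nextrip to %#lx", val);
	/* expands to */
	VCPU_CTR1((vcpu)->vm, (vcpu)->vcpuid, "Setting nextrip to %#lx", val);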
241 DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
266 static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
282 "IPI vector used for vcpu notifications");
299 static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
302 VMM_STAT(VCPU_MIGRATIONS, "vcpu migration across host cpus");
353 vcpu_cleanup(struct vcpu *vcpu, bool destroy) in vcpu_cleanup() argument
355 vmmops_vlapic_cleanup(vcpu->vlapic); in vcpu_cleanup()
356 vmmops_vcpu_cleanup(vcpu->cookie); in vcpu_cleanup()
357 vcpu->cookie = NULL; in vcpu_cleanup()
359 vmm_stat_free(vcpu->stats); in vcpu_cleanup()
360 fpu_save_area_free(vcpu->guestfpu); in vcpu_cleanup()
361 vcpu_lock_destroy(vcpu); in vcpu_cleanup()
362 free(vcpu, M_VM); in vcpu_cleanup()
366 static struct vcpu *
369 struct vcpu *vcpu; in vcpu_alloc() local
372 ("vcpu_init: invalid vcpu %d", vcpu_id)); in vcpu_alloc()
374 vcpu = malloc(sizeof(*vcpu), M_VM, M_WAITOK | M_ZERO); in vcpu_alloc()
375 vcpu_lock_init(vcpu); in vcpu_alloc()
376 vcpu->state = VCPU_IDLE; in vcpu_alloc()
377 vcpu->hostcpu = NOCPU; in vcpu_alloc()
378 vcpu->vcpuid = vcpu_id; in vcpu_alloc()
379 vcpu->vm = vm; in vcpu_alloc()
380 vcpu->guestfpu = fpu_save_area_alloc(); in vcpu_alloc()
381 vcpu->stats = vmm_stat_alloc(); in vcpu_alloc()
382 vcpu->tsc_offset = 0; in vcpu_alloc()
383 return (vcpu); in vcpu_alloc()
387 vcpu_init(struct vcpu *vcpu) in vcpu_init() argument
389 vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid); in vcpu_init()
390 vcpu->vlapic = vmmops_vlapic_init(vcpu->cookie); in vcpu_init()
391 vm_set_x2apic_state(vcpu, X2APIC_DISABLED); in vcpu_init()
392 vcpu->reqidle = 0; in vcpu_init()
393 vcpu->exitintinfo = 0; in vcpu_init()
394 vcpu->nmi_pending = 0; in vcpu_init()
395 vcpu->extint_pending = 0; in vcpu_init()
396 vcpu->exception_pending = 0; in vcpu_init()
397 vcpu->guest_xcr0 = XFEATURE_ENABLED_X87; in vcpu_init()
398 fpu_save_area_reset(vcpu->guestfpu); in vcpu_init()
399 vmm_stat_init(vcpu->stats); in vcpu_init()
403 vcpu_trace_exceptions(struct vcpu *vcpu) in vcpu_trace_exceptions() argument
410 vcpu_trap_wbinvd(struct vcpu *vcpu) in vcpu_trap_wbinvd() argument
416 vm_exitinfo(struct vcpu *vcpu) in vm_exitinfo() argument
418 return (&vcpu->exitinfo); in vm_exitinfo()
422 vm_exitinfo_cpuset(struct vcpu *vcpu) in vm_exitinfo_cpuset() argument
424 return (&vcpu->exitinfo_cpuset); in vm_exitinfo_cpuset()
546 if (vm->vcpu[i] != NULL) in vm_init()
547 vcpu_init(vm->vcpu[i]); in vm_init()
560 struct vcpu *
563 struct vcpu *vcpu; in vm_alloc_vcpu() local
568 vcpu = (struct vcpu *) in vm_alloc_vcpu()
569 atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]); in vm_alloc_vcpu()
570 if (__predict_true(vcpu != NULL)) in vm_alloc_vcpu()
571 return (vcpu); in vm_alloc_vcpu()
574 vcpu = vm->vcpu[vcpuid]; in vm_alloc_vcpu()
575 if (vcpu == NULL && !vm->dying) { in vm_alloc_vcpu()
576 vcpu = vcpu_alloc(vm, vcpuid); in vm_alloc_vcpu()
577 vcpu_init(vcpu); in vm_alloc_vcpu()
580 * Ensure vCPU is fully created before updating pointer in vm_alloc_vcpu()
583 atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid], in vm_alloc_vcpu()
584 (uintptr_t)vcpu); in vm_alloc_vcpu()
587 return (vcpu); in vm_alloc_vcpu()
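vm_alloc_vcpu() is a double-checked publication: the lock-free fast path reads the slot with acquire semantics, and the slow path (whose lock acquisition does not match the query) re-checks the slot and publishes the new vcpu with a release store. A minimal standalone sketch of the same pattern using C11 atomics rather than the kernel's atomic_*_ptr() wrappers; the names here are illustrative only:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct obj { int id; };

	static struct obj *
	get_or_create(_Atomic(struct obj *) *slot, int id)
	{
		/* Fast path: acquire load pairs with the release store below. */
		struct obj *o = atomic_load_explicit(slot, memory_order_acquire);
		if (o != NULL)
			return (o);

		/*
		 * Slow path: the real code takes a lock here and re-checks
		 * the slot before allocating, so only one thread publishes.
		 */
		o = calloc(1, sizeof(*o));
		o->id = id;
		atomic_store_explicit(slot, o, memory_order_release);
		return (o);
	}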
635 vm->vcpu = malloc(sizeof(*vm->vcpu) * vm_maxcpu, M_VM, M_WAITOK | in vm_create()
703 if (vm->vcpu[i] != NULL) in vm_cleanup()
704 vcpu_cleanup(vm->vcpu[i], destroy); in vm_cleanup()
731 free(vm->vcpu, M_VM); in vm_cleanup()
810 * This function is called in the context of a running vcpu which acts as
814 vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa) in vm_mem_allocated() argument
816 struct vm *vm = vcpu->vm; in vm_mem_allocated()
822 state = vcpu_get_state(vcpu, &hostcpu); in vm_mem_allocated()
824 ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu)); in vm_mem_allocated()
1222 vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot, in vm_gpa_hold() argument
1227 * The current vcpu should be frozen to ensure 'vm_memmap[]' in vm_gpa_hold()
1230 int state = vcpu_get_state(vcpu, NULL); in vm_gpa_hold()
1231 KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d", in vm_gpa_hold()
1234 return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie)); in vm_gpa_hold()
1254 vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval) in vm_get_register() argument
1260 return (vmmops_getreg(vcpu->cookie, reg, retval)); in vm_get_register()
1264 vm_set_register(struct vcpu *vcpu, int reg, uint64_t val) in vm_set_register() argument
1271 error = vmmops_setreg(vcpu->cookie, reg, val); in vm_set_register()
1276 VMM_CTR1(vcpu, "Setting nextrip to %#lx", val); in vm_set_register()
1277 vcpu->nextrip = val; in vm_set_register()
1314 vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc) in vm_get_seg_desc() argument
1320 return (vmmops_getdesc(vcpu->cookie, reg, desc)); in vm_get_seg_desc()
1324 vm_set_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc) in vm_set_seg_desc() argument
1330 return (vmmops_setdesc(vcpu->cookie, reg, desc)); in vm_set_seg_desc()
1334 restore_guest_fpustate(struct vcpu *vcpu) in restore_guest_fpustate() argument
1342 fpurestore(vcpu->guestfpu); in restore_guest_fpustate()
1346 load_xcr(0, vcpu->guest_xcr0); in restore_guest_fpustate()
1356 save_guest_fpustate(struct vcpu *vcpu) in save_guest_fpustate() argument
1364 vcpu->guest_xcr0 = rxcr(0); in save_guest_fpustate()
1370 fpusave(vcpu->guestfpu); in save_guest_fpustate()
1374 static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
1377 vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate, in vcpu_set_state_locked() argument
1382 vcpu_assert_locked(vcpu); in vcpu_set_state_locked()
1387 * ioctl() operating on a vcpu at any point. in vcpu_set_state_locked()
1390 while (vcpu->state != VCPU_IDLE) { in vcpu_set_state_locked()
1391 vcpu->reqidle = 1; in vcpu_set_state_locked()
1392 vcpu_notify_event_locked(vcpu, false); in vcpu_set_state_locked()
1393 VMM_CTR1(vcpu, "vcpu state change from %s to " in vcpu_set_state_locked()
1394 "idle requested", vcpu_state2str(vcpu->state)); in vcpu_set_state_locked()
1395 msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz); in vcpu_set_state_locked()
1398 KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from " in vcpu_set_state_locked()
1399 "vcpu idle state")); in vcpu_set_state_locked()
1402 if (vcpu->state == VCPU_RUNNING) { in vcpu_set_state_locked()
1403 KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d " in vcpu_set_state_locked()
1404 "mismatch for running vcpu", curcpu, vcpu->hostcpu)); in vcpu_set_state_locked()
1406 KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a " in vcpu_set_state_locked()
1407 "vcpu that is not running", vcpu->hostcpu)); in vcpu_set_state_locked()
1416 switch (vcpu->state) { in vcpu_set_state_locked()
1433 VMM_CTR2(vcpu, "vcpu state changed from %s to %s", in vcpu_set_state_locked()
1434 vcpu_state2str(vcpu->state), vcpu_state2str(newstate)); in vcpu_set_state_locked()
1436 vcpu->state = newstate; in vcpu_set_state_locked()
1438 vcpu->hostcpu = curcpu; in vcpu_set_state_locked()
1440 vcpu->hostcpu = NOCPU; in vcpu_set_state_locked()
1443 wakeup(&vcpu->state); in vcpu_set_state_locked()
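The switch whose case labels the matcher drops enforces the vcpu state machine implied by the assertions above: IDLE may only become FROZEN, FROZEN may become anything except FROZEN, and RUNNING or SLEEPING may only drop back to FROZEN. A hedged reconstruction of those cases (the exact bodies are an assumption):

	switch (vcpu->state) {
	case VCPU_IDLE:
		error = (newstate != VCPU_FROZEN);	/* IDLE -> FROZEN only */
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);	/* FROZEN -> any other */
		break;
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);	/* must return to FROZEN */
		break;
	}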
1449 vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate) in vcpu_require_state() argument
1453 if ((error = vcpu_set_state(vcpu, newstate, false)) != 0) in vcpu_require_state()
1458 vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate) in vcpu_require_state_locked() argument
1462 if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0) in vcpu_require_state_locked()
1467 vm_handle_rendezvous(struct vcpu *vcpu) in vm_handle_rendezvous() argument
1469 struct vm *vm = vcpu->vm; in vm_handle_rendezvous()
1474 vcpuid = vcpu->vcpuid; in vm_handle_rendezvous()
1483 VMM_CTR0(vcpu, "Calling rendezvous func"); in vm_handle_rendezvous()
1484 (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg); in vm_handle_rendezvous()
1489 VMM_CTR0(vcpu, "Rendezvous completed"); in vm_handle_rendezvous()
1495 VMM_CTR0(vcpu, "Wait for rendezvous completion"); in vm_handle_rendezvous()
1511 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
1514 vm_handle_hlt(struct vcpu *vcpu, bool intr_disabled, bool *retu) in vm_handle_hlt() argument
1516 struct vm *vm = vcpu->vm; in vm_handle_hlt()
1521 vcpuid = vcpu->vcpuid; in vm_handle_hlt()
1527 KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted")); in vm_handle_hlt()
1529 vcpu_lock(vcpu); in vm_handle_hlt()
1534 * software events that would cause this vcpu to wakeup. in vm_handle_hlt()
1537 * vcpu returned from vmmops_run() and before it acquired the in vm_handle_hlt()
1538 * vcpu lock above. in vm_handle_hlt()
1540 if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle) in vm_handle_hlt()
1542 if (vm_nmi_pending(vcpu)) in vm_handle_hlt()
1545 if (vm_extint_pending(vcpu) || in vm_handle_hlt()
1546 vlapic_pending_intr(vcpu->vlapic, NULL)) { in vm_handle_hlt()
1551 /* Don't go to sleep if the vcpu thread needs to yield */ in vm_handle_hlt()
1552 if (vcpu_should_yield(vcpu)) in vm_handle_hlt()
1555 if (vcpu_debugged(vcpu)) in vm_handle_hlt()
1566 VMM_CTR0(vcpu, "Halted"); in vm_handle_hlt()
1580 vcpu_require_state_locked(vcpu, VCPU_SLEEPING); in vm_handle_hlt()
1585 msleep_spin(vcpu, &vcpu->mtx, wmesg, hz); in vm_handle_hlt()
1586 vcpu_require_state_locked(vcpu, VCPU_FROZEN); in vm_handle_hlt()
1587 vmm_stat_incr(vcpu, VCPU_IDLE_TICKS, ticks - t); in vm_handle_hlt()
1589 vcpu_unlock(vcpu); in vm_handle_hlt()
1598 vcpu_lock(vcpu); in vm_handle_hlt()
1605 vcpu_unlock(vcpu); in vm_handle_hlt()
1614 vm_handle_paging(struct vcpu *vcpu, bool *retu) in vm_handle_paging() argument
1616 struct vm *vm = vcpu->vm; in vm_handle_paging()
1621 vme = &vcpu->exitinfo; in vm_handle_paging()
1635 VMM_CTR2(vcpu, "%s bit emulation for gpa %#lx", in vm_handle_paging()
1645 VMM_CTR3(vcpu, "vm_handle_paging rv = %d, gpa = %#lx, " in vm_handle_paging()
1655 vm_handle_inst_emul(struct vcpu *vcpu, bool *retu) in vm_handle_inst_emul() argument
1666 vme = &vcpu->exitinfo; in vm_handle_inst_emul()
1679 VMM_CTR1(vcpu, "inst_emul fault accessing gpa %#lx", gpa); in vm_handle_inst_emul()
1683 error = vmm_fetch_instruction(vcpu, paging, vme->rip + cs_base, in vm_handle_inst_emul()
1694 if (vmm_decode_instruction(vcpu, gla, cpu_mode, cs_d, vie) != 0) { in vm_handle_inst_emul()
1695 VMM_CTR1(vcpu, "Error decoding instruction at %#lx", in vm_handle_inst_emul()
1705 vcpu->nextrip += vie->num_processed; in vm_handle_inst_emul()
1706 VMM_CTR1(vcpu, "nextrip updated to %#lx after instruction decoding", in vm_handle_inst_emul()
1707 vcpu->nextrip); in vm_handle_inst_emul()
1724 error = vmm_emulate_instruction(vcpu, gpa, vie, paging, mread, mwrite, in vm_handle_inst_emul()
1731 vm_handle_suspend(struct vcpu *vcpu, bool *retu) in vm_handle_suspend() argument
1733 struct vm *vm = vcpu->vm; in vm_handle_suspend()
1740 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus); in vm_handle_suspend()
1749 vcpu_lock(vcpu); in vm_handle_suspend()
1752 VMM_CTR0(vcpu, "All vcpus suspended"); in vm_handle_suspend()
1757 VMM_CTR0(vcpu, "Sleeping during suspend"); in vm_handle_suspend()
1758 vcpu_require_state_locked(vcpu, VCPU_SLEEPING); in vm_handle_suspend()
1759 msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz); in vm_handle_suspend()
1760 vcpu_require_state_locked(vcpu, VCPU_FROZEN); in vm_handle_suspend()
1762 vcpu_unlock(vcpu); in vm_handle_suspend()
1764 vcpu_lock(vcpu); in vm_handle_suspend()
1767 VMM_CTR0(vcpu, "Rendezvous during suspend"); in vm_handle_suspend()
1768 vcpu_unlock(vcpu); in vm_handle_suspend()
1769 error = vm_handle_rendezvous(vcpu); in vm_handle_suspend()
1770 vcpu_lock(vcpu); in vm_handle_suspend()
1773 vcpu_unlock(vcpu); in vm_handle_suspend()
1789 vm_handle_reqidle(struct vcpu *vcpu, bool *retu) in vm_handle_reqidle() argument
1791 vcpu_lock(vcpu); in vm_handle_reqidle()
1792 KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle)); in vm_handle_reqidle()
1793 vcpu->reqidle = 0; in vm_handle_reqidle()
1794 vcpu_unlock(vcpu); in vm_handle_reqidle()
1800 vm_handle_db(struct vcpu *vcpu, struct vm_exit *vme, bool *retu) in vm_handle_db() argument
1812 vm_get_register(vcpu, VM_REG_GUEST_RSP, &rsp); in vm_handle_db()
1813 error = vm_copy_setup(vcpu, &vme->u.dbg.paging, rsp, sizeof(uint64_t), in vm_handle_db()
1861 vm_exit_suspended(struct vcpu *vcpu, uint64_t rip) in vm_exit_suspended() argument
1863 struct vm *vm = vcpu->vm; in vm_exit_suspended()
1869 vmexit = vm_exitinfo(vcpu); in vm_exit_suspended()
1877 vm_exit_debug(struct vcpu *vcpu, uint64_t rip) in vm_exit_debug() argument
1881 vmexit = vm_exitinfo(vcpu); in vm_exit_debug()
1888 vm_exit_rendezvous(struct vcpu *vcpu, uint64_t rip) in vm_exit_rendezvous() argument
1892 vmexit = vm_exitinfo(vcpu); in vm_exit_rendezvous()
1896 vmm_stat_incr(vcpu, VMEXIT_RENDEZVOUS, 1); in vm_exit_rendezvous()
1900 vm_exit_reqidle(struct vcpu *vcpu, uint64_t rip) in vm_exit_reqidle() argument
1904 vmexit = vm_exitinfo(vcpu); in vm_exit_reqidle()
1908 vmm_stat_incr(vcpu, VMEXIT_REQIDLE, 1); in vm_exit_reqidle()
1912 vm_exit_astpending(struct vcpu *vcpu, uint64_t rip) in vm_exit_astpending() argument
1916 vmexit = vm_exitinfo(vcpu); in vm_exit_astpending()
1920 vmm_stat_incr(vcpu, VMEXIT_ASTPENDING, 1); in vm_exit_astpending()
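vm_exit_suspended(), vm_exit_debug(), vm_exit_rendezvous(), vm_exit_reqidle() and vm_exit_astpending() all follow the same basic shape: fetch the exit record, stamp it, bump a stat. Spelled out for the reqidle case (the exitcode constant and the zeroed inst_length are assumptions consistent with the listing):

	void
	vm_exit_reqidle(struct vcpu *vcpu, uint64_t rip)
	{
		struct vm_exit *vmexit;

		vmexit = vm_exitinfo(vcpu);
		vmexit->rip = rip;
		vmexit->inst_length = 0;
		vmexit->exitcode = VM_EXITCODE_REQIDLE;
		vmm_stat_incr(vcpu, VMEXIT_REQIDLE, 1);
	}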
1924 vm_run(struct vcpu *vcpu) in vm_run() argument
1926 struct vm *vm = vcpu->vm; in vm_run()
1935 vcpuid = vcpu->vcpuid; in vm_run()
1944 vme = &vcpu->exitinfo; in vm_run()
1947 evinfo.iptr = &vcpu->reqidle; in vm_run()
1959 restore_guest_fpustate(vcpu); in vm_run()
1961 vcpu_require_state(vcpu, VCPU_RUNNING); in vm_run()
1962 error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo); in vm_run()
1963 vcpu_require_state(vcpu, VCPU_FROZEN); in vm_run()
1965 save_guest_fpustate(vcpu); in vm_run()
1967 vmm_stat_incr(vcpu, VCPU_TOTAL_RUNTIME, rdtsc() - tscval); in vm_run()
1973 vcpu->nextrip = vme->rip + vme->inst_length; in vm_run()
1976 error = vm_handle_reqidle(vcpu, &retu); in vm_run()
1979 error = vm_handle_suspend(vcpu, &retu); in vm_run()
1985 error = vm_handle_rendezvous(vcpu); in vm_run()
1989 error = vm_handle_hlt(vcpu, intr_disabled, &retu); in vm_run()
1992 error = vm_handle_paging(vcpu, &retu); in vm_run()
1995 error = vm_handle_inst_emul(vcpu, &retu); in vm_run()
1999 error = vm_handle_inout(vcpu, vme, &retu); in vm_run()
2002 error = vm_handle_db(vcpu, vme, &retu); in vm_run()
2007 vm_inject_ud(vcpu); in vm_run()
2020 error = vm_handle_ipi(vcpu, vme, &retu); in vm_run()
2025 vmm_stat_incr(vcpu, VMEXIT_USERSPACE, 1); in vm_run()
2026 VMM_CTR2(vcpu, "retu %d/%d", error, vme->exitcode); in vm_run()
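Reassembled from its matching lines, vm_run() is a loop around vmmops_run(): bracket the guest entry with VCPU_RUNNING/VCPU_FROZEN and FPU restore/save, charge the TSC delta to VCPU_TOTAL_RUNTIME, then dispatch on vme->exitcode; a handler sets 'retu' when the exit must finish in userspace. In outline (the loop construct itself is an assumption):

	for (;;) {
		restore_guest_fpustate(vcpu);
		vcpu_require_state(vcpu, VCPU_RUNNING);
		error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo);
		vcpu_require_state(vcpu, VCPU_FROZEN);
		save_guest_fpustate(vcpu);

		retu = false;
		vcpu->nextrip = vme->rip + vme->inst_length;
		/* ... dispatch on vme->exitcode as shown above ... */
		if (error != 0 || retu)
			break;
	}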
2032 vm_restart_instruction(struct vcpu *vcpu) in vm_restart_instruction() argument
2038 state = vcpu_get_state(vcpu, NULL); in vm_restart_instruction()
2041 * When a vcpu is "running" the next instruction is determined in vm_restart_instruction()
2042 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'. in vm_restart_instruction()
2046 vcpu->exitinfo.inst_length = 0; in vm_restart_instruction()
2047 VMM_CTR1(vcpu, "restarting instruction at %#lx by " in vm_restart_instruction()
2048 "setting inst_length to zero", vcpu->exitinfo.rip); in vm_restart_instruction()
2051 * When a vcpu is "frozen" it is outside the critical section in vm_restart_instruction()
2054 * 'nextrip' to the vcpu's %rip. in vm_restart_instruction()
2056 error = vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip); in vm_restart_instruction()
2058 VMM_CTR2(vcpu, "restarting instruction by updating " in vm_restart_instruction()
2059 "nextrip from %#lx to %#lx", vcpu->nextrip, rip); in vm_restart_instruction()
2060 vcpu->nextrip = rip; in vm_restart_instruction()
2068 vm_exit_intinfo(struct vcpu *vcpu, uint64_t info) in vm_exit_intinfo() argument
2084 VMM_CTR2(vcpu, "%s: info1(%#lx)", __func__, info); in vm_exit_intinfo()
2085 vcpu->exitintinfo = info; in vm_exit_intinfo()
2144 nested_fault(struct vcpu *vcpu, uint64_t info1, uint64_t info2, in nested_fault() argument
2160 VMM_CTR2(vcpu, "triple fault: info1(%#lx), info2(%#lx)", in nested_fault()
2162 vm_suspend(vcpu->vm, VM_SUSPEND_TRIPLEFAULT); in nested_fault()
2186 vcpu_exception_intinfo(struct vcpu *vcpu) in vcpu_exception_intinfo() argument
2190 if (vcpu->exception_pending) { in vcpu_exception_intinfo()
2191 info = vcpu->exc_vector & 0xff; in vcpu_exception_intinfo()
2193 if (vcpu->exc_errcode_valid) { in vcpu_exception_intinfo()
2195 info |= (uint64_t)vcpu->exc_errcode << 32; in vcpu_exception_intinfo()
2202 vm_entry_intinfo(struct vcpu *vcpu, uint64_t *retinfo) in vm_entry_intinfo() argument
2207 info1 = vcpu->exitintinfo; in vm_entry_intinfo()
2208 vcpu->exitintinfo = 0; in vm_entry_intinfo()
2211 if (vcpu->exception_pending) { in vm_entry_intinfo()
2212 info2 = vcpu_exception_intinfo(vcpu); in vm_entry_intinfo()
2213 vcpu->exception_pending = 0; in vm_entry_intinfo()
2214 VMM_CTR2(vcpu, "Exception %d delivered: %#lx", in vm_entry_intinfo()
2215 vcpu->exc_vector, info2); in vm_entry_intinfo()
2219 valid = nested_fault(vcpu, info1, info2, retinfo); in vm_entry_intinfo()
2231 VMM_CTR4(vcpu, "%s: info1(%#lx), info2(%#lx), " in vm_entry_intinfo()
2239 vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2) in vm_get_intinfo() argument
2241 *info1 = vcpu->exitintinfo; in vm_get_intinfo()
2242 *info2 = vcpu_exception_intinfo(vcpu); in vm_get_intinfo()
2247 vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid, in vm_inject_exception() argument
2264 if (vcpu->exception_pending) { in vm_inject_exception()
2265 VMM_CTR2(vcpu, "Unable to inject exception %d due to " in vm_inject_exception()
2266 "pending exception %d", vector, vcpu->exc_vector); in vm_inject_exception()
2274 error = vm_get_register(vcpu, VM_REG_GUEST_CR0, ®val); in vm_inject_exception()
2286 error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW, 0); in vm_inject_exception()
2291 vm_restart_instruction(vcpu); in vm_inject_exception()
2293 vcpu->exception_pending = 1; in vm_inject_exception()
2294 vcpu->exc_vector = vector; in vm_inject_exception()
2295 vcpu->exc_errcode = errcode; in vm_inject_exception()
2296 vcpu->exc_errcode_valid = errcode_valid; in vm_inject_exception()
2297 VMM_CTR1(vcpu, "Exception %d pending", vector); in vm_inject_exception()
2302 vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid, int errcode) in vm_inject_fault() argument
2308 error = vm_inject_exception(vcpu, vector, errcode_valid, in vm_inject_fault()
2314 vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2) in vm_inject_pf() argument
2318 VMM_CTR2(vcpu, "Injecting page fault: error_code %#x, cr2 %#lx", in vm_inject_pf()
2321 error = vm_set_register(vcpu, VM_REG_GUEST_CR2, cr2); in vm_inject_pf()
2324 vm_inject_fault(vcpu, IDT_PF, 1, error_code); in vm_inject_pf()
2327 static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
2330 vm_inject_nmi(struct vcpu *vcpu) in vm_inject_nmi() argument
2333 vcpu->nmi_pending = 1; in vm_inject_nmi()
2334 vcpu_notify_event(vcpu, false); in vm_inject_nmi()
2339 vm_nmi_pending(struct vcpu *vcpu) in vm_nmi_pending() argument
2341 return (vcpu->nmi_pending); in vm_nmi_pending()
2345 vm_nmi_clear(struct vcpu *vcpu) in vm_nmi_clear() argument
2347 if (vcpu->nmi_pending == 0) in vm_nmi_clear()
2350 vcpu->nmi_pending = 0; in vm_nmi_clear()
2351 vmm_stat_incr(vcpu, VCPU_NMI_COUNT, 1); in vm_nmi_clear()
2354 static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");
2357 vm_inject_extint(struct vcpu *vcpu) in vm_inject_extint() argument
2360 vcpu->extint_pending = 1; in vm_inject_extint()
2361 vcpu_notify_event(vcpu, false); in vm_inject_extint()
2366 vm_extint_pending(struct vcpu *vcpu) in vm_extint_pending() argument
2368 return (vcpu->extint_pending); in vm_extint_pending()
2372 vm_extint_clear(struct vcpu *vcpu) in vm_extint_clear() argument
2374 if (vcpu->extint_pending == 0) in vm_extint_clear()
2377 vcpu->extint_pending = 0; in vm_extint_clear()
2378 vmm_stat_incr(vcpu, VCPU_EXTINT_COUNT, 1); in vm_extint_clear()
2382 vm_get_capability(struct vcpu *vcpu, int type, int *retval) in vm_get_capability() argument
2387 return (vmmops_getcap(vcpu->cookie, type, retval)); in vm_get_capability()
2391 vm_set_capability(struct vcpu *vcpu, int type, int val) in vm_set_capability() argument
2396 return (vmmops_setcap(vcpu->cookie, type, val)); in vm_set_capability()
2400 vcpu_vm(struct vcpu *vcpu) in vcpu_vm() argument
2402 return (vcpu->vm); in vcpu_vm()
2406 vcpu_vcpuid(struct vcpu *vcpu) in vcpu_vcpuid() argument
2408 return (vcpu->vcpuid); in vcpu_vcpuid()
2411 struct vcpu *
2414 return (vm->vcpu[vcpuid]); in vm_vcpu()
2418 vm_lapic(struct vcpu *vcpu) in vm_lapic() argument
2420 return (vcpu->vlapic); in vm_lapic()
2487 vcpu_set_state(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle) in vcpu_set_state() argument
2491 vcpu_lock(vcpu); in vcpu_set_state()
2492 error = vcpu_set_state_locked(vcpu, newstate, from_idle); in vcpu_set_state()
2493 vcpu_unlock(vcpu); in vcpu_set_state()
2499 vcpu_get_state(struct vcpu *vcpu, int *hostcpu) in vcpu_get_state() argument
2503 vcpu_lock(vcpu); in vcpu_get_state()
2504 state = vcpu->state; in vcpu_get_state()
2506 *hostcpu = vcpu->hostcpu; in vcpu_get_state()
2507 vcpu_unlock(vcpu); in vcpu_get_state()
2513 vm_activate_cpu(struct vcpu *vcpu) in vm_activate_cpu() argument
2515 struct vm *vm = vcpu->vm; in vm_activate_cpu()
2517 if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_activate_cpu()
2520 VMM_CTR0(vcpu, "activated"); in vm_activate_cpu()
2521 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus); in vm_activate_cpu()
2526 vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu) in vm_suspend_cpu() argument
2528 if (vcpu == NULL) { in vm_suspend_cpu()
2535 if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_suspend_cpu()
2538 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_suspend_cpu()
2539 vcpu_notify_event(vcpu, false); in vm_suspend_cpu()
2545 vm_resume_cpu(struct vm *vm, struct vcpu *vcpu) in vm_resume_cpu() argument
2548 if (vcpu == NULL) { in vm_resume_cpu()
2551 if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus)) in vm_resume_cpu()
2554 CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_resume_cpu()
2560 vcpu_debugged(struct vcpu *vcpu) in vcpu_debugged() argument
2563 return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus)); in vcpu_debugged()
2612 vcpu_stats(struct vcpu *vcpu) in vcpu_stats() argument
2615 return (vcpu->stats); in vcpu_stats()
2619 vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state) in vm_get_x2apic_state() argument
2621 *state = vcpu->x2apic_state; in vm_get_x2apic_state()
2627 vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state) in vm_set_x2apic_state() argument
2632 vcpu->x2apic_state = state; in vm_set_x2apic_state()
2634 vlapic_set_x2apic_state(vcpu, state); in vm_set_x2apic_state()
2640 * This function is called to ensure that a vcpu "sees" a pending event
2642 * - If the vcpu thread is sleeping then it is woken up.
2643 * - If the vcpu is running on a different host_cpu then an IPI will be directed
2644 * to the host_cpu to cause the vcpu to trap into the hypervisor.
2647 vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr) in vcpu_notify_event_locked() argument
2651 hostcpu = vcpu->hostcpu; in vcpu_notify_event_locked()
2652 if (vcpu->state == VCPU_RUNNING) { in vcpu_notify_event_locked()
2653 KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu")); in vcpu_notify_event_locked()
2656 vlapic_post_intr(vcpu->vlapic, hostcpu, in vcpu_notify_event_locked()
2663 * If the 'vcpu' is running on 'curcpu' then it must in vcpu_notify_event_locked()
2665 * The pending event will be picked up when the vcpu in vcpu_notify_event_locked()
2670 KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent " in vcpu_notify_event_locked()
2671 "with hostcpu %d", vcpu->state, hostcpu)); in vcpu_notify_event_locked()
2672 if (vcpu->state == VCPU_SLEEPING) in vcpu_notify_event_locked()
2673 wakeup_one(vcpu); in vcpu_notify_event_locked()
2678 vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr) in vcpu_notify_event() argument
2680 vcpu_lock(vcpu); in vcpu_notify_event()
2681 vcpu_notify_event_locked(vcpu, lapic_intr); in vcpu_notify_event()
2682 vcpu_unlock(vcpu); in vcpu_notify_event()
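The branches of vcpu_notify_event_locked() that never mention vcpu are dropped by the matcher; a hedged reconstruction of the RUNNING path, assuming the stock ipi_cpu() primitive and the vmm_ipinum vector declared near the top of the file:

	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
		if (hostcpu != curcpu) {
			if (lapic_intr)
				vlapic_post_intr(vcpu->vlapic, hostcpu,
				    vmm_ipinum);
			else
				ipi_cpu(hostcpu, vmm_ipinum);
		}
		/*
		 * If it is running on curcpu, no IPI is needed: the event
		 * will be noticed on the way back into the guest.
		 */
	}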
2696 * XXX apic id is assumed to be numerically identical to vcpu id in vm_apicid2vcpuid()
2702 vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest, in vm_smp_rendezvous() argument
2705 struct vm *vm = vcpu->vm; in vm_smp_rendezvous()
2718 * call the rendezvous handler in case this 'vcpu' is one in vm_smp_rendezvous()
2721 VMM_CTR0(vcpu, "Rendezvous already in progress"); in vm_smp_rendezvous()
2723 error = vm_handle_rendezvous(vcpu); in vm_smp_rendezvous()
2731 VMM_CTR0(vcpu, "Initiating rendezvous"); in vm_smp_rendezvous()
2747 return (vm_handle_rendezvous(vcpu)); in vm_smp_rendezvous()
2806 vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging, in vm_copy_setup() argument
2822 error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault); in vm_copy_setup()
2835 hva = vm_gpa_hold(vcpu, copyinfo[idx].gpa, in vm_copy_setup()
2886 * these are global stats, only return the values for vCPU 0
2892 vm_get_rescnt(struct vcpu *vcpu, struct vmm_stat_type *stat) in vm_get_rescnt() argument
2895 if (vcpu->vcpuid == 0) { in vm_get_rescnt()
2896 vmm_stat_set(vcpu, VMM_MEM_RESIDENT, PAGE_SIZE * in vm_get_rescnt()
2897 vmspace_resident_count(vcpu->vm->vmspace)); in vm_get_rescnt()
2902 vm_get_wiredcnt(struct vcpu *vcpu, struct vmm_stat_type *stat) in vm_get_wiredcnt() argument
2905 if (vcpu->vcpuid == 0) { in vm_get_wiredcnt()
2906 vmm_stat_set(vcpu, VMM_MEM_WIRED, PAGE_SIZE * in vm_get_wiredcnt()
2907 pmap_wired_count(vmspace_pmap(vcpu->vm->vmspace))); in vm_get_wiredcnt()
2920 struct vcpu *vcpu; in vm_snapshot_vcpus() local
2926 vcpu = vm->vcpu[i]; in vm_snapshot_vcpus()
2927 if (vcpu == NULL) in vm_snapshot_vcpus()
2930 SNAPSHOT_VAR_OR_LEAVE(vcpu->x2apic_state, meta, ret, done); in vm_snapshot_vcpus()
2931 SNAPSHOT_VAR_OR_LEAVE(vcpu->exitintinfo, meta, ret, done); in vm_snapshot_vcpus()
2932 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_vector, meta, ret, done); in vm_snapshot_vcpus()
2933 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode_valid, meta, ret, done); in vm_snapshot_vcpus()
2934 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode, meta, ret, done); in vm_snapshot_vcpus()
2935 SNAPSHOT_VAR_OR_LEAVE(vcpu->guest_xcr0, meta, ret, done); in vm_snapshot_vcpus()
2936 SNAPSHOT_VAR_OR_LEAVE(vcpu->exitinfo, meta, ret, done); in vm_snapshot_vcpus()
2937 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, ret, done); in vm_snapshot_vcpus()
2945 tsc = now + vcpu->tsc_offset; in vm_snapshot_vcpus()
2948 vcpu->tsc_offset = tsc; in vm_snapshot_vcpus()
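Together with vm_restore_time() below, the arithmetic above keeps the guest-visible TSC continuous across a save/restore cycle; in outline (a reconstruction, not verbatim code):

	/*
	 * save:    guest_tsc = rdtsc() + tsc_offset   -> written to snapshot
	 * restore: tsc_offset = guest_tsc             <- read back from it
	 *          vmmops_restore_tsc(cookie, tsc_offset - rdtsc())
	 *
	 * so the guest resumes counting from the TSC value it observed at
	 * snapshot time.
	 */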
2973 struct vcpu *vcpu; in vm_snapshot_vcpu() local
2980 vcpu = vm->vcpu[i]; in vm_snapshot_vcpu()
2981 if (vcpu == NULL) in vm_snapshot_vcpu()
2984 error = vmmops_vcpu_snapshot(vcpu->cookie, meta); in vm_snapshot_vcpu()
2987 "vCPU: %d; error: %d\n", __func__, i, error); in vm_snapshot_vcpu()
3041 vm_set_tsc_offset(struct vcpu *vcpu, uint64_t offset) in vm_set_tsc_offset() argument
3043 vcpu->tsc_offset = offset; in vm_set_tsc_offset()
3051 struct vcpu *vcpu; in vm_restore_time() local
3062 vcpu = vm->vcpu[i]; in vm_restore_time()
3063 if (vcpu == NULL) in vm_restore_time()
3066 error = vmmops_restore_tsc(vcpu->cookie, in vm_restore_time()
3067 vcpu->tsc_offset - now); in vm_restore_time()