Lines matching full:vcpu — vCPU lifecycle and event-handling excerpts from the FreeBSD bhyve VMM (vmm.c)
97 * (a) allocated when vcpu is created
98 * (i) initialized when vcpu is created and when it is reinitialized
99 * (o) initialized the first time the vcpu is created
102 struct vcpu {
104 enum vcpu_state state; /* (o) vcpu state */
106 int hostcpu; /* (o) vcpu's host cpu */
107 int reqidle; /* (i) request vcpu to idle */
128 #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
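The lock-init macro pairs with the vcpu_lock()/vcpu_unlock()/vcpu_lock_destroy()/vcpu_assert_locked() companions used throughout the rest of this listing; their definitions are elided here. A minimal sketch, assuming the stock sys/mutex.h spin-mutex API:

    #define vcpu_lock(v)            mtx_lock_spin(&((v)->mtx))
    #define vcpu_unlock(v)          mtx_unlock_spin(&((v)->mtx))
    #define vcpu_lock_destroy(v)    mtx_destroy(&((v)->mtx))
    #define vcpu_assert_locked(v)   mtx_assert(&((v)->mtx), MA_OWNED)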
143 * [v] reads require one frozen vcpu, writes require freezing all vcpus
169 struct vcpu **vcpu; /* (o) guest vcpus */
178 #define VMM_CTR0(vcpu, format) \
179 VCPU_CTR0((vcpu)->vm, (vcpu)->vcpuid, format)
181 #define VMM_CTR1(vcpu, format, p1) \
182 VCPU_CTR1((vcpu)->vm, (vcpu)->vcpuid, format, p1)
184 #define VMM_CTR2(vcpu, format, p1, p2) \
185 VCPU_CTR2((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2)
187 #define VMM_CTR3(vcpu, format, p1, p2, p3) \
188 VCPU_CTR3((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3)
190 #define VMM_CTR4(vcpu, format, p1, p2, p3, p4) \
191 VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4)
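These wrappers only add the (vm, vcpuid) pair expected by the underlying VCPU_CTR* tracing macros, so call sites pass just the vcpu. Typical usage, taken from later lines in this file:

    VMM_CTR1(vcpu, "Setting nextrip to %#lx", val);
    VMM_CTR2(vcpu, "vcpu state changed from %s to %s",
        vcpu_state2str(vcpu->state), vcpu_state2str(newstate));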
222 DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
247 static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
263 "IPI vector used for vcpu notifications");
278 static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
281 VMM_STAT(VCPU_MIGRATIONS, "vcpu migration across host cpus");
332 vcpu_cleanup(struct vcpu *vcpu, bool destroy)
334 vmmops_vlapic_cleanup(vcpu->vlapic);
335 vmmops_vcpu_cleanup(vcpu->cookie);
336 vcpu->cookie = NULL;
338 vmm_stat_free(vcpu->stats);
339 fpu_save_area_free(vcpu->guestfpu);
340 vcpu_lock_destroy(vcpu);
341 free(vcpu, M_VM);
345 static struct vcpu *
346 vcpu_alloc(struct vm *vm, int vcpu_id)
348 struct vcpu *vcpu;
351 ("vcpu_alloc: invalid vcpu %d", vcpu_id));
353 vcpu = malloc(sizeof(*vcpu), M_VM, M_WAITOK | M_ZERO);
354 vcpu_lock_init(vcpu);
355 vcpu->state = VCPU_IDLE;
356 vcpu->hostcpu = NOCPU;
357 vcpu->vcpuid = vcpu_id;
358 vcpu->vm = vm;
359 vcpu->guestfpu = fpu_save_area_alloc();
360 vcpu->stats = vmm_stat_alloc();
361 vcpu->tsc_offset = 0;
362 return (vcpu);
366 vcpu_init(struct vcpu *vcpu)
368 vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid);
369 vcpu->vlapic = vmmops_vlapic_init(vcpu->cookie);
370 vm_set_x2apic_state(vcpu, X2APIC_DISABLED);
371 vcpu->reqidle = 0;
372 vcpu->exitintinfo = 0;
373 vcpu->nmi_pending = 0;
374 vcpu->extint_pending = 0;
375 vcpu->exception_pending = 0;
376 vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
377 fpu_save_area_reset(vcpu->guestfpu);
378 vmm_stat_init(vcpu->stats);
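Together with the (a)/(i)/(o) key at the top of the struct: vcpu_alloc() runs once per vCPU and sets the (o) fields, while vcpu_init() also runs on reinitialization and resets the (i) fields. A sketch of the two call paths, with the reinit path assumed (only the vm_init() loop is visible in this listing):

    /* First creation: allocate, then initialize. */
    vcpu = vcpu_alloc(vm, vcpuid);
    vcpu_init(vcpu);

    /* Reinitialization (e.g. VM reset): vcpu_init() only, so the (o)
     * fields (vcpuid, vm, stats, guestfpu) survive while pending-event
     * state such as reqidle and exitintinfo is cleared. */
    vcpu_init(vm->vcpu[i]);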
382 vcpu_trace_exceptions(struct vcpu *vcpu)
389 vcpu_trap_wbinvd(struct vcpu *vcpu)
395 vm_exitinfo(struct vcpu *vcpu)
397 return (&vcpu->exitinfo);
401 vm_exitinfo_cpuset(struct vcpu *vcpu)
403 return (&vcpu->exitinfo_cpuset);
521 if (vm->vcpu[i] != NULL) in vm_init()
522 vcpu_init(vm->vcpu[i]);
535 struct vcpu *
536 vm_alloc_vcpu(struct vm *vm, int vcpuid)
538 struct vcpu *vcpu;
543 vcpu = (struct vcpu *)
544 atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]);
545 if (__predict_true(vcpu != NULL))
546 return (vcpu);
549 vcpu = vm->vcpu[vcpuid];
550 if (vcpu == NULL && !vm->dying) {
551 vcpu = vcpu_alloc(vm, vcpuid);
552 vcpu_init(vcpu);
555 * Ensure vCPU is fully created before updating pointer
558 atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid],
559 (uintptr_t)vcpu);
562 return (vcpu);
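This is double-checked locking: a lock-free fast path with an acquire load, then (under a lock elided from this listing) a recheck and a release store that publishes the fully initialized vcpu. The same shape in portable C11 atomics, as a self-contained sketch; FreeBSD's atomic_load_acq_ptr()/atomic_store_rel_ptr() provide the equivalent ordering, and the mutex and constructor below are stand-ins:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct vcpu { int id; };
    struct vm {
        pthread_mutex_t mtx;
        _Atomic(struct vcpu *) vcpu[16];
    };

    static struct vcpu *
    lookup_or_create(struct vm *vm, int id)
    {
        /* Fast path: acquire load pairs with the release store below. */
        struct vcpu *v = atomic_load_explicit(&vm->vcpu[id],
            memory_order_acquire);
        if (v != NULL)
            return (v);

        pthread_mutex_lock(&vm->mtx);
        v = atomic_load_explicit(&vm->vcpu[id], memory_order_relaxed);
        if (v == NULL) {                /* recheck under the lock */
            v = calloc(1, sizeof(*v));
            v->id = id;                 /* ...full initialization here... */
            /* Publish only after the object is fully constructed. */
            atomic_store_explicit(&vm->vcpu[id], v, memory_order_release);
        }
        pthread_mutex_unlock(&vm->mtx);
        return (v);
    }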
610 vm->vcpu = malloc(sizeof(*vm->vcpu) * vm_maxcpu, M_VM, M_WAITOK | in vm_create()
677 if (vm->vcpu[i] != NULL) in vm_cleanup()
678 vcpu_cleanup(vm->vcpu[i], destroy);
691 free(vm->vcpu, M_VM);
874 vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval)
880 return (vmmops_getreg(vcpu->cookie, reg, retval));
884 vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
891 error = vmmops_setreg(vcpu->cookie, reg, val);
896 VMM_CTR1(vcpu, "Setting nextrip to %#lx", val);
897 vcpu->nextrip = val;
934 vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc)
940 return (vmmops_getdesc(vcpu->cookie, reg, desc));
944 vm_set_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc)
950 return (vmmops_setdesc(vcpu->cookie, reg, desc));
954 restore_guest_fpustate(struct vcpu *vcpu)
962 fpurestore(vcpu->guestfpu);
966 load_xcr(0, vcpu->guest_xcr0);
976 save_guest_fpustate(struct vcpu *vcpu)
984 vcpu->guest_xcr0 = rxcr(0);
990 fpusave(vcpu->guestfpu);
994 static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
997 vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
1002 vcpu_assert_locked(vcpu);
1007 * ioctl() operating on a vcpu at any point.
1010 while (vcpu->state != VCPU_IDLE) {
1011 vcpu->reqidle = 1;
1012 vcpu_notify_event_locked(vcpu, false);
1013 VMM_CTR1(vcpu, "vcpu state change from %s to "
1014 "idle requested", vcpu_state2str(vcpu->state));
1015 msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
1018 KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
1019 "vcpu idle state"));
1022 if (vcpu->state == VCPU_RUNNING) {
1023 KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
1024 "mismatch for running vcpu", curcpu, vcpu->hostcpu));
1026 KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
1027 "vcpu that is not running", vcpu->hostcpu));
1036 switch (vcpu->state) {
1053 VMM_CTR2(vcpu, "vcpu state changed from %s to %s",
1054 vcpu_state2str(vcpu->state), vcpu_state2str(newstate));
1056 vcpu->state = newstate;
1058 vcpu->hostcpu = curcpu;
1060 vcpu->hostcpu = NOCPU;
1063 wakeup(&vcpu->state);
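The elided switch body at line 1036 enforces the vCPU state machine. A sketch of the transition rules implied by the states seen here (IDLE, FROZEN, RUNNING, SLEEPING); treat the exact set as an assumption:

    /*
     * Allowed transitions (everything else fails with EBUSY):
     *   IDLE   -> FROZEN -> IDLE
     *   FROZEN -> RUNNING  -> FROZEN
     *   FROZEN -> SLEEPING -> FROZEN
     */
    switch (vcpu->state) {
    case VCPU_IDLE:
    case VCPU_RUNNING:
    case VCPU_SLEEPING:
        error = (newstate != VCPU_FROZEN);  /* may only go to FROZEN */
        break;
    case VCPU_FROZEN:
        error = (newstate == VCPU_FROZEN);  /* anywhere but FROZEN   */
        break;
    }
    if (error)
        return (EBUSY);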
1069 vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate)
1073 if ((error = vcpu_set_state(vcpu, newstate, false)) != 0)
1078 vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
1082 if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
1087 vm_handle_rendezvous(struct vcpu *vcpu)
1089 struct vm *vm = vcpu->vm;
1094 vcpuid = vcpu->vcpuid;
1103 VMM_CTR0(vcpu, "Calling rendezvous func");
1104 (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg);
1109 VMM_CTR0(vcpu, "Rendezvous completed");
1115 VMM_CTR0(vcpu, "Wait for rendezvous completion");
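The rendezvous loop (mostly elided above) makes every targeted vCPU run rendezvous_func exactly once, then wait until all targets have done so. A hedged sketch of its shape, with the req/done cpusets and the rendezvous mutex names assumed from context:

    mtx_lock(&vm->rendezvous_mtx);
    while (vm->rendezvous_func != NULL) {
        /* Run the handler once if this vcpu is targeted and not done. */
        if (CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
            !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
            VMM_CTR0(vcpu, "Calling rendezvous func");
            (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg);
            CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
        }
        if (CPU_CMP(&vm->rendezvous_req_cpus,
            &vm->rendezvous_done_cpus) == 0) {
            VMM_CTR0(vcpu, "Rendezvous completed");
            vm->rendezvous_func = NULL;
            wakeup(&vm->rendezvous_func);
            break;
        }
        VMM_CTR0(vcpu, "Wait for rendezvous completion");
        mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
            "vmrndv", hz);
    }
    mtx_unlock(&vm->rendezvous_mtx);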
1131 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
1134 vm_handle_hlt(struct vcpu *vcpu, bool intr_disabled, bool *retu)
1136 struct vm *vm = vcpu->vm;
1141 vcpuid = vcpu->vcpuid;
1147 KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
1149 vcpu_lock(vcpu);
1154 * software events that would cause this vcpu to wakeup.
1157 * vcpu returned from vmmops_run() and before it acquired the
1158 * vcpu lock above.
1160 if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
1162 if (vm_nmi_pending(vcpu))
1165 if (vm_extint_pending(vcpu) ||
1166 vlapic_pending_intr(vcpu->vlapic, NULL)) {
1171 /* Don't go to sleep if the vcpu thread needs to yield */
1172 if (vcpu_should_yield(vcpu))
1175 if (vcpu_debugged(vcpu))
1186 VMM_CTR0(vcpu, "Halted");
1200 vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
1205 msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
1206 vcpu_require_state_locked(vcpu, VCPU_FROZEN);
1207 vmm_stat_incr(vcpu, VCPU_IDLE_TICKS, ticks - t);
1209 vcpu_unlock(vcpu);
1218 vcpu_lock(vcpu);
1225 vcpu_unlock(vcpu);
1234 vm_handle_paging(struct vcpu *vcpu, bool *retu)
1236 struct vm *vm = vcpu->vm;
1241 vme = &vcpu->exitinfo;
1255 VMM_CTR2(vcpu, "%s bit emulation for gpa %#lx",
1265 VMM_CTR3(vcpu, "vm_handle_paging rv = %d, gpa = %#lx, "
1275 vm_handle_inst_emul(struct vcpu *vcpu, bool *retu)
1286 vme = &vcpu->exitinfo;
1299 VMM_CTR1(vcpu, "inst_emul fault accessing gpa %#lx", gpa);
1303 error = vmm_fetch_instruction(vcpu, paging, vme->rip + cs_base,
1314 if (vmm_decode_instruction(vcpu, gla, cpu_mode, cs_d, vie) != 0) {
1315 VMM_CTR1(vcpu, "Error decoding instruction at %#lx",
1325 vcpu->nextrip += vie->num_processed;
1326 VMM_CTR1(vcpu, "nextrip updated to %#lx after instruction decoding",
1327 vcpu->nextrip);
1344 error = vmm_emulate_instruction(vcpu, gpa, vie, paging, mread, mwrite,
1351 vm_handle_suspend(struct vcpu *vcpu, bool *retu)
1353 struct vm *vm = vcpu->vm;
1360 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus);
1369 vcpu_lock(vcpu);
1372 VMM_CTR0(vcpu, "All vcpus suspended");
1377 VMM_CTR0(vcpu, "Sleeping during suspend");
1378 vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
1379 msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
1380 vcpu_require_state_locked(vcpu, VCPU_FROZEN);
1382 vcpu_unlock(vcpu);
1384 vcpu_lock(vcpu);
1387 VMM_CTR0(vcpu, "Rendezvous during suspend");
1388 vcpu_unlock(vcpu);
1389 error = vm_handle_rendezvous(vcpu);
1390 vcpu_lock(vcpu);
1393 vcpu_unlock(vcpu);
1409 vm_handle_reqidle(struct vcpu *vcpu, bool *retu)
1411 vcpu_lock(vcpu);
1412 KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
1413 vcpu->reqidle = 0;
1414 vcpu_unlock(vcpu);
1420 vm_handle_db(struct vcpu *vcpu, struct vm_exit *vme, bool *retu)
1432 vm_get_register(vcpu, VM_REG_GUEST_RSP, &rsp);
1433 error = vm_copy_setup(vcpu, &vme->u.dbg.paging, rsp, sizeof(uint64_t),
1481 vm_exit_suspended(struct vcpu *vcpu, uint64_t rip)
1483 struct vm *vm = vcpu->vm;
1489 vmexit = vm_exitinfo(vcpu);
1497 vm_exit_debug(struct vcpu *vcpu, uint64_t rip)
1501 vmexit = vm_exitinfo(vcpu);
1508 vm_exit_rendezvous(struct vcpu *vcpu, uint64_t rip)
1512 vmexit = vm_exitinfo(vcpu);
1516 vmm_stat_incr(vcpu, VMEXIT_RENDEZVOUS, 1);
1520 vm_exit_reqidle(struct vcpu *vcpu, uint64_t rip)
1524 vmexit = vm_exitinfo(vcpu);
1528 vmm_stat_incr(vcpu, VMEXIT_REQIDLE, 1);
1532 vm_exit_astpending(struct vcpu *vcpu, uint64_t rip)
1536 vmexit = vm_exitinfo(vcpu);
1540 vmm_stat_incr(vcpu, VMEXIT_ASTPENDING, 1);
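Each vm_exit_* helper stuffs a synthetic exit into the vcpu's exitinfo so the run loop can bounce out at the given %rip without consuming a guest instruction. A sketch of the shared shape (field and exitcode names assumed to follow the VM_EXITCODE/VMEXIT naming visible here):

    static void
    vm_exit_rendezvous(struct vcpu *vcpu, uint64_t rip)
    {
        struct vm_exit *vmexit;

        vmexit = vm_exitinfo(vcpu);
        vmexit->rip = rip;
        vmexit->inst_length = 0;        /* restart at the same rip */
        vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
        vmm_stat_incr(vcpu, VMEXIT_RENDEZVOUS, 1);
    }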
1544 vm_run(struct vcpu *vcpu)
1546 struct vm *vm = vcpu->vm;
1555 vcpuid = vcpu->vcpuid;
1564 vme = &vcpu->exitinfo;
1567 evinfo.iptr = &vcpu->reqidle;
1579 restore_guest_fpustate(vcpu);
1581 vcpu_require_state(vcpu, VCPU_RUNNING);
1582 error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo);
1583 vcpu_require_state(vcpu, VCPU_FROZEN);
1585 save_guest_fpustate(vcpu);
1587 vmm_stat_incr(vcpu, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);
1593 vcpu->nextrip = vme->rip + vme->inst_length;
1596 error = vm_handle_reqidle(vcpu, &retu);
1599 error = vm_handle_suspend(vcpu, &retu);
1605 error = vm_handle_rendezvous(vcpu);
1609 error = vm_handle_hlt(vcpu, intr_disabled, &retu);
1612 error = vm_handle_paging(vcpu, &retu);
1615 error = vm_handle_inst_emul(vcpu, &retu);
1619 error = vm_handle_inout(vcpu, vme, &retu);
1622 error = vm_handle_db(vcpu, vme, &retu);
1627 vm_inject_ud(vcpu);
1640 error = vm_handle_ipi(vcpu, vme, &retu);
1645 vmm_stat_incr(vcpu, VMEXIT_USERSPACE, 1);
1646 VMM_CTR2(vcpu, "retu %d/%d", error, vme->exitcode);
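The dispatch above sits in a loop: when a handler resolves the exit in-kernel (error == 0 and retu == false), vm_run() re-enters the guest instead of returning to userspace. A condensed sketch of that control flow, under those assumptions:

    restart:
        critical_enter();
        restore_guest_fpustate(vcpu);
        vcpu_require_state(vcpu, VCPU_RUNNING);
        error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo);
        vcpu_require_state(vcpu, VCPU_FROZEN);
        save_guest_fpustate(vcpu);
        critical_exit();

        retu = false;
        if (error == 0) {
            /* switch (vme->exitcode) { ... vm_handle_*() ... } */
        }
        if (error == 0 && retu == false)
            goto restart;               /* exit handled in the kernel */
        /* Otherwise surface the exit to the userspace VMM (e.g. bhyve). */
        vmm_stat_incr(vcpu, VMEXIT_USERSPACE, 1);
        return (error);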
1652 vm_restart_instruction(struct vcpu *vcpu)
1658 state = vcpu_get_state(vcpu, NULL);
1661 * When a vcpu is "running" the next instruction is determined
1662 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
1666 vcpu->exitinfo.inst_length = 0;
1667 VMM_CTR1(vcpu, "restarting instruction at %#lx by "
1668 "setting inst_length to zero", vcpu->exitinfo.rip);
1671 * When a vcpu is "frozen" it is outside the critical section
1674 * 'nextrip' to the vcpu's %rip.
1676 error = vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip);
1678 VMM_CTR2(vcpu, "restarting instruction by updating "
1679 "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
1680 vcpu->nextrip = rip;
1688 vm_exit_intinfo(struct vcpu *vcpu, uint64_t info)
1704 VMM_CTR2(vcpu, "%s: info1(%#lx)", __func__, info);
1705 vcpu->exitintinfo = info;
1764 nested_fault(struct vcpu *vcpu, uint64_t info1, uint64_t info2,
1780 VMM_CTR2(vcpu, "triple fault: info1(%#lx), info2(%#lx)",
1782 vm_suspend(vcpu->vm, VM_SUSPEND_TRIPLEFAULT);
1806 vcpu_exception_intinfo(struct vcpu *vcpu)
1810 if (vcpu->exception_pending) {
1811 info = vcpu->exc_vector & 0xff;
1813 if (vcpu->exc_errcode_valid) {
1815 info |= (uint64_t)vcpu->exc_errcode << 32;
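The intinfo word packs an event in a VT-x-style entry-interruption format: vector in the low byte, type and validity flags above it, and the error code in the upper 32 bits (visible in the shift at line 1815). A sketch of encoding an exception that carries an error code; the macro names follow the usual machine/vmm.h definitions and are an assumption here:

    uint64_t info;

    info = vector & 0xff;               /* bits 7:0   vector        */
    info |= VM_INTINFO_HWEXCEPTION;     /* bits 10:8  event type    */
    info |= VM_INTINFO_DEL_ERRCODE;     /* bit 11     errcode valid */
    info |= VM_INTINFO_VALID;           /* bit 31     info valid    */
    info |= (uint64_t)errcode << 32;    /* bits 63:32 error code    */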
1822 vm_entry_intinfo(struct vcpu *vcpu, uint64_t *retinfo)
1827 info1 = vcpu->exitintinfo;
1828 vcpu->exitintinfo = 0;
1831 if (vcpu->exception_pending) {
1832 info2 = vcpu_exception_intinfo(vcpu);
1833 vcpu->exception_pending = 0;
1834 VMM_CTR2(vcpu, "Exception %d delivered: %#lx",
1835 vcpu->exc_vector, info2);
1839 valid = nested_fault(vcpu, info1, info2, retinfo);
1851 VMM_CTR4(vcpu, "%s: info1(%#lx), info2(%#lx), "
1859 vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2)
1861 *info1 = vcpu->exitintinfo;
1862 *info2 = vcpu_exception_intinfo(vcpu);
1867 vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid,
1884 if (vcpu->exception_pending) {
1885 VMM_CTR2(vcpu, "Unable to inject exception %d due to "
1886 "pending exception %d", vector, vcpu->exc_vector);
1894 error = vm_get_register(vcpu, VM_REG_GUEST_CR0, &regval);
1906 error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW, 0);
1911 vm_restart_instruction(vcpu);
1913 vcpu->exception_pending = 1;
1914 vcpu->exc_vector = vector;
1915 vcpu->exc_errcode = errcode;
1916 vcpu->exc_errcode_valid = errcode_valid;
1917 VMM_CTR1(vcpu, "Exception %d pending", vector);
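Convenience wrappers build on this; the vm_inject_ud() call seen at line 1627 is plausibly a thin wrapper over vm_inject_fault(), which in turn invokes vm_inject_exception() (the truncated call at line 1928 presumably passes the error code and a restart flag). A sketch of that pattern; the wrapper's exact location and signature are an assumption:

    static __inline void
    vm_inject_ud(struct vcpu *vcpu)
    {
        /* #UD carries no error code; the faulting instruction restarts. */
        vm_inject_fault(vcpu, IDT_UD, 0, 0);
    }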
1922 vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid, int errcode)
1928 error = vm_inject_exception(vcpu, vector, errcode_valid,
1934 vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2)
1938 VMM_CTR2(vcpu, "Injecting page fault: error_code %#x, cr2 %#lx",
1941 error = vm_set_register(vcpu, VM_REG_GUEST_CR2, cr2);
1944 vm_inject_fault(vcpu, IDT_PF, 1, error_code);
1947 static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
1950 vm_inject_nmi(struct vcpu *vcpu)
1953 vcpu->nmi_pending = 1;
1954 vcpu_notify_event(vcpu, false);
1959 vm_nmi_pending(struct vcpu *vcpu)
1961 return (vcpu->nmi_pending);
1965 vm_nmi_clear(struct vcpu *vcpu)
1967 if (vcpu->nmi_pending == 0)
1970 vcpu->nmi_pending = 0;
1971 vmm_stat_incr(vcpu, VCPU_NMI_COUNT, 1);
1974 static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");
1977 vm_inject_extint(struct vcpu *vcpu)
1980 vcpu->extint_pending = 1;
1981 vcpu_notify_event(vcpu, false);
1986 vm_extint_pending(struct vcpu *vcpu)
1988 return (vcpu->extint_pending);
1992 vm_extint_clear(struct vcpu *vcpu)
1994 if (vcpu->extint_pending == 0)
1997 vcpu->extint_pending = 0;
1998 vmm_stat_incr(vcpu, VCPU_EXTINT_COUNT, 1);
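NMIs and ExtINTs follow the same producer/consumer handshake: the producer sets the pending flag and pokes the vcpu via vcpu_notify_event(), and the vendor backend later samples and clears the flag at a VM-entry point where injection is architecturally legal. A sketch of the consumer side; the call site and helper name are assumptions, not code from this file:

    /* At VM entry, in the vendor backend's event-injection path: */
    if (vm_nmi_pending(vcpu)) {
        inject_nmi_into_guest(vcpu);    /* hypothetical backend helper */
        vm_nmi_clear(vcpu);             /* also bumps VCPU_NMI_COUNT   */
    }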
2002 vm_get_capability(struct vcpu *vcpu, int type, int *retval)
2007 return (vmmops_getcap(vcpu->cookie, type, retval));
2011 vm_set_capability(struct vcpu *vcpu, int type, int val)
2016 return (vmmops_setcap(vcpu->cookie, type, val));
2020 vcpu_vm(struct vcpu *vcpu)
2022 return (vcpu->vm);
2026 vcpu_vcpuid(struct vcpu *vcpu)
2028 return (vcpu->vcpuid);
2031 struct vcpu *
2032 vm_vcpu(struct vm *vm, int vcpuid)
2034 return (vm->vcpu[vcpuid]);
2038 vm_lapic(struct vcpu *vcpu)
2040 return (vcpu->vlapic);
2107 vcpu_set_state(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle)
2111 vcpu_lock(vcpu);
2112 error = vcpu_set_state_locked(vcpu, newstate, from_idle);
2113 vcpu_unlock(vcpu);
2119 vcpu_get_state(struct vcpu *vcpu, int *hostcpu)
2123 vcpu_lock(vcpu);
2124 state = vcpu->state;
2126 *hostcpu = vcpu->hostcpu;
2127 vcpu_unlock(vcpu);
2133 vm_activate_cpu(struct vcpu *vcpu)
2135 struct vm *vm = vcpu->vm;
2137 if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
2140 VMM_CTR0(vcpu, "activated");
2141 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus);
2146 vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu)
2148 if (vcpu == NULL) {
2155 if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
2158 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
2159 vcpu_notify_event(vcpu, false);
2165 vm_resume_cpu(struct vm *vm, struct vcpu *vcpu)
2168 if (vcpu == NULL) {
2171 if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus))
2174 CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
2180 vcpu_debugged(struct vcpu *vcpu)
2183 return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus));
2232 vcpu_stats(struct vcpu *vcpu)
2235 return (vcpu->stats);
2239 vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state)
2241 *state = vcpu->x2apic_state;
2247 vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
2252 vcpu->x2apic_state = state;
2254 vlapic_set_x2apic_state(vcpu, state);
2260 * This function is called to ensure that a vcpu "sees" a pending event
2262 * - If the vcpu thread is sleeping then it is woken up.
2263 * - If the vcpu is running on a different host_cpu then an IPI will be directed
2264 * to the host_cpu to cause the vcpu to trap into the hypervisor.
2267 vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
2271 hostcpu = vcpu->hostcpu;
2272 if (vcpu->state == VCPU_RUNNING) {
2273 KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
2276 vlapic_post_intr(vcpu->vlapic, hostcpu,
2283 * If the 'vcpu' is running on 'curcpu' then it must
2285 * The pending event will be picked up when the vcpu
2290 KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
2291 "with hostcpu %d", vcpu->state, hostcpu));
2292 if (vcpu->state == VCPU_SLEEPING)
2293 wakeup_one(vcpu);
2298 vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr)
2300 vcpu_lock(vcpu);
2301 vcpu_notify_event_locked(vcpu, lapic_intr);
2302 vcpu_unlock(vcpu);
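Condensing the locked helper: the notification mechanism depends on what the target vCPU is doing. A sketch of the decision tree; the ipi_cpu()/vmm_ipinum fallback is an assumption tied to the "IPI vector used for vcpu notifications" sysctl at line 263:

    if (vcpu->state == VCPU_RUNNING) {
        if (hostcpu != curcpu) {
            if (lapic_intr)             /* posted-interrupt delivery */
                vlapic_post_intr(vcpu->vlapic, hostcpu, vmm_ipinum);
            else                        /* force a VM exit via IPI */
                ipi_cpu(hostcpu, vmm_ipinum);
        }
        /* Running on curcpu: the exit already in progress sees it. */
    } else if (vcpu->state == VCPU_SLEEPING) {
        wakeup_one(vcpu);               /* wake the halted vcpu thread */
    }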
2321 * XXX apic id is assumed to be numerically identical to vcpu id in vm_apicid2vcpuid()
2327 vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest,
2330 struct vm *vm = vcpu->vm;
2343 * call the rendezvous handler in case this 'vcpu' is one
2346 VMM_CTR0(vcpu, "Rendezvous already in progress");
2348 error = vm_handle_rendezvous(vcpu);
2356 VMM_CTR0(vcpu, "Initiating rendezvous");
2372 return (vm_handle_rendezvous(vcpu));
2431 vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
2447 error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault);
2460 hva = vm_gpa_hold(vcpu, copyinfo[idx].gpa,
2511 * these are global stats; only return the values for vCPU 0
2517 vm_get_rescnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
2520 if (vcpu->vcpuid == 0) {
2521 vmm_stat_set(vcpu, VMM_MEM_RESIDENT, PAGE_SIZE *
2522 vmspace_resident_count(vcpu->vm->vmspace));
2527 vm_get_wiredcnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
2530 if (vcpu->vcpuid == 0) {
2531 vmm_stat_set(vcpu, VMM_MEM_WIRED, PAGE_SIZE *
2532 pmap_wired_count(vmspace_pmap(vcpu->vm->vmspace)));
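These two callbacks are computed stats: the value is synthesized at read time rather than accumulated, and pinned to vCPU 0 so a VM-wide number isn't repeated per vCPU. Registration plausibly uses the function-valued variant of the stat macro; a sketch, with the macro name assumed from the vmm_stat interface:

    static VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
    static VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);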
2545 struct vcpu *vcpu; in vm_snapshot_vcpus()
2551 vcpu = vm->vcpu[i];
2552 if (vcpu == NULL)
2555 SNAPSHOT_VAR_OR_LEAVE(vcpu->x2apic_state, meta, ret, done);
2556 SNAPSHOT_VAR_OR_LEAVE(vcpu->exitintinfo, meta, ret, done);
2557 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_vector, meta, ret, done);
2558 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode_valid, meta, ret, done);
2559 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode, meta, ret, done);
2560 SNAPSHOT_VAR_OR_LEAVE(vcpu->guest_xcr0, meta, ret, done);
2561 SNAPSHOT_VAR_OR_LEAVE(vcpu->exitinfo, meta, ret, done);
2562 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, ret, done);
2570 tsc = now + vcpu->tsc_offset;
2573 vcpu->tsc_offset = tsc;
2598 struct vcpu *vcpu; in vm_snapshot_vcpu()
2605 vcpu = vm->vcpu[i];
2606 if (vcpu == NULL)
2609 error = vmmops_vcpu_snapshot(vcpu->cookie, meta);
2612 "vCPU: %d; error: %d\n", __func__, i, error);
2666 vm_set_tsc_offset(struct vcpu *vcpu, uint64_t offset)
2668 vcpu->tsc_offset = offset;
2676 struct vcpu *vcpu; in vm_restore_time()
2687 vcpu = vm->vcpu[i];
2688 if (vcpu == NULL)
2691 error = vmmops_restore_tsc(vcpu->cookie,
2692 vcpu->tsc_offset - now);
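The TSC handling across snapshot/restore is offset arithmetic: the guest's TSC is the host TSC plus tsc_offset, so on save the code stores the guest-absolute value (now + offset, line 2570), and on restore it rebases against the new host's clock (saved value minus the new now, lines 2691-2692). As a worked sketch:

    /* Save (old host): record the guest TSC at snapshot time. */
    uint64_t saved_guest_tsc = rdtsc() + vcpu->tsc_offset;

    /* Restore (new host): recompute the offset so that
     * rdtsc() + new_offset == saved_guest_tsc at restore time. */
    uint64_t new_offset = saved_guest_tsc - rdtsc();
    vmmops_restore_tsc(vcpu->cookie, new_offset);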