Lines matching full:vcpu in the FreeBSD RISC-V vmm(4) core, apparently sys/riscv/vmm/vmm.c (the aplic_check_pending() and riscv_check_ipi() calls below place it on RISC-V)

81 struct vcpu { struct
85 int hostcpu; /* host cpuid this vcpu last ran on */ member
96 #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN) argument
127 struct vcpu **vcpu; /* (i) guest vcpus */ member
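
Reconstructed from the matches above and below, the per-vcpu bookkeeping looks roughly like the sketch that follows. Only the members actually visible in this listing are grounded; the field order is a guess, and the stub types exist only so the sketch compiles standalone.

#include <stdint.h>

struct mtx { long _opaque[4]; };        /* stand-in for the FreeBSD spin mutex */
struct vm_exit { long _opaque[16]; };   /* stand-in for the exit record */
struct vm;                              /* opaque here */
struct vfpstate;                        /* guest FPU save area, opaque here */

enum vcpu_state { VCPU_IDLE, VCPU_FROZEN, VCPU_RUNNING, VCPU_SLEEPING };

struct vcpu {
        struct mtx      mtx;        /* "vcpu lock" spin mutex (line 96) */
        enum vcpu_state state;      /* VCPU_IDLE after allocation (line 190) */
        int             hostcpu;    /* host cpuid this vcpu last ran on (line 85) */
        int             vcpuid;     /* index into vm->vcpu[] (line 192) */
        struct vm       *vm;        /* owning VM (line 193) */
        void            *cookie;    /* backend data from vmmops_vcpu_init() (line 202) */
        struct vfpstate *guestfpu;  /* fpu_save_area_alloc() result (line 194) */
        void            *stats;     /* vmm_stat_alloc() result (line 195) */
        struct vm_exit  exitinfo;   /* returned by vm_exitinfo() (line 211) */
        uint64_t        nextpc;     /* resume pc passed to vmmops_run() (line 1199) */
};
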
143 static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
149 "IPI vector used for vcpu notifications");
155 static void vcpu_notify_event_locked(struct vcpu *vcpu);
169 vcpu_cleanup(struct vcpu *vcpu, bool destroy) in vcpu_cleanup() argument
171 vmmops_vcpu_cleanup(vcpu->cookie); in vcpu_cleanup()
172 vcpu->cookie = NULL; in vcpu_cleanup()
174 vmm_stat_free(vcpu->stats); in vcpu_cleanup()
175 fpu_save_area_free(vcpu->guestfpu); in vcpu_cleanup()
176 vcpu_lock_destroy(vcpu); in vcpu_cleanup()
180 static struct vcpu *
183 struct vcpu *vcpu; in vcpu_alloc() local
186 ("vcpu_alloc: invalid vcpu %d", vcpu_id)); in vcpu_alloc()
188 vcpu = malloc(sizeof(*vcpu), M_VMM, M_WAITOK | M_ZERO); in vcpu_alloc()
189 vcpu_lock_init(vcpu); in vcpu_alloc()
190 vcpu->state = VCPU_IDLE; in vcpu_alloc()
191 vcpu->hostcpu = NOCPU; in vcpu_alloc()
192 vcpu->vcpuid = vcpu_id; in vcpu_alloc()
193 vcpu->vm = vm; in vcpu_alloc()
194 vcpu->guestfpu = fpu_save_area_alloc(); in vcpu_alloc()
195 vcpu->stats = vmm_stat_alloc(); in vcpu_alloc()
196 return (vcpu); in vcpu_alloc()
200 vcpu_init(struct vcpu *vcpu) in vcpu_init() argument
202 vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid); in vcpu_init()
203 MPASS(vcpu->cookie != NULL); in vcpu_init()
204 fpu_save_area_reset(vcpu->guestfpu); in vcpu_init()
205 vmm_stat_init(vcpu->stats); in vcpu_init()
209 vm_exitinfo(struct vcpu *vcpu) in vm_exitinfo() argument
211 return (&vcpu->exitinfo); in vm_exitinfo()
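
vcpu_alloc() (lines 180-196), vcpu_init() (lines 200-205), and vcpu_cleanup() (lines 169-176) form the per-vcpu lifecycle: allocation builds the software state (lock, IDs, FPU save area, stats), init attaches the backend cookie, and cleanup releases the same resources. A compilable toy of that pairing; every toy_* name is illustrative, and error handling is elided because the kernel path uses M_WAITOK allocations that sleep rather than fail.

#include <stdlib.h>

struct toy_vcpu { int vcpuid; void *fpu, *stats, *cookie; };

static struct toy_vcpu *
toy_vcpu_alloc(int id)
{
        /* calloc() models malloc(..., M_WAITOK | M_ZERO) at line 188. */
        struct toy_vcpu *v = calloc(1, sizeof(*v));
        v->vcpuid = id;
        v->fpu = malloc(512);           /* fpu_save_area_alloc() stand-in */
        v->stats = malloc(256);         /* vmm_stat_alloc() stand-in */
        return (v);
}

static void
toy_vcpu_init(struct toy_vcpu *v)
{
        v->cookie = malloc(128);        /* vmmops_vcpu_init() stand-in (line 202) */
}

static void
toy_vcpu_cleanup(struct toy_vcpu *v)
{
        free(v->cookie);                /* vmmops_vcpu_cleanup() analogue first... */
        v->cookie = NULL;
        free(v->stats);                 /* ...then vmm_stat_free()... */
        free(v->fpu);                   /* ...then fpu_save_area_free() */
        free(v);
}
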
301 if (vm->vcpu[i] != NULL) in vm_init()
302 vcpu_init(vm->vcpu[i]); in vm_init()
315 struct vcpu *
318 struct vcpu *vcpu; in vm_alloc_vcpu() local
327 vcpu = (struct vcpu *) in vm_alloc_vcpu()
328 atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]); in vm_alloc_vcpu()
329 if (__predict_true(vcpu != NULL)) in vm_alloc_vcpu()
330 return (vcpu); in vm_alloc_vcpu()
333 vcpu = vm->vcpu[vcpuid]; in vm_alloc_vcpu()
334 if (vcpu == NULL && !vm->dying) { in vm_alloc_vcpu()
335 vcpu = vcpu_alloc(vm, vcpuid); in vm_alloc_vcpu()
336 vcpu_init(vcpu); in vm_alloc_vcpu()
339 * Ensure vCPU is fully created before updating pointer in vm_alloc_vcpu()
342 atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid], in vm_alloc_vcpu()
343 (uintptr_t)vcpu); in vm_alloc_vcpu()
346 return (vcpu); in vm_alloc_vcpu()
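
vm_alloc_vcpu() (lines 315-346) is lazy initialization with a lock-free fast path. The acquire load at line 328 pairs with the release store at line 342, so any thread that observes a non-NULL pointer also observes a fully constructed vcpu; the slow path, which runs under a vm-wide lock the matched lines do not show, re-checks the slot and publishes. A userspace model with C11 atomics and a pthread mutex standing in for that lock (all toy_* names are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

#define TOY_MAXCPUS 16

struct toy_vcpu { int vcpuid; };

static _Atomic(struct toy_vcpu *) slots[TOY_MAXCPUS];
static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;

static struct toy_vcpu *
toy_alloc_vcpu(int id)
{
        /* Fast path (lines 327-330): the acquire load pairs with the
         * release store below, so non-NULL implies fully constructed. */
        struct toy_vcpu *v = atomic_load_explicit(&slots[id],
            memory_order_acquire);
        if (v != NULL)
                return (v);

        /* Slow path: serialize allocators, then re-check for a race. */
        pthread_mutex_lock(&slots_lock);
        v = atomic_load_explicit(&slots[id], memory_order_relaxed);
        if (v == NULL) {
                v = calloc(1, sizeof(*v));
                v->vcpuid = id;                 /* construct first... */
                atomic_store_explicit(&slots[id], v,
                    memory_order_release);      /* ...publish second */
        }
        pthread_mutex_unlock(&slots_lock);
        return (v);
}

The relaxed re-load under the mutex is safe because the mutex itself orders competing allocators; only the unlocked fast path needs acquire semantics.
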
392 vm->vcpu = malloc(sizeof(*vm->vcpu) * vm->maxcpus, M_VMM, in vm_create()
443 if (vm->vcpu[i] != NULL) in vm_cleanup()
444 vcpu_cleanup(vm->vcpu[i], destroy); in vm_cleanup()
457 free(vm->vcpu[i], M_VMM); in vm_cleanup()
458 free(vm->vcpu, M_VMM); in vm_cleanup()
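
vm_create() and vm_cleanup() (lines 392 and 443-458) hold up the other end of that scheme: create allocates just the pointer array, sized for maxcpus (presumably with M_ZERO on the continuation line the matcher skipped, since the NULL guards below depend on a zeroed array), and cleanup must tolerate slots the lazy allocator never filled. Continuing the slots[] sketch above:

static void
toy_free_all(void)
{
        /* Only populated slots are cleaned up (the NULL guard at
         * line 443, where the kernel calls vcpu_cleanup() first);
         * free(NULL) is a no-op, so the unconditional free at
         * line 457 is safe either way. */
        for (int i = 0; i < TOY_MAXCPUS; i++)
                free(atomic_load_explicit(&slots[i], memory_order_relaxed));
}
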
498 vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging, in vm_gla2gpa_nofault() argument
501 return (vmmops_gla2gpa(vcpu->cookie, paging, gla, prot, gpa, is_fault)); in vm_gla2gpa_nofault()
543 vm_handle_inst_emul(struct vcpu *vcpu, bool *retu) in vm_handle_inst_emul() argument
554 vm = vcpu->vm; in vm_handle_inst_emul()
559 vme = &vcpu->exitinfo; in vm_handle_inst_emul()
576 error = vmm_emulate_instruction(vcpu, fault_ipa, vie, paging, in vm_handle_inst_emul()
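
vm_handle_inst_emul() (lines 543-576) glues the exit record to the instruction emulator: it pulls the decoded instruction and faulting IPA out of vcpu->exitinfo and hands them to vmm_emulate_instruction() along with read/write access to whatever backs the address (the wiring is on lines the matcher skipped). A sketch of that dispatch shape; the callback type, field names, and size checks here are invented for illustration:

#include <errno.h>
#include <stdint.h>

/* Invented MMIO callback type; the kernel routes accesses to an
 * emulated device or arranges a return to userspace via *retu. */
typedef int mmio_cb_t(void *arg, uint64_t ipa, uint64_t *val, int size,
    int iswrite);

struct toy_vie { int is_store; int size; };  /* decoded-instruction stand-in */

static int
toy_emulate(const struct toy_vie *vie, uint64_t fault_ipa, mmio_cb_t *cb,
    void *arg)
{
        uint64_t val = 0;

        if (vie->size != 1 && vie->size != 2 && vie->size != 4 &&
            vie->size != 8)
                return (EINVAL);
        /* Loads read through the callback; stores write through it. */
        return (cb(arg, fault_ipa, &val, vie->size, vie->is_store));
}
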
613 vm_exit_suspended(struct vcpu *vcpu, uint64_t pc) in vm_exit_suspended() argument
615 struct vm *vm = vcpu->vm; in vm_exit_suspended()
621 vmexit = vm_exitinfo(vcpu); in vm_exit_suspended()
629 vm_exit_debug(struct vcpu *vcpu, uint64_t pc) in vm_exit_debug() argument
633 vmexit = vm_exitinfo(vcpu); in vm_exit_debug()
640 vm_activate_cpu(struct vcpu *vcpu) in vm_activate_cpu() argument
642 struct vm *vm = vcpu->vm; in vm_activate_cpu()
644 if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_activate_cpu()
647 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus); in vm_activate_cpu()
653 vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu) in vm_suspend_cpu() argument
655 if (vcpu == NULL) { in vm_suspend_cpu()
662 if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_suspend_cpu()
665 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_suspend_cpu()
666 vcpu_notify_event(vcpu); in vm_suspend_cpu()
672 vm_resume_cpu(struct vm *vm, struct vcpu *vcpu) in vm_resume_cpu() argument
675 if (vcpu == NULL) { in vm_resume_cpu()
678 if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus)) in vm_resume_cpu()
681 CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_resume_cpu()
687 vcpu_debugged(struct vcpu *vcpu) in vcpu_debugged() argument
690 return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus)); in vcpu_debugged()
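
vm_activate_cpu(), vm_suspend_cpu(), vm_resume_cpu(), and vcpu_debugged() (lines 640-690) manage two per-VM cpu sets: active_cpus records vcpus that have started running, and debug_cpus marks vcpus frozen for the debugger; suspend also calls vcpu_notify_event() (line 666) so a vcpu currently in the guest actually exits. A 64-bit-mask model of those set operations (the kernel uses cpuset_t with CPU_SET_ATOMIC and CPU_CLR_ATOMIC):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t active_cpus, debug_cpus;

static void
toy_activate_cpu(int id)
{
        atomic_fetch_or(&active_cpus, UINT64_C(1) << id);
}

static void
toy_suspend_cpu(int id)
{
        /* Only meaningful for vcpus that ever became active (line 662). */
        if (atomic_load(&active_cpus) & (UINT64_C(1) << id)) {
                atomic_fetch_or(&debug_cpus, UINT64_C(1) << id);
                /* kernel: vcpu_notify_event() to kick it out of the guest */
        }
}

static void
toy_resume_cpu(int id)
{
        atomic_fetch_and(&debug_cpus, ~(UINT64_C(1) << id));
}

static bool
toy_debugged(int id)
{
        return ((atomic_load(&debug_cpus) >> id) & 1);
}
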
716 vcpu_stats(struct vcpu *vcpu) in vcpu_stats() argument
719 return (vcpu->stats); in vcpu_stats()
723 * This function is called to ensure that a vcpu "sees" a pending event
725 * - If the vcpu thread is sleeping then it is woken up.
726 * - If the vcpu is running on a different host_cpu then an IPI will be directed
727 * to the host_cpu to cause the vcpu to trap into the hypervisor.
730 vcpu_notify_event_locked(struct vcpu *vcpu) in vcpu_notify_event_locked() argument
734 hostcpu = vcpu->hostcpu; in vcpu_notify_event_locked()
735 if (vcpu->state == VCPU_RUNNING) { in vcpu_notify_event_locked()
736 KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu")); in vcpu_notify_event_locked()
741 * If the 'vcpu' is running on 'curcpu' then it must in vcpu_notify_event_locked()
743 * The pending event will be picked up when the vcpu in vcpu_notify_event_locked()
748 KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent " in vcpu_notify_event_locked()
749 "with hostcpu %d", vcpu->state, hostcpu)); in vcpu_notify_event_locked()
750 if (vcpu->state == VCPU_SLEEPING) in vcpu_notify_event_locked()
751 wakeup_one(vcpu); in vcpu_notify_event_locked()
756 vcpu_notify_event(struct vcpu *vcpu) in vcpu_notify_event() argument
758 vcpu_lock(vcpu); in vcpu_notify_event()
759 vcpu_notify_event_locked(vcpu); in vcpu_notify_event()
760 vcpu_unlock(vcpu); in vcpu_notify_event()
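
The comment at lines 723-727 names the two delivery paths, and vcpu_notify_event_locked() implements them: a RUNNING vcpu is interrupted with an IPI to its hostcpu, unless it is already on curcpu, where the pending event is simply picked up on the next guest entry (lines 741-743), and a SLEEPING vcpu is woken with wakeup_one() (line 751). A userspace model, with a condition variable standing in for the sleep channel and a posted flag standing in for the IPI; names are illustrative:

#include <pthread.h>
#include <stdbool.h>

enum toy_state { TOY_IDLE, TOY_FROZEN, TOY_RUNNING, TOY_SLEEPING };

struct toy_vcpu {
        pthread_mutex_t mtx;    /* caller holds this, like vcpu_lock() */
        pthread_cond_t  wake;   /* sleep channel; wakeup_one() analogue */
        enum toy_state  state;
        bool            ipi_posted;     /* stands in for the notify IPI */
};

static void
toy_notify_locked(struct toy_vcpu *v)
{
        if (v->state == TOY_RUNNING) {
                /* Kernel: IPI v->hostcpu unless the vcpu is on curcpu
                 * (lines 736-744); here a flag the run loop checks. */
                v->ipi_posted = true;
        } else if (v->state == TOY_SLEEPING) {
                pthread_cond_signal(&v->wake);  /* wakeup_one(vcpu) */
        }
}
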
776 restore_guest_fpustate(struct vcpu *vcpu) in restore_guest_fpustate() argument
787 fpe_restore(vcpu->guestfpu); in restore_guest_fpustate()
797 save_guest_fpustate(struct vcpu *vcpu) in save_guest_fpustate() argument
802 fpe_store(vcpu->guestfpu); in save_guest_fpustate()
810 vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate, in vcpu_set_state_locked() argument
815 vcpu_assert_locked(vcpu); in vcpu_set_state_locked()
820 * ioctl() operating on a vcpu at any point. in vcpu_set_state_locked()
823 while (vcpu->state != VCPU_IDLE) { in vcpu_set_state_locked()
824 vcpu_notify_event_locked(vcpu); in vcpu_set_state_locked()
825 msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz); in vcpu_set_state_locked()
828 KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from " in vcpu_set_state_locked()
829 "vcpu idle state")); in vcpu_set_state_locked()
832 if (vcpu->state == VCPU_RUNNING) { in vcpu_set_state_locked()
833 KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d " in vcpu_set_state_locked()
834 "mismatch for running vcpu", curcpu, vcpu->hostcpu)); in vcpu_set_state_locked()
836 KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a " in vcpu_set_state_locked()
837 "vcpu that is not running", vcpu->hostcpu)); in vcpu_set_state_locked()
846 switch (vcpu->state) { in vcpu_set_state_locked()
863 vcpu->state = newstate; in vcpu_set_state_locked()
865 vcpu->hostcpu = curcpu; in vcpu_set_state_locked()
867 vcpu->hostcpu = NOCPU; in vcpu_set_state_locked()
870 wakeup(&vcpu->state); in vcpu_set_state_locked()
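
vcpu_set_state_locked() (lines 810-870) enforces a hub-and-spoke state machine: transitions out of IDLE come only from the ioctl path, which waits and re-notifies until the vcpu returns to IDLE (lines 823-825); hostcpu is recorded only while RUNNING (lines 863-867); and every legal transition passes through FROZEN. A standalone checker for that transition rule (this is my reading of the switch at line 846, whose cases the matcher skipped, so treat it as an assumption):

#include <stdbool.h>

enum vcpu_state { VCPU_IDLE, VCPU_FROZEN, VCPU_RUNNING, VCPU_SLEEPING };

/* FROZEN is the hub: IDLE/RUNNING/SLEEPING may only go to FROZEN,
 * and FROZEN may go anywhere except back to FROZEN. */
static bool
transition_ok(enum vcpu_state cur, enum vcpu_state next)
{
        if (cur == VCPU_FROZEN)
                return (next != VCPU_FROZEN);
        return (next == VCPU_FROZEN);
}
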
876 vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate) in vcpu_require_state() argument
880 if ((error = vcpu_set_state(vcpu, newstate, false)) != 0) in vcpu_require_state()
885 vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate) in vcpu_require_state_locked() argument
889 if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0) in vcpu_require_state_locked()
894 vm_get_capability(struct vcpu *vcpu, int type, int *retval) in vm_get_capability() argument
900 return (vmmops_getcap(vcpu->cookie, type, retval)); in vm_get_capability()
904 vm_set_capability(struct vcpu *vcpu, int type, int val) in vm_set_capability() argument
910 return (vmmops_setcap(vcpu->cookie, type, val)); in vm_set_capability()
914 vcpu_vm(struct vcpu *vcpu) in vcpu_vm() argument
917 return (vcpu->vm); in vcpu_vm()
921 vcpu_vcpuid(struct vcpu *vcpu) in vcpu_vcpuid() argument
924 return (vcpu->vcpuid); in vcpu_vcpuid()
928 vcpu_get_cookie(struct vcpu *vcpu) in vcpu_get_cookie() argument
931 return (vcpu->cookie); in vcpu_get_cookie()
934 struct vcpu *
938 return (vm->vcpu[vcpuid]); in vm_vcpu()
942 vcpu_set_state(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle) in vcpu_set_state() argument
946 vcpu_lock(vcpu); in vcpu_set_state()
947 error = vcpu_set_state_locked(vcpu, newstate, from_idle); in vcpu_set_state()
948 vcpu_unlock(vcpu); in vcpu_set_state()
954 vcpu_get_state(struct vcpu *vcpu, int *hostcpu) in vcpu_get_state() argument
958 vcpu_lock(vcpu); in vcpu_get_state()
959 state = vcpu->state; in vcpu_get_state()
961 *hostcpu = vcpu->hostcpu; in vcpu_get_state()
962 vcpu_unlock(vcpu); in vcpu_get_state()
968 vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval) in vm_get_register() argument
974 return (vmmops_getreg(vcpu->cookie, reg, retval)); in vm_get_register()
978 vm_set_register(struct vcpu *vcpu, int reg, uint64_t val) in vm_set_register() argument
984 error = vmmops_setreg(vcpu->cookie, reg, val); in vm_set_register()
988 vcpu->nextpc = val; in vm_set_register()
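
vm_set_register() (lines 978-988) has one subtlety visible here: after vmmops_setreg() succeeds, it refreshes vcpu->nextpc (line 988), presumably only when the register written is the guest program counter (SEPC on RISC-V), so the next vmmops_run() resumes where the host pointed it rather than at a stale address. A toy version of that invariant; the register names are illustrative:

#include <stdint.h>

enum toy_reg { TOY_REG_A0, TOY_REG_SEPC };      /* SEPC: guest pc */

struct toy_regs { uint64_t a0, sepc, nextpc; };

static void
toy_set_register(struct toy_regs *r, enum toy_reg reg, uint64_t val)
{
        switch (reg) {
        case TOY_REG_A0:
                r->a0 = val;
                break;
        case TOY_REG_SEPC:
                r->sepc = val;
                r->nextpc = val;        /* keep the resume point in sync */
                break;
        }
}
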
1001 vm_inject_exception(struct vcpu *vcpu, uint64_t scause) in vm_inject_exception() argument
1004 return (vmmops_exception(vcpu->cookie, scause)); in vm_inject_exception()
1037 vm_handle_wfi(struct vcpu *vcpu, struct vm_exit *vme, bool *retu) in vm_handle_wfi() argument
1040 vcpu_lock(vcpu); in vm_handle_wfi()
1043 if (aplic_check_pending(vcpu->cookie)) in vm_handle_wfi()
1046 if (riscv_check_ipi(vcpu->cookie, false)) in vm_handle_wfi()
1049 if (riscv_check_interrupts_pending(vcpu->cookie)) in vm_handle_wfi()
1052 if (vcpu_should_yield(vcpu)) in vm_handle_wfi()
1055 vcpu_require_state_locked(vcpu, VCPU_SLEEPING); in vm_handle_wfi()
1060 msleep_spin(vcpu, &vcpu->mtx, "vmidle", hz); in vm_handle_wfi()
1061 vcpu_require_state_locked(vcpu, VCPU_FROZEN); in vm_handle_wfi()
1063 vcpu_unlock(vcpu); in vm_handle_wfi()
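
vm_handle_wfi() (lines 1037-1063) is the idle loop behind the guest's WFI instruction: with the vcpu lock held it re-checks every wakeup source (APLIC state, SBI IPIs, pending interrupts, a scheduler yield request, lines 1043-1052) and sleeps, marked SLEEPING, only while all are quiet; the one-tick msleep_spin() timeout bounds how stale those checks can get. A standalone pthread model of the same loop (names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct toy_vcpu {
        pthread_mutex_t mtx;
        pthread_cond_t  wake;           /* signalled by the notify path */
        bool            intr_pending;   /* any of the line 1043-1049 checks */
        bool            should_yield;   /* vcpu_should_yield() (line 1052) */
};

static void
toy_handle_wfi(struct toy_vcpu *v)
{
        pthread_mutex_lock(&v->mtx);
        while (!v->intr_pending && !v->should_yield) {
                /* msleep_spin(..., hz) analogue: sleep at most one
                 * second, then loop and re-evaluate the conditions. */
                struct timespec ts;
                clock_gettime(CLOCK_REALTIME, &ts);
                ts.tv_sec += 1;
                pthread_cond_timedwait(&v->wake, &v->mtx, &ts);
        }
        pthread_mutex_unlock(&v->mtx);
}
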
1071 vm_handle_paging(struct vcpu *vcpu, bool *retu) in vm_handle_paging() argument
1080 vm = vcpu->vm; in vm_handle_paging()
1081 vme = &vcpu->exitinfo; in vm_handle_paging()
1118 vm_handle_suspend(struct vcpu *vcpu, bool *retu) in vm_handle_suspend() argument
1120 struct vm *vm = vcpu->vm; in vm_handle_suspend()
1127 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus); in vm_handle_suspend()
1136 vcpu_lock(vcpu); in vm_handle_suspend()
1141 vcpu_require_state_locked(vcpu, VCPU_SLEEPING); in vm_handle_suspend()
1142 msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz); in vm_handle_suspend()
1143 vcpu_require_state_locked(vcpu, VCPU_FROZEN); in vm_handle_suspend()
1145 vcpu_unlock(vcpu); in vm_handle_suspend()
1147 vcpu_lock(vcpu); in vm_handle_suspend()
1150 vcpu_unlock(vcpu); in vm_handle_suspend()
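
vm_handle_suspend() (lines 1118-1150) is a rendezvous: each vcpu marks itself in suspended_cpus (line 1127), then sleeps in one-tick intervals until every active vcpu has arrived (the wait loop between lines 1136 and 1145), after which the vcpus return to userspace with a suspend exit. A counter-based model with a condition variable; the vcpu count is illustrative:

#include <pthread.h>

static pthread_mutex_t susp_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  susp_cv = PTHREAD_COND_INITIALIZER;
static int suspended, nactive = 4;      /* nactive: illustrative count */

static void
toy_handle_suspend(void)
{
        pthread_mutex_lock(&susp_mtx);
        suspended++;                    /* CPU_SET_ATOMIC at line 1127 */
        if (suspended == nactive)
                pthread_cond_broadcast(&susp_cv);   /* last one wakes all */
        while (suspended < nactive)
                pthread_cond_wait(&susp_cv, &susp_mtx);
        pthread_mutex_unlock(&susp_mtx);
}
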
1166 vm_run(struct vcpu *vcpu) in vm_run() argument
1176 vm = vcpu->vm; in vm_run()
1180 vcpuid = vcpu->vcpuid; in vm_run()
1189 vme = &vcpu->exitinfo; in vm_run()
1196 restore_guest_fpustate(vcpu); in vm_run()
1198 vcpu_require_state(vcpu, VCPU_RUNNING); in vm_run()
1199 error = vmmops_run(vcpu->cookie, vcpu->nextpc, pmap, &evinfo); in vm_run()
1200 vcpu_require_state(vcpu, VCPU_FROZEN); in vm_run()
1202 save_guest_fpustate(vcpu); in vm_run()
1210 vcpu->nextpc = vme->pc + vme->inst_length; in vm_run()
1211 error = vm_handle_inst_emul(vcpu, &retu); in vm_run()
1214 vcpu->nextpc = vme->pc + vme->inst_length; in vm_run()
1215 error = vm_handle_wfi(vcpu, vme, &retu); in vm_run()
1219 vcpu->nextpc = vme->pc + vme->inst_length; in vm_run()
1223 vcpu->nextpc = vme->pc; in vm_run()
1224 error = vm_handle_paging(vcpu, &retu); in vm_run()
1227 vcpu->nextpc = vme->pc; in vm_run()
1232 vcpu->nextpc = vme->pc; in vm_run()
1233 error = vm_handle_suspend(vcpu, &retu); in vm_run()
1237 vcpu->nextpc = vme->pc; in vm_run()
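
The vm_run() fragments (lines 1166-1237) show the shape of the run loop: restore the guest FPU, flip the state to RUNNING around vmmops_run(), save the FPU, then dispatch on the exit reason. Handlers that fully emulate the exiting instruction resume at pc + inst_length, while handlers that fix up a condition and retry (paging, suspend) resume at the same pc. A sketch of that nextpc selection; the exit codes are illustrative stand-ins for the VM_EXITCODE_* values:

#include <stdint.h>

enum toy_exit { EXIT_INST_EMUL, EXIT_WFI, EXIT_PAGING, EXIT_SUSPENDED };

static uint64_t
toy_next_pc(enum toy_exit reason, uint64_t pc, uint32_t inst_length)
{
        switch (reason) {
        case EXIT_INST_EMUL:    /* instruction emulated: step past it */
        case EXIT_WFI:
                return (pc + inst_length);
        case EXIT_PAGING:       /* condition fixed up: retry same pc */
        case EXIT_SUSPENDED:
        default:
                return (pc);
        }
}

The RUNNING/FROZEN bracketing around vmmops_run() (lines 1198-1200) is also what makes the notify IPI reliable: a vcpu is RUNNING, with hostcpu set, only while it is actually inside the backend.
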