Lines Matching "retain-state-suspended" (query tokenized as +full:retain +full:state +full:suspended)

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
10 * 1. Redistributions of source code must retain the above copyright
79 enum vcpu_state state; member
87 void *cookie; /* (i) cpu-specific data */
88 struct vfpstate *guestfpu; /* (a,i) guest fpu state */
91 #define vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
92 #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
93 #define vcpu_lock_destroy(v) mtx_destroy(&((v)->mtx))
94 #define vcpu_lock(v) mtx_lock_spin(&((v)->mtx))
95 #define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx))
96 #define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED)
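
These spin-mutex wrappers serialize all access to vcpu->state. The usual calling pattern, mirroring the _locked notify helper matched further down, looks like this (a minimal sketch, not verbatim source):

    void
    vcpu_notify_event(struct vcpu *vcpu)
    {
            vcpu_lock(vcpu);
            vcpu_notify_event_locked(vcpu);
            vcpu_unlock(vcpu);
    }
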
122 void *cookie; /* (i) cpu-specific data */
127 volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
233 VMM_STAT(VMEXIT_SS, "number of vmexits for a single-step exception");
241 #define VM_MAXCPU MIN(0xffff - 1, CPU_SETSIZE)
247 regs->field = vmm_arch_regs_masks.field; \ in vmm_regs_init()
248 if (!get_kernel_reg_iss_masked(reg ## _ISS, &regs->field, \ in vmm_regs_init()
249 masks->field)) \ in vmm_regs_init()
250 regs->field = 0; \ in vmm_regs_init()
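
The four matched lines above are one macro body inside vmm_regs_init(): fetch the architected mask for a register, then read the real kernel value through get_kernel_reg_iss_masked(), zeroing the field if the read fails. Assembled (the macro name _FETCH_KERNEL_REG is an assumption), it would read roughly:

    #define _FETCH_KERNEL_REG(reg, field) do {                          \
            regs->field = vmm_arch_regs_masks.field;                    \
            if (!get_kernel_reg_iss_masked(reg ## _ISS, &regs->field,   \
                masks->field))                                          \
                    regs->field = 0;                                    \
    } while (0)
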
271 vmmops_vcpu_cleanup(vcpu->cookie); in vcpu_cleanup()
272 vcpu->cookie = NULL; in vcpu_cleanup()
274 vmm_stat_free(vcpu->stats); in vcpu_cleanup()
275 fpu_save_area_free(vcpu->guestfpu); in vcpu_cleanup()
285 KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus, in vcpu_alloc()
290 vcpu->state = VCPU_IDLE; in vcpu_alloc()
291 vcpu->hostcpu = NOCPU; in vcpu_alloc()
292 vcpu->vcpuid = vcpu_id; in vcpu_alloc()
293 vcpu->vm = vm; in vcpu_alloc()
294 vcpu->guestfpu = fpu_save_area_alloc(); in vcpu_alloc()
295 vcpu->stats = vmm_stat_alloc(); in vcpu_alloc()
302 vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid); in vcpu_init()
303 MPASS(vcpu->cookie != NULL); in vcpu_init()
304 fpu_save_area_reset(vcpu->guestfpu); in vcpu_init()
305 vmm_stat_init(vcpu->stats); in vcpu_init()
311 return (&vcpu->exitinfo); in vm_exitinfo()
375 * Something bad happened - prevent new in vmm_handler()
398 * - HYP initialization requires smp_rendezvous() and therefore must happen
400 * - vmm device initialization requires an initialized devfs.
410 vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace)); in vm_init()
411 MPASS(vm->cookie != NULL); in vm_init()
413 CPU_ZERO(&vm->active_cpus); in vm_init()
414 CPU_ZERO(&vm->debug_cpus); in vm_init()
416 vm->suspend = 0; in vm_init()
417 CPU_ZERO(&vm->suspended_cpus); in vm_init()
419 memset(vm->mmio_region, 0, sizeof(vm->mmio_region)); in vm_init()
420 memset(vm->special_reg, 0, sizeof(vm->special_reg)); in vm_init()
423 for (i = 0; i < vm->maxcpus; i++) { in vm_init()
424 if (vm->vcpu[i] != NULL) in vm_init()
425 vcpu_init(vm->vcpu[i]); in vm_init()
433 sx_xlock(&vm->vcpus_init_lock); in vm_disable_vcpu_creation()
434 vm->dying = true; in vm_disable_vcpu_creation()
435 sx_xunlock(&vm->vcpus_init_lock); in vm_disable_vcpu_creation()
447 if (vcpuid >= vgic_max_cpu_count(vm->cookie)) in vm_alloc_vcpu()
451 atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]); in vm_alloc_vcpu()
455 sx_xlock(&vm->vcpus_init_lock); in vm_alloc_vcpu()
456 vcpu = vm->vcpu[vcpuid]; in vm_alloc_vcpu()
457 if (vcpu == NULL && !vm->dying) { in vm_alloc_vcpu()
465 atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid], in vm_alloc_vcpu()
468 sx_xunlock(&vm->vcpus_init_lock); in vm_alloc_vcpu()
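
Pieced together, vm_alloc_vcpu() is a double-checked creation pattern: a lock-free acquire-load fast path for vcpus that already exist, with vcpus_init_lock taken only to allocate on first use. A sketch of the assembled flow (bounds checks elided):

    struct vcpu *
    vm_alloc_vcpu(struct vm *vm, int vcpuid)
    {
            struct vcpu *vcpu;

            /* Fast path: a fully initialized vcpu was published earlier. */
            vcpu = (struct vcpu *)
                atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]);
            if (vcpu != NULL)
                    return (vcpu);

            sx_xlock(&vm->vcpus_init_lock);
            vcpu = vm->vcpu[vcpuid];
            if (vcpu == NULL && !vm->dying) {
                    vcpu = vcpu_alloc(vm, vcpuid);
                    vcpu_init(vcpu);
                    /* Release store: readers only ever see a complete vcpu. */
                    atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid],
                        (uintptr_t)vcpu);
            }
            sx_xunlock(&vm->vcpus_init_lock);
            return (vcpu);
    }

The acquire/release pairing is what lets the fast path skip the lock: any thread that observes a non-NULL pointer is guaranteed to also observe the vcpu_init() writes.
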
475 sx_slock(&vm->vcpus_init_lock); in vm_slock_vcpus()
481 sx_unlock(&vm->vcpus_init_lock); in vm_unlock_vcpus()
505 strcpy(vm->name, name); in vm_create()
506 vm->vmspace = vmspace; in vm_create()
507 vm_mem_init(&vm->mem); in vm_create()
508 sx_init(&vm->vcpus_init_lock, "vm vcpus"); in vm_create()
510 vm->sockets = 1; in vm_create()
511 vm->cores = 1; /* XXX backwards compatibility */ in vm_create()
512 vm->threads = 1; /* XXX backwards compatibility */ in vm_create()
513 vm->maxcpus = vm_maxcpu; in vm_create()
515 vm->vcpu = malloc(sizeof(*vm->vcpu) * vm->maxcpus, M_VMM, in vm_create()
528 *sockets = vm->sockets; in vm_get_topology()
529 *cores = vm->cores; in vm_get_topology()
530 *threads = vm->threads; in vm_get_topology()
531 *maxcpus = vm->maxcpus; in vm_get_topology()
537 return (vm->maxcpus); in vm_get_maxcpus()
545 if ((sockets * cores * threads) > vm->maxcpus) in vm_set_topology()
547 vm->sockets = sockets; in vm_set_topology()
548 vm->cores = cores; in vm_set_topology()
549 vm->threads = threads; in vm_set_topology()
561 pmap = vmspace_pmap(vm->vmspace); in vm_cleanup()
566 MPASS(cpuid_to_pcpu[i]->pc_curvmpmap != pmap); in vm_cleanup()
572 vgic_detach_from_vm(vm->cookie); in vm_cleanup()
574 for (i = 0; i < vm->maxcpus; i++) { in vm_cleanup()
575 if (vm->vcpu[i] != NULL) in vm_cleanup()
576 vcpu_cleanup(vm->vcpu[i], destroy); in vm_cleanup()
579 vmmops_cleanup(vm->cookie); in vm_cleanup()
585 vmmops_vmspace_free(vm->vmspace); in vm_cleanup()
586 vm->vmspace = NULL; in vm_cleanup()
588 for (i = 0; i < vm->maxcpus; i++) in vm_cleanup()
589 free(vm->vcpu[i], M_VMM); in vm_cleanup()
590 free(vm->vcpu, M_VMM); in vm_cleanup()
591 sx_destroy(&vm->vcpus_init_lock); in vm_cleanup()
608 * A virtual machine can be reset only if all vcpus are suspended. in vm_reinit()
610 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { in vm_reinit()
624 return (vm->name); in vm_name()
631 return (vmmops_gla2gpa(vcpu->cookie, paging, gla, prot, gpa, is_fault)); in vm_gla2gpa_nofault()
719 for (i = 0; i < nitems(vm->special_reg); i++) { in vm_register_reg_handler()
720 if (vm->special_reg[i].esr_iss == 0 && in vm_register_reg_handler()
721 vm->special_reg[i].esr_mask == 0) { in vm_register_reg_handler()
722 vm->special_reg[i].esr_iss = iss; in vm_register_reg_handler()
723 vm->special_reg[i].esr_mask = mask; in vm_register_reg_handler()
724 vm->special_reg[i].reg_read = reg_read; in vm_register_reg_handler()
725 vm->special_reg[i].reg_write = reg_write; in vm_register_reg_handler()
726 vm->special_reg[i].arg = arg; in vm_register_reg_handler()
739 for (i = 0; i < nitems(vm->special_reg); i++) { in vm_deregister_reg_handler()
740 if (vm->special_reg[i].esr_iss == iss && in vm_deregister_reg_handler()
741 vm->special_reg[i].esr_mask == mask) { in vm_deregister_reg_handler()
742 memset(&vm->special_reg[i], 0, in vm_deregister_reg_handler()
743 sizeof(vm->special_reg[i])); in vm_deregister_reg_handler()
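
vm_register_reg_handler() claims the first empty vm->special_reg[] slot with an ISS value/mask pair plus read/write callbacks, and vm_deregister_reg_handler() zeroes the slot that matches exactly. A hypothetical caller (the iss_encoding/iss_mask values and my_reg_* callbacks are invented for illustration):

    /* Trap guest accesses to one system register. */
    vm_register_reg_handler(vm, iss_encoding, iss_mask,
        my_reg_read, my_reg_write, softc);
    /* ... later ... */
    vm_deregister_reg_handler(vm, iss_encoding, iss_mask);
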
760 vm = vcpu->vm; in vm_handle_reg_emul()
761 vme = &vcpu->exitinfo; in vm_handle_reg_emul()
762 vre = &vme->u.reg_emul.vre; in vm_handle_reg_emul()
764 for (i = 0; i < nitems(vm->special_reg); i++) { in vm_handle_reg_emul()
765 if (vm->special_reg[i].esr_iss == 0 && in vm_handle_reg_emul()
766 vm->special_reg[i].esr_mask == 0) in vm_handle_reg_emul()
769 if ((vre->inst_syndrome & vm->special_reg[i].esr_mask) == in vm_handle_reg_emul()
770 vm->special_reg[i].esr_iss) { in vm_handle_reg_emul()
772 vm->special_reg[i].reg_read, in vm_handle_reg_emul()
773 vm->special_reg[i].reg_write, in vm_handle_reg_emul()
774 vm->special_reg[i].arg); in vm_handle_reg_emul()
782 if ((vre->inst_syndrome & vmm_special_regs[i].esr_mask) == in vm_handle_reg_emul()
806 for (i = 0; i < nitems(vm->mmio_region); i++) { in vm_register_inst_handler()
807 if (vm->mmio_region[i].start == 0 && in vm_register_inst_handler()
808 vm->mmio_region[i].end == 0) { in vm_register_inst_handler()
809 vm->mmio_region[i].start = start; in vm_register_inst_handler()
810 vm->mmio_region[i].end = start + size; in vm_register_inst_handler()
811 vm->mmio_region[i].read = mmio_read; in vm_register_inst_handler()
812 vm->mmio_region[i].write = mmio_write; in vm_register_inst_handler()
825 for (i = 0; i < nitems(vm->mmio_region); i++) { in vm_deregister_inst_handler()
826 if (vm->mmio_region[i].start == start && in vm_deregister_inst_handler()
827 vm->mmio_region[i].end == start + size) { in vm_deregister_inst_handler()
828 memset(&vm->mmio_region[i], 0, in vm_deregister_inst_handler()
829 sizeof(vm->mmio_region[i])); in vm_deregister_inst_handler()
834 panic("%s: Invalid MMIO region: %lx - %lx", __func__, start, in vm_deregister_inst_handler()
850 vm = vcpu->vm; in vm_handle_inst_emul()
851 hyp = vm->cookie; in vm_handle_inst_emul()
852 if (!hyp->vgic_attached) in vm_handle_inst_emul()
855 vme = &vcpu->exitinfo; in vm_handle_inst_emul()
856 vie = &vme->u.inst_emul.vie; in vm_handle_inst_emul()
857 paging = &vme->u.inst_emul.paging; in vm_handle_inst_emul()
859 fault_ipa = vme->u.inst_emul.gpa; in vm_handle_inst_emul()
862 for (i = 0; i < nitems(vm->mmio_region); i++) { in vm_handle_inst_emul()
863 if (vm->mmio_region[i].start <= fault_ipa && in vm_handle_inst_emul()
864 vm->mmio_region[i].end > fault_ipa) { in vm_handle_inst_emul()
865 vmr = &vm->mmio_region[i]; in vm_handle_inst_emul()
873 vmr->read, vmr->write, retu); in vm_handle_inst_emul()
889 if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) { in vm_suspend()
890 VM_CTR2(vm, "virtual machine already suspended %d/%d", in vm_suspend()
891 vm->suspend, how); in vm_suspend()
895 VM_CTR1(vm, "virtual machine successfully suspended %d", how); in vm_suspend()
898 * Notify all active vcpus that they are now suspended. in vm_suspend()
900 for (i = 0; i < vm->maxcpus; i++) { in vm_suspend()
901 if (CPU_ISSET(i, &vm->active_cpus)) in vm_suspend()
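
The loop cut off above presumably completes the notification by kicking every active vcpu so it observes vm->suspend and exits to the host, along the lines of:

    for (i = 0; i < vm->maxcpus; i++) {
            if (CPU_ISSET(i, &vm->active_cpus))
                    vcpu_notify_event(vm_vcpu(vm, i));
    }
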
911 struct vm *vm = vcpu->vm; in vm_exit_suspended()
914 KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST, in vm_exit_suspended()
915 ("vm_exit_suspended: invalid suspend type %d", vm->suspend)); in vm_exit_suspended()
918 vmexit->pc = pc; in vm_exit_suspended()
919 vmexit->inst_length = 4; in vm_exit_suspended()
920 vmexit->exitcode = VM_EXITCODE_SUSPENDED; in vm_exit_suspended()
921 vmexit->u.suspended.how = vm->suspend; in vm_exit_suspended()
930 vmexit->pc = pc; in vm_exit_debug()
931 vmexit->inst_length = 4; in vm_exit_debug()
932 vmexit->exitcode = VM_EXITCODE_DEBUG; in vm_exit_debug()
938 struct vm *vm = vcpu->vm; in vm_activate_cpu()
940 if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_activate_cpu()
943 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus); in vm_activate_cpu()
952 vm->debug_cpus = vm->active_cpus; in vm_suspend_cpu()
953 for (int i = 0; i < vm->maxcpus; i++) { in vm_suspend_cpu()
954 if (CPU_ISSET(i, &vm->active_cpus)) in vm_suspend_cpu()
958 if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_suspend_cpu()
961 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_suspend_cpu()
972 CPU_ZERO(&vm->debug_cpus); in vm_resume_cpu()
974 if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus)) in vm_resume_cpu()
977 CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_resume_cpu()
986 return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus)); in vcpu_debugged()
993 return (vm->active_cpus); in vm_active_cpus()
1000 return (vm->debug_cpus); in vm_debug_cpus()
1007 return (vm->suspended_cpus); in vm_suspended_cpus()
1015 return (vcpu->stats); in vcpu_stats()
1021 * - If the vcpu thread is sleeping then it is woken up.
1022 * - If the vcpu is running on a different host_cpu then an IPI will be directed
1030 hostcpu = vcpu->hostcpu; in vcpu_notify_event_locked()
1031 if (vcpu->state == VCPU_RUNNING) { in vcpu_notify_event_locked()
1044 KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent " in vcpu_notify_event_locked()
1045 "with hostcpu %d", vcpu->state, hostcpu)); in vcpu_notify_event_locked()
1046 if (vcpu->state == VCPU_SLEEPING) in vcpu_notify_event_locked()
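
Assembled, vcpu_notify_event_locked() branches on the state captured above: a RUNNING vcpu on another host CPU gets an IPI, a SLEEPING vcpu gets woken, and anything else needs no action. Roughly (the IPI mechanism differs between ports, so treat the ipi_cpu()/vmm_ipinum pair as an assumption):

    if (vcpu->state == VCPU_RUNNING) {
            if (hostcpu != curcpu)
                    ipi_cpu(hostcpu, vmm_ipinum); /* force a guest exit */
    } else if (vcpu->state == VCPU_SLEEPING) {
            wakeup_one(vcpu);                     /* wake msleep_spin() */
    }
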
1062 return (vm->vmspace); in vm_vmspace()
1068 return (&vm->mem); in vm_mem()
1075 /* flush host state to the pcb */ in restore_guest_fpustate()
1076 vfp_save_state(curthread, curthread->td_pcb); in restore_guest_fpustate()
1077 /* Ensure the VFP state will be re-loaded when exiting the guest */ in restore_guest_fpustate()
1080 /* restore guest FPU state */ in restore_guest_fpustate()
1082 vfp_restore(vcpu->guestfpu); in restore_guest_fpustate()
1085 * The FPU is now "dirty" with the guest's state so turn on emulation in restore_guest_fpustate()
1098 /* save guest FPU state */ in save_guest_fpustate()
1100 vfp_store(vcpu->guestfpu); in save_guest_fpustate()
1115 * State transitions from the vmmdev_ioctl() must always begin from in vcpu_set_state_locked()
1116 * the VCPU_IDLE state. This guarantees that there is only a single in vcpu_set_state_locked()
1120 while (vcpu->state != VCPU_IDLE) { in vcpu_set_state_locked()
1122 msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz); in vcpu_set_state_locked()
1125 KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from " in vcpu_set_state_locked()
1126 "vcpu idle state")); in vcpu_set_state_locked()
1129 if (vcpu->state == VCPU_RUNNING) { in vcpu_set_state_locked()
1130 KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d " in vcpu_set_state_locked()
1131 "mismatch for running vcpu", curcpu, vcpu->hostcpu)); in vcpu_set_state_locked()
1133 KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a " in vcpu_set_state_locked()
1134 "vcpu that is not running", vcpu->hostcpu)); in vcpu_set_state_locked()
1138 * The following state transitions are allowed: in vcpu_set_state_locked()
1139 * IDLE -> FROZEN -> IDLE in vcpu_set_state_locked()
1140 * FROZEN -> RUNNING -> FROZEN in vcpu_set_state_locked()
1141 * FROZEN -> SLEEPING -> FROZEN in vcpu_set_state_locked()
1143 switch (vcpu->state) { in vcpu_set_state_locked()
1160 vcpu->state = newstate; in vcpu_set_state_locked()
1162 vcpu->hostcpu = curcpu; in vcpu_set_state_locked()
1164 vcpu->hostcpu = NOCPU; in vcpu_set_state_locked()
1167 wakeup(&vcpu->state); in vcpu_set_state_locked()
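
The transition diagram in the comment reduces to one rule: every move into or out of RUNNING and SLEEPING passes through FROZEN, and only the holder of FROZEN may pick the next state. The validity check this function presumably performs:

    switch (vcpu->state) {
    case VCPU_IDLE:
    case VCPU_RUNNING:
    case VCPU_SLEEPING:
            error = (newstate != VCPU_FROZEN);  /* must freeze first */
            break;
    case VCPU_FROZEN:
            error = (newstate == VCPU_FROZEN);  /* FROZEN -> anything else */
            break;
    default:
            error = 1;
            break;
    }
    if (error)
            return (EBUSY);
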
1178 panic("Error %d setting state to %d\n", error, newstate); in vcpu_require_state()
1187 panic("Error %d setting state to %d", error, newstate); in vcpu_require_state_locked()
1196 return (vmmops_getcap(vcpu->cookie, type, retval)); in vm_get_capability()
1205 return (vmmops_setcap(vcpu->cookie, type, val)); in vm_set_capability()
1211 return (vcpu->vm); in vcpu_vm()
1217 return (vcpu->vcpuid); in vcpu_vcpuid()
1223 return (vcpu->cookie); in vcpu_get_cookie()
1229 return (vm->vcpu[vcpuid]); in vm_vcpu()
1247 enum vcpu_state state; in vcpu_get_state() local
1250 state = vcpu->state; in vcpu_get_state()
1252 *hostcpu = vcpu->hostcpu; in vcpu_get_state()
1255 return (state); in vcpu_get_state()
1265 return (vmmops_getreg(vcpu->cookie, reg, retval)); in vm_get_register()
1275 error = vmmops_setreg(vcpu->cookie, reg, val); in vm_set_register()
1279 vcpu->nextpc = val; in vm_set_register()
1287 return (vm->cookie); in vm_get_cookie()
1293 return (vmmops_exception(vcpu->cookie, esr, far)); in vm_inject_exception()
1299 return (vgic_attach_to_vm(vm->cookie, descr)); in vm_attach_vgic()
1305 return (vgic_inject_irq(vm->cookie, -1, irq, true)); in vm_assert_irq()
1311 return (vgic_inject_irq(vm->cookie, -1, irq, false)); in vm_deassert_irq()
1319 return (vgic_inject_msi(vm->cookie, msg, addr)); in vm_raise_msi()
1330 if ((hypctx->tf.tf_esr & ESR_ELx_ISS_MASK) != 0) in vm_handle_smccc_call()
1333 vme->exitcode = VM_EXITCODE_SMCCC; in vm_handle_smccc_call()
1334 vme->u.smccc_call.func_id = hypctx->tf.tf_x[0]; in vm_handle_smccc_call()
1335 for (i = 0; i < nitems(vme->u.smccc_call.args); i++) in vm_handle_smccc_call()
1336 vme->u.smccc_call.args[i] = hypctx->tf.tf_x[i + 1]; in vm_handle_smccc_call()
1347 if (vgic_has_pending_irq(vcpu->cookie)) in vm_handle_wfi()
1358 msleep_spin(vcpu, &vcpu->mtx, "vmidle", hz); in vm_handle_wfi()
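
vm_handle_wfi() therefore parks the vcpu until the vgic reports a pending interrupt, sleeping in one-second slices so it can recheck. A sketch of the loop (the real code also honors suspend and reschedule requests):

    vcpu_lock(vcpu);
    while (1) {
            if (vgic_has_pending_irq(vcpu->cookie))
                    break;
            vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
            msleep_spin(vcpu, &vcpu->mtx, "vmidle", hz);
            vcpu_require_state_locked(vcpu, VCPU_RUNNING);
    }
    vcpu_unlock(vcpu);
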
1370 struct vm *vm = vcpu->vm; in vm_handle_paging()
1377 vme = &vcpu->exitinfo; in vm_handle_paging()
1379 pmap = vmspace_pmap(vcpu->vm->vmspace); in vm_handle_paging()
1380 addr = vme->u.paging.gpa; in vm_handle_paging()
1381 esr = vme->u.paging.esr; in vm_handle_paging()
1396 map = &vm->vmspace->vm_map; in vm_handle_paging()
1397 rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL); in vm_handle_paging()
1407 struct vm *vm = vcpu->vm; in vm_handle_suspend()
1414 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus); in vm_handle_suspend()
1417 * Wait until all 'active_cpus' have suspended themselves. in vm_handle_suspend()
1419 * Since a VM may be suspended at any time including when one or in vm_handle_suspend()
1425 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) in vm_handle_suspend()
1429 msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz); in vm_handle_suspend()
1442 for (i = 0; i < vm->maxcpus; i++) { in vm_handle_suspend()
1443 if (CPU_ISSET(i, &vm->suspended_cpus)) { in vm_handle_suspend()
1455 struct vm *vm = vcpu->vm; in vm_run()
1462 vcpuid = vcpu->vcpuid; in vm_run()
1464 if (!CPU_ISSET(vcpuid, &vm->active_cpus)) in vm_run()
1467 if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) in vm_run()
1470 pmap = vmspace_pmap(vm->vmspace); in vm_run()
1471 vme = &vcpu->exitinfo; in vm_run()
1473 evinfo.sptr = &vm->suspend; in vm_run()
1481 error = vmmops_run(vcpu->cookie, vcpu->nextpc, pmap, &evinfo); in vm_run()
1490 switch (vme->exitcode) { in vm_run()
1492 vcpu->nextpc = vme->pc + vme->inst_length; in vm_run()
1497 vcpu->nextpc = vme->pc + vme->inst_length; in vm_run()
1506 vcpu->nextpc = vme->pc; in vm_run()
1515 vcpu->nextpc = vme->pc + vme->inst_length; in vm_run()
1520 vcpu->nextpc = vme->pc; in vm_run()
1525 vcpu->nextpc = vme->pc; in vm_run()
1531 vcpu->nextpc = vme->pc; in vm_run()
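
The dispatch above follows one convention: exits that fully emulate an instruction (WFI, register and MMIO emulation, SMCCC) advance nextpc past it by inst_length, while exits that restart or bounce to userspace keep nextpc at vme->pc so the same instruction runs again. Schematically:

    switch (vme->exitcode) {
    case VM_EXITCODE_PAGING:
            vcpu->nextpc = vme->pc;                     /* retry after fix-up */
            error = vm_handle_paging(vcpu, &retu);
            break;
    case VM_EXITCODE_REG_EMUL:
            vcpu->nextpc = vme->pc + vme->inst_length;  /* skip emulated insn */
            error = vm_handle_reg_emul(vcpu, &retu);
            break;
    /* ... */
    }
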