Lines Matching +full:gpa +full:- +full:0

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
86 void *cookie; /* (i) cpu-specific data */
90 #define vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
91 #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
92 #define vcpu_lock_destroy(v) mtx_destroy(&((v)->mtx))
93 #define vcpu_lock(v) mtx_lock_spin(&((v)->mtx))
94 #define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx))
95 #define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED)
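A minimal usage sketch for the lock macros above, assuming the vcpu->state field and VCPU_SLEEPING value shown later in this listing; the exact vcpu_notify_event_locked() prototype is not shown here and is assumed:

    /* Sketch only: wake a sleeping vcpu while holding its spin lock. */
    static void
    example_wake_vcpu(struct vcpu *vcpu)
    {
            vcpu_lock(vcpu);
            vcpu_assert_locked(vcpu);
            if (vcpu->state == VCPU_SLEEPING)
                    vcpu_notify_event_locked(vcpu); /* assumed signature */
            vcpu_unlock(vcpu);
    }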
98 uint64_t gpa; member
107 vm_paddr_t gpa; member
140 void *cookie; /* (i) cpu-specific data */
177 SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
236 &vm_maxcpu, 0, "Maximum number of vCPUs");
255 VMM_STAT(VMEXIT_SS, "number of vmexits for a single-step exception");
263 #define VM_MAXCPU MIN(0xffff - 1, CPU_SETSIZE)
269 regs->field = vmm_arch_regs_masks.field; \ in vmm_regs_init()
270 if (!get_kernel_reg_masked(reg, &regs->field, masks->field)) \ in vmm_regs_init()
271 regs->field = 0; \ in vmm_regs_init()
272 } while (0) in vmm_regs_init()
286 return (0); in vmm_regs_init()
292 vmmops_vcpu_cleanup(vcpu->cookie); in vcpu_cleanup()
293 vcpu->cookie = NULL; in vcpu_cleanup()
295 vmm_stat_free(vcpu->stats); in vcpu_cleanup()
296 fpu_save_area_free(vcpu->guestfpu); in vcpu_cleanup()
306 KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus, in vcpu_alloc()
311 vcpu->state = VCPU_IDLE; in vcpu_alloc()
312 vcpu->hostcpu = NOCPU; in vcpu_alloc()
313 vcpu->vcpuid = vcpu_id; in vcpu_alloc()
314 vcpu->vm = vm; in vcpu_alloc()
315 vcpu->guestfpu = fpu_save_area_alloc(); in vcpu_alloc()
316 vcpu->stats = vmm_stat_alloc(); in vcpu_alloc()
323 vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid); in vcpu_init()
324 MPASS(vcpu->cookie != NULL); in vcpu_init()
325 fpu_save_area_reset(vcpu->guestfpu); in vcpu_init()
326 vmm_stat_init(vcpu->stats); in vcpu_init()
332 return (&vcpu->exitinfo); in vm_exitinfo()
347 if (vm_maxcpu == 0) in vmm_init()
351 if (error != 0) in vmm_init()
354 return (vmmops_modinit(0)); in vmm_init()
366 if (error != 0) in vmm_handler()
369 if (error == 0) in vmm_handler()
375 if (error == 0 && vmm_initialized) { in vmm_handler()
382 error = 0; in vmm_handler()
397 * - HYP initialization requires smp_rendezvous() and therefore must happen
399 * - vmm device initialization requires an initialized devfs.
409 vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace)); in vm_init()
410 MPASS(vm->cookie != NULL); in vm_init()
412 CPU_ZERO(&vm->active_cpus); in vm_init()
413 CPU_ZERO(&vm->debug_cpus); in vm_init()
415 vm->suspend = 0; in vm_init()
416 CPU_ZERO(&vm->suspended_cpus); in vm_init()
418 memset(vm->mmio_region, 0, sizeof(vm->mmio_region)); in vm_init()
419 memset(vm->special_reg, 0, sizeof(vm->special_reg)); in vm_init()
422 for (i = 0; i < vm->maxcpus; i++) { in vm_init()
423 if (vm->vcpu[i] != NULL) in vm_init()
424 vcpu_init(vm->vcpu[i]); in vm_init()
432 sx_xlock(&vm->vcpus_init_lock); in vm_disable_vcpu_creation()
433 vm->dying = true; in vm_disable_vcpu_creation()
434 sx_xunlock(&vm->vcpus_init_lock); in vm_disable_vcpu_creation()
442 if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm)) in vm_alloc_vcpu()
446 if (vcpuid >= vgic_max_cpu_count(vm->cookie)) in vm_alloc_vcpu()
450 atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]); in vm_alloc_vcpu()
454 sx_xlock(&vm->vcpus_init_lock); in vm_alloc_vcpu()
455 vcpu = vm->vcpu[vcpuid]; in vm_alloc_vcpu()
456 if (vcpu == NULL && !vm->dying) { in vm_alloc_vcpu()
464 atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid], in vm_alloc_vcpu()
467 sx_xunlock(&vm->vcpus_init_lock); in vm_alloc_vcpu()
474 sx_slock(&vm->vcpus_init_lock); in vm_slock_vcpus()
480 sx_unlock(&vm->vcpus_init_lock); in vm_unlock_vcpus()
499 vmspace = vmmops_vmspace_alloc(0, 1ul << 39); in vm_create()
504 strcpy(vm->name, name); in vm_create()
505 vm->vmspace = vmspace; in vm_create()
506 sx_init(&vm->mem_segs_lock, "vm mem_segs"); in vm_create()
507 sx_init(&vm->vcpus_init_lock, "vm vcpus"); in vm_create()
509 vm->sockets = 1; in vm_create()
510 vm->cores = 1; /* XXX backwards compatibility */ in vm_create()
511 vm->threads = 1; /* XXX backwards compatibility */ in vm_create()
512 vm->maxcpus = vm_maxcpu; in vm_create()
514 vm->vcpu = malloc(sizeof(*vm->vcpu) * vm->maxcpus, M_VMM, in vm_create()
520 return (0); in vm_create()
527 *sockets = vm->sockets; in vm_get_topology()
528 *cores = vm->cores; in vm_get_topology()
529 *threads = vm->threads; in vm_get_topology()
530 *maxcpus = vm->maxcpus; in vm_get_topology()
536 return (vm->maxcpus); in vm_get_maxcpus()
544 if ((sockets * cores * threads) > vm->maxcpus) in vm_set_topology()
546 vm->sockets = sockets; in vm_set_topology()
547 vm->cores = cores; in vm_set_topology()
548 vm->threads = threads; in vm_set_topology()
549 return(0); in vm_set_topology()
560 pmap = vmspace_pmap(vm->vmspace); in vm_cleanup()
565 MPASS(cpuid_to_pcpu[i]->pc_curvmpmap != pmap); in vm_cleanup()
569 vgic_detach_from_vm(vm->cookie); in vm_cleanup()
571 for (i = 0; i < vm->maxcpus; i++) { in vm_cleanup()
572 if (vm->vcpu[i] != NULL) in vm_cleanup()
573 vcpu_cleanup(vm->vcpu[i], destroy); in vm_cleanup()
576 vmmops_cleanup(vm->cookie); in vm_cleanup()
587 for (i = 0; i < VM_MAX_MEMMAPS; i++) { in vm_cleanup()
588 mm = &vm->mem_maps[i]; in vm_cleanup()
595 for (i = 0; i < VM_MAX_MEMSEGS; i++) in vm_cleanup()
598 vmmops_vmspace_free(vm->vmspace); in vm_cleanup()
599 vm->vmspace = NULL; in vm_cleanup()
601 for (i = 0; i < vm->maxcpus; i++) in vm_cleanup()
602 free(vm->vcpu[i], M_VMM); in vm_cleanup()
603 free(vm->vcpu, M_VMM); in vm_cleanup()
604 sx_destroy(&vm->vcpus_init_lock); in vm_cleanup()
605 sx_destroy(&vm->mem_segs_lock); in vm_cleanup()
624 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { in vm_reinit()
627 error = 0; in vm_reinit()
638 return (vm->name); in vm_name()
644 sx_slock(&vm->mem_segs_lock); in vm_slock_memsegs()
650 sx_xlock(&vm->mem_segs_lock); in vm_xlock_memsegs()
656 sx_unlock(&vm->mem_segs_lock); in vm_unlock_memsegs()
660 * Return 'true' if 'gpa' is allocated in the guest address space.
663 * an implicit lock on 'vm->mem_maps[]'.
666 vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa) in vm_mem_allocated() argument
668 struct vm *vm = vcpu->vm; in vm_mem_allocated()
679 for (i = 0; i < VM_MAX_MEMMAPS; i++) { in vm_mem_allocated()
680 mm = &vm->mem_maps[i]; in vm_mem_allocated()
681 if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len) in vm_mem_allocated()
682 return (true); /* 'gpa' is sysmem or devmem */ in vm_mem_allocated()
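A hedged caller sketch for vm_mem_allocated(): because the running vcpu acts as the implicit lock noted in the comment above, the check can be made without taking vm->mem_segs_lock. The surrounding fault-handling shape is illustrative only:

    /* Illustrative only: classify a stage-2 fault on 'fault_ipa'. */
    if (vm_mem_allocated(vcpu, fault_ipa)) {
            /* Backed by a memmap: let the paging path fault the page in. */
    } else {
            /* No backing memmap: hand the access to MMIO instruction emulation. */
    }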
694 sx_assert(&vm->mem_segs_lock, SX_XLOCKED); in vm_alloc_memseg()
696 if (ident < 0 || ident >= VM_MAX_MEMSEGS) in vm_alloc_memseg()
699 if (len == 0 || (len & PAGE_MASK)) in vm_alloc_memseg()
702 seg = &vm->mem_segs[ident]; in vm_alloc_memseg()
703 if (seg->object != NULL) { in vm_alloc_memseg()
704 if (seg->len == len && seg->sysmem == sysmem) in vm_alloc_memseg()
714 seg->len = len; in vm_alloc_memseg()
715 seg->object = obj; in vm_alloc_memseg()
716 seg->sysmem = sysmem; in vm_alloc_memseg()
717 return (0); in vm_alloc_memseg()
726 sx_assert(&vm->mem_segs_lock, SX_LOCKED); in vm_get_memseg()
728 if (ident < 0 || ident >= VM_MAX_MEMSEGS) in vm_get_memseg()
731 seg = &vm->mem_segs[ident]; in vm_get_memseg()
733 *len = seg->len; in vm_get_memseg()
735 *sysmem = seg->sysmem; in vm_get_memseg()
737 *objptr = seg->object; in vm_get_memseg()
738 return (0); in vm_get_memseg()
746 KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS, in vm_free_memseg()
749 seg = &vm->mem_segs[ident]; in vm_free_memseg()
750 if (seg->object != NULL) { in vm_free_memseg()
751 vm_object_deallocate(seg->object); in vm_free_memseg()
757 vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first, in vm_mmap_memseg() argument
765 if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0) in vm_mmap_memseg()
771 if (segid < 0 || segid >= VM_MAX_MEMSEGS) in vm_mmap_memseg()
774 seg = &vm->mem_segs[segid]; in vm_mmap_memseg()
775 if (seg->object == NULL) in vm_mmap_memseg()
779 if (first < 0 || first >= last || last > seg->len) in vm_mmap_memseg()
782 if ((gpa | first | last) & PAGE_MASK) in vm_mmap_memseg()
786 for (i = 0; i < VM_MAX_MEMMAPS; i++) { in vm_mmap_memseg()
787 m = &vm->mem_maps[i]; in vm_mmap_memseg()
788 if (m->len == 0) { in vm_mmap_memseg()
797 error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa, in vm_mmap_memseg()
798 len, 0, VMFS_NO_SPACE, prot, prot, 0); in vm_mmap_memseg()
802 vm_object_reference(seg->object); in vm_mmap_memseg()
805 error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len, in vm_mmap_memseg()
808 vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len); in vm_mmap_memseg()
814 map->gpa = gpa; in vm_mmap_memseg()
815 map->len = len; in vm_mmap_memseg()
816 map->segoff = first; in vm_mmap_memseg()
817 map->segid = segid; in vm_mmap_memseg()
818 map->prot = prot; in vm_mmap_memseg()
819 map->flags = flags; in vm_mmap_memseg()
820 return (0); in vm_mmap_memseg()
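Combined with vm_alloc_memseg() above, the expected calling sequence is roughly the sketch below. The trailing parameters of vm_mmap_memseg() are elided in this listing, so their names and order (len, prot, flags) are assumptions:

    /* Sketch: back the guest with a 128 MB sysmem segment mapped at guest PA 0. */
    error = vm_alloc_memseg(vm, 0 /* ident */, 128 * 1024 * 1024, true /* sysmem */);
    if (error == 0)
            error = vm_mmap_memseg(vm, 0 /* gpa */, 0 /* segid */, 0 /* first */,
                128 * 1024 * 1024 /* len */, VM_PROT_ALL, 0 /* flags */);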
824 vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len) in vm_munmap_memseg() argument
829 for (i = 0; i < VM_MAX_MEMMAPS; i++) { in vm_munmap_memseg()
830 m = &vm->mem_maps[i]; in vm_munmap_memseg()
831 if (m->gpa == gpa && m->len == len) { in vm_munmap_memseg()
833 return (0); in vm_munmap_memseg()
841 vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid, in vm_mmap_getnext() argument
848 for (i = 0; i < VM_MAX_MEMMAPS; i++) { in vm_mmap_getnext()
849 mm = &vm->mem_maps[i]; in vm_mmap_getnext()
850 if (mm->len == 0 || mm->gpa < *gpa) in vm_mmap_getnext()
852 if (mmnext == NULL || mm->gpa < mmnext->gpa) in vm_mmap_getnext()
857 *gpa = mmnext->gpa; in vm_mmap_getnext()
859 *segid = mmnext->segid; in vm_mmap_getnext()
861 *segoff = mmnext->segoff; in vm_mmap_getnext()
863 *len = mmnext->len; in vm_mmap_getnext()
865 *prot = mmnext->prot; in vm_mmap_getnext()
867 *flags = mmnext->flags; in vm_mmap_getnext()
868 return (0); in vm_mmap_getnext()
880 mm = &vm->mem_maps[ident]; in vm_free_memmap()
881 if (mm->len) { in vm_free_memmap()
882 error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa, in vm_free_memmap()
883 mm->gpa + mm->len); in vm_free_memmap()
894 if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem) in sysmem_mapping()
907 maxaddr = 0; in vmm_sysmem_maxaddr()
908 for (i = 0; i < VM_MAX_MEMMAPS; i++) { in vmm_sysmem_maxaddr()
909 mm = &vm->mem_maps[i]; in vmm_sysmem_maxaddr()
911 if (maxaddr < mm->gpa + mm->len) in vmm_sysmem_maxaddr()
912 maxaddr = mm->gpa + mm->len; in vmm_sysmem_maxaddr()
920 uint64_t gla, int prot, uint64_t *gpa, int *is_fault) in vm_gla2gpa_nofault() argument
923 vmmops_gla2gpa(vcpu->cookie, paging, gla, prot, gpa, is_fault); in vm_gla2gpa_nofault()
924 return (0); in vm_gla2gpa_nofault()
930 *rval = 0; in vmm_reg_raz()
931 return (0); in vmm_reg_raz()
938 return (0); in vmm_reg_read_arg()
944 return (0); in vmm_reg_wi()
982 * They are all in the op0=3, op1=0, CRn=0, CRm={0..7} space.
986 (0 << ISS_MSR_OP1_SHIFT) |
987 (0 << ISS_MSR_CRn_SHIFT) |
988 (0 << ISS_MSR_CRm_SHIFT),
990 ISS_MSR_CRn_MASK | (0x8 << ISS_MSR_CRm_SHIFT),
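A worked example of how this table entry is assumed to match (the comparison itself is the loop in vm_handle_reg_emul() below). The mask covers op0, op1, CRn and only bit 3 of CRm, so any CRm in {0..7} with op0=3, op1=0, CRn=0 selects the entry:

    /*
     * Example: a trapped MRS of ID_AA64PFR0_EL1 encodes op0=3, op1=0,
     * CRn=0, CRm=4.  Bit 3 of CRm is clear, so
     *
     *      (iss & esr_mask) == esr_iss
     *
     * holds and the handler runs, while CRm=8..15 (bit 3 set) falls outside
     * this ID-register range and does not match.
     */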
1012 for (i = 0; i < nitems(vm->special_reg); i++) { in vm_register_reg_handler()
1013 if (vm->special_reg[i].esr_iss == 0 && in vm_register_reg_handler()
1014 vm->special_reg[i].esr_mask == 0) { in vm_register_reg_handler()
1015 vm->special_reg[i].esr_iss = iss; in vm_register_reg_handler()
1016 vm->special_reg[i].esr_mask = mask; in vm_register_reg_handler()
1017 vm->special_reg[i].reg_read = reg_read; in vm_register_reg_handler()
1018 vm->special_reg[i].reg_write = reg_write; in vm_register_reg_handler()
1019 vm->special_reg[i].arg = arg; in vm_register_reg_handler()
1032 for (i = 0; i < nitems(vm->special_reg); i++) { in vm_deregister_reg_handler()
1033 if (vm->special_reg[i].esr_iss == iss && in vm_deregister_reg_handler()
1034 vm->special_reg[i].esr_mask == mask) { in vm_deregister_reg_handler()
1035 memset(&vm->special_reg[i], 0, in vm_deregister_reg_handler()
1036 sizeof(vm->special_reg[i])); in vm_deregister_reg_handler()
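A hedged registration sketch built from the fields visible above (esr_iss, esr_mask, reg_read, reg_write, arg). The callback prototypes are assumed from vmm_reg_raz()/vmm_reg_read_arg(), and every "mydev" name is hypothetical:

    /* Hypothetical device exposing one read-as-zero, write-ignored register. */
    static int
    mydev_reg_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
    {
            *rval = 0;
            return (0);
    }

    static int
    mydev_reg_write(struct vcpu *vcpu, uint64_t wval, void *arg)
    {
            return (0);
    }

    ...
    vm_register_reg_handler(vm, iss, mask, mydev_reg_read, mydev_reg_write, mydev_softc);
    ...
    vm_deregister_reg_handler(vm, iss, mask);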
1053 vm = vcpu->vm; in vm_handle_reg_emul()
1054 vme = &vcpu->exitinfo; in vm_handle_reg_emul()
1055 vre = &vme->u.reg_emul.vre; in vm_handle_reg_emul()
1057 for (i = 0; i < nitems(vm->special_reg); i++) { in vm_handle_reg_emul()
1058 if (vm->special_reg[i].esr_iss == 0 && in vm_handle_reg_emul()
1059 vm->special_reg[i].esr_mask == 0) in vm_handle_reg_emul()
1062 if ((vre->inst_syndrome & vm->special_reg[i].esr_mask) == in vm_handle_reg_emul()
1063 vm->special_reg[i].esr_iss) { in vm_handle_reg_emul()
1065 vm->special_reg[i].reg_read, in vm_handle_reg_emul()
1066 vm->special_reg[i].reg_write, in vm_handle_reg_emul()
1067 vm->special_reg[i].arg); in vm_handle_reg_emul()
1068 if (rv == 0) { in vm_handle_reg_emul()
1074 for (i = 0; i < nitems(vmm_special_regs); i++) { in vm_handle_reg_emul()
1075 if ((vre->inst_syndrome & vmm_special_regs[i].esr_mask) == in vm_handle_reg_emul()
1081 if (rv == 0) { in vm_handle_reg_emul()
1090 return (0); in vm_handle_reg_emul()
1099 for (i = 0; i < nitems(vm->mmio_region); i++) { in vm_register_inst_handler()
1100 if (vm->mmio_region[i].start == 0 && in vm_register_inst_handler()
1101 vm->mmio_region[i].end == 0) { in vm_register_inst_handler()
1102 vm->mmio_region[i].start = start; in vm_register_inst_handler()
1103 vm->mmio_region[i].end = start + size; in vm_register_inst_handler()
1104 vm->mmio_region[i].read = mmio_read; in vm_register_inst_handler()
1105 vm->mmio_region[i].write = mmio_write; in vm_register_inst_handler()
1118 for (i = 0; i < nitems(vm->mmio_region); i++) { in vm_deregister_inst_handler()
1119 if (vm->mmio_region[i].start == start && in vm_deregister_inst_handler()
1120 vm->mmio_region[i].end == start + size) { in vm_deregister_inst_handler()
1121 memset(&vm->mmio_region[i], 0, in vm_deregister_inst_handler()
1122 sizeof(vm->mmio_region[i])); in vm_deregister_inst_handler()
1127 panic("%s: Invalid MMIO region: %lx - %lx", __func__, start, in vm_deregister_inst_handler()
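The MMIO handlers mirror the register handlers: a region is claimed as [start, start + size) and must be removed with exactly the same start/size pair, otherwise the panic above fires. A hedged sketch with hypothetical device names:

    /* Hypothetical device with a 4 KB register window at guest PA 0x2f000000. */
    vm_register_inst_handler(vm, 0x2f000000, PAGE_SIZE,
        mydev_mmio_read, mydev_mmio_write);
    ...
    vm_deregister_inst_handler(vm, 0x2f000000, PAGE_SIZE);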
1143 vm = vcpu->vm; in vm_handle_inst_emul()
1144 hyp = vm->cookie; in vm_handle_inst_emul()
1145 if (!hyp->vgic_attached) in vm_handle_inst_emul()
1148 vme = &vcpu->exitinfo; in vm_handle_inst_emul()
1149 vie = &vme->u.inst_emul.vie; in vm_handle_inst_emul()
1150 paging = &vme->u.inst_emul.paging; in vm_handle_inst_emul()
1152 fault_ipa = vme->u.inst_emul.gpa; in vm_handle_inst_emul()
1155 for (i = 0; i < nitems(vm->mmio_region); i++) { in vm_handle_inst_emul()
1156 if (vm->mmio_region[i].start <= fault_ipa && in vm_handle_inst_emul()
1157 vm->mmio_region[i].end > fault_ipa) { in vm_handle_inst_emul()
1158 vmr = &vm->mmio_region[i]; in vm_handle_inst_emul()
1166 vmr->read, vmr->write, retu); in vm_handle_inst_emul()
1171 return (0); in vm_handle_inst_emul()
1182 if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) { in vm_suspend()
1184 vm->suspend, how); in vm_suspend()
1193 for (i = 0; i < vm->maxcpus; i++) { in vm_suspend()
1194 if (CPU_ISSET(i, &vm->active_cpus)) in vm_suspend()
1198 return (0); in vm_suspend()
1204 struct vm *vm = vcpu->vm; in vm_exit_suspended()
1207 KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST, in vm_exit_suspended()
1208 ("vm_exit_suspended: invalid suspend type %d", vm->suspend)); in vm_exit_suspended()
1211 vmexit->pc = pc; in vm_exit_suspended()
1212 vmexit->inst_length = 4; in vm_exit_suspended()
1213 vmexit->exitcode = VM_EXITCODE_SUSPENDED; in vm_exit_suspended()
1214 vmexit->u.suspended.how = vm->suspend; in vm_exit_suspended()
1223 vmexit->pc = pc; in vm_exit_debug()
1224 vmexit->inst_length = 4; in vm_exit_debug()
1225 vmexit->exitcode = VM_EXITCODE_DEBUG; in vm_exit_debug()
1231 struct vm *vm = vcpu->vm; in vm_activate_cpu()
1233 if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_activate_cpu()
1236 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus); in vm_activate_cpu()
1237 return (0); in vm_activate_cpu()
1245 vm->debug_cpus = vm->active_cpus; in vm_suspend_cpu()
1246 for (int i = 0; i < vm->maxcpus; i++) { in vm_suspend_cpu()
1247 if (CPU_ISSET(i, &vm->active_cpus)) in vm_suspend_cpu()
1251 if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_suspend_cpu()
1254 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_suspend_cpu()
1257 return (0); in vm_suspend_cpu()
1265 CPU_ZERO(&vm->debug_cpus); in vm_resume_cpu()
1267 if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus)) in vm_resume_cpu()
1270 CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_resume_cpu()
1272 return (0); in vm_resume_cpu()
1279 return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus)); in vcpu_debugged()
1286 return (vm->active_cpus); in vm_active_cpus()
1293 return (vm->debug_cpus); in vm_debug_cpus()
1300 return (vm->suspended_cpus); in vm_suspended_cpus()
1308 return (vcpu->stats); in vcpu_stats()
1314 * - If the vcpu thread is sleeping then it is woken up.
1315 * - If the vcpu is running on a different host_cpu then an IPI will be directed
1323 hostcpu = vcpu->hostcpu; in vcpu_notify_event_locked()
1324 if (vcpu->state == VCPU_RUNNING) { in vcpu_notify_event_locked()
1338 "with hostcpu %d", vcpu->state, hostcpu)); in vcpu_notify_event_locked()
1339 if (vcpu->state == VCPU_SLEEPING) in vcpu_notify_event_locked()
1357 vfp_save_state(curthread, curthread->td_pcb); in restore_guest_fpustate()
1358 /* Ensure the VFP state will be re-loaded when exiting the guest */ in restore_guest_fpustate()
1363 vfp_restore(vcpu->guestfpu); in restore_guest_fpustate()
1381 vfp_store(vcpu->guestfpu); in save_guest_fpustate()
1401 while (vcpu->state != VCPU_IDLE) { in vcpu_set_state_locked()
1403 msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz); in vcpu_set_state_locked()
1406 KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from " in vcpu_set_state_locked()
1410 if (vcpu->state == VCPU_RUNNING) { in vcpu_set_state_locked()
1411 KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d " in vcpu_set_state_locked()
1412 "mismatch for running vcpu", curcpu, vcpu->hostcpu)); in vcpu_set_state_locked()
1414 KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a " in vcpu_set_state_locked()
1415 "vcpu that is not running", vcpu->hostcpu)); in vcpu_set_state_locked()
1420 * IDLE -> FROZEN -> IDLE in vcpu_set_state_locked()
1421 * FROZEN -> RUNNING -> FROZEN in vcpu_set_state_locked()
1422 * FROZEN -> SLEEPING -> FROZEN in vcpu_set_state_locked()
1424 switch (vcpu->state) { in vcpu_set_state_locked()
1441 vcpu->state = newstate; in vcpu_set_state_locked()
1443 vcpu->hostcpu = curcpu; in vcpu_set_state_locked()
1445 vcpu->hostcpu = NOCPU; in vcpu_set_state_locked()
1448 wakeup(&vcpu->state); in vcpu_set_state_locked()
1450 return (0); in vcpu_set_state_locked()
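A hedged sketch of the calling pattern implied by the transition diagram above, assuming vcpu_set_state() is the lock-taking wrapper around vcpu_set_state_locked() (as vcpu_require_state() below suggests) and that its third argument is the "from idle" flag:

    vcpu_set_state(vcpu, VCPU_FROZEN, true);    /* IDLE    -> FROZEN  (ioctl path)     */
    vcpu_set_state(vcpu, VCPU_RUNNING, false);  /* FROZEN  -> RUNNING (entering guest) */
    vcpu_set_state(vcpu, VCPU_FROZEN, false);   /* RUNNING -> FROZEN  (after exit)     */
    vcpu_set_state(vcpu, VCPU_IDLE, false);     /* FROZEN  -> IDLE    (release vcpu)   */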
1458 if ((error = vcpu_set_state(vcpu, newstate, false)) != 0) in vcpu_require_state()
1467 if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0) in vcpu_require_state_locked()
1474 if (type < 0 || type >= VM_CAP_MAX) in vm_get_capability()
1477 return (vmmops_getcap(vcpu->cookie, type, retval)); in vm_get_capability()
1483 if (type < 0 || type >= VM_CAP_MAX) in vm_set_capability()
1486 return (vmmops_setcap(vcpu->cookie, type, val)); in vm_set_capability()
1492 return (vcpu->vm); in vcpu_vm()
1498 return (vcpu->vcpuid); in vcpu_vcpuid()
1504 return (vcpu->cookie); in vcpu_get_cookie()
1510 return (vm->vcpu[vcpuid]); in vm_vcpu()
1531 state = vcpu->state; in vcpu_get_state()
1533 *hostcpu = vcpu->hostcpu; in vcpu_get_state()
1540 _vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot, in _vm_gpa_hold() argument
1547 pageoff = gpa & PAGE_MASK; in _vm_gpa_hold()
1548 if (len > PAGE_SIZE - pageoff) in _vm_gpa_hold()
1549 panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len); in _vm_gpa_hold()
1551 count = 0; in _vm_gpa_hold()
1552 for (i = 0; i < VM_MAX_MEMMAPS; i++) { in _vm_gpa_hold()
1553 mm = &vm->mem_maps[i]; in _vm_gpa_hold()
1554 if (sysmem_mapping(vm, mm) && gpa >= mm->gpa && in _vm_gpa_hold()
1555 gpa < mm->gpa + mm->len) { in _vm_gpa_hold()
1556 count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map, in _vm_gpa_hold()
1557 trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1); in _vm_gpa_hold()
1572 vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot, in vm_gpa_hold() argument
1584 return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie)); in vm_gpa_hold()
1588 vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot, in vm_gpa_hold_global() argument
1591 sx_assert(&vm->mem_segs_lock, SX_LOCKED); in vm_gpa_hold_global()
1592 return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie)); in vm_gpa_hold_global()
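A hedged usage sketch for the hold functions above, assuming vm_gpa_hold() returns a host mapping of the page (NULL on failure) and that vm_gpa_release(), which does not appear in this listing, is its counterpart:

    void *cookie, *hva;

    /* Wire one guest page for writing, touch it, then drop the hold. */
    hva = vm_gpa_hold(vcpu, gpa, sizeof(uint64_t), VM_PROT_WRITE, &cookie);
    if (hva != NULL) {
            /* ... read or write the guest page through 'hva' ... */
            vm_gpa_release(cookie);     /* assumed counterpart */
    }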
1610 return (vmmops_getreg(vcpu->cookie, reg, retval)); in vm_get_register()
1620 error = vmmops_setreg(vcpu->cookie, reg, val); in vm_set_register()
1624 vcpu->nextpc = val; in vm_set_register()
1626 return (0); in vm_set_register()
1632 return (vm->cookie); in vm_get_cookie()
1638 return (vmmops_exception(vcpu->cookie, esr, far)); in vm_inject_exception()
1644 return (vgic_attach_to_vm(vm->cookie, descr)); in vm_attach_vgic()
1650 return (vgic_inject_irq(vm->cookie, -1, irq, true)); in vm_assert_irq()
1656 return (vgic_inject_irq(vm->cookie, -1, irq, false)); in vm_deassert_irq()
1664 return (vgic_inject_msi(vm->cookie, msg, addr)); in vm_raise_msi()
1675 if ((hypctx->tf.tf_esr & ESR_ELx_ISS_MASK) != 0) in vm_handle_smccc_call()
1678 vme->exitcode = VM_EXITCODE_SMCCC; in vm_handle_smccc_call()
1679 vme->u.smccc_call.func_id = hypctx->tf.tf_x[0]; in vm_handle_smccc_call()
1680 for (i = 0; i < nitems(vme->u.smccc_call.args); i++) in vm_handle_smccc_call()
1681 vme->u.smccc_call.args[i] = hypctx->tf.tf_x[i + 1]; in vm_handle_smccc_call()
1684 return (0); in vm_handle_smccc_call()
1692 if (vgic_has_pending_irq(vcpu->cookie)) in vm_handle_wfi()
1703 msleep_spin(vcpu, &vcpu->mtx, "vmidle", hz); in vm_handle_wfi()
1709 return (0); in vm_handle_wfi()
1715 struct vm *vm = vcpu->vm; in vm_handle_paging()
1722 vme = &vcpu->exitinfo; in vm_handle_paging()
1724 pmap = vmspace_pmap(vcpu->vm->vmspace); in vm_handle_paging()
1725 addr = vme->u.paging.gpa; in vm_handle_paging()
1726 esr = vme->u.paging.esr; in vm_handle_paging()
1730 return (0); in vm_handle_paging()
1741 map = &vm->vmspace->vm_map; in vm_handle_paging()
1742 rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL); in vm_handle_paging()
1746 return (0); in vm_handle_paging()
1752 struct vm *vm = vcpu->vm; in vm_handle_suspend()
1756 error = 0; in vm_handle_suspend()
1759 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus); in vm_handle_suspend()
1769 while (error == 0) { in vm_handle_suspend()
1770 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) in vm_handle_suspend()
1774 msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz); in vm_handle_suspend()
1787 for (i = 0; i < vm->maxcpus; i++) { in vm_handle_suspend()
1788 if (CPU_ISSET(i, &vm->suspended_cpus)) { in vm_handle_suspend()
1800 struct vm *vm = vcpu->vm; in vm_run()
1807 vcpuid = vcpu->vcpuid; in vm_run()
1809 if (!CPU_ISSET(vcpuid, &vm->active_cpus)) in vm_run()
1812 if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) in vm_run()
1815 pmap = vmspace_pmap(vm->vmspace); in vm_run()
1816 vme = &vcpu->exitinfo; in vm_run()
1818 evinfo.sptr = &vm->suspend; in vm_run()
1826 error = vmmops_run(vcpu->cookie, vcpu->nextpc, pmap, &evinfo); in vm_run()
1833 if (error == 0) { in vm_run()
1835 switch (vme->exitcode) { in vm_run()
1837 vcpu->nextpc = vme->pc + vme->inst_length; in vm_run()
1842 vcpu->nextpc = vme->pc + vme->inst_length; in vm_run()
1851 vcpu->nextpc = vme->pc; in vm_run()
1860 vcpu->nextpc = vme->pc + vme->inst_length; in vm_run()
1865 vcpu->nextpc = vme->pc; in vm_run()
1870 vcpu->nextpc = vme->pc; in vm_run()
1876 vcpu->nextpc = vme->pc; in vm_run()
1882 if (error == 0 && retu == false) in vm_run()