Lines matching +full:gpa +full:- +full:0 in FreeBSD's sys/amd64/vmm/vmm.c (the bhyve VMM core); non-matching source lines are omitted, so the excerpts below are discontinuous.
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
109 void *cookie; /* (i) cpu-specific data */
128 #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
129 #define vcpu_lock_destroy(v) mtx_destroy(&((v)->mtx))
130 #define vcpu_lock(v) mtx_lock_spin(&((v)->mtx))
131 #define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx))
132 #define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED)
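The vcpu_lock_* macros above wrap the per-vcpu spin mutex so call sites never name the mtx field directly; changing the lock class later means touching one place. A minimal userspace sketch of the same wrapper idiom, with a C11 atomic_flag standing in for the kernel's struct mtx (all names here are illustrative, not vmm.c's API):

#include <stdatomic.h>

struct uvcpu {
	atomic_flag mtx;	/* stand-in for the kernel's struct mtx */
	int state;
};

/* Same idiom as vmm.c: one macro per operation, all keyed on ->mtx. */
#define uvcpu_lock_init(v)	atomic_flag_clear(&(v)->mtx)
#define uvcpu_lock(v)		while (atomic_flag_test_and_set_explicit( \
				    &(v)->mtx, memory_order_acquire))
#define uvcpu_unlock(v)		atomic_flag_clear_explicit(&(v)->mtx, \
				    memory_order_release)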
146 void *cookie; /* (i) cpu-specific data */
147 void *iommu; /* (x) iommu-specific data */
179 VCPU_CTR0((vcpu)->vm, (vcpu)->vcpuid, format)
182 VCPU_CTR1((vcpu)->vm, (vcpu)->vcpuid, format, p1)
185 VCPU_CTR2((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2)
188 VCPU_CTR3((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3)
191 VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4)
258 &halt_detection_enabled, 0,
262 SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
267 &trace_guest_exceptions, 0,
271 SYSCTL_INT(_hw_vmm, OID_AUTO, trap_wbinvd, CTLFLAG_RDTUN, &trap_wbinvd, 0,
272 "WBINVD triggers a VM-exit");
276 &vm_maxcpu, 0, "Maximum number of vCPUs");
305 * counts as well as range of vpid values for VT-x and by the capacity
307 * vmx.c requires 'vm_maxcpu + 1 <= 0xffff', hence the '- 1' below.
309 #define VM_MAXCPU MIN(0xffff - 1, CPU_SETSIZE)
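The bound reads as: take the 16-bit vpid limit minus one, but never more than what a cpuset_t can track. A standalone check of that arithmetic; the CPU_SETSIZE of 256 is an assumed placeholder, since the real value is configuration-dependent:

#include <assert.h>
#include <stdio.h>

#define CPU_SETSIZE 256			/* assumption, not the real kernel value */
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define VM_MAXCPU MIN(0xffff - 1, CPU_SETSIZE)

int main(void)
{
	/* vmx.c's constraint, per the comment above. */
	static_assert(VM_MAXCPU + 1 <= 0xffff, "vpid range exceeded");
	printf("VM_MAXCPU = %d\n", VM_MAXCPU);	/* 256: CPU_SETSIZE binds here */
	return (0);
}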
334 vmmops_vlapic_cleanup(vcpu->vlapic); in vcpu_cleanup()
335 vmmops_vcpu_cleanup(vcpu->cookie); in vcpu_cleanup()
336 vcpu->cookie = NULL; in vcpu_cleanup()
338 vmm_stat_free(vcpu->stats); in vcpu_cleanup()
339 fpu_save_area_free(vcpu->guestfpu); in vcpu_cleanup()
350 KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus, in vcpu_alloc()
355 vcpu->state = VCPU_IDLE; in vcpu_alloc()
356 vcpu->hostcpu = NOCPU; in vcpu_alloc()
357 vcpu->vcpuid = vcpu_id; in vcpu_alloc()
358 vcpu->vm = vm; in vcpu_alloc()
359 vcpu->guestfpu = fpu_save_area_alloc(); in vcpu_alloc()
360 vcpu->stats = vmm_stat_alloc(); in vcpu_alloc()
361 vcpu->tsc_offset = 0; in vcpu_alloc()
368 vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid); in vcpu_init()
369 vcpu->vlapic = vmmops_vlapic_init(vcpu->cookie); in vcpu_init()
371 vcpu->reqidle = 0; in vcpu_init()
372 vcpu->exitintinfo = 0; in vcpu_init()
373 vcpu->nmi_pending = 0; in vcpu_init()
374 vcpu->extint_pending = 0; in vcpu_init()
375 vcpu->exception_pending = 0; in vcpu_init()
376 vcpu->guest_xcr0 = XFEATURE_ENABLED_X87; in vcpu_init()
377 fpu_save_area_reset(vcpu->guestfpu); in vcpu_init()
378 vmm_stat_init(vcpu->stats); in vcpu_init()
397 return (&vcpu->exitinfo); in vm_exitinfo()
403 return (&vcpu->exitinfo_cpuset); in vm_exitinfo_cpuset()
419 if (vm_maxcpu == 0) in vmm_init()
426 if (vmm_ipinum < 0) in vmm_init()
444 if (error != 0) in vmm_handler()
447 if (error == 0) in vmm_handler()
458 if (error == 0) { in vmm_handler()
466 * Something bad happened - prevent new in vmm_handler()
470 vmm_initialized = 0; in vmm_handler()
473 error = 0; in vmm_handler()
477 error = 0; in vmm_handler()
492 * - VT-x initialization requires smp_rendezvous() and therefore must happen
494 * - vmm device initialization requires an initialized devfs.
502 vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace)); in vm_init()
503 vm->iommu = NULL; in vm_init()
504 vm->vioapic = vioapic_init(vm); in vm_init()
505 vm->vhpet = vhpet_init(vm); in vm_init()
506 vm->vatpic = vatpic_init(vm); in vm_init()
507 vm->vatpit = vatpit_init(vm); in vm_init()
508 vm->vpmtmr = vpmtmr_init(vm); in vm_init()
510 vm->vrtc = vrtc_init(vm); in vm_init()
512 CPU_ZERO(&vm->active_cpus); in vm_init()
513 CPU_ZERO(&vm->debug_cpus); in vm_init()
514 CPU_ZERO(&vm->startup_cpus); in vm_init()
516 vm->suspend = 0; in vm_init()
517 CPU_ZERO(&vm->suspended_cpus); in vm_init()
520 for (int i = 0; i < vm->maxcpus; i++) { in vm_init()
521 if (vm->vcpu[i] != NULL) in vm_init()
522 vcpu_init(vm->vcpu[i]); in vm_init()
530 sx_xlock(&vm->vcpus_init_lock); in vm_disable_vcpu_creation()
531 vm->dying = true; in vm_disable_vcpu_creation()
532 sx_xunlock(&vm->vcpus_init_lock); in vm_disable_vcpu_creation()
540 if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm)) in vm_alloc_vcpu()
544 atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]); in vm_alloc_vcpu()
548 sx_xlock(&vm->vcpus_init_lock); in vm_alloc_vcpu()
549 vcpu = vm->vcpu[vcpuid]; in vm_alloc_vcpu()
550 if (vcpu == NULL && !vm->dying) { in vm_alloc_vcpu()
558 atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid], in vm_alloc_vcpu()
561 sx_xunlock(&vm->vcpus_init_lock); in vm_alloc_vcpu()
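vm_alloc_vcpu() is classic double-checked locking: a lock-free fast path reads the slot with acquire semantics, and only the allocating slow path takes vcpus_init_lock, publishing the fully constructed vcpu with a release store. A compact sketch of the same pattern with C11 atomics and a pthread mutex (toy types, not the kernel API; error handling omitted):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct uvcpu { int id; };

static _Atomic(struct uvcpu *) vcpu_slot[16];
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

struct uvcpu *
alloc_vcpu(int id)	/* id assumed in range */
{
	/* Fast path: acquire load pairs with the release store below. */
	struct uvcpu *v = atomic_load_explicit(&vcpu_slot[id],
	    memory_order_acquire);
	if (v != NULL)
		return (v);

	pthread_mutex_lock(&init_lock);
	v = atomic_load_explicit(&vcpu_slot[id], memory_order_relaxed);
	if (v == NULL) {
		v = malloc(sizeof(*v));
		v->id = id;			/* fully construct first ... */
		atomic_store_explicit(&vcpu_slot[id], v,
		    memory_order_release);	/* ... then publish */
	}
	pthread_mutex_unlock(&init_lock);
	return (v);
}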
568 sx_slock(&vm->vcpus_init_lock); in vm_slock_vcpus()
574 sx_unlock(&vm->vcpus_init_lock); in vm_unlock_vcpus()
600 vmspace = vmmops_vmspace_alloc(0, VM_MAXUSER_ADDRESS_LA48); in vm_create()
605 strcpy(vm->name, name); in vm_create()
606 vm->vmspace = vmspace; in vm_create()
607 vm_mem_init(&vm->mem); in vm_create()
608 mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF); in vm_create()
609 sx_init(&vm->vcpus_init_lock, "vm vcpus"); in vm_create()
610 vm->vcpu = malloc(sizeof(*vm->vcpu) * vm_maxcpu, M_VM, M_WAITOK | in vm_create()
613 vm->sockets = 1; in vm_create()
614 vm->cores = cores_per_package; /* XXX backwards compatibility */ in vm_create()
615 vm->threads = threads_per_core; /* XXX backwards compatibility */ in vm_create()
616 vm->maxcpus = vm_maxcpu; in vm_create()
621 return (0); in vm_create()
628 *sockets = vm->sockets; in vm_get_topology()
629 *cores = vm->cores; in vm_get_topology()
630 *threads = vm->threads; in vm_get_topology()
631 *maxcpus = vm->maxcpus; in vm_get_topology()
637 return (vm->maxcpus); in vm_get_maxcpus()
645 if ((sockets * cores * threads) > vm->maxcpus) in vm_set_topology()
647 vm->sockets = sockets; in vm_set_topology()
648 vm->cores = cores; in vm_set_topology()
649 vm->threads = threads; in vm_set_topology()
650 	return (0); in vm_set_topology()
663 if (vm->iommu != NULL) in vm_cleanup()
664 iommu_destroy_domain(vm->iommu); in vm_cleanup()
667 vrtc_cleanup(vm->vrtc); in vm_cleanup()
669 vrtc_reset(vm->vrtc); in vm_cleanup()
670 vpmtmr_cleanup(vm->vpmtmr); in vm_cleanup()
671 vatpit_cleanup(vm->vatpit); in vm_cleanup()
672 vhpet_cleanup(vm->vhpet); in vm_cleanup()
673 vatpic_cleanup(vm->vatpic); in vm_cleanup()
674 vioapic_cleanup(vm->vioapic); in vm_cleanup()
676 for (int i = 0; i < vm->maxcpus; i++) { in vm_cleanup()
677 if (vm->vcpu[i] != NULL) in vm_cleanup()
678 vcpu_cleanup(vm->vcpu[i], destroy); in vm_cleanup()
681 vmmops_cleanup(vm->cookie); in vm_cleanup()
688 vmmops_vmspace_free(vm->vmspace); in vm_cleanup()
689 vm->vmspace = NULL; in vm_cleanup()
691 free(vm->vcpu, M_VM); in vm_cleanup()
692 sx_destroy(&vm->vcpus_init_lock); in vm_cleanup()
693 mtx_destroy(&vm->rendezvous_mtx); in vm_cleanup()
712 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { in vm_reinit()
715 error = 0; in vm_reinit()
726 return (vm->name); in vm_name()
730 vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa) in vm_map_mmio() argument
734 if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL) in vm_map_mmio()
737 return (0); in vm_map_mmio()
741 vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len) in vm_unmap_mmio() argument
744 vmm_mmio_free(vm->vmspace, gpa, len); in vm_unmap_mmio()
745 return (0); in vm_unmap_mmio()
751 vm_paddr_t gpa, hpa; in vm_iommu_map() local
755 sx_assert(&vm->mem.mem_segs_lock, SX_LOCKED); in vm_iommu_map()
757 for (i = 0; i < VM_MAX_MEMMAPS; i++) { in vm_iommu_map()
761 mm = &vm->mem.mem_maps[i]; in vm_iommu_map()
762 KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0, in vm_iommu_map()
764 mm->gpa, mm->len, mm->flags)); in vm_iommu_map()
765 if ((mm->flags & VM_MEMMAP_F_WIRED) == 0) in vm_iommu_map()
767 mm->flags |= VM_MEMMAP_F_IOMMU; in vm_iommu_map()
769 for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) { in vm_iommu_map()
770 hpa = pmap_extract(vmspace_pmap(vm->vmspace), gpa); in vm_iommu_map()
775 * Because we are in pass-through mode, the in vm_iommu_map()
785 ("vm_iommu_map: vm %p gpa %jx hpa %jx not wired", in vm_iommu_map()
786 vm, (uintmax_t)gpa, (uintmax_t)hpa)); in vm_iommu_map()
788 iommu_create_mapping(vm->iommu, gpa, hpa, PAGE_SIZE); in vm_iommu_map()
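The mapping loop above is page-granular: for every wired page of the memory map it asks the pmap for the host physical address and installs a page-sized entry in the IOMMU domain. The loop's shape in isolation, with hypothetical translate/map callbacks standing in for pmap_extract() and iommu_create_mapping():

#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE 4096

typedef uint64_t (*extract_fn)(uint64_t gpa);	/* pmap_extract stand-in */
typedef void (*map_fn)(void *dom, uint64_t gpa, uint64_t hpa, size_t len);

/* Map [gpa, gpa + len) into the IOMMU domain one page at a time. */
static void
iommu_map_range(void *dom, uint64_t gpa, size_t len,
    extract_fn extract, map_fn map)
{
	for (uint64_t cur = gpa; cur < gpa + len; cur += PAGE_SIZE) {
		uint64_t hpa = extract(cur);	/* wired, hence present */
		map(dom, cur, hpa, PAGE_SIZE);
	}
}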
798 vm_paddr_t gpa; in vm_iommu_unmap() local
802 sx_assert(&vm->mem.mem_segs_lock, SX_LOCKED); in vm_iommu_unmap()
804 for (i = 0; i < VM_MAX_MEMMAPS; i++) { in vm_iommu_unmap()
808 mm = &vm->mem.mem_maps[i]; in vm_iommu_unmap()
809 if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0) in vm_iommu_unmap()
811 mm->flags &= ~VM_MEMMAP_F_IOMMU; in vm_iommu_unmap()
812 KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0, in vm_iommu_unmap()
814 mm->gpa, mm->len, mm->flags)); in vm_iommu_unmap()
816 for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) { in vm_iommu_unmap()
818 vmspace_pmap(vm->vmspace), gpa))), in vm_iommu_unmap()
819 ("vm_iommu_unmap: vm %p gpa %jx not wired", in vm_iommu_unmap()
820 vm, (uintmax_t)gpa)); in vm_iommu_unmap()
821 iommu_remove_mapping(vm->iommu, gpa, PAGE_SIZE); in vm_iommu_unmap()
829 iommu_invalidate_tlb(vm->iommu); in vm_iommu_unmap()
841 if (ppt_assigned_devices(vm) == 0) in vm_unassign_pptdev()
844 return (0); in vm_unassign_pptdev()
853 /* Set up the IOMMU to do the 'gpa' to 'hpa' translation */ in vm_assign_pptdev()
854 if (ppt_assigned_devices(vm) == 0) { in vm_assign_pptdev()
855 KASSERT(vm->iommu == NULL, in vm_assign_pptdev()
858 vm->iommu = iommu_create_domain(maxaddr); in vm_assign_pptdev()
859 if (vm->iommu == NULL) in vm_assign_pptdev()
875 return (vmmops_getreg(vcpu->cookie, reg, retval)); in vm_get_register()
886 error = vmmops_setreg(vcpu->cookie, reg, val); in vm_set_register()
892 vcpu->nextrip = val; in vm_set_register()
893 return (0); in vm_set_register()
935 return (vmmops_getdesc(vcpu->cookie, reg, desc)); in vm_get_seg_desc()
945 return (vmmops_setdesc(vcpu->cookie, reg, desc)); in vm_set_seg_desc()
957 fpurestore(vcpu->guestfpu); in restore_guest_fpustate()
961 load_xcr(0, vcpu->guest_xcr0); in restore_guest_fpustate()
974 if ((rcr0() & CR0_TS) == 0) in save_guest_fpustate()
979 vcpu->guest_xcr0 = rxcr(0); in save_guest_fpustate()
980 load_xcr(0, vmm_get_host_xcr0()); in save_guest_fpustate()
985 fpusave(vcpu->guestfpu); in save_guest_fpustate()
1005 while (vcpu->state != VCPU_IDLE) { in vcpu_set_state_locked()
1006 vcpu->reqidle = 1; in vcpu_set_state_locked()
1009 "idle requested", vcpu_state2str(vcpu->state)); in vcpu_set_state_locked()
1010 msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz); in vcpu_set_state_locked()
1013 KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from " in vcpu_set_state_locked()
1017 if (vcpu->state == VCPU_RUNNING) { in vcpu_set_state_locked()
1018 KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d " in vcpu_set_state_locked()
1019 "mismatch for running vcpu", curcpu, vcpu->hostcpu)); in vcpu_set_state_locked()
1021 KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a " in vcpu_set_state_locked()
1022 "vcpu that is not running", vcpu->hostcpu)); in vcpu_set_state_locked()
1027 * IDLE -> FROZEN -> IDLE in vcpu_set_state_locked()
1028 * FROZEN -> RUNNING -> FROZEN in vcpu_set_state_locked()
1029 * FROZEN -> SLEEPING -> FROZEN in vcpu_set_state_locked()
1031 switch (vcpu->state) { in vcpu_set_state_locked()
1049 vcpu_state2str(vcpu->state), vcpu_state2str(newstate)); in vcpu_set_state_locked()
1051 vcpu->state = newstate; in vcpu_set_state_locked()
1053 vcpu->hostcpu = curcpu; in vcpu_set_state_locked()
1055 vcpu->hostcpu = NOCPU; in vcpu_set_state_locked()
1058 wakeup(&vcpu->state); in vcpu_set_state_locked()
1060 return (0); in vcpu_set_state_locked()
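The comment's three round trips are the whole state machine: FROZEN is the hub, IDLE borders only FROZEN, and RUNNING/SLEEPING border only FROZEN. One way to encode exactly that rule as a validator (a sketch mirroring the comment, not the kernel's switch verbatim):

#include <stdbool.h>

enum vcpu_state { VCPU_IDLE, VCPU_FROZEN, VCPU_RUNNING, VCPU_SLEEPING };

/*
 * Legal transitions, from the comment in vcpu_set_state_locked():
 *   IDLE   -> FROZEN -> IDLE
 *   FROZEN -> RUNNING -> FROZEN
 *   FROZEN -> SLEEPING -> FROZEN
 */
static bool
state_transition_ok(enum vcpu_state from, enum vcpu_state to)
{
	switch (from) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		return (to == VCPU_FROZEN);	/* everything funnels to FROZEN */
	case VCPU_FROZEN:
		return (to != VCPU_FROZEN);	/* FROZEN fans out anywhere else */
	}
	return (false);
}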
1068 if ((error = vcpu_set_state(vcpu, newstate, false)) != 0) in vcpu_require_state()
1077 if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0) in vcpu_require_state_locked()
1084 struct vm *vm = vcpu->vm; in vm_handle_rendezvous()
1088 error = 0; in vm_handle_rendezvous()
1089 vcpuid = vcpu->vcpuid; in vm_handle_rendezvous()
1091 mtx_lock(&vm->rendezvous_mtx); in vm_handle_rendezvous()
1092 while (vm->rendezvous_func != NULL) { in vm_handle_rendezvous()
1094 CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus, &vm->active_cpus); in vm_handle_rendezvous()
1096 if (CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) && in vm_handle_rendezvous()
1097 !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) { in vm_handle_rendezvous()
1099 (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg); in vm_handle_rendezvous()
1100 CPU_SET(vcpuid, &vm->rendezvous_done_cpus); in vm_handle_rendezvous()
1102 if (CPU_CMP(&vm->rendezvous_req_cpus, in vm_handle_rendezvous()
1103 &vm->rendezvous_done_cpus) == 0) { in vm_handle_rendezvous()
1105 CPU_ZERO(&vm->rendezvous_req_cpus); in vm_handle_rendezvous()
1106 vm->rendezvous_func = NULL; in vm_handle_rendezvous()
1107 wakeup(&vm->rendezvous_func); in vm_handle_rendezvous()
1111 mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0, in vm_handle_rendezvous()
1114 mtx_unlock(&vm->rendezvous_mtx); in vm_handle_rendezvous()
1116 if (error != 0) in vm_handle_rendezvous()
1118 mtx_lock(&vm->rendezvous_mtx); in vm_handle_rendezvous()
1121 mtx_unlock(&vm->rendezvous_mtx); in vm_handle_rendezvous()
1122 return (0); in vm_handle_rendezvous()
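Each vcpu entering the rendezvous runs the callback at most once, marks itself done, and whichever vcpu completes the set clears rendezvous_func and wakes the rest. The completion bookkeeping, sketched single-threaded with uint64_t bitmasks in place of cpuset_t (assumes at most 64 vcpus; locking and sleeping omitted):

#include <stdint.h>
#include <stdbool.h>

struct rendezvous {
	uint64_t req_cpus;	/* vcpus that must participate */
	uint64_t done_cpus;	/* vcpus that already ran the callback */
	void (*func)(int vcpuid, void *arg);
	void *arg;
};

/* Returns true when this vcpu completed the rendezvous for everyone. */
static bool
rendezvous_step(struct rendezvous *r, int vcpuid)
{
	uint64_t me = UINT64_C(1) << vcpuid;

	if ((r->req_cpus & me) && !(r->done_cpus & me)) {
		r->func(vcpuid, r->arg);
		r->done_cpus |= me;
	}
	if (r->req_cpus == r->done_cpus) {	/* last one in: tear down */
		r->req_cpus = 0;
		r->func = NULL;			/* wakeup(&rendezvous_func) here */
		return (true);
	}
	return (false);				/* others still pending; sleep */
}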
1131 struct vm *vm = vcpu->vm; in vm_handle_hlt()
1136 vcpuid = vcpu->vcpuid; in vm_handle_hlt()
1137 vcpu_halted = 0; in vm_handle_hlt()
1138 vm_halted = 0; in vm_handle_hlt()
1139 error = 0; in vm_handle_hlt()
1142 KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted")); in vm_handle_hlt()
1155 if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle) in vm_handle_hlt()
1161 vlapic_pending_intr(vcpu->vlapic, NULL)) { in vm_handle_hlt()
1184 CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus); in vm_handle_hlt()
1186 if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) { in vm_handle_hlt()
1200 msleep_spin(vcpu, &vcpu->mtx, wmesg, hz); in vm_handle_hlt()
1202 vmm_stat_incr(vcpu, VCPU_IDLE_TICKS, ticks - t); in vm_handle_hlt()
1206 if (error != 0) { in vm_handle_hlt()
1209 &vm->halted_cpus); in vm_handle_hlt()
1218 CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus); in vm_handle_hlt()
1225 return (0); in vm_handle_hlt()
1231 struct vm *vm = vcpu->vm; in vm_handle_paging()
1236 vme = &vcpu->exitinfo; in vm_handle_paging()
1238 KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d", in vm_handle_paging()
1239 __func__, vme->inst_length)); in vm_handle_paging()
1241 ftype = vme->u.paging.fault_type; in vm_handle_paging()
1247 rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace), in vm_handle_paging()
1248 vme->u.paging.gpa, ftype); in vm_handle_paging()
1249 if (rv == 0) { in vm_handle_paging()
1250 VMM_CTR2(vcpu, "%s bit emulation for gpa %#lx", in vm_handle_paging()
1252 vme->u.paging.gpa); in vm_handle_paging()
1257 map = &vm->vmspace->vm_map; in vm_handle_paging()
1258 rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL); in vm_handle_paging()
1260 VMM_CTR3(vcpu, "vm_handle_paging rv = %d, gpa = %#lx, " in vm_handle_paging()
1261 "ftype = %d", rv, vme->u.paging.gpa, ftype); in vm_handle_paging()
1266 return (0); in vm_handle_paging()
1274 uint64_t gla, gpa, cs_base; in vm_handle_inst_emul() local
1281 vme = &vcpu->exitinfo; in vm_handle_inst_emul()
1283 KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d", in vm_handle_inst_emul()
1284 __func__, vme->inst_length)); in vm_handle_inst_emul()
1286 gla = vme->u.inst_emul.gla; in vm_handle_inst_emul()
1287 gpa = vme->u.inst_emul.gpa; in vm_handle_inst_emul()
1288 cs_base = vme->u.inst_emul.cs_base; in vm_handle_inst_emul()
1289 cs_d = vme->u.inst_emul.cs_d; in vm_handle_inst_emul()
1290 vie = &vme->u.inst_emul.vie; in vm_handle_inst_emul()
1291 paging = &vme->u.inst_emul.paging; in vm_handle_inst_emul()
1292 cpu_mode = paging->cpu_mode; in vm_handle_inst_emul()
1294 VMM_CTR1(vcpu, "inst_emul fault accessing gpa %#lx", gpa); in vm_handle_inst_emul()
1297 if (vie->num_valid == 0) { in vm_handle_inst_emul()
1298 error = vmm_fetch_instruction(vcpu, paging, vme->rip + cs_base, in vm_handle_inst_emul()
1304 error = fault = 0; in vm_handle_inst_emul()
1309 if (vmm_decode_instruction(vcpu, gla, cpu_mode, cs_d, vie) != 0) { in vm_handle_inst_emul()
1311 vme->rip + cs_base); in vm_handle_inst_emul()
1313 return (0); in vm_handle_inst_emul()
1319 vme->inst_length = vie->num_processed; in vm_handle_inst_emul()
1320 vcpu->nextrip += vie->num_processed; in vm_handle_inst_emul()
1322 vcpu->nextrip); in vm_handle_inst_emul()
1324 /* return to userland unless this is an in-kernel emulated device */ in vm_handle_inst_emul()
1325 if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) { in vm_handle_inst_emul()
1328 } else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) { in vm_handle_inst_emul()
1331 } else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) { in vm_handle_inst_emul()
1336 return (0); in vm_handle_inst_emul()
1339 error = vmm_emulate_instruction(vcpu, gpa, vie, paging, mread, mwrite, in vm_handle_inst_emul()
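The in-kernel device test above is a plain gpa range dispatch: accesses falling in the local APIC, IOAPIC, or HPET windows are emulated in the kernel, and everything else bounces the exit to userland. A table-driven version of the same test; the base addresses below are the conventional x86 ones and the sizes are assumptions, since the real macros are defined elsewhere in vmm:

#include <stdint.h>
#include <stddef.h>

static const struct mmio_range {
	uint64_t base;
	uint64_t size;
	const char *dev;
} in_kernel_devs[] = {
	{ 0xfee00000, 4096, "vlapic" },		/* DEFAULT_APIC_BASE */
	{ 0xfec00000, 4096, "vioapic" },	/* VIOAPIC_BASE */
	{ 0xfed00000, 1024, "vhpet" },		/* VHPET_BASE */
};

/* NULL means: not an in-kernel device, return the exit to userland. */
static const char *
in_kernel_device(uint64_t gpa)
{
	for (size_t i = 0;
	    i < sizeof(in_kernel_devs) / sizeof(in_kernel_devs[0]); i++)
		if (gpa >= in_kernel_devs[i].base &&
		    gpa < in_kernel_devs[i].base + in_kernel_devs[i].size)
			return (in_kernel_devs[i].dev);
	return (NULL);
}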
1348 struct vm *vm = vcpu->vm; in vm_handle_suspend()
1352 error = 0; in vm_handle_suspend()
1355 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus); in vm_handle_suspend()
1365 while (error == 0) { in vm_handle_suspend()
1366 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { in vm_handle_suspend()
1371 if (vm->rendezvous_func == NULL) { in vm_handle_suspend()
1374 msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz); in vm_handle_suspend()
1393 for (i = 0; i < vm->maxcpus; i++) { in vm_handle_suspend()
1394 if (CPU_ISSET(i, &vm->suspended_cpus)) { in vm_handle_suspend()
1407 KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle)); in vm_handle_reqidle()
1408 vcpu->reqidle = 0; in vm_handle_reqidle()
1411 return (0); in vm_handle_reqidle()
1423 if (!vme->u.dbg.pushf_intercept || vme->u.dbg.tf_shadow_val != 0) { in vm_handle_db()
1424 return (0); in vm_handle_db()
1428 error = vm_copy_setup(vcpu, &vme->u.dbg.paging, rsp, sizeof(uint64_t), in vm_handle_db()
1430 if (error != 0 || fault != 0) { in vm_handle_db()
1445 return (0); in vm_handle_db()
1456 if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) { in vm_suspend()
1458 vm->suspend, how); in vm_suspend()
1467 for (i = 0; i < vm->maxcpus; i++) { in vm_suspend()
1468 if (CPU_ISSET(i, &vm->active_cpus)) in vm_suspend()
1472 return (0); in vm_suspend()
1478 struct vm *vm = vcpu->vm; in vm_exit_suspended()
1481 KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST, in vm_exit_suspended()
1482 ("vm_exit_suspended: invalid suspend type %d", vm->suspend)); in vm_exit_suspended()
1485 vmexit->rip = rip; in vm_exit_suspended()
1486 vmexit->inst_length = 0; in vm_exit_suspended()
1487 vmexit->exitcode = VM_EXITCODE_SUSPENDED; in vm_exit_suspended()
1488 vmexit->u.suspended.how = vm->suspend; in vm_exit_suspended()
1497 vmexit->rip = rip; in vm_exit_debug()
1498 vmexit->inst_length = 0; in vm_exit_debug()
1499 vmexit->exitcode = VM_EXITCODE_DEBUG; in vm_exit_debug()
1508 vmexit->rip = rip; in vm_exit_rendezvous()
1509 vmexit->inst_length = 0; in vm_exit_rendezvous()
1510 vmexit->exitcode = VM_EXITCODE_RENDEZVOUS; in vm_exit_rendezvous()
1520 vmexit->rip = rip; in vm_exit_reqidle()
1521 vmexit->inst_length = 0; in vm_exit_reqidle()
1522 vmexit->exitcode = VM_EXITCODE_REQIDLE; in vm_exit_reqidle()
1532 vmexit->rip = rip; in vm_exit_astpending()
1533 vmexit->inst_length = 0; in vm_exit_astpending()
1534 vmexit->exitcode = VM_EXITCODE_BOGUS; in vm_exit_astpending()
1541 struct vm *vm = vcpu->vm; in vm_run()
1550 vcpuid = vcpu->vcpuid; in vm_run()
1552 if (!CPU_ISSET(vcpuid, &vm->active_cpus)) in vm_run()
1555 if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) in vm_run()
1558 pmap = vmspace_pmap(vm->vmspace); in vm_run()
1559 vme = &vcpu->exitinfo; in vm_run()
1560 evinfo.rptr = &vm->rendezvous_req_cpus; in vm_run()
1561 evinfo.sptr = &vm->suspend; in vm_run()
1562 evinfo.iptr = &vcpu->reqidle; in vm_run()
1566 KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active), in vm_run()
1577 error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo); in vm_run()
1582 vmm_stat_incr(vcpu, VCPU_TOTAL_RUNTIME, rdtsc() - tscval); in vm_run()
1586 if (error == 0) { in vm_run()
1588 vcpu->nextrip = vme->rip + vme->inst_length; in vm_run()
1589 switch (vme->exitcode) { in vm_run()
1597 vioapic_process_eoi(vm, vme->u.ioapic_eoi.vector); in vm_run()
1603 intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0); in vm_run()
1634 if (error == 0 && vme->exitcode == VM_EXITCODE_IPI) in vm_run()
1637 if (error == 0 && retu == false) in vm_run()
1641 VMM_CTR2(vcpu, "retu %d/%d", error, vme->exitcode); in vm_run()
1661 vcpu->exitinfo.inst_length = 0; in vm_restart_instruction()
1663 "setting inst_length to zero", vcpu->exitinfo.rip); in vm_restart_instruction()
1674 "nextrip from %#lx to %#lx", vcpu->nextrip, rip); in vm_restart_instruction()
1675 vcpu->nextrip = rip; in vm_restart_instruction()
1679 return (0); in vm_restart_instruction()
1689 vector = info & 0xff; in vm_exit_intinfo()
1697 info = 0; in vm_exit_intinfo()
1700 vcpu->exitintinfo = info; in vm_exit_intinfo()
1701 return (0); in vm_exit_intinfo()
1719 vector = info & 0xff; in exception_class()
1721 /* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */ in exception_class()
1731 * SVM and VT-x use identical type values to represent NMI, in exception_class()
1734 * SVM uses type '3' for all exceptions. VT-x uses type '3' in exception_class()
1769 * If an exception occurs while attempting to call the double-fault in nested_fault()
1773 vector1 = info1 & 0xff; in nested_fault()
1777 vm_suspend(vcpu->vm, VM_SUSPEND_TRIPLEFAULT); in nested_fault()
1778 *retinfo = 0; in nested_fault()
1779 return (0); in nested_fault()
1783 	 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol 3 in nested_fault()
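nested_fault() applies the two SDM tables cited in the comments: vectors fall into benign, contributory, and page-fault classes; certain (first, second) pairs escalate to #DF; and a fault raised while delivering #DF becomes a triple fault, which the code above turns into vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT). A sketch of the classification, paraphrased from Intel SDM Tables 6-4 and 6-5 (not the kernel's exact exception_class(), which also folds in VT-x/SVM event types):

#include <stdbool.h>

enum exc_class { EC_BENIGN, EC_CONTRIBUTORY, EC_PAGEFAULT };

#define IDT_DE	0	/* divide error */
#define IDT_DF	8	/* double fault: a fault during its delivery = triple fault */
#define IDT_TS	10	/* invalid TSS */
#define IDT_NP	11	/* segment not present */
#define IDT_SS	12	/* stack fault */
#define IDT_GP	13	/* general protection */
#define IDT_PF	14	/* page fault */

static enum exc_class
exception_class(int vector)
{
	switch (vector) {
	case IDT_PF:
		return (EC_PAGEFAULT);
	case IDT_DE: case IDT_TS: case IDT_NP: case IDT_SS: case IDT_GP:
		return (EC_CONTRIBUTORY);
	default:
		return (EC_BENIGN);
	}
}

/* Does delivering 'vector2' while 'vector1' is pending yield #DF? */
static bool
generates_double_fault(int vector1, int vector2)
{
	enum exc_class c1 = exception_class(vector1);
	enum exc_class c2 = exception_class(vector2);

	return ((c1 == EC_CONTRIBUTORY && c2 == EC_CONTRIBUTORY) ||
	    (c1 == EC_PAGEFAULT && c2 != EC_BENIGN));
}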
1803 uint64_t info = 0; in vcpu_exception_intinfo()
1805 if (vcpu->exception_pending) { in vcpu_exception_intinfo()
1806 info = vcpu->exc_vector & 0xff; in vcpu_exception_intinfo()
1808 if (vcpu->exc_errcode_valid) { in vcpu_exception_intinfo()
1810 info |= (uint64_t)vcpu->exc_errcode << 32; in vcpu_exception_intinfo()
1822 info1 = vcpu->exitintinfo; in vm_entry_intinfo()
1823 vcpu->exitintinfo = 0; in vm_entry_intinfo()
1825 info2 = 0; in vm_entry_intinfo()
1826 if (vcpu->exception_pending) { in vm_entry_intinfo()
1828 vcpu->exception_pending = 0; in vm_entry_intinfo()
1830 vcpu->exc_vector, info2); in vm_entry_intinfo()
1842 valid = 0; in vm_entry_intinfo()
1856 *info1 = vcpu->exitintinfo; in vm_get_intinfo()
1858 return (0); in vm_get_intinfo()
1868 if (vector < 0 || vector >= 32) in vm_inject_exception()
1879 if (vcpu->exception_pending) { in vm_inject_exception()
1881 "pending exception %d", vector, vcpu->exc_vector); in vm_inject_exception()
1892 errcode_valid = 0; in vm_inject_exception()
1901 error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW, 0); in vm_inject_exception()
1902 KASSERT(error == 0, ("%s: error %d clearing interrupt shadow", in vm_inject_exception()
1908 vcpu->exception_pending = 1; in vm_inject_exception()
1909 vcpu->exc_vector = vector; in vm_inject_exception()
1910 vcpu->exc_errcode = errcode; in vm_inject_exception()
1911 vcpu->exc_errcode_valid = errcode_valid; in vm_inject_exception()
1913 return (0); in vm_inject_exception()
1925 KASSERT(error == 0, ("vm_inject_exception error %d", error)); in vm_inject_fault()
1937 KASSERT(error == 0, ("vm_set_register(cr2) error %d", error)); in vm_inject_pf()
1948 vcpu->nmi_pending = 1; in vm_inject_nmi()
1950 return (0); in vm_inject_nmi()
1956 return (vcpu->nmi_pending); in vm_nmi_pending()
1962 if (vcpu->nmi_pending == 0) in vm_nmi_clear()
1965 vcpu->nmi_pending = 0; in vm_nmi_clear()
1975 vcpu->extint_pending = 1; in vm_inject_extint()
1977 return (0); in vm_inject_extint()
1983 return (vcpu->extint_pending); in vm_extint_pending()
1989 if (vcpu->extint_pending == 0) in vm_extint_clear()
1992 vcpu->extint_pending = 0; in vm_extint_clear()
1999 if (type < 0 || type >= VM_CAP_MAX) in vm_get_capability()
2002 return (vmmops_getcap(vcpu->cookie, type, retval)); in vm_get_capability()
2008 if (type < 0 || type >= VM_CAP_MAX) in vm_set_capability()
2011 return (vmmops_setcap(vcpu->cookie, type, val)); in vm_set_capability()
2017 return (vcpu->vm); in vcpu_vm()
2023 return (vcpu->vcpuid); in vcpu_vcpuid()
2029 return (vm->vcpu[vcpuid]); in vm_vcpu()
2035 return (vcpu->vlapic); in vm_lapic()
2042 return (vm->vioapic); in vm_ioapic()
2049 return (vm->vhpet); in vm_hpet()
2066 * names instead of a single one - yuck! in vmm_is_pptdev()
2072 for (i = 0; names[i] != NULL && !found; i++) { in vmm_is_pptdev()
2074 while (cp != NULL && *cp != '\0') { in vmm_is_pptdev()
2076 *cp2 = '\0'; in vmm_is_pptdev()
2098 return (vm->iommu); in vm_iommu_domain()
2119 state = vcpu->state; in vcpu_get_state()
2121 *hostcpu = vcpu->hostcpu; in vcpu_get_state()
2130 struct vm *vm = vcpu->vm; in vm_activate_cpu()
2132 if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_activate_cpu()
2136 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus); in vm_activate_cpu()
2137 return (0); in vm_activate_cpu()
2144 vm->debug_cpus = vm->active_cpus; in vm_suspend_cpu()
2145 for (int i = 0; i < vm->maxcpus; i++) { in vm_suspend_cpu()
2146 if (CPU_ISSET(i, &vm->active_cpus)) in vm_suspend_cpu()
2150 if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_suspend_cpu()
2153 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_suspend_cpu()
2156 return (0); in vm_suspend_cpu()
2164 CPU_ZERO(&vm->debug_cpus); in vm_resume_cpu()
2166 if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus)) in vm_resume_cpu()
2169 CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_resume_cpu()
2171 return (0); in vm_resume_cpu()
2178 return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus)); in vcpu_debugged()
2185 return (vm->active_cpus); in vm_active_cpus()
2192 return (vm->debug_cpus); in vm_debug_cpus()
2199 return (vm->suspended_cpus); in vm_suspended_cpus()
2211 mtx_lock(&vm->rendezvous_mtx); in vm_start_cpus()
2212 CPU_AND(&set, &vm->startup_cpus, tostart); in vm_start_cpus()
2213 CPU_ANDNOT(&vm->startup_cpus, &vm->startup_cpus, &set); in vm_start_cpus()
2214 mtx_unlock(&vm->rendezvous_mtx); in vm_start_cpus()
2221 mtx_lock(&vm->rendezvous_mtx); in vm_await_start()
2222 CPU_OR(&vm->startup_cpus, &vm->startup_cpus, waiting); in vm_await_start()
2223 mtx_unlock(&vm->rendezvous_mtx); in vm_await_start()
2230 return (vcpu->stats); in vcpu_stats()
2236 *state = vcpu->x2apic_state; in vm_get_x2apic_state()
2238 return (0); in vm_get_x2apic_state()
2247 vcpu->x2apic_state = state; in vm_set_x2apic_state()
2251 return (0); in vm_set_x2apic_state()
2257 * - If the vcpu thread is sleeping then it is woken up.
2258 * - If the vcpu is running on a different host_cpu then an IPI will be directed
2266 hostcpu = vcpu->hostcpu; in vcpu_notify_event_locked()
2267 if (vcpu->state == VCPU_RUNNING) { in vcpu_notify_event_locked()
2271 vlapic_post_intr(vcpu->vlapic, hostcpu, in vcpu_notify_event_locked()
2286 "with hostcpu %d", vcpu->state, hostcpu)); in vcpu_notify_event_locked()
2287 if (vcpu->state == VCPU_SLEEPING) in vcpu_notify_event_locked()
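Per the comments above, vcpu_notify_event_locked() chooses the kick by state: a RUNNING vcpu on another host cpu gets an IPI (or a posted interrupt via vlapic_post_intr() for APIC interrupts), and a SLEEPING vcpu gets a wakeup(). The dispatch shape as a userspace sketch with stub notifiers (hypothetical names):

#include <stdio.h>

enum vcpu_state { VCPU_IDLE, VCPU_FROZEN, VCPU_RUNNING, VCPU_SLEEPING };

struct uvcpu {
	enum vcpu_state state;
	int hostcpu;		/* valid only while RUNNING */
};

/* Stand-ins for ipi_cpu()/vlapic_post_intr() and wakeup(). */
static void send_ipi(int cpu) { printf("IPI -> cpu %d\n", cpu); }
static void wake(struct uvcpu *v) { printf("wakeup %p\n", (void *)v); }

static void
notify_vcpu_locked(struct uvcpu *v, int curcpu)
{
	if (v->state == VCPU_RUNNING) {
		if (v->hostcpu != curcpu)
			send_ipi(v->hostcpu);	/* force a VM-exit remotely */
		/* else: we already own the cpu; the exit happens naturally */
	} else if (v->state == VCPU_SLEEPING) {
		wake(v);			/* kick it out of msleep_spin() */
	}
	/* IDLE/FROZEN: nothing to do; the next transition rechecks events */
}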
2303 return (vm->vmspace); in vm_vmspace()
2309 return (&vm->mem); in vm_mem()
2325 struct vm *vm = vcpu->vm; in vm_smp_rendezvous()
2334 mtx_lock(&vm->rendezvous_mtx); in vm_smp_rendezvous()
2335 if (vm->rendezvous_func != NULL) { in vm_smp_rendezvous()
2342 mtx_unlock(&vm->rendezvous_mtx); in vm_smp_rendezvous()
2344 if (error != 0) in vm_smp_rendezvous()
2348 KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous " in vm_smp_rendezvous()
2352 vm->rendezvous_req_cpus = dest; in vm_smp_rendezvous()
2353 CPU_ZERO(&vm->rendezvous_done_cpus); in vm_smp_rendezvous()
2354 vm->rendezvous_arg = arg; in vm_smp_rendezvous()
2355 vm->rendezvous_func = func; in vm_smp_rendezvous()
2356 mtx_unlock(&vm->rendezvous_mtx); in vm_smp_rendezvous()
2359 * Wake up any sleeping vcpus and trigger a VM-exit in any running in vm_smp_rendezvous()
2362 for (i = 0; i < vm->maxcpus; i++) { in vm_smp_rendezvous()
2373 return (vm->vatpic); in vm_atpic()
2379 return (vm->vatpit); in vm_atpit()
2386 return (vm->vpmtmr); in vm_pmtmr()
2393 return (vm->vrtc); in vm_rtc()
2408 KASSERT(seg >= 0 && seg < nitems(seg_names), in vm_segment_name()
2418 for (idx = 0; idx < num_copyinfo; idx++) { in vm_copy_teardown()
2433 uint64_t gpa; in vm_copy_setup() local
2437 nused = 0; in vm_copy_setup()
2439 while (remaining > 0) { in vm_copy_setup()
2442 error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault); in vm_copy_setup()
2445 off = gpa & PAGE_MASK; in vm_copy_setup()
2446 n = min(remaining, PAGE_SIZE - off); in vm_copy_setup()
2447 copyinfo[nused].gpa = gpa; in vm_copy_setup()
2449 remaining -= n; in vm_copy_setup()
2454 for (idx = 0; idx < nused; idx++) { in vm_copy_setup()
2455 hva = vm_gpa_hold(vcpu, copyinfo[idx].gpa, in vm_copy_setup()
2467 *fault = 0; in vm_copy_setup()
2468 return (0); in vm_copy_setup()
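vm_copy_setup() chops a guest-linear buffer into page-bounded chunks: after translating gla to gpa, it copies n = min(remaining, PAGE_SIZE - (gpa & PAGE_MASK)) bytes, so no chunk straddles a page and each chunk can be held as one contiguous host mapping. The arithmetic on its own, as a runnable sketch:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define PAGE_MASK (PAGE_SIZE - 1)

int main(void)
{
	uint64_t gpa = 0x1ff8;		/* 8 bytes before a page boundary */
	size_t remaining = 16;

	while (remaining > 0) {
		size_t off = gpa & PAGE_MASK;
		size_t n = remaining < PAGE_SIZE - off ?
		    remaining : PAGE_SIZE - off;
		printf("chunk: gpa=%#lx len=%zu\n", (unsigned long)gpa, n);
		gpa += n;		/* real code re-translates gla each pass */
		remaining -= n;
	}
	return (0);
}
/* Prints gpa=0x1ff8 len=8, then gpa=0x2000 len=8: the copy never
 * straddles a page, so each chunk maps to one contiguous hva. */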
2479 idx = 0; in vm_copyin()
2480 while (len > 0) { in vm_copyin()
2482 len -= copyinfo[idx].len; in vm_copyin()
2495 idx = 0; in vm_copyout()
2496 while (len > 0) { in vm_copyout()
2498 len -= copyinfo[idx].len; in vm_copyout()
2505 * Return the amount of in-use and wired memory for the VM. Since
2506 	 * these are global stats, only return the values for vCPU 0
2515 if (vcpu->vcpuid == 0) { in vm_get_rescnt()
2517 vmspace_resident_count(vcpu->vm->vmspace)); in vm_get_rescnt()
2525 if (vcpu->vcpuid == 0) { in vm_get_wiredcnt()
2527 pmap_wired_count(vmspace_pmap(vcpu->vm->vmspace))); in vm_get_wiredcnt()
2545 for (i = 0; i < maxcpus; i++) { in vm_snapshot_vcpus()
2546 vcpu = vm->vcpu[i]; in vm_snapshot_vcpus()
2550 SNAPSHOT_VAR_OR_LEAVE(vcpu->x2apic_state, meta, ret, done); in vm_snapshot_vcpus()
2551 SNAPSHOT_VAR_OR_LEAVE(vcpu->exitintinfo, meta, ret, done); in vm_snapshot_vcpus()
2552 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_vector, meta, ret, done); in vm_snapshot_vcpus()
2553 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode_valid, meta, ret, done); in vm_snapshot_vcpus()
2554 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode, meta, ret, done); in vm_snapshot_vcpus()
2555 SNAPSHOT_VAR_OR_LEAVE(vcpu->guest_xcr0, meta, ret, done); in vm_snapshot_vcpus()
2556 SNAPSHOT_VAR_OR_LEAVE(vcpu->exitinfo, meta, ret, done); in vm_snapshot_vcpus()
2557 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, ret, done); in vm_snapshot_vcpus()
2565 tsc = now + vcpu->tsc_offset; in vm_snapshot_vcpus()
2567 if (meta->op == VM_SNAPSHOT_RESTORE) in vm_snapshot_vcpus()
2568 vcpu->tsc_offset = tsc; in vm_snapshot_vcpus()
2581 if (ret != 0) in vm_snapshot_vm()
2584 SNAPSHOT_VAR_OR_LEAVE(vm->startup_cpus, meta, ret, done); in vm_snapshot_vm()
2596 error = 0; in vm_snapshot_vcpu()
2599 for (i = 0; i < maxcpus; i++) { in vm_snapshot_vcpu()
2600 vcpu = vm->vcpu[i]; in vm_snapshot_vcpu()
2604 error = vmmops_vcpu_snapshot(vcpu->cookie, meta); in vm_snapshot_vcpu()
2605 if (error != 0) { in vm_snapshot_vcpu()
2617 * Save kernel-side structures to user-space for snapshotting.
2622 int ret = 0; in vm_snapshot_req()
2624 switch (meta->dev_req) { in vm_snapshot_req()
2654 __func__, meta->dev_req); in vm_snapshot_req()
2663 vcpu->tsc_offset = offset; in vm_set_tsc_offset()
2681 for (i = 0; i < maxcpus; i++) { in vm_restore_time()
2682 vcpu = vm->vcpu[i]; in vm_restore_time()
2686 error = vmmops_restore_tsc(vcpu->cookie, in vm_restore_time()
2687 vcpu->tsc_offset - now); in vm_restore_time()
2692 return (0); in vm_restore_time()
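The snapshot path above stores the guest TSC (now + tsc_offset) and, on restore, stashes it back into tsc_offset; vm_restore_time() then hands the backend tsc_offset - now, i.e. a new offset computed against the freshly read host TSC, so the guest's counter resumes where the snapshot left it. The arithmetic with toy numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Save side: guest TSC = host TSC + offset. */
	uint64_t host_now_save = 5000, tsc_offset = 200;
	uint64_t guest_tsc = host_now_save + tsc_offset;	/* 5200, snapshotted */

	/* Restore side: the host TSC restarted elsewhere; recompute the
	 * offset so the guest resumes at the snapshotted value. */
	uint64_t host_now_restore = 1000;
	uint64_t new_offset = guest_tsc - host_now_restore;	/* 4200 */

	printf("guest sees %lu\n",
	    (unsigned long)(host_now_restore + new_offset));	/* 5200 again */
	return (0);
}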