Lines Matching full:vm
46 #include <vm/vm.h>
47 #include <vm/vm_param.h>
48 #include <vm/vm_extern.h>
49 #include <vm/vm_object.h>
50 #include <vm/vm_page.h>
51 #include <vm/pmap.h>
52 #include <vm/vm_map.h>
53 #include <vm/vm_pager.h>
54 #include <vm/vm_kern.h>
55 #include <vm/vnode_pager.h>
56 #include <vm/swap_pager.h>
57 #include <vm/uma.h>
107 struct vm *vm; /* (o) */ member
111 uint64_t exitintinfo; /* (i) events pending at VM exit */
135 * (o) initialized the first time the VM is created
136 * (i) initialized when VM is created and when it is reinitialized
144 struct vm { struct
156 int suspend; /* (i) stop VM execution */
168 /* The following describe the vm cpu topology */ argument
177 VCPU_CTR0((vcpu)->vm, (vcpu)->vcpuid, format) argument
180 VCPU_CTR1((vcpu)->vm, (vcpu)->vcpuid, format, p1)
183 VCPU_CTR2((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2)
186 VCPU_CTR3((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3)
189 VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4)
214 DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
240 static MALLOC_DEFINE(M_VM, "vm", "vm");
255 "Halt VM if all vcpus execute HLT with interrupts disabled");
268 "WBINVD triggers a VM-exit");
274 VMM_STAT(VMEXIT_COUNT, "total number of vm exits");
275 VMM_STAT(VMEXIT_EXTINT, "vm exits due to external interrupt");
282 VMM_STAT(VMEXIT_INTR_WINDOW, "vm exits due to interrupt window opening");
283 VMM_STAT(VMEXIT_NMI_WINDOW, "vm exits due to nmi window opening");
286 VMM_STAT(VMEXIT_NESTED_FAULT, "vm exits due to nested page fault");
287 VMM_STAT(VMEXIT_INST_EMUL, "vm exits for instruction emulation");
288 VMM_STAT(VMEXIT_UNKNOWN, "number of vm exits for unknown reason");
291 VMM_STAT(VMEXIT_USERSPACE, "number of vm exits handled in userspace");
293 VMM_STAT(VMEXIT_EXCEPTION, "number of vm exits due to exceptions");
330 vcpu_alloc(struct vm *vm, int vcpu_id) in vcpu_alloc() argument
334 KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus, in vcpu_alloc()
342 vcpu->vm = vm; in vcpu_alloc()
352 vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid); in vcpu_init()
421 vm_init(struct vm *vm, bool create) in vm_init() argument
423 vm->cookie = vmmops_init(vm, vmspace_pmap(vm_vmspace(vm))); in vm_init()
424 vm->iommu = NULL; in vm_init()
425 vm->vioapic = vioapic_init(vm); in vm_init()
426 vm->vhpet = vhpet_init(vm); in vm_init()
427 vm->vatpic = vatpic_init(vm); in vm_init()
428 vm->vatpit = vatpit_init(vm); in vm_init()
429 vm->vpmtmr = vpmtmr_init(vm); in vm_init()
431 vm->vrtc = vrtc_init(vm); in vm_init()
433 CPU_ZERO(&vm->active_cpus); in vm_init()
434 CPU_ZERO(&vm->debug_cpus); in vm_init()
435 CPU_ZERO(&vm->startup_cpus); in vm_init()
437 vm->suspend = 0; in vm_init()
438 CPU_ZERO(&vm->suspended_cpus); in vm_init()
441 for (int i = 0; i < vm->maxcpus; i++) { in vm_init()
442 if (vm->vcpu[i] != NULL) in vm_init()
443 vcpu_init(vm->vcpu[i]); in vm_init()
449 vm_disable_vcpu_creation(struct vm *vm) in vm_disable_vcpu_creation() argument
451 sx_xlock(&vm->vcpus_init_lock); in vm_disable_vcpu_creation()
452 vm->dying = true; in vm_disable_vcpu_creation()
453 sx_xunlock(&vm->vcpus_init_lock); in vm_disable_vcpu_creation()
457 vm_alloc_vcpu(struct vm *vm, int vcpuid) in vm_alloc_vcpu() argument
461 if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm)) in vm_alloc_vcpu()
465 atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]); in vm_alloc_vcpu()
469 sx_xlock(&vm->vcpus_init_lock); in vm_alloc_vcpu()
470 vcpu = vm->vcpu[vcpuid]; in vm_alloc_vcpu()
471 if (vcpu == NULL && !vm->dying) { in vm_alloc_vcpu()
472 vcpu = vcpu_alloc(vm, vcpuid); in vm_alloc_vcpu()
479 atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid], in vm_alloc_vcpu()
482 sx_xunlock(&vm->vcpus_init_lock); in vm_alloc_vcpu()
487 vm_lock_vcpus(struct vm *vm) in vm_lock_vcpus() argument
489 sx_xlock(&vm->vcpus_init_lock); in vm_lock_vcpus()
493 vm_unlock_vcpus(struct vm *vm) in vm_unlock_vcpus() argument
495 sx_unlock(&vm->vcpus_init_lock); in vm_unlock_vcpus()
499 vm_create(const char *name, struct vm **retvm) in vm_create()
501 struct vm *vm; in vm_create() local
504 vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO); in vm_create()
505 error = vm_mem_init(&vm->mem, 0, VM_MAXUSER_ADDRESS_LA48); in vm_create()
507 free(vm, M_VM); in vm_create()
510 strcpy(vm->name, name); in vm_create()
511 mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF); in vm_create()
512 sx_init(&vm->vcpus_init_lock, "vm vcpus"); in vm_create()
513 vm->vcpu = malloc(sizeof(*vm->vcpu) * vm_maxcpu, M_VM, M_WAITOK | in vm_create()
516 vm->sockets = 1; in vm_create()
517 vm->cores = 1; /* XXX backwards compatibility */ in vm_create()
518 vm->threads = 1; /* XXX backwards compatibility */ in vm_create()
519 vm->maxcpus = vm_maxcpu; in vm_create()
521 vm_init(vm, true); in vm_create()
523 *retvm = vm; in vm_create()
528 vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores, in vm_get_topology() argument
531 *sockets = vm->sockets; in vm_get_topology()
532 *cores = vm->cores; in vm_get_topology()
533 *threads = vm->threads; in vm_get_topology()
534 *maxcpus = vm->maxcpus; in vm_get_topology()
538 vm_get_maxcpus(struct vm *vm) in vm_get_maxcpus() argument
540 return (vm->maxcpus); in vm_get_maxcpus()
544 vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores, in vm_set_topology() argument
548 if ((sockets * cores * threads) > vm->maxcpus) in vm_set_topology()
550 vm->sockets = sockets; in vm_set_topology()
551 vm->cores = cores; in vm_set_topology()
552 vm->threads = threads; in vm_set_topology()
557 vm_cleanup(struct vm *vm, bool destroy) in vm_cleanup() argument
560 vm_xlock_memsegs(vm); in vm_cleanup()
562 vm_assert_memseg_xlocked(vm); in vm_cleanup()
564 ppt_unassign_all(vm); in vm_cleanup()
566 if (vm->iommu != NULL) in vm_cleanup()
567 iommu_destroy_domain(vm->iommu); in vm_cleanup()
570 vrtc_cleanup(vm->vrtc); in vm_cleanup()
572 vrtc_reset(vm->vrtc); in vm_cleanup()
573 vpmtmr_cleanup(vm->vpmtmr); in vm_cleanup()
574 vatpit_cleanup(vm->vatpit); in vm_cleanup()
575 vhpet_cleanup(vm->vhpet); in vm_cleanup()
576 vatpic_cleanup(vm->vatpic); in vm_cleanup()
577 vioapic_cleanup(vm->vioapic); in vm_cleanup()
579 for (int i = 0; i < vm->maxcpus; i++) { in vm_cleanup()
580 if (vm->vcpu[i] != NULL) in vm_cleanup()
581 vcpu_cleanup(vm->vcpu[i], destroy); in vm_cleanup()
584 vmmops_cleanup(vm->cookie); in vm_cleanup()
586 vm_mem_cleanup(vm); in vm_cleanup()
589 vm_mem_destroy(vm); in vm_cleanup()
591 free(vm->vcpu, M_VM); in vm_cleanup()
592 sx_destroy(&vm->vcpus_init_lock); in vm_cleanup()
593 mtx_destroy(&vm->rendezvous_mtx); in vm_cleanup()
598 vm_destroy(struct vm *vm) in vm_destroy() argument
600 vm_cleanup(vm, true); in vm_destroy()
601 free(vm, M_VM); in vm_destroy()
605 vm_reinit(struct vm *vm) in vm_reinit() argument
612 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { in vm_reinit()
613 vm_cleanup(vm, false); in vm_reinit()
614 vm_init(vm, false); in vm_reinit()
624 vm_name(struct vm *vm) in vm_name() argument
626 return (vm->name); in vm_name()
630 vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa) in vm_map_mmio() argument
632 return (vmm_mmio_alloc(vm_vmspace(vm), gpa, len, hpa)); in vm_map_mmio()
636 vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len) in vm_unmap_mmio() argument
639 vmm_mmio_free(vm_vmspace(vm), gpa, len); in vm_unmap_mmio()
644 vm_iommu_map(struct vm *vm) in vm_iommu_map() argument
651 sx_assert(&vm->mem.mem_segs_lock, SX_LOCKED); in vm_iommu_map()
653 pmap = vmspace_pmap(vm_vmspace(vm)); in vm_iommu_map()
655 if (!vm_memseg_sysmem(vm, i)) in vm_iommu_map()
658 mm = &vm->mem.mem_maps[i]; in vm_iommu_map()
682 ("vm_iommu_map: vm %p gpa %jx hpa %jx not wired", in vm_iommu_map()
683 vm, (uintmax_t)gpa, (uintmax_t)hpa)); in vm_iommu_map()
685 iommu_create_mapping(vm->iommu, gpa, hpa, PAGE_SIZE); in vm_iommu_map()
694 vm_iommu_unmap(struct vm *vm) in vm_iommu_unmap() argument
700 sx_assert(&vm->mem.mem_segs_lock, SX_LOCKED); in vm_iommu_unmap()
703 if (!vm_memseg_sysmem(vm, i)) in vm_iommu_unmap()
706 mm = &vm->mem.mem_maps[i]; in vm_iommu_unmap()
716 vmspace_pmap(vm_vmspace(vm)), gpa))), in vm_iommu_unmap()
717 ("vm_iommu_unmap: vm %p gpa %jx not wired", in vm_iommu_unmap()
718 vm, (uintmax_t)gpa)); in vm_iommu_unmap()
719 iommu_remove_mapping(vm->iommu, gpa, PAGE_SIZE); in vm_iommu_unmap()
727 error = iommu_invalidate_tlb(vm->iommu); in vm_iommu_unmap()
732 vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func) in vm_unassign_pptdev() argument
736 error = ppt_unassign_device(vm, bus, slot, func); in vm_unassign_pptdev()
740 if (ppt_assigned_devices(vm) == 0) in vm_unassign_pptdev()
741 error = vm_iommu_unmap(vm); in vm_unassign_pptdev()
747 vm_assign_pptdev(struct vm *vm, int bus, int slot, int func) in vm_assign_pptdev() argument
754 if (ppt_assigned_devices(vm) == 0) { in vm_assign_pptdev()
755 KASSERT(vm->iommu == NULL, in vm_assign_pptdev()
757 maxaddr = vmm_sysmem_maxaddr(vm); in vm_assign_pptdev()
758 vm->iommu = iommu_create_domain(maxaddr); in vm_assign_pptdev()
759 if (vm->iommu == NULL) in vm_assign_pptdev()
764 error = ppt_assign_device(vm, bus, slot, func); in vm_assign_pptdev()
766 error = vm_iommu_map(vm); in vm_assign_pptdev()
773 /* Negative values represent VM control structure fields. */ in vm_get_register()
785 /* Negative values represent VM control structure fields. */ in vm_set_register()
901 struct vm *vm = vcpu->vm; in vm_rendezvous() local
904 mtx_assert(&vcpu->vm->rendezvous_mtx, MA_OWNED); in vm_rendezvous()
905 KASSERT(vcpu->vm->rendezvous_func != NULL, in vm_rendezvous()
909 CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus, in vm_rendezvous()
910 &vm->active_cpus); in vm_rendezvous()
913 if (CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) && in vm_rendezvous()
914 !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) { in vm_rendezvous()
916 (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg); in vm_rendezvous()
917 CPU_SET(vcpuid, &vm->rendezvous_done_cpus); in vm_rendezvous()
919 if (CPU_CMP(&vm->rendezvous_req_cpus, in vm_rendezvous()
920 &vm->rendezvous_done_cpus) == 0) { in vm_rendezvous()
922 CPU_ZERO(&vm->rendezvous_req_cpus); in vm_rendezvous()
923 vm->rendezvous_func = NULL; in vm_rendezvous()
924 wakeup(&vm->rendezvous_func); in vm_rendezvous()
1010 * Try to lock all of the vCPUs in the VM while taking care to avoid deadlocks
1016 vcpu_set_state_all(struct vm *vm, enum vcpu_state newstate) in vcpu_set_state_all() argument
1028 maxcpus = vm->maxcpus; in vcpu_set_state_all()
1030 mtx_lock(&vm->rendezvous_mtx); in vcpu_set_state_all()
1032 if (vm->rendezvous_func != NULL) { in vcpu_set_state_all()
1041 vcpu = vm_vcpu(vm, i); in vcpu_set_state_all()
1071 vcpu = vm_vcpu(vm, i); in vcpu_set_state_all()
1076 mtx_unlock(&vm->rendezvous_mtx); in vcpu_set_state_all()
1079 mtx_lock(&vm->rendezvous_mtx); in vcpu_set_state_all()
1080 if (vm->rendezvous_func != NULL) in vcpu_set_state_all()
1094 mtx_unlock(&vm->rendezvous_mtx); in vcpu_set_state_all()
1119 struct vm *vm; in vm_handle_rendezvous() local
1123 vm = vcpu->vm; in vm_handle_rendezvous()
1125 mtx_lock(&vm->rendezvous_mtx); in vm_handle_rendezvous()
1126 while (vm->rendezvous_func != NULL) { in vm_handle_rendezvous()
1131 mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0, in vm_handle_rendezvous()
1136 mtx_unlock(&vm->rendezvous_mtx); in vm_handle_rendezvous()
1140 mtx_lock(&vm->rendezvous_mtx); in vm_handle_rendezvous()
1143 mtx_unlock(&vm->rendezvous_mtx); in vm_handle_rendezvous()
1153 struct vm *vm = vcpu->vm; in vm_handle_hlt() local
1164 KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted")); in vm_handle_hlt()
1177 if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle) in vm_handle_hlt()
1206 CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus); in vm_handle_hlt()
1208 if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) { in vm_handle_hlt()
1231 &vm->halted_cpus); in vm_handle_hlt()
1240 CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus); in vm_handle_hlt()
1245 vm_suspend(vm, VM_SUSPEND_HALT); in vm_handle_hlt()
1253 struct vm *vm = vcpu->vm; in vm_handle_paging() local
1269 rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm_vmspace(vm)), in vm_handle_paging()
1279 map = &vm_vmspace(vm)->vm_map; in vm_handle_paging()
1370 struct vm *vm = vcpu->vm; in vm_handle_suspend() local
1377 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus); in vm_handle_suspend()
1382 * Since a VM may be suspended at any time including when one or in vm_handle_suspend()
1388 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { in vm_handle_suspend()
1393 if (vm->rendezvous_func == NULL) { in vm_handle_suspend()
1415 for (i = 0; i < vm->maxcpus; i++) { in vm_handle_suspend()
1416 if (CPU_ISSET(i, &vm->suspended_cpus)) { in vm_handle_suspend()
1417 vcpu_notify_event(vm_vcpu(vm, i)); in vm_handle_suspend()
1471 vm_suspend(struct vm *vm, enum vm_suspend_how how) in vm_suspend() argument
1478 if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) { in vm_suspend()
1479 VM_CTR2(vm, "virtual machine already suspended %d/%d", in vm_suspend()
1480 vm->suspend, how); in vm_suspend()
1484 VM_CTR1(vm, "virtual machine successfully suspended %d", how); in vm_suspend()
1489 for (i = 0; i < vm->maxcpus; i++) { in vm_suspend()
1490 if (CPU_ISSET(i, &vm->active_cpus)) in vm_suspend()
1491 vcpu_notify_event(vm_vcpu(vm, i)); in vm_suspend()
1500 struct vm *vm = vcpu->vm; in vm_exit_suspended() local
1503 KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST, in vm_exit_suspended()
1504 ("vm_exit_suspended: invalid suspend type %d", vm->suspend)); in vm_exit_suspended()
1510 vmexit->u.suspended.how = vm->suspend; in vm_exit_suspended()
1563 struct vm *vm = vcpu->vm; in vm_run() local
1574 if (!CPU_ISSET(vcpuid, &vm->active_cpus)) in vm_run()
1577 if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) in vm_run()
1580 pmap = vmspace_pmap(vm_vmspace(vm)); in vm_run()
1582 evinfo.rptr = &vm->rendezvous_req_cpus; in vm_run()
1583 evinfo.sptr = &vm->suspend; in vm_run()
1619 vioapic_process_eoi(vm, vme->u.ioapic_eoi.vector); in vm_run()
1799 vm_suspend(vcpu->vm, VM_SUSPEND_TRIPLEFAULT); in nested_fault()
2036 struct vm *
2039 return (vcpu->vm); in vcpu_vm()
2049 vm_vcpu(struct vm *vm, int vcpuid) in vm_vcpu() argument
2051 return (vm->vcpu[vcpuid]); in vm_vcpu()
2061 vm_ioapic(struct vm *vm) in vm_ioapic() argument
2064 return (vm->vioapic); in vm_ioapic()
2068 vm_hpet(struct vm *vm) in vm_hpet() argument
2071 return (vm->vhpet); in vm_hpet()
2117 vm_iommu_domain(struct vm *vm) in vm_iommu_domain() argument
2120 return (vm->iommu); in vm_iommu_domain()
2152 struct vm *vm = vcpu->vm; in vm_activate_cpu() local
2154 if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_activate_cpu()
2158 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus); in vm_activate_cpu()
2163 vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu) in vm_suspend_cpu() argument
2166 vm->debug_cpus = vm->active_cpus; in vm_suspend_cpu()
2167 for (int i = 0; i < vm->maxcpus; i++) { in vm_suspend_cpu()
2168 if (CPU_ISSET(i, &vm->active_cpus)) in vm_suspend_cpu()
2169 vcpu_notify_event(vm_vcpu(vm, i)); in vm_suspend_cpu()
2172 if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_suspend_cpu()
2175 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_suspend_cpu()
2182 vm_resume_cpu(struct vm *vm, struct vcpu *vcpu) in vm_resume_cpu() argument
2186 CPU_ZERO(&vm->debug_cpus); in vm_resume_cpu()
2188 if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus)) in vm_resume_cpu()
2191 CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_resume_cpu()
2200 return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus)); in vcpu_debugged()
2204 vm_active_cpus(struct vm *vm) in vm_active_cpus() argument
2207 return (vm->active_cpus); in vm_active_cpus()
2211 vm_debug_cpus(struct vm *vm) in vm_debug_cpus() argument
2214 return (vm->debug_cpus); in vm_debug_cpus()
2218 vm_suspended_cpus(struct vm *vm) in vm_suspended_cpus() argument
2221 return (vm->suspended_cpus); in vm_suspended_cpus()
2229 vm_start_cpus(struct vm *vm, const cpuset_t *tostart) in vm_start_cpus() argument
2233 mtx_lock(&vm->rendezvous_mtx); in vm_start_cpus()
2234 CPU_AND(&set, &vm->startup_cpus, tostart); in vm_start_cpus()
2235 CPU_ANDNOT(&vm->startup_cpus, &vm->startup_cpus, &set); in vm_start_cpus()
2236 mtx_unlock(&vm->rendezvous_mtx); in vm_start_cpus()
2241 vm_await_start(struct vm *vm, const cpuset_t *waiting) in vm_await_start() argument
2243 mtx_lock(&vm->rendezvous_mtx); in vm_await_start()
2244 CPU_OR(&vm->startup_cpus, &vm->startup_cpus, waiting); in vm_await_start()
2245 mtx_unlock(&vm->rendezvous_mtx); in vm_await_start()
2329 vm_mem(struct vm *vm) in vm_mem() argument
2331 return (&vm->mem); in vm_mem()
2335 vm_apicid2vcpuid(struct vm *vm, int apicid) in vm_apicid2vcpuid() argument
2347 struct vm *vm = vcpu->vm; in vm_smp_rendezvous() local
2356 mtx_lock(&vm->rendezvous_mtx); in vm_smp_rendezvous()
2357 if (vm->rendezvous_func != NULL) { in vm_smp_rendezvous()
2364 mtx_unlock(&vm->rendezvous_mtx); in vm_smp_rendezvous()
2370 KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous " in vm_smp_rendezvous()
2374 vm->rendezvous_req_cpus = dest; in vm_smp_rendezvous()
2375 CPU_ZERO(&vm->rendezvous_done_cpus); in vm_smp_rendezvous()
2376 vm->rendezvous_arg = arg; in vm_smp_rendezvous()
2377 vm->rendezvous_func = func; in vm_smp_rendezvous()
2378 mtx_unlock(&vm->rendezvous_mtx); in vm_smp_rendezvous()
2381 * Wake up any sleeping vcpus and trigger a VM-exit in any running in vm_smp_rendezvous()
2384 for (i = 0; i < vm->maxcpus; i++) { in vm_smp_rendezvous()
2386 vcpu_notify_event(vm_vcpu(vm, i)); in vm_smp_rendezvous()
2393 vm_atpic(struct vm *vm) in vm_atpic() argument
2395 return (vm->vatpic); in vm_atpic()
2399 vm_atpit(struct vm *vm) in vm_atpit() argument
2401 return (vm->vatpit); in vm_atpit()
2405 vm_pmtmr(struct vm *vm) in vm_pmtmr() argument
2408 return (vm->vpmtmr); in vm_pmtmr()
2412 vm_rtc(struct vm *vm) in vm_rtc() argument
2415 return (vm->vrtc); in vm_rtc()
2527 * Return the amount of in-use and wired memory for the VM. Since
2539 vmspace_resident_count(vm_vmspace(vcpu->vm))); in vm_get_rescnt()
2549 pmap_wired_count(vmspace_pmap(vm_vmspace(vcpu->vm)))); in vm_get_wiredcnt()
2558 vm_snapshot_vcpus(struct vm *vm, struct vm_snapshot_meta *meta) in vm_snapshot_vcpus() argument
2566 maxcpus = vm_get_maxcpus(vm); in vm_snapshot_vcpus()
2568 vcpu = vm->vcpu[i]; in vm_snapshot_vcpus()
2598 vm_snapshot_vm(struct vm *vm, struct vm_snapshot_meta *meta) in vm_snapshot_vm() argument
2602 ret = vm_snapshot_vcpus(vm, meta); in vm_snapshot_vm()
2606 SNAPSHOT_VAR_OR_LEAVE(vm->startup_cpus, meta, ret, done); in vm_snapshot_vm()
2612 vm_snapshot_vcpu(struct vm *vm, struct vm_snapshot_meta *meta) in vm_snapshot_vcpu() argument
2620 maxcpus = vm_get_maxcpus(vm); in vm_snapshot_vcpu()
2622 vcpu = vm->vcpu[i]; in vm_snapshot_vcpu()
2642 vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta) in vm_snapshot_req() argument
2648 ret = vm_snapshot_vcpu(vm, meta); in vm_snapshot_req()
2651 ret = vm_snapshot_vm(vm, meta); in vm_snapshot_req()
2654 ret = vioapic_snapshot(vm_ioapic(vm), meta); in vm_snapshot_req()
2657 ret = vlapic_snapshot(vm, meta); in vm_snapshot_req()
2660 ret = vhpet_snapshot(vm_hpet(vm), meta); in vm_snapshot_req()
2663 ret = vatpic_snapshot(vm_atpic(vm), meta); in vm_snapshot_req()
2666 ret = vatpit_snapshot(vm_atpit(vm), meta); in vm_snapshot_req()
2669 ret = vpmtmr_snapshot(vm_pmtmr(vm), meta); in vm_snapshot_req()
2672 ret = vrtc_snapshot(vm_rtc(vm), meta); in vm_snapshot_req()
2689 vm_restore_time(struct vm *vm) in vm_restore_time() argument
2698 error = vhpet_restore_time(vm_hpet(vm)); in vm_restore_time()
2702 maxcpus = vm_get_maxcpus(vm); in vm_restore_time()
2704 vcpu = vm->vcpu[i]; in vm_restore_time()
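
Read in source order, the matches above trace the life cycle of a virtual machine in this file (apparently FreeBSD bhyve's vmm.c): vm_create() allocates the struct vm, sets the default 1-socket/1-core/1-thread topology and calls vm_init(); vm_alloc_vcpu() creates vCPUs on demand under vcpus_init_lock; vm_run() drives a vCPU until the next VM exit; vm_suspend() marks the VM suspended and notifies every active vCPU; vm_destroy() tears everything down through vm_cleanup(). The fragment below is only an illustrative sketch of that call order, written against the signatures visible in this listing; the function name vmm_lifecycle_sketch is made up, error handling is abbreviated, and the vmm-internal declarations (struct vm, struct vcpu, enum vm_suspend_how and the functions named above) are assumed to be in scope. It is not code from the file itself.

/*
 * Illustrative sketch, not part of the file above: the typical call
 * order for the functions matched in this listing.
 */
static int
vmm_lifecycle_sketch(void)
{
	struct vm *vm;
	struct vcpu *vcpu;
	int error;

	/* Allocate and zero the VM; vm_create() ends by calling
	 * vm_init(vm, true) to set up the vioapic, vhpet, vatpic, etc. */
	error = vm_create("example", &vm);
	if (error != 0)
		return (error);

	/* vCPU 0 is created lazily, under vm->vcpus_init_lock. */
	vcpu = vm_alloc_vcpu(vm, 0);
	if (vcpu == NULL) {
		vm_destroy(vm);
		return (ENXIO);
	}

	/*
	 * ... the ioctl path would now call vm_run(vcpu) repeatedly,
	 * handling rendezvous, paging and suspend exits in between ...
	 */

	/* Request a suspend (VM_SUSPEND_HALT is one of the
	 * enum vm_suspend_how values seen above), then destroy the VM,
	 * which releases everything through vm_cleanup(vm, true). */
	vm_suspend(vm, VM_SUSPEND_HALT);
	vm_destroy(vm);
	return (0);
}

One detail worth noting from the vm_alloc_vcpu() matches (source lines 457-482): the vCPU pointer is first read with atomic_load_acq_ptr(), and only if the slot is still NULL does the code take vcpus_init_lock (an sx lock), re-check, allocate, and publish the new vCPU with atomic_store_rel_ptr(). This is the usual double-checked publication pattern, so a lookup of an already-created vCPU can return without taking the lock.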