Lines matching full:vm (a whole-word index search; the hits below appear to come from FreeBSD's bhyve virtual machine monitor, sys/amd64/vmm/vmm.c, with source line numbers from the indexed revision)

47 #include <vm/vm.h>
48 #include <vm/vm_param.h>
49 #include <vm/vm_extern.h>
50 #include <vm/vm_object.h>
51 #include <vm/vm_page.h>
52 #include <vm/pmap.h>
53 #include <vm/vm_map.h>
54 #include <vm/vm_pager.h>
55 #include <vm/vm_kern.h>
56 #include <vm/vnode_pager.h>
57 #include <vm/swap_pager.h>
58 #include <vm/uma.h>
108 struct vm *vm; /* (o) */ member
112 uint64_t exitintinfo; /* (i) events pending at VM exit */
136 * (o) initialized the first time the VM is created
137 * (i) initialized when VM is created and when it is reinitialized
145 struct vm { struct
157 int suspend; /* (i) stop VM execution */
170 /* The following describe the vm cpu topology */ argument
179 VCPU_CTR0((vcpu)->vm, (vcpu)->vcpuid, format) argument
182 VCPU_CTR1((vcpu)->vm, (vcpu)->vcpuid, format, p1)
185 VCPU_CTR2((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2)
188 VCPU_CTR3((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3)
191 VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4)
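The VCPU_CTRn wrappers above (source lines 179-191) tag each trace record with the owning vm and vcpuid before forwarding up to four parameters. A minimal userland model follows; printf stands in for FreeBSD's KTR-backed CTRn macros (that backing, and the collapsed variadic form, are assumptions for brevity):

#include <stdio.h>

/*
 * Userland model of the VCPU_CTRn wrappers: tag the trace record with
 * the vcpu, then forward the format and parameters.  printf stands in
 * for the KTR-backed CTRn macros; ##__VA_ARGS__ (a gcc/clang
 * extension) collapses the fixed-arity CTR0..CTR4 family into one.
 */
#define VCPU_TRACE(vcpuid, fmt, ...) \
    printf("vcpu %d: " fmt "\n", (vcpuid), ##__VA_ARGS__)

int
main(void)
{
    VCPU_TRACE(0, "handled rendezvous");
    VCPU_TRACE(1, "vm exit, rip %#lx", 0x100aUL);
    return (0);
}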
218 DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
244 static MALLOC_DEFINE(M_VM, "vm", "vm");
259 "Halt VM if all vcpus execute HLT with interrupts disabled");
272 "WBINVD triggers a VM-exit");
282 VMM_STAT(VMEXIT_COUNT, "total number of vm exits");
283 VMM_STAT(VMEXIT_EXTINT, "vm exits due to external interrupt");
290 VMM_STAT(VMEXIT_INTR_WINDOW, "vm exits due to interrupt window opening");
291 VMM_STAT(VMEXIT_NMI_WINDOW, "vm exits due to nmi window opening");
294 VMM_STAT(VMEXIT_NESTED_FAULT, "vm exits due to nested page fault");
295 VMM_STAT(VMEXIT_INST_EMUL, "vm exits for instruction emulation");
296 VMM_STAT(VMEXIT_UNKNOWN, "number of vm exits for unknown reason");
299 VMM_STAT(VMEXIT_USERSPACE, "number of vm exits handled in userspace");
301 VMM_STAT(VMEXIT_EXCEPTION, "number of vm exits due to exceptions");
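The VMM_STAT lines register a human-readable description per vm-exit counter. A toy model that keeps the counters in a static table; the kernel registers them dynamically and exports them to userland, so this layout is purely illustrative:

#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of the VMM_STAT counters: one name and one 64-bit count
 * per exit reason.  The static table is an assumption for clarity.
 */
enum { VMEXIT_COUNT, VMEXIT_EXTINT, VMEXIT_MAX };

static struct vmexit_stat {
    const char *desc;
    uint64_t    val;
} stats[VMEXIT_MAX] = {
    [VMEXIT_COUNT]  = { "total number of vm exits", 0 },
    [VMEXIT_EXTINT] = { "vm exits due to external interrupt", 0 },
};

int
main(void)
{
    stats[VMEXIT_COUNT].val++;
    for (int i = 0; i < VMEXIT_MAX; i++)
        printf("%s: %ju\n", stats[i].desc, (uintmax_t)stats[i].val);
    return (0);
}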
346 vcpu_alloc(struct vm *vm, int vcpu_id) in vcpu_alloc() argument
350 KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus, in vcpu_alloc()
358 vcpu->vm = vm; in vcpu_alloc()
368 vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid); in vcpu_init()
500 vm_init(struct vm *vm, bool create) in vm_init() argument
502 vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace)); in vm_init()
503 vm->iommu = NULL; in vm_init()
504 vm->vioapic = vioapic_init(vm); in vm_init()
505 vm->vhpet = vhpet_init(vm); in vm_init()
506 vm->vatpic = vatpic_init(vm); in vm_init()
507 vm->vatpit = vatpit_init(vm); in vm_init()
508 vm->vpmtmr = vpmtmr_init(vm); in vm_init()
510 vm->vrtc = vrtc_init(vm); in vm_init()
512 CPU_ZERO(&vm->active_cpus); in vm_init()
513 CPU_ZERO(&vm->debug_cpus); in vm_init()
514 CPU_ZERO(&vm->startup_cpus); in vm_init()
516 vm->suspend = 0; in vm_init()
517 CPU_ZERO(&vm->suspended_cpus); in vm_init()
520 for (int i = 0; i < vm->maxcpus; i++) { in vm_init()
521 if (vm->vcpu[i] != NULL) in vm_init()
522 vcpu_init(vm->vcpu[i]); in vm_init()
528 vm_disable_vcpu_creation(struct vm *vm) in vm_disable_vcpu_creation() argument
530 sx_xlock(&vm->vcpus_init_lock); in vm_disable_vcpu_creation()
531 vm->dying = true; in vm_disable_vcpu_creation()
532 sx_xunlock(&vm->vcpus_init_lock); in vm_disable_vcpu_creation()
536 vm_alloc_vcpu(struct vm *vm, int vcpuid) in vm_alloc_vcpu() argument
540 if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm)) in vm_alloc_vcpu()
544 atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]); in vm_alloc_vcpu()
548 sx_xlock(&vm->vcpus_init_lock); in vm_alloc_vcpu()
549 vcpu = vm->vcpu[vcpuid]; in vm_alloc_vcpu()
550 if (vcpu == NULL && !vm->dying) { in vm_alloc_vcpu()
551 vcpu = vcpu_alloc(vm, vcpuid); in vm_alloc_vcpu()
558 atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid], in vm_alloc_vcpu()
561 sx_xunlock(&vm->vcpus_init_lock); in vm_alloc_vcpu()
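vm_alloc_vcpu() (source lines 536-561) is a classic double-checked publication: a lock-free acquire load serves the common case, and creators serialize on vcpus_init_lock, re-check, then publish the fully built vcpu with a release store. A userland sketch of the same pattern, with C11 atomics and a pthread mutex standing in for atomic(9) and the sx lock:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

#define MAXCPU 16

struct vcpu { int id; };

static _Atomic(struct vcpu *) vcpus[MAXCPU];
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static bool dying;    /* set once vcpu creation is disabled */

static struct vcpu *
alloc_vcpu(int id)
{
    struct vcpu *v;

    if (id < 0 || id >= MAXCPU)
        return (NULL);

    /* Fast path: the acquire load pairs with the release store below. */
    v = atomic_load_explicit(&vcpus[id], memory_order_acquire);
    if (v != NULL)
        return (v);

    /* Slow path: serialize creators and re-check under the lock. */
    pthread_mutex_lock(&init_lock);
    v = atomic_load_explicit(&vcpus[id], memory_order_relaxed);
    if (v == NULL && !dying) {
        v = calloc(1, sizeof(*v));
        if (v != NULL) {
            v->id = id;
            /* Publish only after the vcpu is fully initialized. */
            atomic_store_explicit(&vcpus[id], v, memory_order_release);
        }
    }
    pthread_mutex_unlock(&init_lock);
    return (v);
}

int
main(void)
{
    return (alloc_vcpu(3) != NULL ? 0 : 1);
}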
566 vm_slock_vcpus(struct vm *vm) in vm_slock_vcpus() argument
568 sx_slock(&vm->vcpus_init_lock); in vm_slock_vcpus()
572 vm_unlock_vcpus(struct vm *vm) in vm_unlock_vcpus() argument
574 sx_unlock(&vm->vcpus_init_lock); in vm_unlock_vcpus()
584 vm_create(const char *name, struct vm **retvm) in vm_create()
586 struct vm *vm; in vm_create() local
604 vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO); in vm_create()
605 strcpy(vm->name, name); in vm_create()
606 vm->vmspace = vmspace; in vm_create()
607 vm_mem_init(&vm->mem); in vm_create()
608 mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF); in vm_create()
609 sx_init(&vm->vcpus_init_lock, "vm vcpus"); in vm_create()
610 vm->vcpu = malloc(sizeof(*vm->vcpu) * vm_maxcpu, M_VM, M_WAITOK | in vm_create()
613 vm->sockets = 1; in vm_create()
614 vm->cores = cores_per_package; /* XXX backwards compatibility */ in vm_create()
615 vm->threads = threads_per_core; /* XXX backwards compatibility */ in vm_create()
616 vm->maxcpus = vm_maxcpu; in vm_create()
618 vm_init(vm, true); in vm_create()
620 *retvm = vm; in vm_create()
625 vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores, in vm_get_topology() argument
628 *sockets = vm->sockets; in vm_get_topology()
629 *cores = vm->cores; in vm_get_topology()
630 *threads = vm->threads; in vm_get_topology()
631 *maxcpus = vm->maxcpus; in vm_get_topology()
635 vm_get_maxcpus(struct vm *vm) in vm_get_maxcpus() argument
637 return (vm->maxcpus); in vm_get_maxcpus()
641 vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores, in vm_set_topology() argument
645 if ((sockets * cores * threads) > vm->maxcpus) in vm_set_topology()
647 vm->sockets = sockets; in vm_set_topology()
648 vm->cores = cores; in vm_set_topology()
649 vm->threads = threads; in vm_set_topology()
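vm_set_topology() rejects any topology whose sockets*cores*threads product exceeds maxcpus. A sketch of that validation which widens before multiplying so the uint16_t product cannot wrap (a defensive variant; whether the kernel instead relies on caller-side bounds is not visible in the listing):

#include <errno.h>
#include <stdint.h>

static int
set_topology(uint16_t maxcpus, uint16_t sockets, uint16_t cores,
    uint16_t threads)
{
    /* Widen first: three uint16_t factors can overflow plain int. */
    if ((uint64_t)sockets * cores * threads > maxcpus)
        return (EINVAL);
    /* ... store sockets/cores/threads as in the listing ... */
    return (0);
}

int
main(void)
{
    return (set_topology(32, 2, 8, 2));    /* 2*8*2 = 32 vcpus: accepted */
}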
654 vm_cleanup(struct vm *vm, bool destroy) in vm_cleanup() argument
657 vm_xlock_memsegs(vm); in vm_cleanup()
659 vm_assert_memseg_xlocked(vm); in vm_cleanup()
661 ppt_unassign_all(vm); in vm_cleanup()
663 if (vm->iommu != NULL) in vm_cleanup()
664 iommu_destroy_domain(vm->iommu); in vm_cleanup()
667 vrtc_cleanup(vm->vrtc); in vm_cleanup()
669 vrtc_reset(vm->vrtc); in vm_cleanup()
670 vpmtmr_cleanup(vm->vpmtmr); in vm_cleanup()
671 vatpit_cleanup(vm->vatpit); in vm_cleanup()
672 vhpet_cleanup(vm->vhpet); in vm_cleanup()
673 vatpic_cleanup(vm->vatpic); in vm_cleanup()
674 vioapic_cleanup(vm->vioapic); in vm_cleanup()
676 for (int i = 0; i < vm->maxcpus; i++) { in vm_cleanup()
677 if (vm->vcpu[i] != NULL) in vm_cleanup()
678 vcpu_cleanup(vm->vcpu[i], destroy); in vm_cleanup()
681 vmmops_cleanup(vm->cookie); in vm_cleanup()
683 vm_mem_cleanup(vm); in vm_cleanup()
686 vm_mem_destroy(vm); in vm_cleanup()
688 vmmops_vmspace_free(vm->vmspace); in vm_cleanup()
689 vm->vmspace = NULL; in vm_cleanup()
691 free(vm->vcpu, M_VM); in vm_cleanup()
692 sx_destroy(&vm->vcpus_init_lock); in vm_cleanup()
693 mtx_destroy(&vm->rendezvous_mtx); in vm_cleanup()
698 vm_destroy(struct vm *vm) in vm_destroy() argument
700 vm_cleanup(vm, true); in vm_destroy()
701 free(vm, M_VM); in vm_destroy()
705 vm_reinit(struct vm *vm) in vm_reinit() argument
712 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { in vm_reinit()
713 vm_cleanup(vm, false); in vm_reinit()
714 vm_init(vm, false); in vm_reinit()
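Together with the lock-annotation key at source lines 136-137, the vm_init()/vm_cleanup()/vm_reinit() trio shows the lifecycle split: "(o)" state is set up once at creation and torn down only on destroy, "(i)" state is reset on every (re)initialization, and vm_reinit() is just cleanup(destroy=false) followed by init(create=false), permitted only once every active vcpu has suspended. A toy model of that split (all names illustrative):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct toyvm {
    int *pages;     /* (o) allocated once, at create time */
    int  suspend;   /* (i) reset on every init */
};

static void
toyvm_init(struct toyvm *vm, bool create)
{
    if (create)
        vm->pages = calloc(64, sizeof(int));
    vm->suspend = 0;
}

static void
toyvm_cleanup(struct toyvm *vm, bool destroy)
{
    vm->suspend = 0;
    if (destroy) {
        free(vm->pages);
        vm->pages = NULL;
    }
}

static void
toyvm_reinit(struct toyvm *vm)
{
    /* Mirrors vm_reinit(): tear down and rebuild without destroying. */
    toyvm_cleanup(vm, false);
    toyvm_init(vm, false);
}

int
main(void)
{
    struct toyvm vm;

    memset(&vm, 0, sizeof(vm));
    toyvm_init(&vm, true);
    toyvm_reinit(&vm);
    toyvm_cleanup(&vm, true);
    return (0);
}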
724 vm_name(struct vm *vm) in vm_name() argument
726 return (vm->name); in vm_name()
730 vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa) in vm_map_mmio() argument
734 if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL) in vm_map_mmio()
741 vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len) in vm_unmap_mmio() argument
744 vmm_mmio_free(vm->vmspace, gpa, len); in vm_unmap_mmio()
749 vm_iommu_map(struct vm *vm) in vm_iommu_map() argument
755 sx_assert(&vm->mem.mem_segs_lock, SX_LOCKED); in vm_iommu_map()
758 if (!vm_memseg_sysmem(vm, i)) in vm_iommu_map()
761 mm = &vm->mem.mem_maps[i]; in vm_iommu_map()
770 hpa = pmap_extract(vmspace_pmap(vm->vmspace), gpa); in vm_iommu_map()
785 ("vm_iommu_map: vm %p gpa %jx hpa %jx not wired", in vm_iommu_map()
786 vm, (uintmax_t)gpa, (uintmax_t)hpa)); in vm_iommu_map()
788 iommu_create_mapping(vm->iommu, gpa, hpa, PAGE_SIZE); in vm_iommu_map()
796 vm_iommu_unmap(struct vm *vm) in vm_iommu_unmap() argument
802 sx_assert(&vm->mem.mem_segs_lock, SX_LOCKED); in vm_iommu_unmap()
805 if (!vm_memseg_sysmem(vm, i)) in vm_iommu_unmap()
808 mm = &vm->mem.mem_maps[i]; in vm_iommu_unmap()
818 vmspace_pmap(vm->vmspace), gpa))), in vm_iommu_unmap()
819 ("vm_iommu_unmap: vm %p gpa %jx not wired", in vm_iommu_unmap()
820 vm, (uintmax_t)gpa)); in vm_iommu_unmap()
821 iommu_remove_mapping(vm->iommu, gpa, PAGE_SIZE); in vm_iommu_unmap()
829 iommu_invalidate_tlb(vm->iommu); in vm_iommu_unmap()
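vm_iommu_map()/vm_iommu_unmap() walk each system-memory segment page by page, translate the guest physical address through the host pmap, and install or remove a 1:1 mapping in the IOMMU domain, asserting the pages are wired. A sketch of the map-side walk; translate() and iommu_map() are hypothetical stand-ins for pmap_extract() and iommu_create_mapping():

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static uint64_t
translate(uint64_t gpa)
{
    return (gpa + 0x100000000ULL);    /* fake gpa -> hpa offset */
}

static void
iommu_map(uint64_t gpa, uint64_t hpa, size_t len)
{
    printf("map gpa %#jx -> hpa %#jx (%zu bytes)\n",
        (uintmax_t)gpa, (uintmax_t)hpa, len);
}

/* Install a 1:1 device mapping for one sysmem segment, a page at a time. */
static void
map_segment(uint64_t gpa, uint64_t len)
{
    for (uint64_t off = 0; off < len; off += PAGE_SIZE)
        iommu_map(gpa + off, translate(gpa + off), PAGE_SIZE);
}

int
main(void)
{
    map_segment(0, 2 * PAGE_SIZE);
    return (0);
}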
833 vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func) in vm_unassign_pptdev() argument
837 error = ppt_unassign_device(vm, bus, slot, func); in vm_unassign_pptdev()
841 if (ppt_assigned_devices(vm) == 0) in vm_unassign_pptdev()
842 vm_iommu_unmap(vm); in vm_unassign_pptdev()
848 vm_assign_pptdev(struct vm *vm, int bus, int slot, int func) in vm_assign_pptdev() argument
854 if (ppt_assigned_devices(vm) == 0) { in vm_assign_pptdev()
855 KASSERT(vm->iommu == NULL, in vm_assign_pptdev()
857 maxaddr = vmm_sysmem_maxaddr(vm); in vm_assign_pptdev()
858 vm->iommu = iommu_create_domain(maxaddr); in vm_assign_pptdev()
859 if (vm->iommu == NULL) in vm_assign_pptdev()
861 vm_iommu_map(vm); in vm_assign_pptdev()
864 error = ppt_assign_device(vm, bus, slot, func); in vm_assign_pptdev()
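vm_assign_pptdev() creates the IOMMU domain lazily when the first passthrough device is assigned and maps guest memory into it; vm_unassign_pptdev() unmaps when the last device goes away (the domain itself is freed later, in vm_cleanup()). A sketch of that count-gated lifecycle, with domain_create()/domain_unmap() as hypothetical stand-ins for the iommu_*() calls:

#include <errno.h>
#include <stddef.h>

struct domain { int unused; };
static struct domain dom0;

static struct domain *iommu;    /* lazily created */
static int ndevs;               /* passthrough devices assigned */

static struct domain *domain_create(void) { return (&dom0); }
static void domain_unmap(struct domain *d) { (void)d; }

static int
pptdev_assign(void)
{
    if (ndevs == 0) {
        iommu = domain_create();    /* first device: build the domain */
        if (iommu == NULL)
            return (ENXIO);
        /* ... map all sysmem segments, as vm_iommu_map() does ... */
    }
    ndevs++;
    return (0);
}

static void
pptdev_unassign(void)
{
    if (--ndevs == 0)
        domain_unmap(iommu);    /* last device: tear down the mappings */
}

int
main(void)
{
    if (pptdev_assign() != 0)
        return (1);
    pptdev_unassign();
    return (0);
}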
1084 struct vm *vm = vcpu->vm; in vm_handle_rendezvous() local
1091 mtx_lock(&vm->rendezvous_mtx); in vm_handle_rendezvous()
1092 while (vm->rendezvous_func != NULL) { in vm_handle_rendezvous()
1094 CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus, &vm->active_cpus); in vm_handle_rendezvous()
1096 if (CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) && in vm_handle_rendezvous()
1097 !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) { in vm_handle_rendezvous()
1099 (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg); in vm_handle_rendezvous()
1100 CPU_SET(vcpuid, &vm->rendezvous_done_cpus); in vm_handle_rendezvous()
1102 if (CPU_CMP(&vm->rendezvous_req_cpus, in vm_handle_rendezvous()
1103 &vm->rendezvous_done_cpus) == 0) { in vm_handle_rendezvous()
1105 CPU_ZERO(&vm->rendezvous_req_cpus); in vm_handle_rendezvous()
1106 vm->rendezvous_func = NULL; in vm_handle_rendezvous()
1107 wakeup(&vm->rendezvous_func); in vm_handle_rendezvous()
1111 mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0, in vm_handle_rendezvous()
1114 mtx_unlock(&vm->rendezvous_mtx); in vm_handle_rendezvous()
1118 mtx_lock(&vm->rendezvous_mtx); in vm_handle_rendezvous()
1121 mtx_unlock(&vm->rendezvous_mtx); in vm_handle_rendezvous()
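vm_handle_rendezvous() is the target side of the rendezvous protocol: each vcpu with a bit in rendezvous_req_cpus runs the callback once, records itself in rendezvous_done_cpus, and the last one to finish retires the rendezvous and wakes the sleepers; vm_smp_rendezvous() (further down the listing) is the initiating side. A userland sketch of both sides, with a bitmask for cpuset_t and a condition variable for mtx_sleep()/wakeup(); the thread count and callback are illustrative, and in the kernel vcpu_notify_event() is what forces each target to reach the handler:

#include <pthread.h>
#include <stdio.h>

#define NVCPU 4

static pthread_mutex_t rv_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t rv_cv = PTHREAD_COND_INITIALIZER;
static unsigned rv_req, rv_done;    /* requested / completed vcpus */
static void (*rv_func)(int vcpuid, void *arg);
static void *rv_arg;

/* Target side: called by every vcpu on its way through the run loop. */
static void
handle_rendezvous(int vcpuid)
{
    pthread_mutex_lock(&rv_mtx);
    while (rv_func != NULL) {
        if ((rv_req & (1u << vcpuid)) != 0 &&
            (rv_done & (1u << vcpuid)) == 0) {
            (*rv_func)(vcpuid, rv_arg);
            rv_done |= 1u << vcpuid;
        }
        if (rv_done == rv_req) {
            /* Last one in: retire the rendezvous, wake everyone. */
            rv_func = NULL;
            rv_req = rv_done = 0;
            pthread_cond_broadcast(&rv_cv);
            break;
        }
        pthread_cond_wait(&rv_cv, &rv_mtx);
    }
    pthread_mutex_unlock(&rv_mtx);
}

/* Initiator side, mirroring vm_smp_rendezvous(). */
static void
smp_rendezvous(unsigned dest, void (*func)(int, void *), void *arg)
{
    pthread_mutex_lock(&rv_mtx);
    rv_req = dest;
    rv_done = 0;
    rv_arg = arg;
    rv_func = func;
    pthread_mutex_unlock(&rv_mtx);
    /* The kernel now kicks each target vcpu so it reaches the handler. */
}

static void
flush_one(int vcpuid, void *arg)
{
    (void)arg;
    printf("vcpu %d ran rendezvous func\n", vcpuid);
}

static void *
vcpu_thread(void *idp)
{
    handle_rendezvous(*(int *)idp);
    return (NULL);
}

int
main(void)
{
    pthread_t t[NVCPU];
    int ids[NVCPU];

    smp_rendezvous((1u << NVCPU) - 1, flush_one, NULL);
    for (int i = 0; i < NVCPU; i++) {
        ids[i] = i;
        pthread_create(&t[i], NULL, vcpu_thread, &ids[i]);
    }
    for (int i = 0; i < NVCPU; i++)
        pthread_join(t[i], NULL);
    return (0);
}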
1131 struct vm *vm = vcpu->vm; in vm_handle_hlt() local
1142 KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted")); in vm_handle_hlt()
1155 if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle) in vm_handle_hlt()
1184 CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus); in vm_handle_hlt()
1186 if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) { in vm_handle_hlt()
1209 &vm->halted_cpus); in vm_handle_hlt()
1218 CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus); in vm_handle_hlt()
1223 vm_suspend(vm, VM_SUSPEND_HALT); in vm_handle_hlt()
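vm_handle_hlt() parks a halted vcpu and, once halted_cpus grows to equal active_cpus, declares the guest unable to ever wake itself and suspends it with VM_SUSPEND_HALT, gated by the halt-detection tunable shown at source line 259. The core test in miniature; an unsigned bitmask models cpuset_t, and the kernel version runs under the vcpu lock with atomic cpuset ops:

#include <stdbool.h>

static unsigned active_cpus, halted_cpus;
static int halt_detection_enabled = 1;

static bool
vcpu_halted(int vcpuid)
{
    halted_cpus |= 1u << vcpuid;
    /* All active vcpus idle with interrupts off: suspend the VM. */
    return (halt_detection_enabled && halted_cpus == active_cpus);
}

int
main(void)
{
    active_cpus = 0x3;        /* vcpus 0 and 1 */
    (void)vcpu_halted(0);     /* not everyone halted yet */
    return (vcpu_halted(1) ? 0 : 1);    /* all halted: would suspend */
}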
1231 struct vm *vm = vcpu->vm; in vm_handle_paging() local
1247 rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace), in vm_handle_paging()
1257 map = &vm->vmspace->vm_map; in vm_handle_paging()
1348 struct vm *vm = vcpu->vm; in vm_handle_suspend() local
1355 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus); in vm_handle_suspend()
1360 * Since a VM may be suspended at any time including when one or in vm_handle_suspend()
1366 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { in vm_handle_suspend()
1371 if (vm->rendezvous_func == NULL) { in vm_handle_suspend()
1393 for (i = 0; i < vm->maxcpus; i++) { in vm_handle_suspend()
1394 if (CPU_ISSET(i, &vm->suspended_cpus)) { in vm_handle_suspend()
1395 vcpu_notify_event(vm_vcpu(vm, i), false); in vm_handle_suspend()
1449 vm_suspend(struct vm *vm, enum vm_suspend_how how) in vm_suspend() argument
1456 if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) { in vm_suspend()
1457 VM_CTR2(vm, "virtual machine already suspended %d/%d", in vm_suspend()
1458 vm->suspend, how); in vm_suspend()
1462 VM_CTR1(vm, "virtual machine successfully suspended %d", how); in vm_suspend()
1467 for (i = 0; i < vm->maxcpus; i++) { in vm_suspend()
1468 if (CPU_ISSET(i, &vm->active_cpus)) in vm_suspend()
1469 vcpu_notify_event(vm_vcpu(vm, i), false); in vm_suspend()
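vm_suspend() latches the suspend reason exactly once with a compare-and-set: the first caller wins and kicks every active vcpu, and later callers see the already-installed value and fail (the kernel logs "already suspended"). The same one-shot latch in C11 atomics, with the return convention simplified:

#include <stdatomic.h>
#include <stdio.h>

enum suspend_how { SUSPEND_NONE, SUSPEND_RESET, SUSPEND_HALT };

static _Atomic int suspend_how;

static int
vm_suspend_once(int how)
{
    int expected = SUSPEND_NONE;

    if (!atomic_compare_exchange_strong(&suspend_how, &expected, how)) {
        printf("already suspended %d/%d\n", expected, how);
        return (-1);
    }
    /* Winner: now notify every active vcpu so it exits the guest. */
    return (0);
}

int
main(void)
{
    vm_suspend_once(SUSPEND_HALT);
    return (vm_suspend_once(SUSPEND_RESET) == -1 ? 0 : 1);
}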
1478 struct vm *vm = vcpu->vm; in vm_exit_suspended() local
1481 KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST, in vm_exit_suspended()
1482 ("vm_exit_suspended: invalid suspend type %d", vm->suspend)); in vm_exit_suspended()
1488 vmexit->u.suspended.how = vm->suspend; in vm_exit_suspended()
1541 struct vm *vm = vcpu->vm; in vm_run() local
1552 if (!CPU_ISSET(vcpuid, &vm->active_cpus)) in vm_run()
1555 if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) in vm_run()
1558 pmap = vmspace_pmap(vm->vmspace); in vm_run()
1560 evinfo.rptr = &vm->rendezvous_req_cpus; in vm_run()
1561 evinfo.sptr = &vm->suspend; in vm_run()
1597 vioapic_process_eoi(vm, vme->u.ioapic_eoi.vector); in vm_run()
1777 vm_suspend(vcpu->vm, VM_SUSPEND_TRIPLEFAULT); in nested_fault()
2014 struct vm *
2017 return (vcpu->vm); in vcpu_vm()
2027 vm_vcpu(struct vm *vm, int vcpuid) in vm_vcpu() argument
2029 return (vm->vcpu[vcpuid]); in vm_vcpu()
2039 vm_ioapic(struct vm *vm) in vm_ioapic() argument
2042 return (vm->vioapic); in vm_ioapic()
2046 vm_hpet(struct vm *vm) in vm_hpet() argument
2049 return (vm->vhpet); in vm_hpet()
2095 vm_iommu_domain(struct vm *vm) in vm_iommu_domain() argument
2098 return (vm->iommu); in vm_iommu_domain()
2130 struct vm *vm = vcpu->vm; in vm_activate_cpu() local
2132 if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_activate_cpu()
2136 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus); in vm_activate_cpu()
2141 vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu) in vm_suspend_cpu() argument
2144 vm->debug_cpus = vm->active_cpus; in vm_suspend_cpu()
2145 for (int i = 0; i < vm->maxcpus; i++) { in vm_suspend_cpu()
2146 if (CPU_ISSET(i, &vm->active_cpus)) in vm_suspend_cpu()
2147 vcpu_notify_event(vm_vcpu(vm, i), false); in vm_suspend_cpu()
2150 if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_suspend_cpu()
2153 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_suspend_cpu()
2160 vm_resume_cpu(struct vm *vm, struct vcpu *vcpu) in vm_resume_cpu() argument
2164 CPU_ZERO(&vm->debug_cpus); in vm_resume_cpu()
2166 if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus)) in vm_resume_cpu()
2169 CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_resume_cpu()
2178 return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus)); in vcpu_debugged()
2182 vm_active_cpus(struct vm *vm) in vm_active_cpus() argument
2185 return (vm->active_cpus); in vm_active_cpus()
2189 vm_debug_cpus(struct vm *vm) in vm_debug_cpus() argument
2192 return (vm->debug_cpus); in vm_debug_cpus()
2196 vm_suspended_cpus(struct vm *vm) in vm_suspended_cpus() argument
2199 return (vm->suspended_cpus); in vm_suspended_cpus()
2207 vm_start_cpus(struct vm *vm, const cpuset_t *tostart) in vm_start_cpus() argument
2211 mtx_lock(&vm->rendezvous_mtx); in vm_start_cpus()
2212 CPU_AND(&set, &vm->startup_cpus, tostart); in vm_start_cpus()
2213 CPU_ANDNOT(&vm->startup_cpus, &vm->startup_cpus, &set); in vm_start_cpus()
2214 mtx_unlock(&vm->rendezvous_mtx); in vm_start_cpus()
2219 vm_await_start(struct vm *vm, const cpuset_t *waiting) in vm_await_start() argument
2221 mtx_lock(&vm->rendezvous_mtx); in vm_await_start()
2222 CPU_OR(&vm->startup_cpus, &vm->startup_cpus, waiting); in vm_await_start()
2223 mtx_unlock(&vm->rendezvous_mtx); in vm_await_start()
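vm_start_cpus()/vm_await_start() show a claim-and-clear idiom on the startup set: waiters OR themselves in, and the starter takes the intersection with AND, then erases exactly that subset with AND-NOT, all under rendezvous_mtx. In miniature, with an unsigned bitmask for cpuset_t:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned startup_cpus;

static void
await_start(unsigned waiting)
{
    pthread_mutex_lock(&lock);
    startup_cpus |= waiting;            /* CPU_OR: park these vcpus */
    pthread_mutex_unlock(&lock);
}

static unsigned
start_cpus(unsigned tostart)
{
    unsigned set;

    pthread_mutex_lock(&lock);
    set = startup_cpus & tostart;       /* CPU_AND: claimable subset */
    startup_cpus &= ~set;               /* CPU_ANDNOT: clear the claim */
    pthread_mutex_unlock(&lock);
    return (set);
}

int
main(void)
{
    await_start(0x6);                   /* vcpus 1 and 2 park */
    return (start_cpus(0x2) == 0x2 ? 0 : 1);
}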
2301 vm_vmspace(struct vm *vm) in vm_vmspace() argument
2303 return (vm->vmspace); in vm_vmspace()
2307 vm_mem(struct vm *vm) in vm_mem() argument
2309 return (&vm->mem); in vm_mem()
2313 vm_apicid2vcpuid(struct vm *vm, int apicid) in vm_apicid2vcpuid() argument
2325 struct vm *vm = vcpu->vm; in vm_smp_rendezvous() local
2334 mtx_lock(&vm->rendezvous_mtx); in vm_smp_rendezvous()
2335 if (vm->rendezvous_func != NULL) { in vm_smp_rendezvous()
2342 mtx_unlock(&vm->rendezvous_mtx); in vm_smp_rendezvous()
2348 KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous " in vm_smp_rendezvous()
2352 vm->rendezvous_req_cpus = dest; in vm_smp_rendezvous()
2353 CPU_ZERO(&vm->rendezvous_done_cpus); in vm_smp_rendezvous()
2354 vm->rendezvous_arg = arg; in vm_smp_rendezvous()
2355 vm->rendezvous_func = func; in vm_smp_rendezvous()
2356 mtx_unlock(&vm->rendezvous_mtx); in vm_smp_rendezvous()
2359 * Wake up any sleeping vcpus and trigger a VM-exit in any running in vm_smp_rendezvous()
2362 for (i = 0; i < vm->maxcpus; i++) { in vm_smp_rendezvous()
2364 vcpu_notify_event(vm_vcpu(vm, i), false); in vm_smp_rendezvous()
2371 vm_atpic(struct vm *vm) in vm_atpic() argument
2373 return (vm->vatpic); in vm_atpic()
2377 vm_atpit(struct vm *vm) in vm_atpit() argument
2379 return (vm->vatpit); in vm_atpit()
2383 vm_pmtmr(struct vm *vm) in vm_pmtmr() argument
2386 return (vm->vpmtmr); in vm_pmtmr()
2390 vm_rtc(struct vm *vm) in vm_rtc() argument
2393 return (vm->vrtc); in vm_rtc()
2505 * Return the amount of in-use and wired memory for the VM. Since
2517 vmspace_resident_count(vcpu->vm->vmspace)); in vm_get_rescnt()
2527 pmap_wired_count(vmspace_pmap(vcpu->vm->vmspace))); in vm_get_wiredcnt()
2536 vm_snapshot_vcpus(struct vm *vm, struct vm_snapshot_meta *meta) in vm_snapshot_vcpus() argument
2544 maxcpus = vm_get_maxcpus(vm); in vm_snapshot_vcpus()
2546 vcpu = vm->vcpu[i]; in vm_snapshot_vcpus()
2576 vm_snapshot_vm(struct vm *vm, struct vm_snapshot_meta *meta) in vm_snapshot_vm() argument
2580 ret = vm_snapshot_vcpus(vm, meta); in vm_snapshot_vm()
2584 SNAPSHOT_VAR_OR_LEAVE(vm->startup_cpus, meta, ret, done); in vm_snapshot_vm()
2590 vm_snapshot_vcpu(struct vm *vm, struct vm_snapshot_meta *meta) in vm_snapshot_vcpu() argument
2598 maxcpus = vm_get_maxcpus(vm); in vm_snapshot_vcpu()
2600 vcpu = vm->vcpu[i]; in vm_snapshot_vcpu()
2620 vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta) in vm_snapshot_req() argument
2626 ret = vm_snapshot_vcpu(vm, meta); in vm_snapshot_req()
2629 ret = vm_snapshot_vm(vm, meta); in vm_snapshot_req()
2632 ret = vioapic_snapshot(vm_ioapic(vm), meta); in vm_snapshot_req()
2635 ret = vlapic_snapshot(vm, meta); in vm_snapshot_req()
2638 ret = vhpet_snapshot(vm_hpet(vm), meta); in vm_snapshot_req()
2641 ret = vatpic_snapshot(vm_atpic(vm), meta); in vm_snapshot_req()
2644 ret = vatpit_snapshot(vm_atpit(vm), meta); in vm_snapshot_req()
2647 ret = vpmtmr_snapshot(vm_pmtmr(vm), meta); in vm_snapshot_req()
2650 ret = vrtc_snapshot(vm_rtc(vm), meta); in vm_snapshot_req()
2667 vm_restore_time(struct vm *vm) in vm_restore_time() argument
2676 error = vhpet_restore_time(vm_hpet(vm)); in vm_restore_time()
2680 maxcpus = vm_get_maxcpus(vm); in vm_restore_time()
2682 vcpu = vm->vcpu[i]; in vm_restore_time()