Lines matching full:vm

51 #include <vm/vm.h>
52 #include <vm/vm_object.h>
53 #include <vm/vm_page.h>
54 #include <vm/pmap.h>
55 #include <vm/vm_map.h>
56 #include <vm/vm_extern.h>
57 #include <vm/vm_param.h>
65 #include <machine/vm.h>
90 struct vm *vm; /* (o) */ member
112 * (o) initialized the first time the VM is created
113 * (i) initialized when VM is created and when it is reinitialized
116 struct vm { struct
120 int suspend; /* (i) stop VM execution */
130 /* The following describe the vm cpu topology */ argument
158 VMM_STAT(VMEXIT_COUNT, "total number of vm exits"); argument
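Read together, the struct vm hits above imply roughly the following layout. This is an abridged sketch reconstructed only from the matched lines: field order, array bounds (VM_MAX_NAMELEN, VM_MAX_MMIO_REGIONS) and any omitted members are assumptions, and the (o)/(i) tags are inferred from where each field is touched, following the convention quoted above ((o) set up once when the VM is first created, (i) reset on every vm_init()).

    /* Abridged sketch of struct vm; bounds, tags and omitted fields are assumptions. */
    struct vm {
        void              *cookie;            /* (i) vmmops-specific data */
        struct vcpu       **vcpu;             /* (o) array of maxcpus vcpu pointers */
        volatile cpuset_t active_cpus;        /* (i) vcpus that have been started */
        volatile cpuset_t debug_cpus;         /* (i) vcpus stopped for debug */
        int               suspend;            /* (i) stop VM execution */
        volatile cpuset_t suspended_cpus;     /* (i) vcpus that acknowledged suspend */
        struct vmspace    *vmspace;           /* (o) guest's address space */
        struct vm_mem     mem;                /* (o) guest memory */
        char              name[VM_MAX_NAMELEN]; /* (o) virtual machine name */
        struct vm_mmio_region mmio_region[VM_MAX_MMIO_REGIONS]; /* (i) */
        /* The following describe the vm cpu topology */
        uint16_t          sockets;            /* (o) num of sockets */
        uint16_t          cores;              /* (o) num of cores per socket */
        uint16_t          threads;            /* (o) num of threads per core */
        uint16_t          maxcpus;            /* (o) max pluggable cpus */
        struct sx         vcpus_init_lock;    /* (o) serializes vcpu creation */
        bool              dying;              /* (o) vcpu creation disabled */
    };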
181 vcpu_alloc(struct vm *vm, int vcpu_id) in vcpu_alloc() argument
185 KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus, in vcpu_alloc()
193 vcpu->vm = vm; in vcpu_alloc()
202 vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid); in vcpu_init()
284 vm_init(struct vm *vm, bool create) in vm_init() argument
288 vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace)); in vm_init()
289 MPASS(vm->cookie != NULL); in vm_init()
291 CPU_ZERO(&vm->active_cpus); in vm_init()
292 CPU_ZERO(&vm->debug_cpus); in vm_init()
294 vm->suspend = 0; in vm_init()
295 CPU_ZERO(&vm->suspended_cpus); in vm_init()
297 memset(vm->mmio_region, 0, sizeof(vm->mmio_region)); in vm_init()
300 for (i = 0; i < vm->maxcpus; i++) { in vm_init()
301 if (vm->vcpu[i] != NULL) in vm_init()
302 vcpu_init(vm->vcpu[i]); in vm_init()
308 vm_disable_vcpu_creation(struct vm *vm) in vm_disable_vcpu_creation() argument
310 sx_xlock(&vm->vcpus_init_lock); in vm_disable_vcpu_creation()
311 vm->dying = true; in vm_disable_vcpu_creation()
312 sx_xunlock(&vm->vcpus_init_lock); in vm_disable_vcpu_creation()
316 vm_alloc_vcpu(struct vm *vm, int vcpuid) in vm_alloc_vcpu() argument
320 if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm)) in vm_alloc_vcpu()
324 if (vcpuid >= aplic_max_cpu_count(vm->cookie)) in vm_alloc_vcpu()
328 atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]); in vm_alloc_vcpu()
332 sx_xlock(&vm->vcpus_init_lock); in vm_alloc_vcpu()
333 vcpu = vm->vcpu[vcpuid]; in vm_alloc_vcpu()
334 if (vcpu == NULL && !vm->dying) { in vm_alloc_vcpu()
335 vcpu = vcpu_alloc(vm, vcpuid); in vm_alloc_vcpu()
342 atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid], in vm_alloc_vcpu()
345 sx_xunlock(&vm->vcpus_init_lock); in vm_alloc_vcpu()
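The vm_alloc_vcpu() hits above outline a lazy, double-checked allocation scheme: a lock-free acquire load first, then the sx lock for the slow path, and finally a release store that publishes the fully initialized vcpu. The condensed sketch below is reconstructed from those lines; control flow is compressed and the comments are mine, not the file's.

    struct vcpu *
    vm_alloc_vcpu(struct vm *vm, int vcpuid)
    {
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm))
            return (NULL);

        /* vcpuids beyond what the emulated APLIC supports are rejected. */
        if (vcpuid >= aplic_max_cpu_count(vm->cookie))
            return (NULL);

        /* Fast path: acquire load pairs with the release store below. */
        vcpu = (struct vcpu *)
            atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]);
        if (vcpu != NULL)
            return (vcpu);

        /* Slow path: serialize creation and re-check under the sx lock. */
        sx_xlock(&vm->vcpus_init_lock);
        vcpu = vm->vcpu[vcpuid];
        if (vcpu == NULL && !vm->dying) {
            vcpu = vcpu_alloc(vm, vcpuid);
            vcpu_init(vcpu);

            /* Publish only after the vcpu is fully initialized. */
            atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid],
                (uintptr_t)vcpu);
        }
        sx_xunlock(&vm->vcpus_init_lock);
        return (vcpu);
    }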
350 vm_slock_vcpus(struct vm *vm) in vm_slock_vcpus() argument
352 sx_slock(&vm->vcpus_init_lock); in vm_slock_vcpus()
356 vm_unlock_vcpus(struct vm *vm) in vm_unlock_vcpus() argument
358 sx_unlock(&vm->vcpus_init_lock); in vm_unlock_vcpus()
362 vm_create(const char *name, struct vm **retvm) in vm_create()
364 struct vm *vm; in vm_create() local
381 vm = malloc(sizeof(struct vm), M_VMM, M_WAITOK | M_ZERO); in vm_create()
382 strcpy(vm->name, name); in vm_create()
383 vm->vmspace = vmspace; in vm_create()
384 vm_mem_init(&vm->mem); in vm_create()
385 sx_init(&vm->vcpus_init_lock, "vm vcpus"); in vm_create()
387 vm->sockets = 1; in vm_create()
388 vm->cores = 1; /* XXX backwards compatibility */ in vm_create()
389 vm->threads = 1; /* XXX backwards compatibility */ in vm_create()
390 vm->maxcpus = vm_maxcpu; in vm_create()
392 vm->vcpu = malloc(sizeof(*vm->vcpu) * vm->maxcpus, M_VMM, in vm_create()
395 vm_init(vm, true); in vm_create()
397 *retvm = vm; in vm_create()
402 vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores, in vm_get_topology() argument
405 *sockets = vm->sockets; in vm_get_topology()
406 *cores = vm->cores; in vm_get_topology()
407 *threads = vm->threads; in vm_get_topology()
408 *maxcpus = vm->maxcpus; in vm_get_topology()
412 vm_get_maxcpus(struct vm *vm) in vm_get_maxcpus() argument
414 return (vm->maxcpus); in vm_get_maxcpus()
418 vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores, in vm_set_topology() argument
422 if ((sockets * cores * threads) > vm->maxcpus) in vm_set_topology()
424 vm->sockets = sockets; in vm_set_topology()
425 vm->cores = cores; in vm_set_topology()
426 vm->threads = threads; in vm_set_topology()
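The only validation visible in the vm_set_topology() hits is that the product of sockets, cores and threads may not exceed maxcpus. A minimal sketch follows; the parameter list is condensed to the fields shown above and the EINVAL return value is an assumption.

    int
    vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
        uint16_t threads)
    {
        /* The topology may not describe more vcpus than the VM supports. */
        if ((sockets * cores * threads) > vm->maxcpus)
            return (EINVAL);    /* assumed errno */
        vm->sockets = sockets;
        vm->cores = cores;
        vm->threads = threads;
        return (0);
    }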
431 vm_cleanup(struct vm *vm, bool destroy) in vm_cleanup() argument
436 vm_xlock_memsegs(vm); in vm_cleanup()
438 vm_assert_memseg_xlocked(vm); in vm_cleanup()
440 aplic_detach_from_vm(vm->cookie); in vm_cleanup()
442 for (i = 0; i < vm->maxcpus; i++) { in vm_cleanup()
443 if (vm->vcpu[i] != NULL) in vm_cleanup()
444 vcpu_cleanup(vm->vcpu[i], destroy); in vm_cleanup()
447 vmmops_cleanup(vm->cookie); in vm_cleanup()
449 vm_mem_cleanup(vm); in vm_cleanup()
451 vm_mem_destroy(vm); in vm_cleanup()
453 vmmops_vmspace_free(vm->vmspace); in vm_cleanup()
454 vm->vmspace = NULL; in vm_cleanup()
456 for (i = 0; i < vm->maxcpus; i++) in vm_cleanup()
457 free(vm->vcpu[i], M_VMM); in vm_cleanup()
458 free(vm->vcpu, M_VMM); in vm_cleanup()
459 sx_destroy(&vm->vcpus_init_lock); in vm_cleanup()
464 vm_destroy(struct vm *vm) in vm_destroy() argument
467 vm_cleanup(vm, true); in vm_destroy()
469 free(vm, M_VMM); in vm_destroy()
473 vm_reinit(struct vm *vm) in vm_reinit() argument
480 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { in vm_reinit()
481 vm_cleanup(vm, false); in vm_reinit()
482 vm_init(vm, false); in vm_reinit()
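The vm_reinit() hits show that a reset is only permitted once every active vcpu has parked itself in the suspended set; vm_cleanup(vm, false) then tears down per-run state without destroying the vmspace or vcpus, and vm_init(vm, false) rebuilds it. A condensed sketch, with EBUSY assumed as the refusal errno:

    int
    vm_reinit(struct vm *vm)
    {
        int error;

        /* Reset only when all active vcpus are already suspended. */
        if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
            vm_cleanup(vm, false);  /* keep the vmspace and vcpus */
            vm_init(vm, false);
            error = 0;
        } else {
            error = EBUSY;          /* assumed errno while vcpus still run */
        }

        return (error);
    }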
492 vm_name(struct vm *vm) in vm_name() argument
494 return (vm->name); in vm_name()
505 vm_register_inst_handler(struct vm *vm, uint64_t start, uint64_t size, in vm_register_inst_handler() argument
510 for (i = 0; i < nitems(vm->mmio_region); i++) { in vm_register_inst_handler()
511 if (vm->mmio_region[i].start == 0 && in vm_register_inst_handler()
512 vm->mmio_region[i].end == 0) { in vm_register_inst_handler()
513 vm->mmio_region[i].start = start; in vm_register_inst_handler()
514 vm->mmio_region[i].end = start + size; in vm_register_inst_handler()
515 vm->mmio_region[i].read = mmio_read; in vm_register_inst_handler()
516 vm->mmio_region[i].write = mmio_write; in vm_register_inst_handler()
525 vm_deregister_inst_handler(struct vm *vm, uint64_t start, uint64_t size) in vm_deregister_inst_handler() argument
529 for (i = 0; i < nitems(vm->mmio_region); i++) { in vm_deregister_inst_handler()
530 if (vm->mmio_region[i].start == start && in vm_deregister_inst_handler()
531 vm->mmio_region[i].end == start + size) { in vm_deregister_inst_handler()
532 memset(&vm->mmio_region[i], 0, in vm_deregister_inst_handler()
533 sizeof(vm->mmio_region[i])); in vm_deregister_inst_handler()
545 struct vm *vm; in vm_handle_inst_emul() local
554 vm = vcpu->vm; in vm_handle_inst_emul()
555 hyp = vm->cookie; in vm_handle_inst_emul()
566 for (i = 0; i < nitems(vm->mmio_region); i++) { in vm_handle_inst_emul()
567 if (vm->mmio_region[i].start <= fault_ipa && in vm_handle_inst_emul()
568 vm->mmio_region[i].end > fault_ipa) { in vm_handle_inst_emul()
569 vmr = &vm->mmio_region[i]; in vm_handle_inst_emul()
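The mmio_region hits describe a small fixed-size table of guest-physical ranges with read/write callbacks: vm_register_inst_handler() claims the first free slot (start == end == 0), vm_deregister_inst_handler() zeroes a slot that matches exactly, and vm_handle_inst_emul() does a linear scan for the slot covering the faulting IPA. The sketch below condenses that lookup; the entry layout and the callback typedefs are assumptions, and mmio_region_lookup() is a hypothetical helper name, not a function from the file.

    /* Assumed shape of one table entry; the callback typedefs are placeholders. */
    struct vm_mmio_region {
        uint64_t            start;
        uint64_t            end;
        mem_region_read_t   read;
        mem_region_write_t  write;
    };

    static struct vm_mmio_region *
    mmio_region_lookup(struct vm *vm, uint64_t fault_ipa)
    {
        int i;

        /* Linear scan; a slot with start == end == 0 is free. */
        for (i = 0; i < nitems(vm->mmio_region); i++) {
            if (vm->mmio_region[i].start <= fault_ipa &&
                vm->mmio_region[i].end > fault_ipa)
                return (&vm->mmio_region[i]);
        }
        return (NULL);
    }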
586 vm_suspend(struct vm *vm, enum vm_suspend_how how) in vm_suspend() argument
593 if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) { in vm_suspend()
594 VM_CTR2(vm, "virtual machine already suspended %d/%d", in vm_suspend()
595 vm->suspend, how); in vm_suspend()
599 VM_CTR1(vm, "virtual machine successfully suspended %d", how); in vm_suspend()
604 for (i = 0; i < vm->maxcpus; i++) { in vm_suspend()
605 if (CPU_ISSET(i, &vm->active_cpus)) in vm_suspend()
606 vcpu_notify_event(vm_vcpu(vm, i)); in vm_suspend()
615 struct vm *vm = vcpu->vm; in vm_exit_suspended() local
618 KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST, in vm_exit_suspended()
619 ("vm_exit_suspended: invalid suspend type %d", vm->suspend)); in vm_exit_suspended()
625 vmexit->u.suspended.how = vm->suspend; in vm_exit_suspended()
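The vm_suspend() and vm_exit_suspended() hits sketch the suspend handshake: a single atomic_cmpset_int() moves vm->suspend from 0 to the requested reason, so only the first caller records it; every active vcpu is then kicked, and each vcpu eventually reports the recorded reason in its suspended exit. A condensed reconstruction of the initiating side follows; the EINVAL range check and the EALREADY errno are assumptions based on the usual bhyve convention.

    int
    vm_suspend(struct vm *vm, enum vm_suspend_how how)
    {
        int i;

        if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
            return (EINVAL);    /* assumed range check */

        /* Only the first caller gets to record the suspend reason. */
        if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
            VM_CTR2(vm, "virtual machine already suspended %d/%d",
                vm->suspend, how);
            return (EALREADY);  /* assumed errno */
        }

        VM_CTR1(vm, "virtual machine successfully suspended %d", how);

        /* Kick every active vcpu so it notices the pending suspend. */
        for (i = 0; i < vm->maxcpus; i++) {
            if (CPU_ISSET(i, &vm->active_cpus))
                vcpu_notify_event(vm_vcpu(vm, i));
        }

        return (0);
    }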
642 struct vm *vm = vcpu->vm; in vm_activate_cpu() local
644 if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_activate_cpu()
647 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus); in vm_activate_cpu()
653 vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu) in vm_suspend_cpu() argument
656 vm->debug_cpus = vm->active_cpus; in vm_suspend_cpu()
657 for (int i = 0; i < vm->maxcpus; i++) { in vm_suspend_cpu()
658 if (CPU_ISSET(i, &vm->active_cpus)) in vm_suspend_cpu()
659 vcpu_notify_event(vm_vcpu(vm, i)); in vm_suspend_cpu()
662 if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_suspend_cpu()
665 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_suspend_cpu()
672 vm_resume_cpu(struct vm *vm, struct vcpu *vcpu) in vm_resume_cpu() argument
676 CPU_ZERO(&vm->debug_cpus); in vm_resume_cpu()
678 if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus)) in vm_resume_cpu()
681 CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_resume_cpu()
690 return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus)); in vcpu_debugged()
694 vm_active_cpus(struct vm *vm) in vm_active_cpus() argument
697 return (vm->active_cpus); in vm_active_cpus()
701 vm_debug_cpus(struct vm *vm) in vm_debug_cpus() argument
704 return (vm->debug_cpus); in vm_debug_cpus()
708 vm_suspended_cpus(struct vm *vm) in vm_suspended_cpus() argument
711 return (vm->suspended_cpus); in vm_suspended_cpus()
764 vm_vmspace(struct vm *vm) in vm_vmspace() argument
766 return (vm->vmspace); in vm_vmspace()
770 vm_mem(struct vm *vm) in vm_mem() argument
772 return (&vm->mem); in vm_mem()
913 struct vm *
917 return (vcpu->vm); in vcpu_vm()
935 vm_vcpu(struct vm *vm, int vcpuid) in vm_vcpu() argument
938 return (vm->vcpu[vcpuid]); in vm_vcpu()
994 vm_get_cookie(struct vm *vm) in vm_get_cookie() argument
997 return (vm->cookie); in vm_get_cookie()
1008 vm_attach_aplic(struct vm *vm, struct vm_aplic_descr *descr) in vm_attach_aplic() argument
1011 return (aplic_attach_to_vm(vm->cookie, descr)); in vm_attach_aplic()
1015 vm_assert_irq(struct vm *vm, uint32_t irq) in vm_assert_irq() argument
1018 return (aplic_inject_irq(vm->cookie, -1, irq, true)); in vm_assert_irq()
1022 vm_deassert_irq(struct vm *vm, uint32_t irq) in vm_deassert_irq() argument
1025 return (aplic_inject_irq(vm->cookie, -1, irq, false)); in vm_deassert_irq()
1029 vm_raise_msi(struct vm *vm, uint64_t msg, uint64_t addr, int bus, int slot, in vm_raise_msi() argument
1033 return (aplic_inject_msi(vm->cookie, msg, addr)); in vm_raise_msi()
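The vm_assert_irq(), vm_deassert_irq() and vm_raise_msi() hits show that interrupt injection is delegated to the emulated APLIC: level changes become aplic_inject_irq() calls and MSIs become aplic_inject_msi() calls, with the PCI bus/slot arguments apparently unused. As a usage illustration only, a hypothetical helper (not part of the file) could pulse a wired interrupt by asserting and then deasserting it through those wrappers:

    /* Hypothetical helper: pulse a wired interrupt line through the wrappers above. */
    static int
    vm_pulse_irq(struct vm *vm, uint32_t irq)
    {
        int error;

        error = vm_assert_irq(vm, irq);
        if (error != 0)
            return (error);
        return (vm_deassert_irq(vm, irq));
    }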
1073 struct vm *vm; in vm_handle_paging() local
1080 vm = vcpu->vm; in vm_handle_paging()
1083 pmap = vmspace_pmap(vm->vmspace); in vm_handle_paging()
1106 map = &vm->vmspace->vm_map; in vm_handle_paging()
1120 struct vm *vm = vcpu->vm; in vm_handle_suspend() local
1127 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus); in vm_handle_suspend()
1132 * Since a VM may be suspended at any time including when one or in vm_handle_suspend()
1138 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) in vm_handle_suspend()
1155 for (i = 0; i < vm->maxcpus; i++) { in vm_handle_suspend()
1156 if (CPU_ISSET(i, &vm->suspended_cpus)) { in vm_handle_suspend()
1157 vcpu_notify_event(vm_vcpu(vm, i)); in vm_handle_suspend()
1170 struct vm *vm; in vm_run() local
1176 vm = vcpu->vm; in vm_run()
1182 if (!CPU_ISSET(vcpuid, &vm->active_cpus)) in vm_run()
1185 if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) in vm_run()
1188 pmap = vmspace_pmap(vm->vmspace); in vm_run()
1191 evinfo.sptr = &vm->suspend; in vm_run()