Lines Matching full:vm

Each entry below is a cross-reference hit: the original source line number, the matching line, and the tool's note on the enclosing scope ("member", "argument", "local", or "in function()").

45 #include <vm/vm.h>
46 #include <vm/vm_object.h>
47 #include <vm/vm_page.h>
48 #include <vm/pmap.h>
49 #include <vm/vm_map.h>
50 #include <vm/vm_extern.h>
51 #include <vm/vm_param.h>
58 #include <machine/vm.h>
84 struct vm *vm; /* (o) */ member
114 * (o) initialized the first time the VM is created
115 * (i) initialized when VM is created and when it is reinitialized
118 struct vm { struct
122 int suspend; /* (i) stop VM execution */
132 /* The following describe the vm cpu topology */ argument
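
Pieced together from the struct-member hits above, a minimal sketch of the per-VM state this file manages. Only fields visible in this listing are shown; their order, exact types, and the elided members are assumptions:

    struct vm {
            void            *cookie;         /* (i) opaque vmmops per-VM data */
            int             suspend;         /* (i) stop VM execution */
            bool            dying;           /* (o) refuse new vcpus once set */
            cpuset_t        active_cpus;     /* (i) activated vcpus */
            cpuset_t        debug_cpus;      /* (i) vcpus frozen for debug */
            cpuset_t        suspended_cpus;  /* (i) vcpus parked by suspend */
            uint16_t        sockets, cores, threads; /* (o) guest cpu topology */
            uint16_t        maxcpus;         /* (o) size of the vcpu table */
            struct vcpu     **vcpu;          /* lazily filled vcpu table */
            struct sx       vcpus_init_lock; /* (o) serializes vcpu creation */
            /* ... name, mem, mmio_region[], special_reg[] elided ... */
    };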
211 VMM_STAT(VMEXIT_COUNT, "total number of vm exits");
266 vcpu_alloc(struct vm *vm, int vcpu_id) in vcpu_alloc() argument
270 KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus, in vcpu_alloc()
278 vcpu->vm = vm; in vcpu_alloc()
287 vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid); in vcpu_init()
336 vm_init(struct vm *vm, bool create) in vm_init() argument
340 vm->cookie = vmmops_init(vm, vmspace_pmap(vm_vmspace(vm))); in vm_init()
341 MPASS(vm->cookie != NULL); in vm_init()
343 CPU_ZERO(&vm->active_cpus); in vm_init()
344 CPU_ZERO(&vm->debug_cpus); in vm_init()
346 vm->suspend = 0; in vm_init()
347 CPU_ZERO(&vm->suspended_cpus); in vm_init()
349 memset(vm->mmio_region, 0, sizeof(vm->mmio_region)); in vm_init()
350 memset(vm->special_reg, 0, sizeof(vm->special_reg)); in vm_init()
353 for (i = 0; i < vm->maxcpus; i++) { in vm_init()
354 if (vm->vcpu[i] != NULL) in vm_init()
355 vcpu_init(vm->vcpu[i]); in vm_init()
361 vm_disable_vcpu_creation(struct vm *vm) in vm_disable_vcpu_creation() argument
363 sx_xlock(&vm->vcpus_init_lock); in vm_disable_vcpu_creation()
364 vm->dying = true; in vm_disable_vcpu_creation()
365 sx_xunlock(&vm->vcpus_init_lock); in vm_disable_vcpu_creation()
369 vm_alloc_vcpu(struct vm *vm, int vcpuid) in vm_alloc_vcpu() argument
373 if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm)) in vm_alloc_vcpu()
377 atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]); in vm_alloc_vcpu()
381 sx_xlock(&vm->vcpus_init_lock); in vm_alloc_vcpu()
382 vcpu = vm->vcpu[vcpuid]; in vm_alloc_vcpu()
383 if (vcpu == NULL && !vm->dying) { in vm_alloc_vcpu()
385 if (vcpuid >= vgic_max_cpu_count(vm->cookie)) { in vm_alloc_vcpu()
386 sx_xunlock(&vm->vcpus_init_lock); in vm_alloc_vcpu()
390 vcpu = vcpu_alloc(vm, vcpuid); in vm_alloc_vcpu()
397 atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid], in vm_alloc_vcpu()
400 sx_xunlock(&vm->vcpus_init_lock); in vm_alloc_vcpu()
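
vm_alloc_vcpu() above is a double-checked publication pattern: readers take a lock-free fast path through an acquire load (atomic_load_acq_ptr), while creators initialize the vcpu fully and only then publish it with a release store (atomic_store_rel_ptr) under vcpus_init_lock. A hypothetical caller (example_get_vcpu() is illustrative, not part of the source) needs only one call:

    static int
    example_get_vcpu(struct vm *vm, int vcpuid, struct vcpu **vcpup)
    {
            struct vcpu *vcpu;

            /*
             * First use of this id allocates and publishes the vcpu;
             * later calls return the published pointer without locking.
             * NULL means the id is out of range, beyond the vgic cpu
             * limit, or the VM is dying.
             */
            vcpu = vm_alloc_vcpu(vm, vcpuid);
            if (vcpu == NULL)
                    return (ENOENT);
            *vcpup = vcpu;
            return (0);
    }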
405 vm_lock_vcpus(struct vm *vm) in vm_lock_vcpus() argument
407 sx_xlock(&vm->vcpus_init_lock); in vm_lock_vcpus()
411 vm_unlock_vcpus(struct vm *vm) in vm_unlock_vcpus() argument
413 sx_unlock(&vm->vcpus_init_lock); in vm_unlock_vcpus()
417 vm_create(const char *name, struct vm **retvm) in vm_create()
419 struct vm *vm; in vm_create() local
422 vm = malloc(sizeof(struct vm), M_VMM, M_WAITOK | M_ZERO); in vm_create()
423 error = vm_mem_init(&vm->mem, 0, 1ul << 39); in vm_create()
425 free(vm, M_VMM); in vm_create()
428 strcpy(vm->name, name); in vm_create()
429 sx_init(&vm->vcpus_init_lock, "vm vcpus"); in vm_create()
431 vm->sockets = 1; in vm_create()
432 vm->cores = 1; /* XXX backwards compatibility */ in vm_create()
433 vm->threads = 1; /* XXX backwards compatibility */ in vm_create()
434 vm->maxcpus = vm_maxcpu; in vm_create()
436 vm->vcpu = malloc(sizeof(*vm->vcpu) * vm->maxcpus, M_VMM, in vm_create()
439 vm_init(vm, true); in vm_create()
441 *retvm = vm; in vm_create()
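
A hypothetical create/teardown sequence using only functions from this listing (example_vm_lifecycle() is illustrative, not part of the source):

    static int
    example_vm_lifecycle(void)
    {
            struct vm *vm;
            int error;

            /* May sleep: vm_create() allocates with M_WAITOK. */
            error = vm_create("example", &vm);
            if (error != 0)
                    return (error);

            /* vcpu 0 is created lazily on first request. */
            if (vm_alloc_vcpu(vm, 0) == NULL) {
                    vm_destroy(vm);
                    return (ENXIO);
            }

            /*
             * Teardown: setting vm->dying under vcpus_init_lock makes any
             * concurrent vm_alloc_vcpu() return NULL, after which
             * vm_destroy() runs vm_cleanup(vm, true) and frees the VM.
             */
            vm_disable_vcpu_creation(vm);
            vm_destroy(vm);
            return (0);
    }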
446 vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores, in vm_get_topology() argument
449 *sockets = vm->sockets; in vm_get_topology()
450 *cores = vm->cores; in vm_get_topology()
451 *threads = vm->threads; in vm_get_topology()
452 *maxcpus = vm->maxcpus; in vm_get_topology()
456 vm_get_maxcpus(struct vm *vm) in vm_get_maxcpus() argument
458 return (vm->maxcpus); in vm_get_maxcpus()
462 vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores, in vm_set_topology() argument
466 if ((sockets * cores * threads) > vm->maxcpus) in vm_set_topology()
468 vm->sockets = sockets; in vm_set_topology()
469 vm->cores = cores; in vm_set_topology()
470 vm->threads = threads; in vm_set_topology()
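
The only constraint enforced above is that the topology fits the fixed vcpu table: sockets * cores * threads must not exceed vm->maxcpus (for example, 2 sockets * 4 cores * 2 threads = 16 vcpus is accepted only when maxcpus >= 16). A hypothetical pre-check mirroring that test; the widening multiply sidesteps integer promotion for extreme uint16_t inputs:

    /* Hypothetical helper mirroring the check in vm_set_topology(). */
    static bool
    example_topology_fits(struct vm *vm, uint16_t sockets, uint16_t cores,
        uint16_t threads)
    {
            return ((uint64_t)sockets * cores * threads <=
                vm_get_maxcpus(vm));
    }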
475 vm_cleanup(struct vm *vm, bool destroy) in vm_cleanup() argument
481 vm_xlock_memsegs(vm); in vm_cleanup()
482 pmap = vmspace_pmap(vm_vmspace(vm)); in vm_cleanup()
490 vm_assert_memseg_xlocked(vm); in vm_cleanup()
493 vgic_detach_from_vm(vm->cookie); in vm_cleanup()
495 for (i = 0; i < vm->maxcpus; i++) { in vm_cleanup()
496 if (vm->vcpu[i] != NULL) in vm_cleanup()
497 vcpu_cleanup(vm->vcpu[i], destroy); in vm_cleanup()
500 vmmops_cleanup(vm->cookie); in vm_cleanup()
502 vm_mem_cleanup(vm); in vm_cleanup()
504 vm_mem_destroy(vm); in vm_cleanup()
506 free(vm->vcpu, M_VMM); in vm_cleanup()
507 sx_destroy(&vm->vcpus_init_lock); in vm_cleanup()
512 vm_destroy(struct vm *vm) in vm_destroy() argument
514 vm_cleanup(vm, true); in vm_destroy()
515 free(vm, M_VMM); in vm_destroy()
519 vm_reinit(struct vm *vm) in vm_reinit() argument
526 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) { in vm_reinit()
527 vm_cleanup(vm, false); in vm_reinit()
528 vm_init(vm, false); in vm_reinit()
538 vm_name(struct vm *vm) in vm_name() argument
540 return (vm->name); in vm_name()
664 vm_register_reg_handler(struct vm *vm, uint64_t iss, uint64_t mask, in vm_register_reg_handler() argument
669 for (i = 0; i < nitems(vm->special_reg); i++) { in vm_register_reg_handler()
670 if (vm->special_reg[i].esr_iss == 0 && in vm_register_reg_handler()
671 vm->special_reg[i].esr_mask == 0) { in vm_register_reg_handler()
672 vm->special_reg[i].esr_iss = iss; in vm_register_reg_handler()
673 vm->special_reg[i].esr_mask = mask; in vm_register_reg_handler()
674 vm->special_reg[i].reg_read = reg_read; in vm_register_reg_handler()
675 vm->special_reg[i].reg_write = reg_write; in vm_register_reg_handler()
676 vm->special_reg[i].arg = arg; in vm_register_reg_handler()
685 vm_deregister_reg_handler(struct vm *vm, uint64_t iss, uint64_t mask) in vm_deregister_reg_handler() argument
689 for (i = 0; i < nitems(vm->special_reg); i++) { in vm_deregister_reg_handler()
690 if (vm->special_reg[i].esr_iss == iss && in vm_deregister_reg_handler()
691 vm->special_reg[i].esr_mask == mask) { in vm_deregister_reg_handler()
692 memset(&vm->special_reg[i], 0, in vm_deregister_reg_handler()
693 sizeof(vm->special_reg[i])); in vm_deregister_reg_handler()
705 struct vm *vm; in vm_handle_reg_emul() local
710 vm = vcpu->vm; in vm_handle_reg_emul()
714 for (i = 0; i < nitems(vm->special_reg); i++) { in vm_handle_reg_emul()
715 if (vm->special_reg[i].esr_iss == 0 && in vm_handle_reg_emul()
716 vm->special_reg[i].esr_mask == 0) in vm_handle_reg_emul()
719 if ((vre->inst_syndrome & vm->special_reg[i].esr_mask) == in vm_handle_reg_emul()
720 vm->special_reg[i].esr_iss) { in vm_handle_reg_emul()
722 vm->special_reg[i].reg_read, in vm_handle_reg_emul()
723 vm->special_reg[i].reg_write, in vm_handle_reg_emul()
724 vm->special_reg[i].arg); in vm_handle_reg_emul()
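
The special_reg table uses a simple slot convention: an entry is free when esr_iss and esr_mask are both zero, registration claims the first free slot, and the trap path in vm_handle_reg_emul() dispatches on (inst_syndrome & esr_mask) == esr_iss. A hypothetical RAZ/WI (read-as-zero, writes-ignored) handler; the callback signatures below are assumptions, since the typedefs are elided from this listing:

    /* Assumed callback shapes; the real typedefs are not in this listing. */
    static int
    example_reg_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
    {
            *rval = 0;              /* read as zero */
            return (0);
    }

    static int
    example_reg_write(struct vcpu *vcpu, uint64_t wval, void *arg)
    {
            return (0);             /* write ignored */
    }

    /*
     * From VM setup code, with an iss/mask pair describing the register
     * of interest: claim a slot, so any trap whose masked syndrome
     * equals iss is routed to the pair above.
     */
    vm_register_reg_handler(vm, iss, mask, example_reg_read,
        example_reg_write, NULL);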
751 vm_register_inst_handler(struct vm *vm, uint64_t start, uint64_t size, in vm_register_inst_handler() argument
756 for (i = 0; i < nitems(vm->mmio_region); i++) { in vm_register_inst_handler()
757 if (vm->mmio_region[i].start == 0 && in vm_register_inst_handler()
758 vm->mmio_region[i].end == 0) { in vm_register_inst_handler()
759 vm->mmio_region[i].start = start; in vm_register_inst_handler()
760 vm->mmio_region[i].end = start + size; in vm_register_inst_handler()
761 vm->mmio_region[i].read = mmio_read; in vm_register_inst_handler()
762 vm->mmio_region[i].write = mmio_write; in vm_register_inst_handler()
771 vm_deregister_inst_handler(struct vm *vm, uint64_t start, uint64_t size) in vm_deregister_inst_handler() argument
775 for (i = 0; i < nitems(vm->mmio_region); i++) { in vm_deregister_inst_handler()
776 if (vm->mmio_region[i].start == start && in vm_deregister_inst_handler()
777 vm->mmio_region[i].end == start + size) { in vm_deregister_inst_handler()
778 memset(&vm->mmio_region[i], 0, in vm_deregister_inst_handler()
779 sizeof(vm->mmio_region[i])); in vm_deregister_inst_handler()
791 struct vm *vm; in vm_handle_inst_emul() local
800 vm = vcpu->vm; in vm_handle_inst_emul()
801 hyp = vm->cookie; in vm_handle_inst_emul()
812 for (i = 0; i < nitems(vm->mmio_region); i++) { in vm_handle_inst_emul()
813 if (vm->mmio_region[i].start <= fault_ipa && in vm_handle_inst_emul()
814 vm->mmio_region[i].end > fault_ipa) { in vm_handle_inst_emul()
815 vmr = &vm->mmio_region[i]; in vm_handle_inst_emul()
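
The mmio_region table works the same way, keyed by guest-physical range: a slot is free when start == end == 0, and a data abort whose fault IPA satisfies start <= fault_ipa < end is routed to the registered callbacks. A hypothetical device window; the base, size, and callback names are assumptions:

    #define EXAMPLE_DEV_BASE    0x0a000000UL    /* assumed guest-physical base */
    #define EXAMPLE_DEV_SIZE    0x1000UL

    static void
    example_dev_attach(struct vm *vm)
    {
            /* example_mmio_read/_write are assumed mmio callbacks. */
            vm_register_inst_handler(vm, EXAMPLE_DEV_BASE, EXAMPLE_DEV_SIZE,
                example_mmio_read, example_mmio_write);
    }

    static void
    example_dev_detach(struct vm *vm)
    {
            /* Must pass the identical range used at registration. */
            vm_deregister_inst_handler(vm, EXAMPLE_DEV_BASE,
                EXAMPLE_DEV_SIZE);
    }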
832 vm_suspend(struct vm *vm, enum vm_suspend_how how) in vm_suspend() argument
839 if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) { in vm_suspend()
840 VM_CTR2(vm, "virtual machine already suspended %d/%d", in vm_suspend()
841 vm->suspend, how); in vm_suspend()
845 VM_CTR1(vm, "virtual machine successfully suspended %d", how); in vm_suspend()
850 for (i = 0; i < vm->maxcpus; i++) { in vm_suspend()
851 if (CPU_ISSET(i, &vm->active_cpus)) in vm_suspend()
852 vcpu_notify_event(vm_vcpu(vm, i)); in vm_suspend()
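
A hypothetical reset initiator, tying together vm_suspend(), vm_handle_suspend(), and vm_reinit() from this listing (VM_SUSPEND_RESET is assumed as the reason):

    static int
    example_reset_vm(struct vm *vm)
    {
            int error;

            /*
             * Exactly one caller wins the cmpset on vm->suspend; a later
             * request finds it nonzero and fails. vm_suspend() then kicks
             * every active vcpu, and each parks itself in
             * vm_handle_suspend(), joining suspended_cpus.
             */
            error = vm_suspend(vm, VM_SUSPEND_RESET);
            if (error != 0)
                    return (error);

            /*
             * vm_reinit() succeeds only once suspended_cpus equals
             * active_cpus, i.e. the VM is fully stopped; it then recycles
             * the VM in place via vm_cleanup(vm, false) + vm_init(vm, false).
             */
            return (vm_reinit(vm));
    }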
861 struct vm *vm = vcpu->vm; in vm_exit_suspended() local
864 KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST, in vm_exit_suspended()
865 ("vm_exit_suspended: invalid suspend type %d", vm->suspend)); in vm_exit_suspended()
871 vmexit->u.suspended.how = vm->suspend; in vm_exit_suspended()
888 struct vm *vm = vcpu->vm; in vm_activate_cpu() local
890 if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_activate_cpu()
893 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus); in vm_activate_cpu()
899 vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu) in vm_suspend_cpu() argument
902 vm->debug_cpus = vm->active_cpus; in vm_suspend_cpu()
903 for (int i = 0; i < vm->maxcpus; i++) { in vm_suspend_cpu()
904 if (CPU_ISSET(i, &vm->active_cpus)) in vm_suspend_cpu()
905 vcpu_notify_event(vm_vcpu(vm, i)); in vm_suspend_cpu()
908 if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus)) in vm_suspend_cpu()
911 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_suspend_cpu()
918 vm_resume_cpu(struct vm *vm, struct vcpu *vcpu) in vm_resume_cpu() argument
922 CPU_ZERO(&vm->debug_cpus); in vm_resume_cpu()
924 if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus)) in vm_resume_cpu()
927 CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus); in vm_resume_cpu()
936 return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus)); in vcpu_debugged()
940 vm_active_cpus(struct vm *vm) in vm_active_cpus() argument
943 return (vm->active_cpus); in vm_active_cpus()
947 vm_debug_cpus(struct vm *vm) in vm_debug_cpus() argument
950 return (vm->debug_cpus); in vm_debug_cpus()
954 vm_suspended_cpus(struct vm *vm) in vm_suspended_cpus() argument
957 return (vm->suspended_cpus); in vm_suspended_cpus()
1010 vm_mem(struct vm *vm) in vm_mem() argument
1012 return (&vm->mem); in vm_mem()
1152 struct vm *
1155 return (vcpu->vm); in vcpu_vm()
1171 vm_vcpu(struct vm *vm, int vcpuid) in vm_vcpu() argument
1173 return (vm->vcpu[vcpuid]); in vm_vcpu()
1228 vm_get_cookie(struct vm *vm) in vm_get_cookie() argument
1230 return (vm->cookie); in vm_get_cookie()
1240 vm_attach_vgic(struct vm *vm, struct vm_vgic_descr *descr) in vm_attach_vgic() argument
1242 return (vgic_attach_to_vm(vm->cookie, descr)); in vm_attach_vgic()
1246 vm_assert_irq(struct vm *vm, uint32_t irq) in vm_assert_irq() argument
1248 return (vgic_inject_irq(vm->cookie, -1, irq, true)); in vm_assert_irq()
1252 vm_deassert_irq(struct vm *vm, uint32_t irq) in vm_deassert_irq() argument
1254 return (vgic_inject_irq(vm->cookie, -1, irq, false)); in vm_deassert_irq()
1258 vm_raise_msi(struct vm *vm, uint64_t msg, uint64_t addr, int bus, int slot, in vm_raise_msi() argument
1262 return (vgic_inject_msi(vm->cookie, msg, addr)); in vm_raise_msi()
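
Interrupt injection is a thin veneer over the vgic: the -1 vcpuid in the vgic_inject_irq() calls above means the interrupt is not bound to a particular vcpu, and vm_raise_msi() forwards only msg and addr, ignoring its bus/slot/func arguments in the visible body. A hypothetical level-triggered device line:

    /* Hypothetical emulated device interrupt line (irq number assumed). */
    static void
    example_dev_intr(struct vm *vm, uint32_t irq, bool pending)
    {
            /*
             * Raise the line while the device needs service, lower it
             * when the condition clears; the vgic tracks the level.
             */
            if (pending)
                    (void)vm_assert_irq(vm, irq);
            else
                    (void)vm_deassert_irq(vm, irq);
    }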
1288 struct vm *vm; in vm_handle_wfi() local
1290 vm = vcpu->vm; in vm_handle_wfi()
1293 if (vm->suspend) in vm_handle_wfi()
1319 struct vm *vm = vcpu->vm; in vm_handle_paging() local
1328 pmap = vmspace_pmap(vm_vmspace(vcpu->vm)); in vm_handle_paging()
1345 map = &vm_vmspace(vm)->vm_map; in vm_handle_paging()
1356 struct vm *vm = vcpu->vm; in vm_handle_suspend() local
1363 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus); in vm_handle_suspend()
1368 * Since a VM may be suspended at any time including when one or in vm_handle_suspend()
1374 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) in vm_handle_suspend()
1391 for (i = 0; i < vm->maxcpus; i++) { in vm_handle_suspend()
1392 if (CPU_ISSET(i, &vm->suspended_cpus)) { in vm_handle_suspend()
1393 vcpu_notify_event(vm_vcpu(vm, i)); in vm_handle_suspend()
1404 struct vm *vm = vcpu->vm; in vm_run() local
1413 if (!CPU_ISSET(vcpuid, &vm->active_cpus)) in vm_run()
1416 if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) in vm_run()
1419 pmap = vmspace_pmap(vm_vmspace(vm)); in vm_run()
1422 evinfo.sptr = &vm->suspend; in vm_run()
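
Putting the pieces together, a hypothetical caller-side run loop; vm_run()'s body and return convention are largely elided from this listing, so the exit handling below is schematic:

    static int
    example_run_loop(struct vcpu *vcpu)
    {
            int error;

            /* Fails if this vcpuid is already in active_cpus. */
            error = vm_activate_cpu(vcpu);
            if (error != 0)
                    return (error);

            for (;;) {
                    /*
                     * vm_run() rejects inactive or suspended vcpus up
                     * front, then enters the guest with evinfo.sptr
                     * pointing at vm->suspend so a pending suspend
                     * request is noticed promptly.
                     */
                    error = vm_run(vcpu);
                    if (error != 0)
                            break;
                    /* Handle the recorded exit; stop on a suspend exit. */
            }
            return (error);
    }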