Lines Matching refs:region
496 ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size); in __vm_create()
575 struct userspace_mem_region *region; in kvm_vm_restart() local
581 hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) { in kvm_vm_restart()
582 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION2, &region->region); in kvm_vm_restart()
588 ret, errno, region->region.slot, in kvm_vm_restart()
589 region->region.flags, in kvm_vm_restart()
590 region->region.guest_phys_addr, in kvm_vm_restart()
591 region->region.memory_size); in kvm_vm_restart()
703 struct userspace_mem_region *region = in userspace_mem_region_find() local
705 uint64_t existing_start = region->region.guest_phys_addr; in userspace_mem_region_find()
706 uint64_t existing_end = region->region.guest_phys_addr in userspace_mem_region_find()
707 + region->region.memory_size - 1; in userspace_mem_region_find()
709 return region; in userspace_mem_region_find()
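Note: the references above implement a range lookup — the query GPA range is tested against each region's [guest_phys_addr, guest_phys_addr + memory_size - 1] span while walking the GPA-sorted rb-tree. A minimal sketch of that overlap test (helper and parameter names are illustrative, not the source):

	/* Illustrative only: true when the query [start, end] overlaps an
	 * existing slot's physical range. */
	static bool gpa_ranges_overlap(uint64_t start, uint64_t end,
				       uint64_t existing_start, uint64_t existing_end)
	{
		return start <= existing_end && end >= existing_start;
	}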
797 struct userspace_mem_region *region) in __vm_mem_region_delete() argument
801 rb_erase(&region->gpa_node, &vm->regions.gpa_tree); in __vm_mem_region_delete()
802 rb_erase(&region->hva_node, &vm->regions.hva_tree); in __vm_mem_region_delete()
803 hash_del(&region->slot_node); in __vm_mem_region_delete()
805 sparsebit_free(&region->unused_phy_pages); in __vm_mem_region_delete()
806 sparsebit_free(&region->protected_phy_pages); in __vm_mem_region_delete()
807 ret = munmap(region->mmap_start, region->mmap_size); in __vm_mem_region_delete()
809 if (region->fd >= 0) { in __vm_mem_region_delete()
811 ret = munmap(region->mmap_alias, region->mmap_size); in __vm_mem_region_delete()
813 close(region->fd); in __vm_mem_region_delete()
815 if (region->region.guest_memfd >= 0) in __vm_mem_region_delete()
816 close(region->region.guest_memfd); in __vm_mem_region_delete()
818 free(region); in __vm_mem_region_delete()
828 struct userspace_mem_region *region; in kvm_vm_free() local
834 hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node) in kvm_vm_free()
835 __vm_mem_region_delete(vmp, region); in kvm_vm_free()
868 struct userspace_mem_region *region) in vm_userspace_mem_region_gpa_insert() argument
877 if (region->region.guest_phys_addr < in vm_userspace_mem_region_gpa_insert()
878 cregion->region.guest_phys_addr) in vm_userspace_mem_region_gpa_insert()
881 TEST_ASSERT(region->region.guest_phys_addr != in vm_userspace_mem_region_gpa_insert()
882 cregion->region.guest_phys_addr, in vm_userspace_mem_region_gpa_insert()
889 rb_link_node(&region->gpa_node, parent, cur); in vm_userspace_mem_region_gpa_insert()
890 rb_insert_color(&region->gpa_node, gpa_tree); in vm_userspace_mem_region_gpa_insert()
894 struct userspace_mem_region *region) in vm_userspace_mem_region_hva_insert() argument
903 if (region->host_mem < cregion->host_mem) in vm_userspace_mem_region_hva_insert()
906 TEST_ASSERT(region->host_mem != in vm_userspace_mem_region_hva_insert()
914 rb_link_node(&region->hva_node, parent, cur); in vm_userspace_mem_region_hva_insert()
915 rb_insert_color(&region->hva_node, hva_tree); in vm_userspace_mem_region_hva_insert()
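Note: both insert helpers follow the standard kernel rb-tree idiom — walk the child links to find the insertion point, then rb_link_node() + rb_insert_color() to splice and rebalance. A condensed sketch of the GPA-tree variant (names are illustrative and the duplicate-GPA assert is omitted):

	static void gpa_tree_insert(struct rb_root *gpa_tree,
				    struct userspace_mem_region *region)
	{
		struct rb_node **cur = &gpa_tree->rb_node, *parent = NULL;

		while (*cur) {
			struct userspace_mem_region *cregion =
				container_of(*cur, struct userspace_mem_region, gpa_node);

			parent = *cur;
			if (region->region.guest_phys_addr < cregion->region.guest_phys_addr)
				cur = &(*cur)->rb_left;
			else
				cur = &(*cur)->rb_right;
		}

		rb_link_node(&region->gpa_node, parent, cur);
		rb_insert_color(&region->gpa_node, gpa_tree);
	}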
922 struct kvm_userspace_memory_region region = { in __vm_set_user_memory_region() local
930 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region); in __vm_set_user_memory_region()
950 struct kvm_userspace_memory_region2 region = { in __vm_set_user_memory_region2() local
962 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, &region); in __vm_set_user_memory_region2()
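Note: both wrappers just populate the uapi struct and issue the ioctl on the VM fd. A sketch of the KVM_SET_USER_MEMORY_REGION2 shape (helper name and exact parameter list are illustrative, not the wrapper's real signature):

	static int set_user_memory_region2(int vm_fd, uint32_t slot, uint32_t flags,
					   uint64_t gpa, uint64_t size, void *hva,
					   int guest_memfd, uint64_t guest_memfd_offset)
	{
		struct kvm_userspace_memory_region2 region = {
			.slot = slot,
			.flags = flags,
			.guest_phys_addr = gpa,
			.memory_size = size,
			.userspace_addr = (uintptr_t)hva,
			.guest_memfd = guest_memfd,	/* u32 in the uapi struct; -1 when unused */
			.guest_memfd_offset = guest_memfd_offset,
		};

		return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
	}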
983 struct userspace_mem_region *region; in vm_mem_add() local
1009 region = (struct userspace_mem_region *) userspace_mem_region_find( in vm_mem_add()
1011 if (region != NULL) in vm_mem_add()
1018 (uint64_t) region->region.guest_phys_addr, in vm_mem_add()
1019 (uint64_t) region->region.memory_size); in vm_mem_add()
1022 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in vm_mem_add()
1024 if (region->region.slot != slot) in vm_mem_add()
1032 region->region.slot, in vm_mem_add()
1033 (uint64_t) region->region.guest_phys_addr, in vm_mem_add()
1034 (uint64_t) region->region.memory_size); in vm_mem_add()
1038 region = calloc(1, sizeof(*region)); in vm_mem_add()
1039 TEST_ASSERT(region != NULL, "Insufficient Memory"); in vm_mem_add()
1040 region->mmap_size = mem_size; in vm_mem_add()
1062 region->mmap_size += alignment; in vm_mem_add()
1064 region->fd = -1; in vm_mem_add()
1066 region->fd = kvm_memfd_alloc(region->mmap_size, in vm_mem_add()
1069 region->mmap_start = mmap(NULL, region->mmap_size, in vm_mem_add()
1072 region->fd, 0); in vm_mem_add()
1073 TEST_ASSERT(region->mmap_start != MAP_FAILED, in vm_mem_add()
1077 region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz), in vm_mem_add()
1079 region->mmap_start, backing_src_pagesz); in vm_mem_add()
1082 region->host_mem = align_ptr_up(region->mmap_start, alignment); in vm_mem_add()
1087 ret = madvise(region->host_mem, mem_size, in vm_mem_add()
1090 region->host_mem, mem_size, in vm_mem_add()
1094 region->backing_src_type = src_type; in vm_mem_add()
1113 region->region.guest_memfd = guest_memfd; in vm_mem_add()
1114 region->region.guest_memfd_offset = guest_memfd_offset; in vm_mem_add()
1116 region->region.guest_memfd = -1; in vm_mem_add()
1119 region->unused_phy_pages = sparsebit_alloc(); in vm_mem_add()
1121 region->protected_phy_pages = sparsebit_alloc(); in vm_mem_add()
1122 sparsebit_set_num(region->unused_phy_pages, in vm_mem_add()
1124 region->region.slot = slot; in vm_mem_add()
1125 region->region.flags = flags; in vm_mem_add()
1126 region->region.guest_phys_addr = guest_paddr; in vm_mem_add()
1127 region->region.memory_size = npages * vm->page_size; in vm_mem_add()
1128 region->region.userspace_addr = (uintptr_t) region->host_mem; in vm_mem_add()
1129 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_add()
1135 guest_paddr, (uint64_t) region->region.memory_size, in vm_mem_add()
1136 region->region.guest_memfd); in vm_mem_add()
1139 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); in vm_mem_add()
1140 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); in vm_mem_add()
1141 hash_add(vm->regions.slot_hash, &region->slot_node, slot); in vm_mem_add()
1144 if (region->fd >= 0) { in vm_mem_add()
1145 region->mmap_alias = mmap(NULL, region->mmap_size, in vm_mem_add()
1148 region->fd, 0); in vm_mem_add()
1149 TEST_ASSERT(region->mmap_alias != MAP_FAILED, in vm_mem_add()
1153 region->host_alias = align_ptr_up(region->mmap_alias, alignment); in vm_mem_add()
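Note: tests normally reach vm_mem_add() through vm_userspace_mem_region_add(). A caller-side sketch (slot, GPA and page count below are made-up test values):

	#define DEMO_SLOT	10
	#define DEMO_GPA	0xc0000000ul
	#define DEMO_NPAGES	64

	/* Back slot 10 with anonymous memory and enable dirty logging on it. */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    DEMO_GPA, DEMO_SLOT, DEMO_NPAGES,
				    KVM_MEM_LOG_DIRTY_PAGES);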
1183 struct userspace_mem_region *region; in memslot2region() local
1185 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in memslot2region()
1187 if (region->region.slot == memslot) in memslot2region()
1188 return region; in memslot2region()
1215 struct userspace_mem_region *region; in vm_mem_region_set_flags() local
1217 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
1219 region->region.flags = flags; in vm_mem_region_set_flags()
1221 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_region_set_flags()
1244 struct userspace_mem_region *region; in vm_mem_region_move() local
1247 region = memslot2region(vm, slot); in vm_mem_region_move()
1249 region->region.guest_phys_addr = new_gpa; in vm_mem_region_move()
1251 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_region_move()
1273 struct userspace_mem_region *region = memslot2region(vm, slot); in vm_mem_region_delete() local
1275 region->region.memory_size = 0; in vm_mem_region_delete()
1276 vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_region_delete()
1278 __vm_mem_region_delete(vm, region); in vm_mem_region_delete()
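Note: the three mutation paths above are what tests call to update, move and remove an existing memslot. A caller-side sketch (same made-up slot/GPA values as the earlier add example):

	vm_mem_region_set_flags(vm, DEMO_SLOT, 0);		/* e.g. drop dirty logging */
	vm_mem_region_move(vm, DEMO_SLOT, DEMO_GPA + 0x10000);	/* remap slot to a new GPA */
	vm_mem_region_delete(vm, DEMO_SLOT);			/* size-0 update, then free */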
1285 struct userspace_mem_region *region; in vm_guest_mem_fallocate() local
1294 region = userspace_mem_region_find(vm, gpa, gpa); in vm_guest_mem_fallocate()
1295 TEST_ASSERT(region && region->region.flags & KVM_MEM_GUEST_MEMFD, in vm_guest_mem_fallocate()
1298 offset = gpa - region->region.guest_phys_addr; in vm_guest_mem_fallocate()
1299 fd_offset = region->region.guest_memfd_offset + offset; in vm_guest_mem_fallocate()
1300 len = min_t(uint64_t, end - gpa, region->region.memory_size - offset); in vm_guest_mem_fallocate()
1302 ret = fallocate(region->region.guest_memfd, mode, fd_offset, len); in vm_guest_mem_fallocate()
1305 region->region.guest_memfd, mode, fd_offset); in vm_guest_mem_fallocate()
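Note: vm_guest_mem_fallocate() only operates on KVM_MEM_GUEST_MEMFD slots; it translates the GPA range to a guest_memfd offset and fallocate()s it, punching holes with FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE. A caller-side sketch (assuming a (vm, gpa, size, punch_hole) argument order, with the made-up DEMO_GPA from earlier):

	/* Discard, then re-populate, one page of guest_memfd-backed guest RAM. */
	vm_guest_mem_fallocate(vm, DEMO_GPA, vm->page_size, true /* punch_hole */);
	vm_guest_mem_fallocate(vm, DEMO_GPA, vm->page_size, false);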
1626 struct userspace_mem_region *region; in addr_gpa2hva() local
1630 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2hva()
1631 if (!region) { in addr_gpa2hva()
1636 return (void *)((uintptr_t)region->host_mem in addr_gpa2hva()
1637 + (gpa - region->region.guest_phys_addr)); in addr_gpa2hva()
1662 struct userspace_mem_region *region = in addr_hva2gpa() local
1665 if (hva >= region->host_mem) { in addr_hva2gpa()
1666 if (hva <= (region->host_mem in addr_hva2gpa()
1667 + region->region.memory_size - 1)) in addr_hva2gpa()
1669 region->region.guest_phys_addr in addr_hva2gpa()
1670 + (hva - (uintptr_t)region->host_mem)); in addr_hva2gpa()
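Note: addr_gpa2hva() and addr_hva2gpa() are the two directions of the same region lookup, so they round-trip. A caller-side sketch (DEMO_GPA as in the earlier sketches):

	uint64_t *host = addr_gpa2hva(vm, DEMO_GPA);

	*host = 0xdeadbeef;	/* touch guest RAM through its host mapping */
	TEST_ASSERT(addr_hva2gpa(vm, host) == DEMO_GPA,
		    "HVA -> GPA translation should round-trip");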
1702 struct userspace_mem_region *region; in addr_gpa2alias() local
1705 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2alias()
1706 if (!region) in addr_gpa2alias()
1709 if (!region->host_alias) in addr_gpa2alias()
1712 offset = gpa - region->region.guest_phys_addr; in addr_gpa2alias()
1713 return (void *) ((uintptr_t) region->host_alias + offset); in addr_gpa2alias()
1962 struct userspace_mem_region *region; in vm_dump() local
1969 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { in vm_dump()
1972 (uint64_t) region->region.guest_phys_addr, in vm_dump()
1973 (uint64_t) region->region.memory_size, in vm_dump()
1974 region->host_mem); in vm_dump()
1976 sparsebit_dump(stream, region->unused_phy_pages, 0); in vm_dump()
1977 if (region->protected_phy_pages) { in vm_dump()
1979 sparsebit_dump(stream, region->protected_phy_pages, 0); in vm_dump()
2097 struct userspace_mem_region *region; in __vm_phy_pages_alloc() local
2107 region = memslot2region(vm, memslot); in __vm_phy_pages_alloc()
2108 TEST_ASSERT(!protected || region->protected_phy_pages, in __vm_phy_pages_alloc()
2114 if (!sparsebit_is_set(region->unused_phy_pages, pg)) { in __vm_phy_pages_alloc()
2115 base = pg = sparsebit_next_set(region->unused_phy_pages, pg); in __vm_phy_pages_alloc()
2131 sparsebit_clear(region->unused_phy_pages, pg); in __vm_phy_pages_alloc()
2133 sparsebit_set(region->protected_phy_pages, pg); in __vm_phy_pages_alloc()
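Note: __vm_phy_pages_alloc() hands out pages from one slot's unused_phy_pages sparsebit and, for protected-guest setups, records them in protected_phy_pages. Tests usually go through the public wrapper; a sketch (memslot 0 and the minimum address are made-up values):

	vm_paddr_t paddr = vm_phy_pages_alloc(vm, 4 /* pages */,
					      0x10000 /* paddr_min */, 0 /* memslot */);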
2330 struct userspace_mem_region *region; in vm_is_gpa_protected() local
2335 region = userspace_mem_region_find(vm, paddr, paddr); in vm_is_gpa_protected()
2336 TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr); in vm_is_gpa_protected()
2339 return sparsebit_is_set(region->protected_phy_pages, pg); in vm_is_gpa_protected()