Lines Matching +full:mm +full:- +full:0
(Search hits from what appears to be FreeBSD's vmm guest-memory management code. Each hit shows the source line number, the matched line, and the enclosing function; gaps in the line numbers are non-matching lines elided by the search tool.)
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
32 sx_init(&mem->mem_segs_lock, "vm_mem_segs"); in vm_mem_init()
38 if (mem->mem_maps[idx].len != 0 && in sysmem_mapping()
39 mem->mem_segs[mem->mem_maps[idx].segid].sysmem) in sysmem_mapping()
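
The two sysmem_mapping() hits above form a complete predicate: a mem_maps slot counts as a system-memory mapping only if the slot is populated (len != 0) and the segment it references has sysmem set. Below is a minimal standalone model of that check; the struct layouts, field subset, and array sizes are illustrative simplifications, not the kernel's definitions.

    #include <stdbool.h>
    #include <stddef.h>

    #define VM_MAX_MEMSEGS 4   /* illustrative sizes, not the kernel's */
    #define VM_MAX_MEMMAPS 8

    struct mem_seg { size_t len; bool sysmem; };
    struct mem_map { size_t len; int segid; };
    struct vm_mem_model {
        struct mem_seg mem_segs[VM_MAX_MEMSEGS];
        struct mem_map mem_maps[VM_MAX_MEMMAPS];
    };

    /* A slot is a sysmem mapping iff it is in use and its segment is sysmem. */
    static bool
    sysmem_mapping_model(const struct vm_mem_model *mem, int idx)
    {
        return (mem->mem_maps[idx].len != 0 &&
            mem->mem_segs[mem->mem_maps[idx].segid].sysmem);
    }
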
53 if (ident < 0 || ident >= VM_MAX_MEMSEGS) in vm_memseg_sysmem()
56 return (mem->mem_segs[ident].sysmem); in vm_memseg_sysmem()
74 for (int i = 0; i < VM_MAX_MEMMAPS; i++) { in vm_mem_cleanup()
88 for (int i = 0; i < VM_MAX_MEMMAPS; i++) { in vm_mem_destroy()
93 for (int i = 0; i < VM_MAX_MEMSEGS; i++) in vm_mem_destroy()
96 sx_xunlock(&mem->mem_segs_lock); in vm_mem_destroy()
97 sx_destroy(&mem->mem_segs_lock); in vm_mem_destroy()
103 sx_slock(&vm_mem(vm)->mem_segs_lock); in vm_slock_memsegs()
109 sx_xlock(&vm_mem(vm)->mem_segs_lock); in vm_xlock_memsegs()
115 sx_unlock(&vm_mem(vm)->mem_segs_lock); in vm_unlock_memsegs()
121 sx_assert(&vm_mem(vm)->mem_segs_lock, SX_LOCKED); in vm_assert_memseg_locked()
127 sx_assert(&vm_mem(vm)->mem_segs_lock, SX_XLOCKED); in vm_assert_memseg_xlocked()
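
The helpers at lines 103-127 are thin wrappers over an sx(9) reader/writer lock guarding the segment array: readers take it shared, mutators take it exclusive, and the assert variants document lock expectations at function entry. As a rough userspace analogue only, using POSIX rwlocks as a stand-in for sx locks (there is no direct pthread equivalent of sx_assert, so the assert helpers are omitted):

    #include <pthread.h>

    static pthread_rwlock_t mem_segs_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Shared lock: many concurrent readers of the segment array. */
    static void
    slock_memsegs_model(void)
    {
        pthread_rwlock_rdlock(&mem_segs_lock);
    }

    /* Exclusive lock: a single writer creating or freeing segments. */
    static void
    xlock_memsegs_model(void)
    {
        pthread_rwlock_wrlock(&mem_segs_lock);
    }

    /* One unlock releases either mode, mirroring sx_unlock(). */
    static void
    unlock_memsegs_model(void)
    {
        pthread_rwlock_unlock(&mem_segs_lock);
    }
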
134 * an implicit lock on 'vm->mem_maps[]'.
140 struct vm_mem_map *mm; in vm_mem_allocated() local
150 for (i = 0; i < VM_MAX_MEMMAPS; i++) { in vm_mem_allocated()
151 mm = &vm_mem(vm)->mem_maps[i]; in vm_mem_allocated()
152 if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len) in vm_mem_allocated()
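
vm_mem_allocated() answers "does any populated mapping cover this guest-physical address?" with a linear scan of the fixed-size mem_maps array; per the comment fragment at line 134, the caller must already hold a lock that keeps mem_maps[] stable. The scan in isolation, with placeholder types and sizes:

    #include <stdbool.h>
    #include <stdint.h>

    #define VM_MAX_MEMMAPS 8   /* illustrative */

    struct mem_map { uint64_t gpa; uint64_t len; };

    static bool
    gpa_allocated_model(const struct mem_map maps[VM_MAX_MEMMAPS], uint64_t gpa)
    {
        for (int i = 0; i < VM_MAX_MEMMAPS; i++) {
            const struct mem_map *mm = &maps[i];
            /* len == 0 marks an unused slot; otherwise test [gpa, gpa + len). */
            if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
                return (true);
        }
        return (false);
    }
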
170 if (ident < 0 || ident >= VM_MAX_MEMSEGS) in vm_alloc_memseg()
173 if (len == 0 || (len & PAGE_MASK)) in vm_alloc_memseg()
176 seg = &mem->mem_segs[ident]; in vm_alloc_memseg()
177 if (seg->object != NULL) { in vm_alloc_memseg()
178 if (seg->len == len && seg->sysmem == sysmem) in vm_alloc_memseg()
194 seg->len = len; in vm_alloc_memseg()
195 seg->object = obj; in vm_alloc_memseg()
197 seg->object->domain.dr_policy = obj_domainset; in vm_alloc_memseg()
198 seg->sysmem = sysmem; in vm_alloc_memseg()
200 return (0); in vm_alloc_memseg()
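
vm_alloc_memseg() validates the segment identifier and requires a nonzero, page-aligned length before touching the slot; if the slot is already populated, an identical re-request is distinguished from a conflicting one. The listing elides the return values for the already-populated branch, so the EEXIST/EINVAL pair below is an assumption based on the surrounding code, as is the rest of this model (simplified structs, 4 KiB pages, an opaque backing pointer in place of a VM object):

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define MODEL_PAGE_MASK 0xfffUL   /* 4 KiB pages assumed */
    #define VM_MAX_MEMSEGS  4

    struct mem_seg { size_t len; bool sysmem; void *object; };

    static int
    alloc_memseg_model(struct mem_seg segs[VM_MAX_MEMSEGS], int ident,
        size_t len, bool sysmem, void *backing)
    {
        if (ident < 0 || ident >= VM_MAX_MEMSEGS)
            return (EINVAL);
        if (len == 0 || (len & MODEL_PAGE_MASK) != 0)  /* page-aligned only */
            return (EINVAL);

        struct mem_seg *seg = &segs[ident];
        if (seg->object != NULL) {
            /* Re-creating an identical segment is reported distinctly. */
            return (seg->len == len && seg->sysmem == sysmem) ?
                EEXIST : EINVAL;
        }
        seg->len = len;
        seg->object = backing;
        seg->sysmem = sysmem;
        return (0);
    }
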
214 if (ident < 0 || ident >= VM_MAX_MEMSEGS) in vm_get_memseg()
217 seg = &mem->mem_segs[ident]; in vm_get_memseg()
219 *len = seg->len; in vm_get_memseg()
221 *sysmem = seg->sysmem; in vm_get_memseg()
223 *objptr = seg->object; in vm_get_memseg()
224 return (0); in vm_get_memseg()
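
In vm_get_memseg(), the hits at lines 219, 221, and 223 are non-adjacent; the elided lines between them are presumably NULL guards, so each out-parameter is optional and written through only when the caller passed a non-NULL pointer. Sketched under that assumption:

    #include <stdbool.h>
    #include <stddef.h>

    struct mem_seg { size_t len; bool sysmem; void *object; };

    /*
     * Each out-parameter is optional: only non-NULL pointers are written
     * through, so callers ask for exactly the fields they need.
     */
    static int
    get_memseg_model(const struct mem_seg *seg, size_t *len, bool *sysmem,
        void **objptr)
    {
        if (len != NULL)
            *len = seg->len;
        if (sysmem != NULL)
            *sysmem = seg->sysmem;
        if (objptr != NULL)
            *objptr = seg->object;
        return (0);
    }
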
232 KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS, in vm_free_memseg()
235 seg = &vm_mem(vm)->mem_segs[ident]; in vm_free_memseg()
236 if (seg->object != NULL) { in vm_free_memseg()
237 vm_object_deallocate(seg->object); in vm_free_memseg()
253 if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0) in vm_mmap_memseg()
259 if (segid < 0 || segid >= VM_MAX_MEMSEGS) in vm_mmap_memseg()
263 seg = &mem->mem_segs[segid]; in vm_mmap_memseg()
264 if (seg->object == NULL) in vm_mmap_memseg()
268 if (first < 0 || first >= last || last > seg->len) in vm_mmap_memseg()
275 for (i = 0; i < VM_MAX_MEMMAPS; i++) { in vm_mmap_memseg()
276 m = &mem->mem_maps[i]; in vm_mmap_memseg()
277 if (m->len == 0) { in vm_mmap_memseg()
286 error = vm_map_find(&vmspace->vm_map, seg->object, first, &gpa, in vm_mmap_memseg()
287 len, 0, VMFS_NO_SPACE, prot, prot, 0); in vm_mmap_memseg()
291 vm_object_reference(seg->object); in vm_mmap_memseg()
294 error = vm_map_wire(&vmspace->vm_map, gpa, gpa + len, in vm_mmap_memseg()
297 vm_map_remove(&vmspace->vm_map, gpa, gpa + len); in vm_mmap_memseg()
303 map->gpa = gpa; in vm_mmap_memseg()
304 map->len = len; in vm_mmap_memseg()
305 map->segoff = first; in vm_mmap_memseg()
306 map->segid = segid; in vm_mmap_memseg()
307 map->prot = prot; in vm_mmap_memseg()
308 map->flags = flags; in vm_mmap_memseg()
309 return (0); in vm_mmap_memseg()
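
vm_mmap_memseg() has a classic claim-then-rollback shape: validate the arguments, take the first free mem_maps slot (len == 0 marks free), create the mapping, wire it, and on wire failure remove the mapping again; the slot fields (lines 303-308) are filled in only after every step has succeeded, so a failed call leaves no partial state. A control-flow model with the VM-map calls replaced by hypothetical stubs (map_find/map_wire/map_remove are not kernel APIs), the wiring shown unconditionally even though the kernel may gate it on a flag, and ENOSPC for a full table assumed:

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>

    #define VM_MAX_MEMMAPS 8

    struct mem_map { uint64_t gpa, len, segoff; int segid, prot, flags; };

    /* Stubs standing in for vm_map_find()/vm_map_wire()/vm_map_remove(). */
    static int  map_find(uint64_t gpa, uint64_t len)   { (void)gpa; (void)len; return (0); }
    static int  map_wire(uint64_t gpa, uint64_t len)   { (void)gpa; (void)len; return (0); }
    static void map_remove(uint64_t gpa, uint64_t len) { (void)gpa; (void)len; }

    static int
    mmap_memseg_model(struct mem_map maps[VM_MAX_MEMMAPS], uint64_t gpa,
        int segid, uint64_t segoff, uint64_t len, int prot, int flags)
    {
        struct mem_map *map = NULL;
        int error;

        /* Claim the first unused slot; len == 0 marks a free entry. */
        for (int i = 0; i < VM_MAX_MEMMAPS; i++) {
            if (maps[i].len == 0) {
                map = &maps[i];
                break;
            }
        }
        if (map == NULL)
            return (ENOSPC);

        error = map_find(gpa, len);        /* create the mapping */
        if (error != 0)
            return (error);
        error = map_wire(gpa, len);        /* wire it down */
        if (error != 0) {
            map_remove(gpa, len);          /* roll back on failure */
            return (error);
        }
        /* Publish the slot only after every step has succeeded. */
        map->gpa = gpa; map->len = len; map->segoff = segoff;
        map->segid = segid; map->prot = prot; map->flags = flags;
        return (0);
    }
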
320 for (i = 0; i < VM_MAX_MEMMAPS; i++) { in vm_munmap_memseg()
321 m = &mem->mem_maps[i]; in vm_munmap_memseg()
323 if ((m->flags & VM_MEMMAP_F_IOMMU) != 0) in vm_munmap_memseg()
326 if (m->gpa == gpa && m->len == len) { in vm_munmap_memseg()
328 return (0); in vm_munmap_memseg()
340 struct vm_mem_map *mm, *mmnext; in vm_mmap_getnext() local
346 for (i = 0; i < VM_MAX_MEMMAPS; i++) { in vm_mmap_getnext()
347 mm = &mem->mem_maps[i]; in vm_mmap_getnext()
348 if (mm->len == 0 || mm->gpa < *gpa) in vm_mmap_getnext()
350 if (mmnext == NULL || mm->gpa < mmnext->gpa) in vm_mmap_getnext()
351 mmnext = mm; in vm_mmap_getnext()
355 *gpa = mmnext->gpa; in vm_mmap_getnext()
357 *segid = mmnext->segid; in vm_mmap_getnext()
359 *segoff = mmnext->segoff; in vm_mmap_getnext()
361 *len = mmnext->len; in vm_mmap_getnext()
363 *prot = mmnext->prot; in vm_mmap_getnext()
365 *flags = mmnext->flags; in vm_mmap_getnext()
366 return (0); in vm_mmap_getnext()
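
vm_mmap_getnext() is a stateless iterator: given a cursor in *gpa, it scans every slot for the populated mapping with the smallest base address at or above the cursor, so callers can walk all mappings in address order by re-calling with the previous result's gpa + len. Only the gpa/len out-parameters are modeled below, and ENOENT for an exhausted cursor is my assumption:

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>

    #define VM_MAX_MEMMAPS 8

    struct mem_map { uint64_t gpa, len; };

    /* Report the populated mapping with the smallest base >= *gpa. */
    static int
    mmap_getnext_model(const struct mem_map maps[VM_MAX_MEMMAPS],
        uint64_t *gpa, uint64_t *len)
    {
        const struct mem_map *mmnext = NULL;

        for (int i = 0; i < VM_MAX_MEMMAPS; i++) {
            const struct mem_map *mm = &maps[i];
            if (mm->len == 0 || mm->gpa < *gpa)  /* unused, or below cursor */
                continue;
            if (mmnext == NULL || mm->gpa < mmnext->gpa)
                mmnext = mm;
        }
        if (mmnext == NULL)
            return (ENOENT);
        *gpa = mmnext->gpa;
        *len = mmnext->len;
        return (0);
    }
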
375 struct vm_mem_map *mm; in vm_free_memmap() local
378 mm = &vm_mem(vm)->mem_maps[ident]; in vm_free_memmap()
379 if (mm->len) { in vm_free_memmap()
380 error = vm_map_remove(&vm_vmspace(vm)->vm_map, mm->gpa, in vm_free_memmap()
381 mm->gpa + mm->len); in vm_free_memmap()
384 bzero(mm, sizeof(struct vm_mem_map)); in vm_free_memmap()
392 struct vm_mem_map *mm; in vmm_sysmem_maxaddr() local
397 maxaddr = 0; in vmm_sysmem_maxaddr()
398 for (i = 0; i < VM_MAX_MEMMAPS; i++) { in vmm_sysmem_maxaddr()
399 mm = &mem->mem_maps[i]; in vmm_sysmem_maxaddr()
401 if (maxaddr < mm->gpa + mm->len) in vmm_sysmem_maxaddr()
402 maxaddr = mm->gpa + mm->len; in vmm_sysmem_maxaddr()
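
vmm_sysmem_maxaddr() folds the same slot scan into a running maximum of mapping end addresses (gpa + len). The elided line 400 is presumably the sysmem_mapping() filter from earlier in the file, so only system-memory mappings contribute; the model below inlines that filter as a boolean field:

    #include <stdbool.h>
    #include <stdint.h>

    #define VM_MAX_MEMMAPS 8

    struct mem_map { uint64_t gpa, len; bool sysmem; };

    /* Highest guest-physical address covered by any sysmem mapping. */
    static uint64_t
    sysmem_maxaddr_model(const struct mem_map maps[VM_MAX_MEMMAPS])
    {
        uint64_t maxaddr = 0;

        for (int i = 0; i < VM_MAX_MEMMAPS; i++) {
            const struct mem_map *mm = &maps[i];
            if (mm->len != 0 && mm->sysmem && maxaddr < mm->gpa + mm->len)
                maxaddr = mm->gpa + mm->len;
        }
        return (maxaddr);
    }
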
412 struct vm_mem_map *mm; in _vm_gpa_hold() local
417 if (len > PAGE_SIZE - pageoff) in _vm_gpa_hold()
418 panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len); in _vm_gpa_hold()
420 count = 0; in _vm_gpa_hold()
421 for (i = 0; i < VM_MAX_MEMMAPS; i++) { in _vm_gpa_hold()
422 mm = &vm_mem(vm)->mem_maps[i]; in _vm_gpa_hold()
423 if (gpa >= mm->gpa && gpa < mm->gpa + mm->len) { in _vm_gpa_hold()
425 &vm_vmspace(vm)->vm_map, trunc_page(gpa), in _vm_gpa_hold()
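
The panic at lines 417-418 encodes an invariant of _vm_gpa_hold(): a single hold must not cross a page boundary, i.e. len can be at most PAGE_SIZE minus gpa's offset within its page. The arithmetic checked standalone, with 4 KiB pages assumed:

    #include <assert.h>
    #include <stdint.h>

    #define MODEL_PAGE_SIZE 4096UL   /* 4 KiB pages assumed */
    #define MODEL_PAGE_MASK (MODEL_PAGE_SIZE - 1)

    /*
     * A hold of 'len' bytes starting at 'gpa' stays within one page iff
     * len <= PAGE_SIZE - (gpa & PAGE_MASK); the kernel panics otherwise.
     */
    static int
    hold_fits_in_page(uint64_t gpa, uint64_t len)
    {
        uint64_t pageoff = gpa & MODEL_PAGE_MASK;

        return (len <= MODEL_PAGE_SIZE - pageoff);
    }

    int
    main(void)
    {
        assert(hold_fits_in_page(0x1000, 4096));   /* exactly one page   */
        assert(hold_fits_in_page(0x1ff0, 16));     /* tail of a page     */
        assert(!hold_fits_in_page(0x1ff0, 17));    /* crosses into next  */
        return (0);
    }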