Lines matching 'gpa' in FreeBSD's vmm guest-memory code, grouped by containing function ('...' marks elided source lines).

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 ...
 */

in vm_mem_init():
	sx_init(&mem->mem_segs_lock, "vm_mem_segs");

in sysmem_mapping():
	if (mem->mem_maps[idx].len != 0 &&
	    mem->mem_segs[mem->mem_maps[idx].segid].sysmem)

in vm_memseg_sysmem():
	return (mem->mem_segs[ident].sysmem);

in vm_mem_destroy():
	sx_xunlock(&mem->mem_segs_lock);
	sx_destroy(&mem->mem_segs_lock);

in vm_slock_memsegs():
	sx_slock(&vm_mem(vm)->mem_segs_lock);

in vm_xlock_memsegs():
	sx_xlock(&vm_mem(vm)->mem_segs_lock);

in vm_unlock_memsegs():
	sx_unlock(&vm_mem(vm)->mem_segs_lock);

in vm_assert_memseg_locked():
	sx_assert(&vm_mem(vm)->mem_segs_lock, SX_LOCKED);

in vm_assert_memseg_xlocked():
	sx_assert(&vm_mem(vm)->mem_segs_lock, SX_XLOCKED);

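All of the accessors below funnel through this one sx(9) lock: readers of the segment array take it shared, code that installs or destroys segments takes it exclusive, and the two assert wrappers let callees document which mode they rely on. A minimal userspace sketch of the same discipline, with a POSIX rwlock standing in for sx(9) (every name in it is an illustrative stand-in, not the kernel API):

#include <pthread.h>

/* Illustrative stand-in for struct vm_mem; not the kernel definition. */
struct vm_mem_demo {
	pthread_rwlock_t mem_segs_lock;
};

static void
slock_memsegs(struct vm_mem_demo *mem)	/* cf. vm_slock_memsegs() */
{
	pthread_rwlock_rdlock(&mem->mem_segs_lock);
}

static void
xlock_memsegs(struct vm_mem_demo *mem)	/* cf. vm_xlock_memsegs() */
{
	pthread_rwlock_wrlock(&mem->mem_segs_lock);
}

static void
unlock_memsegs(struct vm_mem_demo *mem)	/* both modes release identically */
{
	pthread_rwlock_unlock(&mem->mem_segs_lock);
}

int
main(void)
{
	struct vm_mem_demo mem;

	pthread_rwlock_init(&mem.mem_segs_lock, NULL);
	slock_memsegs(&mem);	/* many readers may hold this concurrently */
	unlock_memsegs(&mem);
	xlock_memsegs(&mem);	/* one writer excludes all readers */
	unlock_memsegs(&mem);
	pthread_rwlock_destroy(&mem.mem_segs_lock);
	return (0);
}
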
in vm_mem_allocated():
	/*
	 * Return 'true' if 'gpa' is allocated in the guest address space.
	 *
	 * This function is called in the context of a running vcpu which acts
	 * as an implicit lock on 'vm->mem_maps[]'.
	 */
	vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa)
	...
	mm = &vm_mem(vm)->mem_maps[i];
	if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
		return (true);		/* 'gpa' is sysmem or devmem */

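The test in that loop is the whole mechanism: a slot with len == 0 is unused, and containment is a half-open check against [gpa, gpa + len), so two adjacent mappings never both claim the byte where they meet. A self-contained sketch of the same check (the mem_map type and the addresses in main() are illustrative, not from the source):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t vm_paddr_t;		/* illustrative stand-in */
struct mem_map { vm_paddr_t gpa; size_t len; };

#define NMAPS 4

/* Same half-open containment test as the loop in vm_mem_allocated(). */
static bool
mem_allocated(const struct mem_map *maps, vm_paddr_t gpa)
{
	for (int i = 0; i < NMAPS; i++) {
		const struct mem_map *mm = &maps[i];
		if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
			return (true);
	}
	return (false);
}

int
main(void)
{
	struct mem_map maps[NMAPS] = {
		{ 0x0,         0x100000 },	/* low 1 MB */
		{ 0x100000000, 0x40000000 },	/* 1 GB starting at 4 GB */
	};

	printf("%d %d %d\n",
	    mem_allocated(maps, 0xfffff),	/* 1: last byte of the low map */
	    mem_allocated(maps, 0x100000),	/* 0: one byte past its end */
	    mem_allocated(maps, 0x100000000));	/* 1: first byte of the high map */
	return (0);
}
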
in vm_alloc_memseg():
	seg = &mem->mem_segs[ident];
	if (seg->object != NULL) {
		if (seg->len == len && seg->sysmem == sysmem)
	...
	seg->len = len;
	seg->object = obj;
	if (obj_domainset != NULL)
		seg->object->domain.dr_policy = obj_domainset;
	seg->sysmem = sysmem;

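The guard at the top makes segment creation effectively idempotent: asking again for a segment that already exists with the same length and sysmem flag is distinguishable from asking for a conflicting one. A small sketch of that distinction, with an illustrative record type and illustrative error values:

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

/* Illustrative segment record; the real one also carries a VM object. */
struct mem_seg { size_t len; bool sysmem; bool in_use; };

/* Mirrors the check at the top of vm_alloc_memseg(). */
static int
alloc_seg(struct mem_seg *seg, size_t len, bool sysmem)
{
	if (seg->in_use) {
		if (seg->len == len && seg->sysmem == sysmem)
			return (EEXIST);	/* identical segment exists */
		return (EINVAL);		/* conflicting definition */
	}
	seg->len = len;
	seg->sysmem = sysmem;
	seg->in_use = true;
	return (0);
}

int
main(void)
{
	struct mem_seg seg = { 0, false, false };

	assert(alloc_seg(&seg, 1 << 20, true) == 0);	  /* created */
	assert(alloc_seg(&seg, 1 << 20, true) == EEXIST); /* same again */
	assert(alloc_seg(&seg, 2 << 20, true) == EINVAL); /* conflict */
	return (0);
}
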
in vm_get_memseg():
	seg = &mem->mem_segs[ident];
	if (len)
		*len = seg->len;
	if (sysmem)
		*sysmem = seg->sysmem;
	if (objptr)
		*objptr = seg->object;

in vm_free_memseg():
	seg = &vm_mem(vm)->mem_segs[ident];
	if (seg->object != NULL) {
		vm_object_deallocate(seg->object);

in vm_mmap_memseg():
	vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
	    size_t len, int prot, int flags)
	...
	seg = &mem->mem_segs[segid];
	if (seg->object == NULL)
	...
	if (first < 0 || first >= last || last > seg->len)
	...
	if ((gpa | first | last) & PAGE_MASK)
	...
	m = &mem->mem_maps[i];
	if (m->len == 0) {
	...
	error = vm_map_find(&vmspace->vm_map, seg->object, first, &gpa,
	...
	vm_object_reference(seg->object);
	...
	error = vm_map_wire(&vmspace->vm_map, gpa, gpa + len,
	...
	vm_map_remove(&vmspace->vm_map, gpa, gpa + len);
	...
	map->gpa = gpa;
	map->len = len;
	map->segoff = first;
	map->segid = segid;
	map->prot = prot;
	map->flags = flags;

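Two details of the validation above are worth calling out: 'last > seg->len' keeps the mapped slice inside the backing segment, and OR-ing gpa, first, and last before masking tests all three for page alignment in one expression. A standalone sketch of that check (PAGE_SIZE and the helper are illustrative; the kernel's vm_ooffset_t is signed, so the real code also rejects a negative first):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (PAGE_SIZE - 1)

/* Illustrative version of the bounds/alignment checks in vm_mmap_memseg(). */
static int
mapping_args_ok(uint64_t gpa, uint64_t first, uint64_t last, uint64_t seglen)
{
	if (first >= last || last > seglen)
		return (0);	/* empty slice or past the segment end */
	if ((gpa | first | last) & PAGE_MASK)
		return (0);	/* at least one of the three is unaligned */
	return (1);
}

int
main(void)
{
	printf("%d\n", mapping_args_ok(0x1000, 0, 0x2000, 0x4000)); /* 1 */
	printf("%d\n", mapping_args_ok(0x1800, 0, 0x2000, 0x4000)); /* 0: gpa unaligned */
	printf("%d\n", mapping_args_ok(0x1000, 0, 0x5000, 0x4000)); /* 0: past segment end */
	return (0);
}
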
in vm_munmap_memseg():
	vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len)
	...
	m = &mem->mem_maps[i];
	...
	if ((m->flags & VM_MEMMAP_F_IOMMU) != 0)
	...
	if (m->gpa == gpa && m->len == len) {

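Unmapping is deliberately strict: the (gpa, len) pair must exactly match an existing mapping, and the scan bails out with a busy error as soon as it encounters any mapping wired into the IOMMU. A sketch of that exact-match scan, with illustrative types, flag, and error values:

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

typedef uint64_t vm_paddr_t;		/* illustrative stand-in */
struct mem_map { vm_paddr_t gpa; size_t len; int flags; };

#define NMAPS	4
#define F_IOMMU	0x1			/* stands in for VM_MEMMAP_F_IOMMU */

/* Mirrors the loop in vm_munmap_memseg(). */
static int
munmap_exact(struct mem_map *maps, vm_paddr_t gpa, size_t len)
{
	for (int i = 0; i < NMAPS; i++) {
		struct mem_map *m = &maps[i];
		if ((m->flags & F_IOMMU) != 0)
			return (EBUSY);		/* IOMMU-wired: refuse */
		if (m->gpa == gpa && m->len == len) {
			memset(m, 0, sizeof(*m));	/* free the slot */
			return (0);
		}
	}
	return (EINVAL);	/* no exact match */
}

int
main(void)
{
	struct mem_map maps[NMAPS] = { { 0x1000, 0x2000, 0 } };

	assert(munmap_exact(maps, 0x1000, 0x1000) == EINVAL);	/* partial unmap */
	assert(munmap_exact(maps, 0x1000, 0x2000) == 0);	/* exact match */
	return (0);
}
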
in vm_mmap_getnext():
	vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
	    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
	...
	mm = &mem->mem_maps[i];
	if (mm->len == 0 || mm->gpa < *gpa)
		continue;
	if (mmnext == NULL || mm->gpa < mmnext->gpa)
		mmnext = mm;
	...
	*gpa = mmnext->gpa;
	if (segid)
		*segid = mmnext->segid;
	if (segoff)
		*segoff = mmnext->segoff;
	if (len)
		*len = mmnext->len;
	if (prot)
		*prot = mmnext->prot;
	if (flags)
		*flags = mmnext->flags;

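Because the mapping table is a small unsorted array, enumeration works by repeated minimum-finding: each call returns the live mapping with the lowest guest address at or above *gpa, and the caller advances past it to walk the maps in address order. An illustrative standalone version of that walk:

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

typedef uint64_t vm_paddr_t;		/* illustrative stand-in */
struct mem_map { vm_paddr_t gpa; size_t len; };

#define NMAPS 4

/* Same scan as vm_mmap_getnext(): lowest live mapping at or above *gpa. */
static bool
getnext(const struct mem_map *maps, vm_paddr_t *gpa, size_t *len)
{
	const struct mem_map *next = NULL;

	for (int i = 0; i < NMAPS; i++) {
		const struct mem_map *mm = &maps[i];
		if (mm->len == 0 || mm->gpa < *gpa)
			continue;
		if (next == NULL || mm->gpa < next->gpa)
			next = mm;
	}
	if (next == NULL)
		return (false);
	*gpa = next->gpa;
	*len = next->len;
	return (true);
}

int
main(void)
{
	struct mem_map maps[NMAPS] = {	/* deliberately out of order */
		{ 0x200000, 0x100000 },
		{ 0x0,      0x100000 },
	};
	vm_paddr_t gpa = 0;
	size_t len;

	while (getnext(maps, &gpa, &len)) {	/* visits 0x0, then 0x200000 */
		printf("map at 0x%" PRIx64 ", %zu bytes\n", gpa, len);
		gpa += len;	/* resume past the mapping just reported */
	}
	return (0);
}
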
in vm_free_memmap():
	mm = &vm_mem(vm)->mem_maps[ident];
	if (mm->len) {
		error = vm_map_remove(&vm_vmspace(vm)->vm_map, mm->gpa,
		    mm->gpa + mm->len);

in vmm_sysmem_maxaddr():
	mm = &mem->mem_maps[i];
	if (sysmem_mapping(mem, i)) {
		if (maxaddr < mm->gpa + mm->len)
			maxaddr = mm->gpa + mm->len;

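The max-address computation is a plain reduction over the mapping table: the guest's top of system memory is the largest end address (gpa + len) over the qualifying mappings. A sketch of the same reduction (the sysmem_mapping() filter is folded into a simple len != 0 test here for brevity):

#include <inttypes.h>
#include <stdio.h>

typedef uint64_t vm_paddr_t;		/* illustrative stand-in */
struct mem_map { vm_paddr_t gpa; size_t len; };

#define NMAPS 4

/* Same reduction as vmm_sysmem_maxaddr(); the real code also skips
 * mappings whose backing segment is not system memory. */
static vm_paddr_t
sysmem_maxaddr(const struct mem_map *maps)
{
	vm_paddr_t maxaddr = 0;

	for (int i = 0; i < NMAPS; i++) {
		if (maps[i].len != 0 && maxaddr < maps[i].gpa + maps[i].len)
			maxaddr = maps[i].gpa + maps[i].len;
	}
	return (maxaddr);
}

int
main(void)
{
	struct mem_map maps[NMAPS] = {
		{ 0x0,         0x80000000 },	/* 2 GB low */
		{ 0x100000000, 0x80000000 },	/* 2 GB above 4 GB */
	};

	printf("maxaddr = 0x%" PRIx64 "\n", sysmem_maxaddr(maps)); /* 0x180000000 */
	return (0);
}
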
in _vm_gpa_hold():
	_vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
	    void **cookie)
	...
	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);
	...
	mm = &vm_mem(vm)->mem_maps[i];
	if (gpa >= mm->gpa && gpa < mm->gpa + mm->len) {
		count = vm_fault_quick_hold_pages(
		    &vm_vmspace(vm)->vm_map, trunc_page(gpa),
		    PAGE_SIZE, reqprot, &m, 1);
	...
	if (count == 1) {

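A hold never spans a page boundary: the panic fires unless [gpa, gpa + len) fits inside the single page containing gpa, and the fault is then taken on trunc_page(gpa) for exactly PAGE_SIZE bytes. The arithmetic in isolation (the constants and the values in main() are illustrative):

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (PAGE_SIZE - 1)
#define trunc_page(x) ((x) & ~PAGE_MASK)

int
main(void)
{
	uint64_t gpa = 0x12345;		/* arbitrary example address */
	size_t len = 0x100;
	uint64_t pageoff = gpa & PAGE_MASK;

	/* The kernel panics instead of asserting if this is violated. */
	assert(len <= PAGE_SIZE - pageoff);

	/* The page actually held: one page starting at the page base. */
	printf("fault at 0x%" PRIx64 ", offset 0x%" PRIx64 "\n",
	    (uint64_t)trunc_page(gpa), pageoff);
	return (0);
}
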
in vm_gpa_hold():
	vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot,
	    void **cookie)
	...
	return (_vm_gpa_hold(vcpu_vm(vcpu), gpa, len, reqprot, cookie));

in vm_gpa_hold_global():
	vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
	    void **cookie)
	...
	return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie));