// SPDX-License-Identifier: GPL-2.0-only OR MIT
 * The GPU VA manager internally uses an rb-tree to manage the
 * &drm_gpuva mappings within a GPU's virtual address space.
 *
 * To insert entries from within dma-fence signalling critical sections it is
 * enough to pre-allocate the &drm_gpuva structures.
 *
 * The external object and evicted object lists are maintained in order to
 * accelerate locking of dma-resv locks and validation of evicted objects.
 * The GPU VA manager also provides functions to let the &drm_gpuvm calculate a
 * sequence of operations to satisfy a given map or unmap request. This is
 * useful to implement Vulkan 'Sparse Memory Bindings' - drivers' UAPIs often
 * refer to this as VM BIND.
 *
 * Such a list of operations contains map, unmap and remap operations for a
 * given newly requested mapping. It contains an arbitrary amount
 * of unmap operations, a maximum of two remap operations and a single map
 * operation.
 * The single map operation represents the original map operation requested by
 * the caller.
 * A remap operation consists of
 * one unmap operation and one or two map operations, such that drivers can
 * derive the page table update delta accordingly.
 *
 * For mappings which are only partially located within a requested range,
 * remap operations are
 * created such that those mappings are split up and re-mapped partially.
 *
 * The required &drm_gpuva structures can be pre-allocated upfront, since
 * memory must not be allocated within the
 * dma-fence signalling critical path.
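 *
 * A minimal sketch of such pre-allocation; the ctx naming is an assumption:
 *
 *	// pre-allocate outside of the dma-fence signalling critical path;
 *	// unused structures are freed by the driver afterwards
 *	ctx->new_va = kzalloc(sizeof(*ctx->new_va), GFP_KERNEL);
 *	ctx->prev_va = kzalloc(sizeof(*ctx->prev_va), GFP_KERNEL);
 *	ctx->next_va = kzalloc(sizeof(*ctx->next_va), GFP_KERNEL);
 *	if (!ctx->new_va || !ctx->prev_va || !ctx->next_va)
 *		return -ENOMEM;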
 * The following paragraphs depict the basic constellations of existing GPU VA
 * mappings, a newly requested mapping and the resulting mappings, as generated
 * by drm_gpuvm_sm_map() - it doesn't cover any arbitrary combinations of these.
 *	old: |-----------| (bo_offset=n)
 *	req: |-----------| (bo_offset=n)
 *	new: |-----------| (bo_offset=n)
 *
 *	old: |-----------| (bo_offset=n)
 *	req: |-----------| (bo_offset=m)
 *	new: |-----------| (bo_offset=m)
 *
 *	old: |-----------| (bo_offset=n)
 *	req: |-----------| (bo_offset=n)
 *	new: |-----------| (bo_offset=n)
 *
 *	old: |-----|       (bo_offset=n)
 *	req: |-----------| (bo_offset=n)
 *	new: |-----------| (bo_offset=n)
 *	Note: We expect to see the same result for a request with a different BO
 *	and/or non-contiguous BO offset.
 *	Requested mapping's range is a left aligned subset of the existent one,
 *	but backed by a different BO. Hence, map the requested mapping and split
 *	up the existent one.
 *	old: |-----------| (bo_offset=n)
 *	req: |-----|       (bo_offset=n)
 *	new: |-----|-----| (b.bo_offset=n, a.bo_offset=n+1)
 *	Note: We expect to see the same result for a request with a different BO
 *	and/or non-contiguous BO offset.
 *	old: |-----------| (bo_offset=n)
 *	req: |-----|       (bo_offset=n)
 *	new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
 *	Requested mapping's range is a right aligned subset of the existent one,
 *	but backed by a different BO. Hence, map the requested mapping and split
 *	up the existent one.
 *	old: |-----------| (bo_offset=n)
 *	req:       |-----| (bo_offset=m)
 *	new: |-----|-----| (a.bo_offset=n, b.bo_offset=m)
 *	old: |-----------| (bo_offset=n)
 *	req:       |-----| (bo_offset=n+1)
 *	new: |-----|-----| (a'.bo_offset=n, a.bo_offset=n+1)
 *	Requested mapping's range overlaps the existent one's end, but is backed
 *	by a different BO. Hence, map the requested mapping and split up the
 *	existent one.
 *	old: |-----------|       (bo_offset=n)
 *	req:       |-----------| (bo_offset=m)
 *	new: |-----|-----------| (a.bo_offset=n, b.bo_offset=m)
 *	old: |-----------|       (bo_offset=n)
 *	req:       |-----------| (bo_offset=n+1)
 *	new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
 *	Requested mapping's range is a centered subset of the existent one,
 *	having a different backing BO. Hence, map the requested mapping and split
 *	up the existent one in two.
 *	old: |-----------------| (bo_offset=n)
 *	req:       |-----|       (bo_offset=m)
 *	new: |-----|-----|-----| (a.bo_offset=n, b.bo_offset=m, a'.bo_offset=n+2)
 *	old: |-----------------| (bo_offset=n)
 *	req:       |-----|       (bo_offset=n+1)
 *	new: |-----|-----|-----| (a'.bo_offset=n, a.bo_offset=n+1, a''.bo_offset=n+2)
 *	old:       |-----| (bo_offset=n+1)
 *	req: |-----------| (bo_offset=n)
 *	new: |-----------| (bo_offset=n)
 *	Note: We expect to see the same result for a request with a different BO
 *	and/or non-contiguous bo_offset.
 *	old:       |-----|      (bo_offset=n+1)
 *	req: |----------------| (bo_offset=n)
 *	new: |----------------| (bo_offset=n)
 *	Note: We expect to see the same result for a request with a different BO
 *	and/or non-contiguous bo_offset.
 *	Requested mapping's range overlaps the existent one at its beginning, and
 *	is backed by a different BO. Hence, map the requested mapping and split up
 *	the existent one, adjusting its BO offset.
 *	old:       |-----------| (bo_offset=n)
 *	req: |-----------|       (bo_offset=m)
 *	new: |-----------|-----| (b.bo_offset=m, a.bo_offset=n+2)
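 *
 * As an illustrative sketch of how such a constellation surfaces through the
 * split and merge helpers below (callback processing omitted):
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *
 *	ops = drm_gpuvm_sm_map_ops_create(gpuvm, req_addr, req_range,
 *					  req_obj, req_offset);
 *	drm_gpuva_for_each_op(op, ops) {
 *		// op->op is DRM_GPUVA_OP_REMAP for the split up existent
 *		// mapping and DRM_GPUVA_OP_MAP for the requested one
 *	}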
 * The following examples show how a driver receives lists of
 * &drm_gpuva_op in order to satisfy a given map or unmap request and how to
 * process them.
 *	// Typically drivers would embed the &drm_gpuvm and &drm_gpuva
 *	// structure in individual driver structures and lock the dma-resv with
 *	// drm_exec or similar helpers.
 *
 *	switch (op->op) {
 *	case DRM_GPUVA_OP_MAP:
 *		drm_gpuva_map(gpuvm, va, &op->map);
 *		drm_gpuva_link(va, vm_bo);
 *		break;
 *	case DRM_GPUVA_OP_REMAP: {
 *		struct drm_gpuva *va = op->remap.unmap->va;
 *
 *		if (op->remap.prev) {
 *			// set up the pre-allocated 'prev' &drm_gpuva
 *		}
 *		if (op->remap.next) {
 *			// set up the pre-allocated 'next' &drm_gpuva
 *		}
 *
 *		drm_gpuva_remap(prev, next, &op->remap);
 *		if (prev)
 *			drm_gpuva_link(prev, va->vm_bo);
 *		if (next)
 *			drm_gpuva_link(next, va->vm_bo);
 *		drm_gpuva_unlink(va);
 *		break;
 *	}
 *	case DRM_GPUVA_OP_UNMAP: {
 *		struct drm_gpuva *va = op->unmap->va;
 *
 *		drm_gpuva_unlink(va);
 *		drm_gpuva_unmap(&op->unmap);
 *		break;
 *	}
 *	}
 *	// Typically drivers would embed the &drm_gpuvm and &drm_gpuva
 *	// structure in individual driver structures and lock the dma-resv with
 *	// drm_exec or similar helpers.
 *
 *	if (!ctx.new_va || !ctx.prev_va || !ctx.next_va || !ctx.vm_bo) {
 *		ret = -ENOMEM;
 *		goto out;
 *	}
 *
 *	// Typically protected with a driver specific GEM gpuva lock used in
 *	// the fence signalling critical path for drm_gpuva_link() and
 *	// drm_gpuva_unlink(), hence pre-allocate.
 *	ctx.vm_bo = drm_gpuvm_bo_obtain_prealloc(ctx.vm_bo);
 *
 *	// in the driver's sm_step_map callback:
 *	drm_gpuva_map(ctx->vm, ctx->new_va, &op->map);
 *	drm_gpuva_link(ctx->new_va, ctx->vm_bo);
 *	// prevent the new GPUVA from being freed in driver_mapping_create()
 *	ctx->new_va = NULL;
 *
 *	// in the driver's sm_step_remap callback:
 *	struct drm_gpuva *va = op->remap.unmap->va;
 *
 *	drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
 *
 *	if (op->remap.prev) {
 *		drm_gpuva_link(ctx->prev_va, va->vm_bo);
 *		ctx->prev_va = NULL;
 *	}
 *	if (op->remap.next) {
 *		drm_gpuva_link(ctx->next_va, va->vm_bo);
 *		ctx->next_va = NULL;
 *	}
 *	drm_gpuva_unlink(va);
 *
 *	// in the driver's sm_step_unmap callback:
 *	drm_gpuva_unlink(op->unmap.va);
 *	drm_gpuva_unmap(&op->unmap);
 *	kfree(op->unmap.va);
 * get_next_vm_bo_from_list() - get the next vm_bo element

	spin_lock(&(__gpuvm)->__list_name.lock);				\
	if (!(__gpuvm)->__list_name.local_list)					\
		(__gpuvm)->__list_name.local_list = __local_list;		\
	else									\
		drm_WARN_ON((__gpuvm)->drm,					\
			    (__gpuvm)->__list_name.local_list != __local_list);	\
										\
	while (!list_empty(&(__gpuvm)->__list_name.list)) {			\
		__vm_bo = list_first_entry(&(__gpuvm)->__list_name.list,	\
					   struct drm_gpuvm_bo,			\
					   list.entry.__list_name);		\
		if (kref_get_unless_zero(&__vm_bo->kref)) {			\
			list_move_tail(&(__vm_bo)->list.entry.__list_name,	\
				       __local_list);				\
			break;							\
		} else {							\
			list_del_init(&(__vm_bo)->list.entry.__list_name);	\
			__vm_bo = NULL;						\
		}								\
	}									\
	spin_unlock(&(__gpuvm)->__list_name.lock);				\
 * for_each_vm_bo_in_list() - internal vm_bo list iterator
 *
 * It is not allowed to re-assign the vm_bo pointer from inside this loop.
 * restore_vm_bo_list() - move vm_bo elements back to their original list

#define restore_vm_bo_list(__gpuvm, __list_name)			\
	__restore_vm_bo_list((__gpuvm), &(__gpuvm)->__list_name.lock,	\
			     &(__gpuvm)->__list_name.list,		\
			     &(__gpuvm)->__list_name.local_list)
 * drm_gpuvm_bo_list_add() - insert a vm_bo into the given list

	__drm_gpuvm_bo_list_add((__vm_bo)->vm,					\
				__lock ? &(__vm_bo)->vm->__list_name.lock :	\
					 NULL,					\
				&(__vm_bo)->list.entry.__list_name,		\
				&(__vm_bo)->vm->__list_name.list)
 * drm_gpuvm_bo_list_del_init() - remove a vm_bo from the given list

	__drm_gpuvm_bo_list_del((__vm_bo)->vm,					\
				__lock ? &(__vm_bo)->vm->__list_name.lock :	\
					 NULL,					\
				&(__vm_bo)->list.entry.__list_name,		\
				true)
 * drm_gpuvm_bo_list_del() - remove a vm_bo from the given list

	__drm_gpuvm_bo_list_del((__vm_bo)->vm,					\
				__lock ? &(__vm_bo)->vm->__list_name.lock :	\
					 NULL,					\
				&(__vm_bo)->list.entry.__list_name,		\
				false)
#define GPUVA_START(node) ((node)->va.addr)
#define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)
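/*
 * Example: a mapping with va.addr = 0x1000 and va.range = 0x2000 yields
 * GPUVA_START() == 0x1000 and GPUVA_LAST() == 0x2fff; the interval tree
 * below operates on such inclusive [start, last] intervals.
 */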
/* in drm_gpuvm_warn_check_overflow(): */
	return drm_WARN(gpuvm->drm, drm_gpuvm_check_overflow(addr, range),
			"GPUVA address limited to %zu bytes.\n", sizeof(addr));
/* in drm_gpuvm_in_mm_range(): */
	u64 mm_start = gpuvm->mm_start;
	u64 mm_end = mm_start + gpuvm->mm_range;
/* in drm_gpuvm_in_kernel_node(): */
	u64 kstart = gpuvm->kernel_alloc_node.va.addr;
	u64 krange = gpuvm->kernel_alloc_node.va.range;
 * drm_gpuvm_range_valid() - checks whether the given range is valid for the
 * given &drm_gpuvm
 * drm_gpuvm_resv_object_alloc() - allocate a dummy &drm_gem_object

	obj->funcs = &drm_gpuvm_object_funcs;
 * drm_gpuvm_init() - initialize a &drm_gpuvm
	gpuvm->rb.tree = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&gpuvm->rb.list);

	INIT_LIST_HEAD(&gpuvm->extobj.list);
	spin_lock_init(&gpuvm->extobj.lock);

	INIT_LIST_HEAD(&gpuvm->evict.list);
	spin_lock_init(&gpuvm->evict.lock);

	kref_init(&gpuvm->kref);

	gpuvm->name = name ? name : "unknown";
	gpuvm->flags = flags;
	gpuvm->ops = ops;
	gpuvm->drm = drm;
	gpuvm->r_obj = r_obj;

	drm_gem_object_get(r_obj);

	gpuvm->mm_start = start_offset;
	gpuvm->mm_range = range;

	memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
	if (reserve_range) {
		gpuvm->kernel_alloc_node.va.addr = reserve_offset;
		gpuvm->kernel_alloc_node.va.range = reserve_range;

		if (likely(!drm_gpuvm_warn_check_overflow(gpuvm, reserve_offset,
							  reserve_range)))
			__drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node);
	}
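/*
 * A minimal usage sketch of drm_gpuvm_init(); the VA space layout and the
 * driver_gpuvm_ops are assumptions:
 *
 *	struct drm_gem_object *r_obj = drm_gpuvm_resv_object_alloc(drm);
 *
 *	drm_gpuvm_init(gpuvm, "example-vm", 0, drm, r_obj,
 *		       0, 1ull << 48,	// managed VA space
 *		       0, SZ_4K,	// kernel reserved node
 *		       &driver_gpuvm_ops);
 */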
/* in drm_gpuvm_fini(): */
	gpuvm->name = NULL;

	if (gpuvm->kernel_alloc_node.va.range)
		__drm_gpuva_remove(&gpuvm->kernel_alloc_node);

	drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
		 "GPUVA tree is not empty, potentially leaking memory.\n");
	drm_WARN(gpuvm->drm, !list_empty(&gpuvm->extobj.list),
		 "Extobj list should be empty.\n");
	drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list),
		 "Evict list should be empty.\n");

	drm_gem_object_put(gpuvm->r_obj);
/* in drm_gpuvm_free(): */
	if (drm_WARN_ON(gpuvm->drm, !gpuvm->ops->vm_free))
		return;

	gpuvm->ops->vm_free(gpuvm);
 * drm_gpuvm_put() - drop a struct drm_gpuvm reference

	kref_put(&gpuvm->kref, drm_gpuvm_free);
 * drm_gpuvm_prepare_vm() - prepare the GPUVMs common dma-resv

	return exec_prepare_obj(exec, gpuvm->r_obj, num_fences);
/* in __drm_gpuvm_prepare_objects(): */
	ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
/* in drm_gpuvm_prepare_objects_locked(): */
	list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) {
		ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
		if (ret)
			break;

		if (vm_bo->evicted)
			drm_gpuvm_bo_list_add(vm_bo, evict, false);
	}
 * drm_gpuvm_prepare_objects() - prepare all associated BOs
 *
 * Note: This function is safe against concurrent insertion and removal of
 * external objects; however, it is not safe against concurrent usage itself.
 *
 * Drivers need to make sure to protect this case with either an outer VM lock
 * or by calling drm_gpuvm_prepare_vm() before this function within the
 * drm_exec_until_all_locked() loop, such that the GPUVM's dma-resv lock ensures
 * mutual exclusion.
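 *
 * A minimal sketch of such a loop (error handling omitted):
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		ret = drm_gpuvm_prepare_vm(gpuvm, &exec, num_fences);
 *		drm_exec_retry_on_contention(&exec);
 *
 *		ret = drm_gpuvm_prepare_objects(gpuvm, &exec, num_fences);
 *		drm_exec_retry_on_contention(&exec);
 *	}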
 * drm_gpuvm_prepare_range() - prepare all BOs mapped within a given range

		struct drm_gem_object *obj = va->gem.obj;
 * drm_gpuvm_exec_lock() - lock all dma-resv of all associated BOs
 *
 * Acquires all dma-resv locks of all &drm_gem_objects the given
 * &drm_gpuvm contains mappings of.
 *
 * Additionally, when calling this function with struct drm_gpuvm_exec::extra
 * being set the driver receives the given @fn callback to lock additional
 * dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers
 * would call drm_exec_prepare_obj() from within this callback.
	struct drm_gpuvm *gpuvm = vm_exec->vm;
	struct drm_exec *exec = &vm_exec->exec;
	unsigned int num_fences = vm_exec->num_fences;

	drm_exec_init(exec, vm_exec->flags, 0);

	if (vm_exec->extra.fn) {
		ret = vm_exec->extra.fn(vm_exec);
		drm_exec_retry_on_contention(exec);
	}
/* in fn_lock_array(): */
	struct {
		struct drm_gem_object **objs;
		unsigned int num_objs;
	} *args = vm_exec->extra.priv;

	return drm_exec_prepare_array(&vm_exec->exec, args->objs,
				      args->num_objs, vm_exec->num_fences);
 * drm_gpuvm_exec_lock_array() - lock all dma-resv of all associated BOs
 *
 * Acquires all dma-resv locks of all &drm_gem_objects the given &drm_gpuvm
 * contains mappings of, plus the ones given through @objs.

	vm_exec->extra.fn = fn_lock_array;
	vm_exec->extra.priv = &args;
 * drm_gpuvm_exec_lock_range() - prepare all BOs mapped within a given range
 *
 * Acquires all dma-resv locks of all &drm_gem_objects mapped between @addr and
 * @addr + @range.

	struct drm_gpuvm *gpuvm = vm_exec->vm;
	struct drm_exec *exec = &vm_exec->exec;

	drm_exec_init(exec, vm_exec->flags, 0);

	drm_exec_until_all_locked(exec) {
		ret = drm_gpuvm_prepare_range(gpuvm, exec, addr, range,
					      vm_exec->num_fences);
		drm_exec_retry_on_contention(exec);
	}
/* in __drm_gpuvm_validate(): */
	const struct drm_gpuvm_ops *ops = gpuvm->ops;

	ret = ops->vm_bo_validate(vm_bo, exec);
/* in drm_gpuvm_validate_locked(): */
	const struct drm_gpuvm_ops *ops = gpuvm->ops;

	list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list,
				 list.entry.evict) {
		ret = ops->vm_bo_validate(vm_bo, exec);
		if (ret)
			break;

		dma_resv_assert_held(vm_bo->obj->resv);
		if (!vm_bo->evicted)
			drm_gpuvm_bo_list_del_init(vm_bo, evict, false);
	}
 * drm_gpuvm_validate() - validate all BOs marked as evicted

	const struct drm_gpuvm_ops *ops = gpuvm->ops;

	if (unlikely(!ops || !ops->vm_bo_validate))
		return -EOPNOTSUPP;
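/*
 * A minimal sketch of a &drm_gpuvm_ops::vm_bo_validate implementation; the
 * driver_bo_validate() helper is an assumption standing in for the driver's
 * actual re-validation logic:
 *
 *	static int driver_vm_bo_validate(struct drm_gpuvm_bo *vm_bo,
 *					 struct drm_exec *exec)
 *	{
 *		// move the backing BO back to a GPU-accessible placement
 *		return driver_bo_validate(vm_bo->obj, exec);
 *	}
 */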
 * drm_gpuvm_resv_add_fence() - add fence to private and all extobj
 * dma-resv
 * @private_usage: private dma-resv usage
 * @extobj_usage: extobj dma-resv usage

	drm_exec_for_each_locked_object(exec, index, obj) {
		dma_resv_assert_held(obj->resv);
		dma_resv_add_fence(obj->resv, fence,
				   drm_gpuvm_is_extobj(gpuvm, obj) ?
				   extobj_usage : private_usage);
	}
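/*
 * For example, after creating the job's fence a driver might attach it with
 * bookkeeping usage for private BOs and write usage for external ones; the
 * concrete usage choice is driver policy and shown here as an assumption:
 *
 *	drm_gpuvm_resv_add_fence(gpuvm, exec, job_fence,
 *				 DMA_RESV_USAGE_BOOKKEEP,
 *				 DMA_RESV_USAGE_WRITE);
 */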
 * drm_gpuvm_bo_create() - create a new instance of struct drm_gpuvm_bo

	const struct drm_gpuvm_ops *ops = gpuvm->ops;
	struct drm_gpuvm_bo *vm_bo;

	if (ops && ops->vm_bo_alloc)
		vm_bo = ops->vm_bo_alloc();
	else
		vm_bo = kzalloc(sizeof(*vm_bo), GFP_KERNEL);

	if (unlikely(!vm_bo))
		return NULL;

	vm_bo->vm = drm_gpuvm_get(gpuvm);
	vm_bo->obj = obj;
	drm_gem_object_get(obj);

	kref_init(&vm_bo->kref);
	INIT_LIST_HEAD(&vm_bo->list.gpuva);
	INIT_LIST_HEAD(&vm_bo->list.entry.gem);

	INIT_LIST_HEAD(&vm_bo->list.entry.extobj);
	INIT_LIST_HEAD(&vm_bo->list.entry.evict);
/* in drm_gpuvm_bo_destroy(): */
	struct drm_gpuvm *gpuvm = vm_bo->vm;
	const struct drm_gpuvm_ops *ops = gpuvm->ops;
	struct drm_gem_object *obj = vm_bo->obj;

	list_del(&vm_bo->list.entry.gem);

	if (ops && ops->vm_bo_free)
		ops->vm_bo_free(vm_bo);
	else
		kfree(vm_bo);
 * drm_gpuvm_bo_put() - drop a struct drm_gpuvm_bo reference
 *
 * If the reference count drops to zero, the &drm_gpuvm_bo is destroyed, which
 * includes removing it from the GEM's gpuva list. Hence, the caller must
 * hold the dma-resv or driver specific GEM gpuva lock.
 *
 * This function may only be called from non-atomic context.

	if (vm_bo)
		return !!kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);
/* in __drm_gpuvm_bo_find(): */
	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
		if (vm_bo->vm == gpuvm)
			return vm_bo;
 * drm_gpuvm_bo_find() - find the &drm_gpuvm_bo for the given
 * &drm_gpuvm and &drm_gem_object
 * drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the
 * given &drm_gpuvm and &drm_gem_object

	vm_bo = drm_gpuvm_bo_create(gpuvm, obj);
	if (!vm_bo)
		return ERR_PTR(-ENOMEM);

	list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list);
 * drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo
 * for the given &drm_gpuvm and &drm_gem_object
 * @__vm_bo: A pre-allocated struct drm_gpuvm_bo.

	struct drm_gpuvm *gpuvm = __vm_bo->vm;
	struct drm_gem_object *obj = __vm_bo->obj;

	list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list);
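/*
 * A sketch of the pre-allocation pattern: create the &drm_gpuvm_bo where
 * allocations are still allowed and obtain it from within the critical
 * section; drm_gpuvm_bo_obtain_prealloc() drops the pre-allocated instance
 * if the &drm_gpuvm and &drm_gem_object combination already exists.
 *
 *	vm_bo = drm_gpuvm_bo_create(gpuvm, obj);	// may allocate
 *	...
 *	vm_bo = drm_gpuvm_bo_obtain_prealloc(vm_bo);	// never allocates
 */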
 * drm_gpuvm_bo_extobj_add() - adds the &drm_gpuvm_bo to its &drm_gpuvm's
 * extobj list

	struct drm_gpuvm *gpuvm = vm_bo->vm;

	if (drm_gpuvm_is_extobj(gpuvm, vm_bo->obj))
		drm_gpuvm_bo_list_add(vm_bo, extobj, true);
 * drm_gpuvm_bo_evict() - add / remove a &drm_gpuvm_bo to / from the &drm_gpuvm's
 * evicted list

	struct drm_gpuvm *gpuvm = vm_bo->vm;
	struct drm_gem_object *obj = vm_bo->obj;

	dma_resv_assert_held(obj->resv);
	vm_bo->evicted = evict;

	/* Serialized with the VM's common dma-resv lock. */
/* in __drm_gpuva_insert(): */
	if (drm_gpuva_it_iter_first(&gpuvm->rb.tree,
				    GPUVA_START(va), GPUVA_LAST(va)))
		return -EEXIST;

	va->vm = gpuvm;

	drm_gpuva_it_insert(va, &gpuvm->rb.tree);

	node = rb_prev(&va->rb.node);
	if (node)
		head = &(to_drm_gpuva(node))->rb.entry;
	else
		head = &gpuvm->rb.list;

	list_add(&va->rb.entry, head);
 * drm_gpuva_insert() - insert a &drm_gpuva
 *
 * It is safe to use this function using the safe versions of iterating the GPU
 * VA space, such as drm_gpuvm_for_each_va_safe() and
 * drm_gpuvm_for_each_va_range_safe().

	u64 addr = va->va.addr;
	u64 range = va->va.range;

	if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range)))
		return -EINVAL;
/* in __drm_gpuva_remove(): */
	drm_gpuva_it_remove(va, &va->vm->rb.tree);
	list_del_init(&va->rb.entry);
 * drm_gpuva_remove() - remove a &drm_gpuva
 *
 * It is safe to use this function using the safe versions of iterating the GPU
 * VA space, such as drm_gpuvm_for_each_va_safe() and
 * drm_gpuvm_for_each_va_range_safe().

	struct drm_gpuvm *gpuvm = va->vm;

	if (unlikely(va == &gpuvm->kernel_alloc_node)) {
		drm_WARN(gpuvm->drm, 1,
			 "Can't destroy kernel reserved node.\n");
		return;
	}

	__drm_gpuva_remove(va);
	drm_gpuvm_put(va->vm);
 * drm_gpuva_link() - link a &drm_gpuva

	struct drm_gem_object *obj = va->gem.obj;
	struct drm_gpuvm *gpuvm = va->vm;

	if (unlikely(!obj))
		return;

	drm_WARN_ON(gpuvm->drm, obj != vm_bo->obj);

	va->vm_bo = drm_gpuvm_bo_get(vm_bo);

	list_add_tail(&va->gem.entry, &vm_bo->list.gpuva);
 * drm_gpuva_unlink() - unlink a &drm_gpuva

	struct drm_gem_object *obj = va->gem.obj;
	struct drm_gpuvm_bo *vm_bo = va->vm_bo;

	if (unlikely(!obj))
		return;

	list_del_init(&va->gem.entry);

	va->vm_bo = NULL;
	drm_gpuvm_bo_put(vm_bo);
 * drm_gpuva_find_first() - find the first &drm_gpuva in the given range

	u64 last = addr + range - 1;

	return drm_gpuva_it_iter_first(&gpuvm->rb.tree, addr, last);
 * drm_gpuva_find() - find a &drm_gpuva

	if (va->va.addr != addr ||
	    va->va.range != range)
		goto out;
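/*
 * For instance, to look up an exact addr/range pair when handling an unmap
 * request from userspace (the error code choice is an assumption):
 *
 *	va = drm_gpuva_find(gpuvm, addr, range);
 *	if (!va)
 *		return -ENOENT;
 */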
 * drm_gpuva_find_prev() - find the &drm_gpuva before the given address

	if (!drm_gpuvm_range_valid(gpuvm, start - 1, 1))
		return NULL;

	return drm_gpuva_it_iter_first(&gpuvm->rb.tree, start - 1, start);
 * drm_gpuva_find_next() - find the &drm_gpuva after the given address

	return drm_gpuva_it_iter_first(&gpuvm->rb.tree, end, end + 1);
 * drm_gpuvm_interval_empty() - indicate whether a given interval of the VA space
 * is empty
 * drm_gpuva_map() - helper to insert a &drm_gpuva according to a
 * &drm_gpuva_op_map
 * drm_gpuva_remap() - helper to remap a &drm_gpuva according to a
 * &drm_gpuva_op_remap

	struct drm_gpuva *va = op->unmap->va;
	struct drm_gpuvm *gpuvm = va->vm;

	drm_gpuva_remove(va);

	if (op->prev) {
		drm_gpuva_init_from_op(prev, op->prev);
		drm_gpuva_insert(gpuvm, prev);
	}

	if (op->next) {
		drm_gpuva_init_from_op(next, op->next);
		drm_gpuva_insert(gpuvm, next);
	}
 * drm_gpuva_unmap() - helper to remove a &drm_gpuva according to a
 * &drm_gpuva_op_unmap

	drm_gpuva_remove(op->va);
/* in op_map_cb(): */
	struct drm_gpuva_op op = {};

	op.op = DRM_GPUVA_OP_MAP;
	op.map.va.addr = addr;
	op.map.va.range = range;
	op.map.gem.obj = obj;
	op.map.gem.offset = offset;

	return fn->sm_step_map(&op, priv);
/* in op_remap_cb(): */
	r->prev = prev;
	r->next = next;
	r->unmap = unmap;

	return fn->sm_step_remap(&op, priv);
/* in op_unmap_cb(): */
	return fn->sm_step_unmap(&op, priv);
/* in __drm_gpuvm_sm_map(): */
	if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
		return -EINVAL;

	struct drm_gem_object *obj = va->gem.obj;
	u64 offset = va->gem.offset;
	u64 addr = va->va.addr;
	u64 range = va->va.range;
	bool merge = !!va->gem.obj;

		.va.range = range - req_range,

	u64 ls_range = req_addr - addr;

		.va.range = end - req_end,

		(addr - req_addr);

		.va.range = end - req_end,
		.gem.offset = offset + req_end - addr,
/* in __drm_gpuvm_sm_unmap(): */
	if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
		return -EINVAL;

	struct drm_gem_object *obj = va->gem.obj;
	u64 offset = va->gem.offset;
	u64 addr = va->va.addr;
	u64 range = va->va.range;

	prev.va.range = req_addr - addr;

	next.va.range = end - req_end;
	next.gem.offset = offset + (req_end - addr);
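	/*
	 * Worked example: unmapping a range from the middle of a mapping
	 * starting at addr with bo_offset = offset leaves a prev chunk of
	 * range req_addr - addr at the old offset and a next chunk starting
	 * at req_end with bo_offset = offset + (req_end - addr).
	 */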
 * drm_gpuvm_sm_map() - creates the &drm_gpuva_op split/merge steps
 * @req_obj: the &drm_gem_object to map
 *
 * A sequence of callbacks can contain map, unmap and remap operations, but
 * the caller might receive no callback at all if no operation is required,
 * e.g. if the requested mapping already exists in the exact same way.
 *
 * There can be an arbitrary amount of unmap operations, a maximum of two remap
 * operations and a single map operation. The latter one represents the original
 * map operation requested by the caller.
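 *
 * A minimal invocation sketch; the driver context passed as @priv is an
 * assumption:
 *
 *	ret = drm_gpuvm_sm_map(gpuvm, &ctx, req_addr, req_range,
 *			       req_obj, req_offset);
 *	if (ret)
 *		return ret;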
	const struct drm_gpuvm_ops *ops = gpuvm->ops;

	if (unlikely(!(ops && ops->sm_step_map &&
		       ops->sm_step_remap &&
		       ops->sm_step_unmap)))
		return -EINVAL;
 * drm_gpuvm_sm_unmap() - creates the &drm_gpuva_ops to split on unmap
	const struct drm_gpuvm_ops *ops = gpuvm->ops;

	if (unlikely(!(ops && ops->sm_step_remap &&
		       ops->sm_step_unmap)))
		return -EINVAL;
/* in gpuva_op_alloc(): */
	const struct drm_gpuvm_ops *fn = gpuvm->ops;
	struct drm_gpuva_op *op;

	if (fn && fn->op_alloc)
		op = fn->op_alloc();
	else
		op = kzalloc(sizeof(*op), GFP_KERNEL);
/* in gpuva_op_free(): */
	const struct drm_gpuvm_ops *fn = gpuvm->ops;

	if (fn && fn->op_free)
		fn->op_free(op);
	else
		kfree(op);
/* in drm_gpuva_sm_step(): */
	struct drm_gpuvm *gpuvm = args->vm;
	struct drm_gpuva_ops *ops = args->ops;

	if (op->op == DRM_GPUVA_OP_REMAP) {
		struct drm_gpuva_op_remap *__r = &__op->remap;
		struct drm_gpuva_op_remap *r = &op->remap;

		r->unmap = kmemdup(__r->unmap, sizeof(*r->unmap),
				   GFP_KERNEL);
		if (unlikely(!r->unmap))
			goto err;

		if (__r->prev) {
			r->prev = kmemdup(__r->prev, sizeof(*r->prev),
					  GFP_KERNEL);
			if (unlikely(!r->prev))
				goto err;
		}

		if (__r->next) {
			r->next = kmemdup(__r->next, sizeof(*r->next),
					  GFP_KERNEL);
			if (unlikely(!r->next))
				goto err;
		}
	}

	list_add_tail(&op->entry, &ops->list);

	return 0;

err:	/* kfree() ignores NULL pointers, so partial setups unwind safely */
	kfree(op->remap.unmap);
	kfree(op->remap.prev);
	kfree(op->remap.next);

	return -ENOMEM;
 * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
 * @req_obj: the &drm_gem_object to map
 *
 * The list can be iterated with &drm_gpuva_for_each_op and must be processed
 * in the given order. It can contain map, unmap and remap operations, but it
 * also can be empty if no operation is required.
 *
 * There can be an arbitrary amount of unmap operations, a maximum of two remap
 * operations and a single map operation. The latter one represents the original
 * map operation requested by the caller.
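 *
 * A minimal usage sketch:
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *
 *	ops = drm_gpuvm_sm_map_ops_create(gpuvm, req_addr, req_range,
 *					  req_obj, req_offset);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		// process op->op (MAP, REMAP or UNMAP)
 *	}
 *
 *	drm_gpuva_ops_free(gpuvm, ops);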
	if (unlikely(!ops))
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->list);
 * drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
 * unmap

	if (unlikely(!ops))
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->list);
 * drm_gpuvm_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch

	if (unlikely(!ops))
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&ops->list);

	drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) {
		op = gpuva_op_alloc(gpuvm);
		if (!op) {
			ret = -ENOMEM;
			goto err_free_ops;	// drm_gpuva_ops_free() + ERR_PTR(ret)
		}
		op->op = DRM_GPUVA_OP_PREFETCH;
		op->prefetch.va = va;
		list_add_tail(&op->entry, &ops->list);
	}
 * drm_gpuvm_bo_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
 * object

	drm_gem_gpuva_assert_lock_held(vm_bo->obj);

	if (unlikely(!ops))
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&ops->list);

	drm_gpuvm_bo_for_each_va(va, vm_bo) {
		op = gpuva_op_alloc(vm_bo->vm);
		if (!op) {
			ret = -ENOMEM;
			goto err_free_ops;
		}
		op->op = DRM_GPUVA_OP_UNMAP;
		op->unmap.va = va;
		list_add_tail(&op->entry, &ops->list);
	}

	return ops;

err_free_ops:
	drm_gpuva_ops_free(vm_bo->vm, ops);
	return ERR_PTR(ret);
 * drm_gpuva_ops_free() - free the given &drm_gpuva_ops

	drm_gpuva_for_each_op_safe(op, next, ops) {
		list_del(&op->entry);

		if (op->op == DRM_GPUVA_OP_REMAP) {
			kfree(op->remap.prev);
			kfree(op->remap.next);
			kfree(op->remap.unmap);
		}

		gpuva_op_free(gpuvm, op);
	}

	kfree(ops);