1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
48 * The GPU VA manager internally uses an rb-tree to manage the
63 * entries from within dma-fence signalling critical sections it is enough to
64 * pre-allocate the &drm_gpuva structures.
66 * &drm_gem_objects which are private to a single VM can share a common
89 * lists are maintained in order to accelerate locking of dma-resv locks and
108 * sequence of operations to satisfy a given map or unmap request.
113 * implement Vulkan 'Sparse Memory Bindings' - drivers' UAPIs often refer to this
114 * as VM BIND.
117 * containing map, unmap and remap operations for a given newly requested
124 * of unmap operations, a maximum of two remap operations and a single map
128 * The single map operation represents the original map operation requested by
139 * one unmap operation and one or two map operations, such that drivers can
151 * created such that those mappings are split up and re-mapped partially.
162 * dma-fence signalling critical path.
173 * by drm_gpuvm_sm_map() - it doesn't cover any arbitrary combinations of these.
181 * old: |-----------| (bo_offset=n)
184 * req: |-----------| (bo_offset=n)
187 * new: |-----------| (bo_offset=n)
196 * old: |-----------| (bo_offset=n)
199 * req: |-----------| (bo_offset=m)
202 * new: |-----------| (bo_offset=m)
211 * old: |-----------| (bo_offset=n)
214 * req: |-----------| (bo_offset=n)
217 * new: |-----------| (bo_offset=n)
226 * old: |-----| (bo_offset=n)
229 * req: |-----------| (bo_offset=n)
232 * new: |-----------| (bo_offset=n)
236 * and/or non-contiguous BO offset.
240 * but backed by a different BO. Hence, map the requested mapping and split
246 * old: |-----------| (bo_offset=n)
249 * req: |-----| (bo_offset=n)
252 * new: |-----|-----| (b.bo_offset=n, a.bo_offset=n+1)
256 * and/or non-contiguous BO offset.
265 * old: |-----------| (bo_offset=n)
268 * req: |-----| (bo_offset=n)
271 * new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
275 * but backed by a different BO. Hence, map the requested mapping and split
281 * old: |-----------| (bo_offset=n)
284 * req: |-----| (bo_offset=m)
287 * new: |-----|-----| (a.bo_offset=n,b.bo_offset=m)
296 * old: |-----------| (bo_offset=n)
299 * req: |-----| (bo_offset=n+1)
302 * new: |-----|-----| (a'.bo_offset=n, a.bo_offset=n+1)
306 * by a different BO. Hence, map the requested mapping and split up the
312 * old: |-----------| (bo_offset=n)
315 * req: |-----------| (bo_offset=m)
318 * new: |-----|-----------| (a.bo_offset=n,b.bo_offset=m)
328 * old: |-----------| (bo_offset=n)
331 * req: |-----------| (bo_offset=n+1)
334 * new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
338 * having a different backing BO. Hence, map the requested mapping and split
345 * old: |-----------------| (bo_offset=n)
348 * req: |-----| (bo_offset=m)
351 * new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
360 * old: |-----------------| (bo_offset=n)
363 * req: |-----| (bo_offset=n+1)
366 * new: |-----|-----|-----| (a'.bo_offset=n, a.bo_offset=n+1, a''.bo_offset=n+2)
375 * old: |-----| (bo_offset=n+1)
378 * req: |-----------| (bo_offset=n)
381 * new: |-----------| (bo_offset=n)
385 * and/or non-contiguous bo_offset.
394 * old: |-----| (bo_offset=n+1)
397 * req: |----------------| (bo_offset=n)
400 * new: |----------------| (bo_offset=n)
404 * and/or non-contiguous bo_offset.
408 * backed by a different BO. Hence, map the requested mapping and split up
414 * old: |-----------| (bo_offset=n)
417 * req: |-----------| (bo_offset=m)
420 * new: |-----------|-----| (b.bo_offset=m,a.bo_offset=n+2)
424 * DOC: Madvise Logic - Splitting and Traversal
426 * This logic handles GPU VA range updates by generating remap and map operations
430 * the existing mapping at the start and end boundaries and inserts a new map.
434 * pre: |-----------------------|
438 * new: |-----|=========|-------|
439 * remap map remap
441 * one REMAP and one MAP: same behaviour as SPLIT and MERGE
445 * map operations to cover the full range.
448 * pre: |------------------|--------------|------------------|
452 * new: |-------|==========|--------------|========|---------|
457 * 3) Either start or end lies within a drm_gpuva. A single remap and map operation
462 * pre: |------------------|--------------|------------------|
466 * new: |------------------|--------------|========|---------|
470 * pre: |------------------|--------------|------------------|
474 * new: |-------|==========|--------------|------------------|
477 * one REMAP and one MAP
485 * focusing solely on remap and map operations for efficient traversal and update.
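A minimal sketch of driving the madvise path described above, assuming the drm_gpuvm_map_req layout used elsewhere in this file and a hypothetical per-op handler (my_apply_madvise_op):

	struct drm_gpuvm_map_req req = {
		.map.va.addr = addr,
		.map.va.range = range,
	};
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;

	ops = drm_gpuvm_madvise_ops_create(gpuvm, &req);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops) {
		/* only DRM_GPUVA_OP_MAP and DRM_GPUVA_OP_REMAP occur here */
		my_apply_madvise_op(gpuvm, op);
	}

	drm_gpuva_ops_free(gpuvm, ops);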
532 * &drm_gpuva_op in order to satisfy a given map or unmap request and how to
547 * // structure in individual driver structures and lock the dma-resv with
554 * .map.va.addr = addr,
555 * .map.va.range = range,
556 * .map.gem.obj = obj,
557 * .map.gem.offset = offset,
575 * switch (op->op) {
583 * drm_gpuva_map(gpuvm, va, &op->map);
590 * va = op->remap.unmap->va;
592 * if (op->remap.prev) {
600 * if (op->remap.next) {
609 * drm_gpuva_remap(prev, next, &op->remap);
612 * drm_gpuva_link(prev, va->vm_bo);
614 * drm_gpuva_link(next, va->vm_bo);
620 * va = op->unmap->va;
624 * drm_gpuva_unmap(&op->unmap);
655 * // structure in individual driver structures and lock the dma-resv with
674 * ret = -ENOMEM;
680 * // drm_gpuva_unlink(), hence pre-allocate.
699 * drm_gpuva_map(ctx->vm, ctx->new_va, &op->map);
701 * drm_gpuva_link(ctx->new_va, ctx->vm_bo);
705 * ctx->new_va = NULL;
713 * struct drm_gpuva *va = op->remap.unmap->va;
715 * drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
717 * if (op->remap.prev) {
718 * drm_gpuva_link(ctx->prev_va, va->vm_bo);
719 * ctx->prev_va = NULL;
722 * if (op->remap.next) {
723 * drm_gpuva_link(ctx->next_va, va->vm_bo);
724 * ctx->next_va = NULL;
735 * drm_gpuva_unlink(op->unmap.va);
736 * drm_gpuva_unmap(&op->unmap);
737 * kfree(op->unmap.va);
744 * get_next_vm_bo_from_list() - get the next vm_bo element
763 spin_lock(&(__gpuvm)->__list_name.lock); \
764 if (!(__gpuvm)->__list_name.local_list) \
765 (__gpuvm)->__list_name.local_list = __local_list; \
767 drm_WARN_ON((__gpuvm)->drm, \
768 (__gpuvm)->__list_name.local_list != __local_list); \
770 while (!list_empty(&(__gpuvm)->__list_name.list)) { \
771 __vm_bo = list_first_entry(&(__gpuvm)->__list_name.list, \
774 if (kref_get_unless_zero(&__vm_bo->kref)) { \
775 list_move_tail(&(__vm_bo)->list.entry.__list_name, \
779 list_del_init(&(__vm_bo)->list.entry.__list_name); \
783 spin_unlock(&(__gpuvm)->__list_name.lock); \
789 * for_each_vm_bo_in_list() - internal vm_bo list iterator
799 * It is not allowed to re-assign the vm_bo pointer from inside this loop.
843 * restore_vm_bo_list() - move vm_bo elements back to their original list
851 __restore_vm_bo_list((__gpuvm), &(__gpuvm)->__list_name.lock, \
852 &(__gpuvm)->__list_name.list, \
853 &(__gpuvm)->__list_name.local_list)
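These helpers are meant to be used as a pair: for_each_vm_bo_in_list() drains the public list onto the local list while holding a reference on each element, and restore_vm_bo_list() moves everything back once the caller is done. A condensed sketch of that pairing, modeled on the extobj iteration in the prepare path further down (exec_prepare_obj() is the internal helper used there):

	struct drm_gpuvm_bo *vm_bo;
	LIST_HEAD(extobjs);
	int ret = 0;

	for_each_vm_bo_in_list(gpuvm, extobj, &extobjs, vm_bo) {
		ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
		if (ret)
			break;
	}
	/* drop the reference held for the current element if we broke out */
	drm_gpuvm_bo_put(vm_bo);

	restore_vm_bo_list(gpuvm, extobj);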
880 * drm_gpuvm_bo_list_add() - insert a vm_bo into the given list
888 __drm_gpuvm_bo_list_add((__vm_bo)->vm, \
889 __lock ? &(__vm_bo)->vm->__list_name.lock : \
891 &(__vm_bo)->list.entry.__list_name, \
892 &(__vm_bo)->vm->__list_name.list)
909 * drm_gpuvm_bo_list_del_init() - remove a vm_bo from the given list
917 __drm_gpuvm_bo_list_del((__vm_bo)->vm, \
918 __lock ? &(__vm_bo)->vm->__list_name.lock : \
920 &(__vm_bo)->list.entry.__list_name, \
924 * drm_gpuvm_bo_list_del() - remove a vm_bo from the given list
932 __drm_gpuvm_bo_list_del((__vm_bo)->vm, \
933 __lock ? &(__vm_bo)->vm->__list_name.lock : \
935 &(__vm_bo)->list.entry.__list_name, \
940 #define GPUVA_START(node) ((node)->va.addr)
941 #define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)
965 return drm_WARN(gpuvm->drm, drm_gpuvm_check_overflow(addr, range), in drm_gpuvm_warn_check_overflow()
973 u64 mm_start = gpuvm->mm_start; in drm_gpuvm_in_mm_range()
974 u64 mm_end = mm_start + gpuvm->mm_range; in drm_gpuvm_in_mm_range()
983 u64 kstart = gpuvm->kernel_alloc_node.va.addr; in drm_gpuvm_in_kernel_node()
984 u64 krange = gpuvm->kernel_alloc_node.va.range; in drm_gpuvm_in_kernel_node()
991 * drm_gpuvm_range_valid() - checks whether the given range is valid for the
1023 * drm_gpuvm_resv_object_alloc() - allocate a dummy &drm_gem_object
1041 obj->funcs = &drm_gpuvm_object_funcs; in drm_gpuvm_resv_object_alloc()
1049 * drm_gpuvm_init() - initialize a &drm_gpuvm
1053 * @drm: the &drm_device this VM resides in
1075 gpuvm->rb.tree = RB_ROOT_CACHED; in drm_gpuvm_init()
1076 INIT_LIST_HEAD(&gpuvm->rb.list); in drm_gpuvm_init()
1078 INIT_LIST_HEAD(&gpuvm->extobj.list); in drm_gpuvm_init()
1079 spin_lock_init(&gpuvm->extobj.lock); in drm_gpuvm_init()
1081 INIT_LIST_HEAD(&gpuvm->evict.list); in drm_gpuvm_init()
1082 spin_lock_init(&gpuvm->evict.lock); in drm_gpuvm_init()
1084 kref_init(&gpuvm->kref); in drm_gpuvm_init()
1086 gpuvm->name = name ? name : "unknown"; in drm_gpuvm_init()
1087 gpuvm->flags = flags; in drm_gpuvm_init()
1088 gpuvm->ops = ops; in drm_gpuvm_init()
1089 gpuvm->drm = drm; in drm_gpuvm_init()
1090 gpuvm->r_obj = r_obj; in drm_gpuvm_init()
1095 gpuvm->mm_start = start_offset; in drm_gpuvm_init()
1096 gpuvm->mm_range = range; in drm_gpuvm_init()
1098 memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva)); in drm_gpuvm_init()
1100 gpuvm->kernel_alloc_node.va.addr = reserve_offset; in drm_gpuvm_init()
1101 gpuvm->kernel_alloc_node.va.range = reserve_range; in drm_gpuvm_init()
1105 __drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node); in drm_gpuvm_init()
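A minimal sketch of a driver wiring this up at VM creation time; the surrounding structure (my_vm), the ops table name and the VA space / reserved kernel range sizes are illustrative only:

	struct drm_gem_object *r_obj;

	r_obj = drm_gpuvm_resv_object_alloc(drm);
	if (!r_obj)
		return -ENOMEM;

	drm_gpuvm_init(&my_vm->base, "my-vm", 0, drm, r_obj,
		       0, 1ULL << 48,	/* managed VA space */
		       0, SZ_64K,	/* reserved kernel node */
		       &my_gpuvm_ops);

	/* drm_gpuvm_init() took its own reference on the resv object */
	drm_gem_object_put(r_obj);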
1113 gpuvm->name = NULL; in drm_gpuvm_fini()
1115 if (gpuvm->kernel_alloc_node.va.range) in drm_gpuvm_fini()
1116 __drm_gpuva_remove(&gpuvm->kernel_alloc_node); in drm_gpuvm_fini()
1118 drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root), in drm_gpuvm_fini()
1121 drm_WARN(gpuvm->drm, !list_empty(&gpuvm->extobj.list), in drm_gpuvm_fini()
1123 drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list), in drm_gpuvm_fini()
1126 drm_gem_object_put(gpuvm->r_obj); in drm_gpuvm_fini()
1136 if (drm_WARN_ON(gpuvm->drm, !gpuvm->ops->vm_free)) in drm_gpuvm_free()
1139 gpuvm->ops->vm_free(gpuvm); in drm_gpuvm_free()
1143 * drm_gpuvm_put() - drop a struct drm_gpuvm reference
1154 kref_put(&gpuvm->kref, drm_gpuvm_free); in drm_gpuvm_put()
1167 * drm_gpuvm_prepare_vm() - prepare the GPUVMs common dma-resv
1185 return exec_prepare_obj(exec, gpuvm->r_obj, num_fences); in drm_gpuvm_prepare_vm()
1199 ret = exec_prepare_obj(exec, vm_bo->obj, num_fences); in __drm_gpuvm_prepare_objects()
1219 list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) { in drm_gpuvm_prepare_objects_locked()
1220 ret = exec_prepare_obj(exec, vm_bo->obj, num_fences); in drm_gpuvm_prepare_objects_locked()
1224 if (vm_bo->evicted) in drm_gpuvm_prepare_objects_locked()
1232 * drm_gpuvm_prepare_objects() - prepare all associated BOs
1247 * Drivers need to make sure to protect this case with either an outer VM lock
1249 * drm_exec_until_all_locked() loop, such that the GPUVM's dma-resv lock ensures
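A condensed sketch of the pattern described above, with drm_gpuvm_prepare_vm() called first inside the drm_exec_until_all_locked() loop so the VM's dma-resv serializes against concurrent extobj list updates (one fence slot per object is requested here purely as an example):

	struct drm_exec exec;
	int ret;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		ret = drm_gpuvm_prepare_vm(gpuvm, &exec, 1);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto err;

		ret = drm_gpuvm_prepare_objects(gpuvm, &exec, 1);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto err;
	}

	/* all relevant dma-resv locks are held here */
err:
	drm_exec_fini(&exec);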
1268 * drm_gpuvm_prepare_range() - prepare all BOs mapped within a given range
1290 struct drm_gem_object *obj = va->gem.obj; in drm_gpuvm_prepare_range()
1302 * drm_gpuvm_exec_lock() - lock all dma-resv of all associated BOs
1305 * Acquires all dma-resv locks of all &drm_gem_objects the given
1310 * dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers
1318 struct drm_gpuvm *gpuvm = vm_exec->vm; in drm_gpuvm_exec_lock()
1319 struct drm_exec *exec = &vm_exec->exec; in drm_gpuvm_exec_lock()
1320 unsigned int num_fences = vm_exec->num_fences; in drm_gpuvm_exec_lock()
1323 drm_exec_init(exec, vm_exec->flags, 0); in drm_gpuvm_exec_lock()
1336 if (vm_exec->extra.fn) { in drm_gpuvm_exec_lock()
1337 ret = vm_exec->extra.fn(vm_exec); in drm_gpuvm_exec_lock()
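For the common case, the drm_gpuvm_exec wrapper shown here removes that boilerplate; a minimal sketch (flag and fence count are illustrative):

	struct drm_gpuvm_exec vm_exec = {
		.vm = gpuvm,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
		.num_fences = 1,
	};
	int ret;

	ret = drm_gpuvm_exec_lock(&vm_exec);
	if (ret)
		return ret;

	/* all dma-resv locks of the VM and its extobjs are held here */

	drm_gpuvm_exec_unlock(&vm_exec);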
1358 } *args = vm_exec->extra.priv; in fn_lock_array()
1360 return drm_exec_prepare_array(&vm_exec->exec, args->objs, in fn_lock_array()
1361 args->num_objs, vm_exec->num_fences); in fn_lock_array()
1365 * drm_gpuvm_exec_lock_array() - lock all dma-resv of all associated BOs
1370 * Acquires all dma-resv locks of all &drm_gem_objects the given &drm_gpuvm
1388 vm_exec->extra.fn = fn_lock_array; in drm_gpuvm_exec_lock_array()
1389 vm_exec->extra.priv = &args; in drm_gpuvm_exec_lock_array()
1396 * drm_gpuvm_exec_lock_range() - prepare all BOs mapped within a given range
1401 * Acquires all dma-resv locks of all &drm_gem_objects mapped between @addr and
1410 struct drm_gpuvm *gpuvm = vm_exec->vm; in drm_gpuvm_exec_lock_range()
1411 struct drm_exec *exec = &vm_exec->exec; in drm_gpuvm_exec_lock_range()
1414 drm_exec_init(exec, vm_exec->flags, 0); in drm_gpuvm_exec_lock_range()
1418 vm_exec->num_fences); in drm_gpuvm_exec_lock_range()
1435 const struct drm_gpuvm_ops *ops = gpuvm->ops; in __drm_gpuvm_validate()
1441 ret = ops->vm_bo_validate(vm_bo, exec); in __drm_gpuvm_validate()
1455 const struct drm_gpuvm_ops *ops = gpuvm->ops; in drm_gpuvm_validate_locked()
1461 list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list, in drm_gpuvm_validate_locked()
1463 ret = ops->vm_bo_validate(vm_bo, exec); in drm_gpuvm_validate_locked()
1467 dma_resv_assert_held(vm_bo->obj->resv); in drm_gpuvm_validate_locked()
1468 if (!vm_bo->evicted) in drm_gpuvm_validate_locked()
1476 * drm_gpuvm_validate() - validate all BOs marked as evicted
1488 const struct drm_gpuvm_ops *ops = gpuvm->ops; in drm_gpuvm_validate()
1490 if (unlikely(!ops || !ops->vm_bo_validate)) in drm_gpuvm_validate()
1491 return -EOPNOTSUPP; in drm_gpuvm_validate()
1501 * drm_gpuvm_resv_add_fence - add fence to private and all extobj
1502 * dma-resv
1506 * @private_usage: private dma-resv usage
1507 * @extobj_usage: extobj dma-resv usage
1520 dma_resv_assert_held(obj->resv); in drm_gpuvm_resv_add_fence()
1521 dma_resv_add_fence(obj->resv, fence, in drm_gpuvm_resv_add_fence()
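Taken together with drm_gpuvm_validate(), this is the tail end of a typical submission: validate whatever sits on the evict list while everything is still locked, push the job, then attach the job's fence to the VM resv and all extobj resvs. A sketch, with the dma-resv usages chosen purely for illustration:

	ret = drm_gpuvm_validate(gpuvm, &vm_exec.exec);
	if (ret)
		goto err_unlock;

	/* ... push the job and obtain its fence ... */

	drm_gpuvm_resv_add_fence(gpuvm, &vm_exec.exec, fence,
				 DMA_RESV_USAGE_BOOKKEEP,
				 DMA_RESV_USAGE_BOOKKEEP);
err_unlock:
	drm_gpuvm_exec_unlock(&vm_exec);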
1529 * drm_gpuvm_bo_create() - create a new instance of struct drm_gpuvm_bo
1542 const struct drm_gpuvm_ops *ops = gpuvm->ops; in drm_gpuvm_bo_create()
1545 if (ops && ops->vm_bo_alloc) in drm_gpuvm_bo_create()
1546 vm_bo = ops->vm_bo_alloc(); in drm_gpuvm_bo_create()
1553 vm_bo->vm = drm_gpuvm_get(gpuvm); in drm_gpuvm_bo_create()
1554 vm_bo->obj = obj; in drm_gpuvm_bo_create()
1557 kref_init(&vm_bo->kref); in drm_gpuvm_bo_create()
1558 INIT_LIST_HEAD(&vm_bo->list.gpuva); in drm_gpuvm_bo_create()
1559 INIT_LIST_HEAD(&vm_bo->list.entry.gem); in drm_gpuvm_bo_create()
1561 INIT_LIST_HEAD(&vm_bo->list.entry.extobj); in drm_gpuvm_bo_create()
1562 INIT_LIST_HEAD(&vm_bo->list.entry.evict); in drm_gpuvm_bo_create()
1573 struct drm_gpuvm *gpuvm = vm_bo->vm; in drm_gpuvm_bo_destroy()
1574 const struct drm_gpuvm_ops *ops = gpuvm->ops; in drm_gpuvm_bo_destroy()
1575 struct drm_gem_object *obj = vm_bo->obj; in drm_gpuvm_bo_destroy()
1585 list_del(&vm_bo->list.entry.gem); in drm_gpuvm_bo_destroy()
1587 if (ops && ops->vm_bo_free) in drm_gpuvm_bo_destroy()
1588 ops->vm_bo_free(vm_bo); in drm_gpuvm_bo_destroy()
1597 * drm_gpuvm_bo_put() - drop a struct drm_gpuvm_bo reference
1606 * dma-resv or gpuva.lock mutex).
1608 * This function may only be called from non-atomic context.
1618 return !!kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy); in drm_gpuvm_bo_put()
1632 if (vm_bo->vm == gpuvm) in __drm_gpuvm_bo_find()
1639 * drm_gpuvm_bo_find() - find the &drm_gpuvm_bo for the given
1661 * drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the
1687 return ERR_PTR(-ENOMEM); in drm_gpuvm_bo_obtain()
1690 list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list); in drm_gpuvm_bo_obtain()
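A short sketch of obtaining the &drm_gpuvm_bo in a bind path; the GEM's gpuva lock (its dma-resv in the default mode) must be held, and the reference returned by drm_gpuvm_bo_obtain() can be dropped once the new mappings have been linked, since each link holds its own reference:

	struct drm_gpuvm_bo *vm_bo;

	dma_resv_lock(obj->resv, NULL);

	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
	if (IS_ERR(vm_bo)) {
		dma_resv_unlock(obj->resv);
		return PTR_ERR(vm_bo);
	}

	/* ... drm_gpuva_link() the new mappings to vm_bo ... */

	drm_gpuvm_bo_put(vm_bo);
	dma_resv_unlock(obj->resv);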
1697 * drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo
1699 * @__vm_bo: A pre-allocated struct drm_gpuvm_bo.
1715 struct drm_gpuvm *gpuvm = __vm_bo->vm; in drm_gpuvm_bo_obtain_prealloc()
1716 struct drm_gem_object *obj = __vm_bo->obj; in drm_gpuvm_bo_obtain_prealloc()
1726 list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list); in drm_gpuvm_bo_obtain_prealloc()
1733 * drm_gpuvm_bo_extobj_add() - adds the &drm_gpuvm_bo to its &drm_gpuvm's
1744 struct drm_gpuvm *gpuvm = vm_bo->vm; in drm_gpuvm_bo_extobj_add()
1750 if (drm_gpuvm_is_extobj(gpuvm, vm_bo->obj)) in drm_gpuvm_bo_extobj_add()
1756 * drm_gpuvm_bo_evict() - add / remove a &drm_gpuvm_bo to / from the &drm_gpuvms
1766 struct drm_gpuvm *gpuvm = vm_bo->vm; in drm_gpuvm_bo_evict()
1767 struct drm_gem_object *obj = vm_bo->obj; in drm_gpuvm_bo_evict()
1770 dma_resv_assert_held(obj->resv); in drm_gpuvm_bo_evict()
1771 vm_bo->evicted = evict; in drm_gpuvm_bo_evict()
1775 * with the VM's common dma-resv lock. in drm_gpuvm_bo_evict()
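Drivers typically flip this state from their BO move / eviction notification, once per VM the object is mapped in; a sketch using the GEM-side iterator (the surrounding driver hook is hypothetical):

	struct drm_gpuvm_bo *vm_bo;

	dma_resv_assert_held(obj->resv);

	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
		drm_gpuvm_bo_evict(vm_bo, true);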
1794 if (drm_gpuva_it_iter_first(&gpuvm->rb.tree, in __drm_gpuva_insert()
1797 return -EEXIST; in __drm_gpuva_insert()
1799 va->vm = gpuvm; in __drm_gpuva_insert()
1801 drm_gpuva_it_insert(va, &gpuvm->rb.tree); in __drm_gpuva_insert()
1803 node = rb_prev(&va->rb.node); in __drm_gpuva_insert()
1805 head = &(to_drm_gpuva(node))->rb.entry; in __drm_gpuva_insert()
1807 head = &gpuvm->rb.list; in __drm_gpuva_insert()
1809 list_add(&va->rb.entry, head); in __drm_gpuva_insert()
1815 * drm_gpuva_insert() - insert a &drm_gpuva
1832 u64 addr = va->va.addr; in drm_gpuva_insert()
1833 u64 range = va->va.range; in drm_gpuva_insert()
1837 return -EINVAL; in drm_gpuva_insert()
1855 drm_gpuva_it_remove(va, &va->vm->rb.tree); in __drm_gpuva_remove()
1856 list_del_init(&va->rb.entry); in __drm_gpuva_remove()
1860 * drm_gpuva_remove() - remove a &drm_gpuva
1872 struct drm_gpuvm *gpuvm = va->vm; in drm_gpuva_remove()
1874 if (unlikely(va == &gpuvm->kernel_alloc_node)) { in drm_gpuva_remove()
1875 drm_WARN(gpuvm->drm, 1, in drm_gpuva_remove()
1881 drm_gpuvm_put(va->vm); in drm_gpuva_remove()
1886 * drm_gpuva_link() - link a &drm_gpuva
1897 * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
1902 struct drm_gem_object *obj = va->gem.obj; in drm_gpuva_link()
1903 struct drm_gpuvm *gpuvm = va->vm; in drm_gpuva_link()
1908 drm_WARN_ON(gpuvm->drm, obj != vm_bo->obj); in drm_gpuva_link()
1910 va->vm_bo = drm_gpuvm_bo_get(vm_bo); in drm_gpuva_link()
1913 list_add_tail(&va->gem.entry, &vm_bo->list.gpuva); in drm_gpuva_link()
1918 * drm_gpuva_unlink() - unlink a &drm_gpuva
1932 * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
1937 struct drm_gem_object *obj = va->gem.obj; in drm_gpuva_unlink()
1938 struct drm_gpuvm_bo *vm_bo = va->vm_bo; in drm_gpuva_unlink()
1943 drm_gem_gpuva_assert_lock_held(va->vm, obj); in drm_gpuva_unlink()
1944 list_del_init(&va->gem.entry); in drm_gpuva_unlink()
1946 va->vm_bo = NULL; in drm_gpuva_unlink()
1952 * drm_gpuva_find_first() - find the first &drm_gpuva in the given range
1963 u64 last = addr + range - 1; in drm_gpuva_find_first()
1965 return drm_gpuva_it_iter_first(&gpuvm->rb.tree, addr, last); in drm_gpuva_find_first()
1970 * drm_gpuva_find() - find a &drm_gpuva
1987 if (va->va.addr != addr || in drm_gpuva_find()
1988 va->va.range != range) in drm_gpuva_find()
1999 * drm_gpuva_find_prev() - find the &drm_gpuva before the given address
2013 if (!drm_gpuvm_range_valid(gpuvm, start - 1, 1)) in drm_gpuva_find_prev()
2016 return drm_gpuva_it_iter_first(&gpuvm->rb.tree, start - 1, start); in drm_gpuva_find_prev()
2021 * drm_gpuva_find_next() - find the &drm_gpuva after the given address
2038 return drm_gpuva_it_iter_first(&gpuvm->rb.tree, end, end + 1); in drm_gpuva_find_next()
2043 * drm_gpuvm_interval_empty() - indicate whether a given interval of the VA space
2059 * drm_gpuva_map() - helper to insert a &drm_gpuva according to a
2078 * drm_gpuva_remap() - helper to remap a &drm_gpuva according to a
2092 struct drm_gpuva *va = op->unmap->va; in drm_gpuva_remap()
2093 struct drm_gpuvm *gpuvm = va->vm; in drm_gpuva_remap()
2097 if (op->prev) { in drm_gpuva_remap()
2098 drm_gpuva_init_from_op(prev, op->prev); in drm_gpuva_remap()
2102 if (op->next) { in drm_gpuva_remap()
2103 drm_gpuva_init_from_op(next, op->next); in drm_gpuva_remap()
2110 * drm_gpuva_unmap() - helper to remove a &drm_gpuva according to a
2119 drm_gpuva_remove(op->va); in drm_gpuva_unmap()
2133 op.map.va.addr = req->map.va.addr; in op_map_cb()
2134 op.map.va.range = req->map.va.range; in op_map_cb()
2135 op.map.gem.obj = req->map.gem.obj; in op_map_cb()
2136 op.map.gem.offset = req->map.gem.offset; in op_map_cb()
2138 return fn->sm_step_map(&op, priv); in op_map_cb()
2152 r->prev = prev; in op_remap_cb()
2153 r->next = next; in op_remap_cb()
2154 r->unmap = unmap; in op_remap_cb()
2156 return fn->sm_step_remap(&op, priv); in op_remap_cb()
2172 return fn->sm_step_unmap(&op, priv); in op_unmap_cb()
2181 struct drm_gem_object *req_obj = req->map.gem.obj; in __drm_gpuvm_sm_map()
2184 u64 req_offset = req->map.gem.offset; in __drm_gpuvm_sm_map()
2185 u64 req_range = req->map.va.range; in __drm_gpuvm_sm_map()
2186 u64 req_addr = req->map.va.addr; in __drm_gpuvm_sm_map()
2191 return -EINVAL; in __drm_gpuvm_sm_map()
2194 struct drm_gem_object *obj = va->gem.obj; in __drm_gpuvm_sm_map()
2195 u64 offset = va->gem.offset; in __drm_gpuvm_sm_map()
2196 u64 addr = va->va.addr; in __drm_gpuvm_sm_map()
2197 u64 range = va->va.range; in __drm_gpuvm_sm_map()
2199 bool merge = !!va->gem.obj; in __drm_gpuvm_sm_map()
2225 .va.range = range - req_range, in __drm_gpuvm_sm_map()
2243 u64 ls_range = req_addr - addr; in __drm_gpuvm_sm_map()
2273 .map.va.addr = req_addr, in __drm_gpuvm_sm_map()
2274 .map.va.range = end - req_addr, in __drm_gpuvm_sm_map()
2288 .va.range = end - req_end, in __drm_gpuvm_sm_map()
2305 (addr - req_addr); in __drm_gpuvm_sm_map()
2326 .va.range = end - req_end, in __drm_gpuvm_sm_map()
2328 .gem.offset = offset + req_end - addr, in __drm_gpuvm_sm_map()
2341 .map.va.addr = addr, in __drm_gpuvm_sm_map()
2342 .map.va.range = req_end - addr, in __drm_gpuvm_sm_map()
2364 return -EINVAL; in __drm_gpuvm_sm_unmap()
2369 struct drm_gem_object *obj = va->gem.obj; in __drm_gpuvm_sm_unmap()
2370 u64 offset = va->gem.offset; in __drm_gpuvm_sm_unmap()
2371 u64 addr = va->va.addr; in __drm_gpuvm_sm_unmap()
2372 u64 range = va->va.range; in __drm_gpuvm_sm_unmap()
2377 prev.va.range = req_addr - addr; in __drm_gpuvm_sm_unmap()
2386 next.va.range = end - req_end; in __drm_gpuvm_sm_unmap()
2388 next.gem.offset = offset + (req_end - addr); in __drm_gpuvm_sm_unmap()
2413 * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
2431 * A sequence of callbacks can contain map, unmap and remap operations, but
2436 * operations and a single map operation. The latter one represents the original
2437 * map operation requested by the caller.
2445 const struct drm_gpuvm_ops *ops = gpuvm->ops; in drm_gpuvm_sm_map()
2447 if (unlikely(!(ops && ops->sm_step_map && in drm_gpuvm_sm_map()
2448 ops->sm_step_remap && in drm_gpuvm_sm_map()
2449 ops->sm_step_unmap))) in drm_gpuvm_sm_map()
2450 return -EINVAL; in drm_gpuvm_sm_map()
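The three callbacks are supplied through &drm_gpuvm_ops; a minimal sketch with hypothetical driver callbacks, each receiving the &drm_gpuva_op plus the opaque @priv pointer passed to drm_gpuvm_sm_map():

	static int my_step_map(struct drm_gpuva_op *op, void *priv)
	{
		/* program the page tables for op->map, insert the new &drm_gpuva */
		return 0;
	}

	static const struct drm_gpuvm_ops my_gpuvm_ops = {
		.sm_step_map   = my_step_map,
		.sm_step_remap = my_step_remap,
		.sm_step_unmap = my_step_unmap,
	};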
2457 * drm_gpuvm_sm_unmap() - calls the &drm_gpuva_ops to split on unmap
2487 const struct drm_gpuvm_ops *ops = gpuvm->ops; in drm_gpuvm_sm_unmap()
2489 if (unlikely(!(ops && ops->sm_step_remap && in drm_gpuvm_sm_unmap()
2490 ops->sm_step_unmap))) in drm_gpuvm_sm_unmap()
2491 return -EINVAL; in drm_gpuvm_sm_unmap()
2503 switch (op->op) { in drm_gpuva_sm_step_lock()
2505 if (op->remap.unmap->va->gem.obj) in drm_gpuva_sm_step_lock()
2506 return drm_exec_lock_obj(exec, op->remap.unmap->va->gem.obj); in drm_gpuva_sm_step_lock()
2509 if (op->unmap.va->gem.obj) in drm_gpuva_sm_step_lock()
2510 return drm_exec_lock_obj(exec, op->unmap.va->gem.obj); in drm_gpuva_sm_step_lock()
2524 * drm_gpuvm_sm_map_exec_lock() - locks the objects touched by a drm_gpuvm_sm_map()
2544 * switch (op->op) {
2546 * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
2561 * the VM. This is safe to do in the case of overlapping DRIVER_VM_BIND_OPs,
2582 struct drm_gem_object *req_obj = req->map.gem.obj; in drm_gpuvm_sm_map_exec_lock()
2596 * drm_gpuvm_sm_unmap_exec_lock() - locks the objects touched by drm_gpuvm_sm_unmap()
2621 const struct drm_gpuvm_ops *fn = gpuvm->ops; in gpuva_op_alloc()
2624 if (fn && fn->op_alloc) in gpuva_op_alloc()
2625 op = fn->op_alloc(); in gpuva_op_alloc()
2639 const struct drm_gpuvm_ops *fn = gpuvm->ops; in gpuva_op_free()
2641 if (fn && fn->op_free) in gpuva_op_free()
2642 fn->op_free(op); in gpuva_op_free()
2652 struct drm_gpuvm *vm; in drm_gpuva_sm_step() member
2655 struct drm_gpuvm *gpuvm = args->vm; in drm_gpuva_sm_step()
2656 struct drm_gpuva_ops *ops = args->ops; in drm_gpuva_sm_step()
2665 if (op->op == DRM_GPUVA_OP_REMAP) { in drm_gpuva_sm_step()
2666 struct drm_gpuva_op_remap *__r = &__op->remap; in drm_gpuva_sm_step()
2667 struct drm_gpuva_op_remap *r = &op->remap; in drm_gpuva_sm_step()
2669 r->unmap = kmemdup(__r->unmap, sizeof(*r->unmap), in drm_gpuva_sm_step()
2671 if (unlikely(!r->unmap)) in drm_gpuva_sm_step()
2674 if (__r->prev) { in drm_gpuva_sm_step()
2675 r->prev = kmemdup(__r->prev, sizeof(*r->prev), in drm_gpuva_sm_step()
2677 if (unlikely(!r->prev)) in drm_gpuva_sm_step()
2681 if (__r->next) { in drm_gpuva_sm_step()
2682 r->next = kmemdup(__r->next, sizeof(*r->next), in drm_gpuva_sm_step()
2684 if (unlikely(!r->next)) in drm_gpuva_sm_step()
2689 list_add_tail(&op->entry, &ops->list); in drm_gpuva_sm_step()
2694 kfree(op->remap.unmap); in drm_gpuva_sm_step()
2696 kfree(op->remap.prev); in drm_gpuva_sm_step()
2700 return -ENOMEM; in drm_gpuva_sm_step()
2716 struct drm_gpuvm *vm; in __drm_gpuvm_sm_map_ops_create() member
2723 return ERR_PTR(-ENOMEM); in __drm_gpuvm_sm_map_ops_create()
2725 INIT_LIST_HEAD(&ops->list); in __drm_gpuvm_sm_map_ops_create()
2727 args.vm = gpuvm; in __drm_gpuvm_sm_map_ops_create()
2742 * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
2744 * @req: map request arguments
2750 * in the given order. It can contain map, unmap and remap operations, but it
2755 * operations and a single map operation. The latter one represents the original
2756 * map operation requested by the caller.
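A condensed sketch of the two-phase usage this function enables; the full variants, including the vm_bo handling, are shown in the examples further up, and the per-op handler here is hypothetical:

	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;
	int ret = 0;

	ops = drm_gpuvm_sm_map_ops_create(gpuvm, &req);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops) {
		ret = my_handle_op(gpuvm, op);
		if (ret)
			break;
	}

	drm_gpuva_ops_free(gpuvm, ops);
	return ret;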
2779 * drm_gpuvm_madvise_ops_create() - creates the &drm_gpuva_ops to split
2781 * @req: map request arguments
2784 * of existent mapping(s) at start or end, based on the map request.
2787 * in the given order. It can contain map and remap operations, but it
2792 * map operations. The two map operations correspond to: one from start to the
2816 * drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
2850 struct drm_gpuvm *vm; in drm_gpuvm_sm_unmap_ops_create() member
2857 return ERR_PTR(-ENOMEM); in drm_gpuvm_sm_unmap_ops_create()
2859 INIT_LIST_HEAD(&ops->list); in drm_gpuvm_sm_unmap_ops_create()
2861 args.vm = gpuvm; in drm_gpuvm_sm_unmap_ops_create()
2878 * drm_gpuvm_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch
2907 return ERR_PTR(-ENOMEM); in drm_gpuvm_prefetch_ops_create()
2909 INIT_LIST_HEAD(&ops->list); in drm_gpuvm_prefetch_ops_create()
2914 ret = -ENOMEM; in drm_gpuvm_prefetch_ops_create()
2918 op->op = DRM_GPUVA_OP_PREFETCH; in drm_gpuvm_prefetch_ops_create()
2919 op->prefetch.va = va; in drm_gpuvm_prefetch_ops_create()
2920 list_add_tail(&op->entry, &ops->list); in drm_gpuvm_prefetch_ops_create()
2932 * drm_gpuvm_bo_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
2945 * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
2957 drm_gem_gpuva_assert_lock_held(vm_bo->vm, vm_bo->obj); in drm_gpuvm_bo_unmap_ops_create()
2961 return ERR_PTR(-ENOMEM); in drm_gpuvm_bo_unmap_ops_create()
2963 INIT_LIST_HEAD(&ops->list); in drm_gpuvm_bo_unmap_ops_create()
2966 op = gpuva_op_alloc(vm_bo->vm); in drm_gpuvm_bo_unmap_ops_create()
2968 ret = -ENOMEM; in drm_gpuvm_bo_unmap_ops_create()
2972 op->op = DRM_GPUVA_OP_UNMAP; in drm_gpuvm_bo_unmap_ops_create()
2973 op->unmap.va = va; in drm_gpuvm_bo_unmap_ops_create()
2974 list_add_tail(&op->entry, &ops->list); in drm_gpuvm_bo_unmap_ops_create()
2980 drm_gpuva_ops_free(vm_bo->vm, ops); in drm_gpuvm_bo_unmap_ops_create()
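A sketch of the typical consumer, tearing down every mapping of a GEM object within one VM; the GEM's gpuva lock must be held as noted above, and the unlink/unmap/kfree order mirrors the driver example earlier in this file (assuming the &drm_gpuva structures were kmalloc'ed by the driver):

	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;

	ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops) {
		/* ... tear down the page table mappings for op->unmap.va ... */
		drm_gpuva_unlink(op->unmap.va);
		drm_gpuva_unmap(&op->unmap);
		kfree(op->unmap.va);
	}

	drm_gpuva_ops_free(vm_bo->vm, ops);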
2986 * drm_gpuva_ops_free() - free the given &drm_gpuva_ops
3000 list_del(&op->entry); in drm_gpuva_ops_free()
3002 if (op->op == DRM_GPUVA_OP_REMAP) { in drm_gpuva_ops_free()
3003 kfree(op->remap.prev); in drm_gpuva_ops_free()
3004 kfree(op->remap.next); in drm_gpuva_ops_free()
3005 kfree(op->remap.unmap); in drm_gpuva_ops_free()