Lines Matching full:va
37 * The DRM GPU VA Manager, represented by struct drm_gpuvm, keeps track of a
38 * GPU's virtual address (VA) space and manages the corresponding virtual
43 * all existing GPU VA mappings using this &drm_gem_object as backing buffer.
48 * The GPU VA manager internally uses an rb-tree to manage the
52 * portion of VA space reserved by the kernel. This node is initialized together
53 * with the GPU VA manager instance and removed when the GPU VA manager is
106 * Besides its capability to manage and represent a GPU VA space, the
107 * GPU VA manager also provides functions to let the &drm_gpuvm calculate a
110 * Therefore, the DRM GPU VA manager provides an algorithm implementing splitting
111 * and merging of existing GPU VA mappings with the ones that are requested to
120 * of the GPU VA space.
122 * Depending on how the new GPU VA mapping intersects with the existing mappings
123 * of the GPU VA space, the &drm_gpuvm_ops callbacks contain an arbitrary amount
147 * call back into the driver in order to unmap a range of GPU VA space. The
164 * To update the &drm_gpuvm's view of the GPU VA space, drm_gpuva_insert() and
171 * The following diagram depicts the basic relationships of existing GPU VA
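Where the overview mentions drm_gpuva_insert(), the driver side might look like the minimal sketch below; the allocation strategy is an assumption, while the va.addr/va.range/gem.obj/gem.offset fields match those used throughout the listing.

        struct drm_gpuva *va = kzalloc(sizeof(*va), GFP_KERNEL);
        int ret;

        if (!va)
                return -ENOMEM;

        va->va.addr = addr;             /* start of the new mapping */
        va->va.range = range;           /* size of the new mapping */
        va->gem.obj = obj;              /* backing &drm_gem_object */
        va->gem.offset = offset;        /* offset within the backing buffer */

        ret = drm_gpuva_insert(gpuvm, va); /* fails if the range overlaps */
        if (ret)
                kfree(va);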
426 * This logic handles GPU VA range updates by generating remap and map operations
498 * &drm_gem_object buffers' GPU VA lists and &drm_gpuvm_bo abstractions by
503 * the corresponding lock whenever the &drm_gem_object's GPU VA list is accessed
554 * .map.va.addr = addr,
555 * .map.va.range = range,
573 * struct drm_gpuva *va;
577 * va = driver_gpuva_alloc();
578 * if (!va)
579 * ; // unwind previous VA space updates,
583 * drm_gpuva_map(gpuvm, va, &op->map);
584 * drm_gpuva_link(va, vm_bo);
590 * va = op->remap.unmap->va;
595 * ; // unwind previous VA space
603 * ; // unwind previous VA space
612 * drm_gpuva_link(prev, va->vm_bo);
614 * drm_gpuva_link(next, va->vm_bo);
615 * drm_gpuva_unlink(va);
620 * va = op->unmap->va;
623 * drm_gpuva_unlink(va);
713 * struct drm_gpuva *va = op->remap.unmap->va;
718 * drm_gpuva_link(ctx->prev_va, va->vm_bo);
723 * drm_gpuva_link(ctx->next_va, va->vm_bo);
727 * drm_gpuva_unlink(va);
728 * kfree(va);
735 * drm_gpuva_unlink(op->unmap.va);
737 * kfree(op->unmap.va);
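Pieced together from the fragments above, the remap branch of this ops-walking example plausibly reads as follows; ctx and its preallocated prev_va/next_va follow the naming visible in the fragments and are otherwise assumptions.

        struct drm_gpuva *va = op->remap.unmap->va;

        drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);

        if (op->remap.prev) {
                drm_gpuva_link(ctx->prev_va, va->vm_bo);
                ctx->prev_va = NULL;    /* ownership moved into the VA space */
        }
        if (op->remap.next) {
                drm_gpuva_link(ctx->next_va, va->vm_bo);
                ctx->next_va = NULL;
        }

        drm_gpuva_unlink(va);
        kfree(va);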
940 #define GPUVA_START(node) ((node)->va.addr)
941 #define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)
951 struct drm_gpuva *va);
952 static void __drm_gpuva_remove(struct drm_gpuva *va);
983 u64 kstart = gpuvm->kernel_alloc_node.va.addr; in drm_gpuvm_in_kernel_node()
984 u64 krange = gpuvm->kernel_alloc_node.va.range; in drm_gpuvm_in_kernel_node()
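The body of drm_gpuvm_in_kernel_node() is not among the matches above; a plausible sketch of the rest, assuming the usual half-open interval overlap test:

        u64 kend = kstart + krange;

        /* true if [addr, addr + range) intersects the reserved node */
        return krange && addr < kend && kstart < addr + range;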
1051 * @name: the name of the GPU VA space
1055 * @start_offset: the start offset of the GPU VA space
1056 * @range: the size of the GPU VA space
1057 * @reserve_offset: the start of the kernel reserved GPU VA area
1058 * @reserve_range: the size of the kernel reserved GPU VA area
1100 gpuvm->kernel_alloc_node.va.addr = reserve_offset; in drm_gpuvm_init()
1101 gpuvm->kernel_alloc_node.va.range = reserve_range; in drm_gpuvm_init()
1115 if (gpuvm->kernel_alloc_node.va.range) in drm_gpuvm_fini()
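For illustration, initializing such a VA space with a reserved kernel node could look like the sketch below; the flags, drm, r_obj and ops arguments are assumptions (they do not contain the string "va" and are therefore absent from this listing), while the offset/range parameters follow the kernel-doc above.

        /* 47-bit VA space with one reserved page at address 0 */
        drm_gpuvm_init(gpuvm, "example-vm", 0 /* flags */, drm, r_obj,
                       0, 1ULL << 47,   /* start_offset, range */
                       0, PAGE_SIZE,    /* reserve_offset, reserve_range */
                       &driver_gpuvm_ops);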
1271 * @addr: the start address within the VA space
1272 * @range: the range to iterate within the VA space
1285 struct drm_gpuva *va; in drm_gpuvm_prepare_range() local
1289 drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) { in drm_gpuvm_prepare_range()
1290 struct drm_gem_object *obj = va->gem.obj; in drm_gpuvm_prepare_range()
1398 * @addr: the start address within the VA space
1399 * @range: the range to iterate within the VA space
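drm_gpuvm_prepare_range() is designed to run inside a &drm_exec loop; a sketch assuming the three-argument drm_exec_init() and the usual retry-on-contention pattern:

        struct drm_exec exec;
        int ret = 0;

        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(&exec) {
                /* lock and reserve one fence slot on every backing
                 * &drm_gem_object mapped in [addr, addr + range) */
                ret = drm_gpuvm_prepare_range(gpuvm, &exec, addr, range, 1);
                drm_exec_retry_on_contention(&exec);
                if (ret)
                        break;
        }

        /* ... submit work, add fences ... */

        drm_exec_fini(&exec);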
1789 struct drm_gpuva *va) in __drm_gpuva_insert() argument
1795 GPUVA_START(va), in __drm_gpuva_insert()
1796 GPUVA_LAST(va))) in __drm_gpuva_insert()
1799 va->vm = gpuvm; in __drm_gpuva_insert()
1801 drm_gpuva_it_insert(va, &gpuvm->rb.tree); in __drm_gpuva_insert()
1803 node = rb_prev(&va->rb.node); in __drm_gpuva_insert()
1809 list_add(&va->rb.entry, head); in __drm_gpuva_insert()
1817 * @va: the &drm_gpuva to insert
1823 * VA space, such as drm_gpuvm_for_each_va_safe() and
1830 struct drm_gpuva *va) in drm_gpuva_insert() argument
1832 u64 addr = va->va.addr; in drm_gpuva_insert()
1833 u64 range = va->va.range; in drm_gpuva_insert()
1839 ret = __drm_gpuva_insert(gpuvm, va); in drm_gpuva_insert()
1853 __drm_gpuva_remove(struct drm_gpuva *va) in __drm_gpuva_remove() argument
1855 drm_gpuva_it_remove(va, &va->vm->rb.tree); in __drm_gpuva_remove()
1856 list_del_init(&va->rb.entry); in __drm_gpuva_remove()
1861 * @va: the &drm_gpuva to remove
1863 * This removes the given &va from the underlying tree.
1866 * VA space, such as drm_gpuvm_for_each_va_safe() and
1870 drm_gpuva_remove(struct drm_gpuva *va) in drm_gpuva_remove() argument
1872 struct drm_gpuvm *gpuvm = va->vm; in drm_gpuva_remove()
1874 if (unlikely(va == &gpuvm->kernel_alloc_node)) { in drm_gpuva_remove()
1880 __drm_gpuva_remove(va); in drm_gpuva_remove()
1881 drm_gpuvm_put(va->vm); in drm_gpuva_remove()
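Combined with drm_gpuva_find() further down, tearing a mapping back out is symmetric; a minimal sketch:

        struct drm_gpuva *va;

        va = drm_gpuva_find(gpuvm, addr, range);        /* exact match only */
        if (!va)
                return -ENOENT;

        drm_gpuva_remove(va);   /* also drops the vm reference taken on insert */
        kfree(va);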
1887 * @va: the &drm_gpuva to link
1890 * This adds the given &va to the GPU VA list of the &drm_gpuvm_bo and the
1900 drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo) in drm_gpuva_link() argument
1902 struct drm_gem_object *obj = va->gem.obj; in drm_gpuva_link()
1903 struct drm_gpuvm *gpuvm = va->vm; in drm_gpuva_link()
1910 va->vm_bo = drm_gpuvm_bo_get(vm_bo); in drm_gpuva_link()
1913 list_add_tail(&va->gem.entry, &vm_bo->list.gpuva); in drm_gpuva_link()
1919 * @va: the &drm_gpuva to unlink
1921 * This removes the given &va from the GPU VA list of the &drm_gem_object it is
1924 * This removes the given &va from the GPU VA list of the &drm_gpuvm_bo and
1935 drm_gpuva_unlink(struct drm_gpuva *va) in drm_gpuva_unlink() argument
1937 struct drm_gem_object *obj = va->gem.obj; in drm_gpuva_unlink()
1938 struct drm_gpuvm_bo *vm_bo = va->vm_bo; in drm_gpuva_unlink()
1943 drm_gem_gpuva_assert_lock_held(va->vm, obj); in drm_gpuva_unlink()
1944 list_del_init(&va->gem.entry); in drm_gpuva_unlink()
1946 va->vm_bo = NULL; in drm_gpuva_unlink()
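A sketch of the link side, assuming the &drm_gpuvm_bo is obtained via drm_gpuvm_bo_obtain() and the GEM's dma-resv lock satisfies drm_gem_gpuva_assert_lock_held():

        struct drm_gpuvm_bo *vm_bo;

        vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
        if (IS_ERR(vm_bo))
                return PTR_ERR(vm_bo);

        dma_resv_lock(obj->resv, NULL);
        drm_gpuva_link(va, vm_bo);      /* takes its own vm_bo reference */
        dma_resv_unlock(obj->resv);

        drm_gpuvm_bo_put(vm_bo);        /* drop the obtain() reference again */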
1981 struct drm_gpuva *va; in drm_gpuva_find() local
1983 va = drm_gpuva_find_first(gpuvm, addr, range); in drm_gpuva_find()
1984 if (!va) in drm_gpuva_find()
1987 if (va->va.addr != addr || in drm_gpuva_find()
1988 va->va.range != range) in drm_gpuva_find()
1991 return va; in drm_gpuva_find()
2001 * @start: the given GPU VA's start address
2003 * Find the adjacent &drm_gpuva before the GPU VA with the given &start address.
2005 * Note that if there is any free space between the GPU VA mappings, no mapping
2023 * @end: the given GPU VA's end address
2025 * Find the adjacent &drm_gpuva after the GPU VA with the given &end address.
2027 * Note that if there is any free space between the GPU VA mappings, no mapping
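Per the notes above, the prev/next lookups only return a neighbour that is directly adjacent, which is what makes them usable for merge decisions; a sketch:

        struct drm_gpuva *prev, *next;

        /* NULL if there is free space between the mappings */
        prev = drm_gpuva_find_prev(gpuvm, va->va.addr);
        next = drm_gpuva_find_next(gpuvm, va->va.addr + va->va.range);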
2043 * drm_gpuvm_interval_empty() - indicate whether a given interval of the VA space
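A typical use of drm_gpuvm_interval_empty() is validating a fixed-address mapping request before building any operations; a sketch:

        if (!drm_gpuvm_interval_empty(gpuvm, req_addr, req_range))
                return -EBUSY;  /* range already backed by mappings */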
2062 * @va: the &drm_gpuva to insert
2063 * @op: the &drm_gpuva_op_map to initialize @va with
2065 * Initializes the @va from the @op and inserts it into the given @gpuvm.
2069 struct drm_gpuva *va, in drm_gpuva_map() argument
2072 drm_gpuva_init_from_op(va, op); in drm_gpuva_map()
2073 drm_gpuva_insert(gpuvm, va); in drm_gpuva_map()
2092 struct drm_gpuva *va = op->unmap->va; in drm_gpuva_remap() local
2093 struct drm_gpuvm *gpuvm = va->vm; in drm_gpuva_remap()
2095 drm_gpuva_remove(va); in drm_gpuva_remap()
2119 drm_gpuva_remove(op->va); in drm_gpuva_unmap()
2133 op.map.va.addr = req->map.va.addr; in op_map_cb()
2134 op.map.va.range = req->map.va.range; in op_map_cb()
2161 struct drm_gpuva *va, bool merge, bool madvise) in op_unmap_cb() argument
2169 op.unmap.va = va; in op_unmap_cb()
2183 struct drm_gpuva *va, *next; in __drm_gpuvm_sm_map() local
2185 u64 req_range = req->map.va.range; in __drm_gpuvm_sm_map()
2186 u64 req_addr = req->map.va.addr; in __drm_gpuvm_sm_map()
2193 drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) { in __drm_gpuvm_sm_map()
2194 struct drm_gem_object *obj = va->gem.obj; in __drm_gpuvm_sm_map()
2195 u64 offset = va->gem.offset; in __drm_gpuvm_sm_map()
2196 u64 addr = va->va.addr; in __drm_gpuvm_sm_map()
2197 u64 range = va->va.range; in __drm_gpuvm_sm_map()
2199 bool merge = !!va->gem.obj; in __drm_gpuvm_sm_map()
2209 ret = op_unmap_cb(ops, priv, va, merge, madvise); in __drm_gpuvm_sm_map()
2216 ret = op_unmap_cb(ops, priv, va, merge, madvise); in __drm_gpuvm_sm_map()
2224 .va.addr = req_end, in __drm_gpuvm_sm_map()
2225 .va.range = range - req_range, in __drm_gpuvm_sm_map()
2230 .va = va, in __drm_gpuvm_sm_map()
2245 .va.addr = addr, in __drm_gpuvm_sm_map()
2246 .va.range = ls_range, in __drm_gpuvm_sm_map()
2250 struct drm_gpuva_op_unmap u = { .va = va }; in __drm_gpuvm_sm_map()
2273 .map.va.addr = req_addr, in __drm_gpuvm_sm_map()
2274 .map.va.range = end - req_addr, in __drm_gpuvm_sm_map()
2287 .va.addr = req_end, in __drm_gpuvm_sm_map()
2288 .va.range = end - req_end, in __drm_gpuvm_sm_map()
2308 ret = op_unmap_cb(ops, priv, va, merge, madvise); in __drm_gpuvm_sm_map()
2316 ret = op_unmap_cb(ops, priv, va, merge, madvise); in __drm_gpuvm_sm_map()
2325 .va.addr = req_end, in __drm_gpuvm_sm_map()
2326 .va.range = end - req_end, in __drm_gpuvm_sm_map()
2331 .va = va, in __drm_gpuvm_sm_map()
2341 .map.va.addr = addr, in __drm_gpuvm_sm_map()
2342 .map.va.range = req_end - addr, in __drm_gpuvm_sm_map()
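The whole split/merge walk above is driven by a request structure embedding a &drm_gpuva_op_map, as the req->map accesses in op_map_cb() show; a sketch of invoking it, where the drm_gpuvm_map_req type name and the gem sub-fields are assumptions inferred from those accesses:

        struct drm_gpuvm_map_req req = {
                .map.va.addr = addr,            /* where to map */
                .map.va.range = range,          /* how much to map */
                .map.gem.obj = obj,             /* backing buffer */
                .map.gem.offset = offset,       /* offset therein */
        };

        /* fires map/remap/unmap callbacks into the driver's ops */
        ret = drm_gpuvm_sm_map(gpuvm, driver_ctx, &req);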
2359 struct drm_gpuva *va, *next; in __drm_gpuvm_sm_unmap() local
2366 drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) { in __drm_gpuvm_sm_unmap()
2369 struct drm_gem_object *obj = va->gem.obj; in __drm_gpuvm_sm_unmap()
2370 u64 offset = va->gem.offset; in __drm_gpuvm_sm_unmap()
2371 u64 addr = va->va.addr; in __drm_gpuvm_sm_unmap()
2372 u64 range = va->va.range; in __drm_gpuvm_sm_unmap()
2376 prev.va.addr = addr; in __drm_gpuvm_sm_unmap()
2377 prev.va.range = req_addr - addr; in __drm_gpuvm_sm_unmap()
2385 next.va.addr = req_end; in __drm_gpuvm_sm_unmap()
2386 next.va.range = end - req_end; in __drm_gpuvm_sm_unmap()
2394 struct drm_gpuva_op_unmap unmap = { .va = va }; in __drm_gpuvm_sm_unmap()
2403 ret = op_unmap_cb(ops, priv, va, false, false); in __drm_gpuvm_sm_unmap()
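The unmap walk splits mappings that stick out of the requested range into prev/next remainders, as the prev.va/next.va assignments above show; a sketch of the call (the exact parameter list is an assumption):

        /* unmap [req_addr, req_addr + req_range), remapping partial overlaps */
        ret = drm_gpuvm_sm_unmap(gpuvm, driver_ctx, req_addr, req_range);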
2414 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2418 * This function iterates the given range of the GPU VA space. It utilizes the
2422 * Drivers may use these callbacks to update the GPU VA space right away within
2425 * be called before the &drm_gpuvm's view of the GPU VA space was
2427 * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2458 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2463 * This function iterates the given range of the GPU VA space. It utilizes the
2467 * Drivers may use these callbacks to update the GPU VA space right away within
2470 * called before the &drm_gpuvm's view of the GPU VA space was updated
2472 * of the GPU VA space drm_gpuva_insert(), drm_gpuva_destroy_locked() and/or
2505 if (op->remap.unmap->va->gem.obj) in drm_gpuva_sm_step_lock()
2506 return drm_exec_lock_obj(exec, op->remap.unmap->va->gem.obj); in drm_gpuva_sm_step_lock()
2509 if (op->unmap.va->gem.obj) in drm_gpuva_sm_step_lock()
2510 return drm_exec_lock_obj(exec, op->unmap.va->gem.obj); in drm_gpuva_sm_step_lock()
2525 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2597 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2743 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2759 * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
2761 * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2780 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2796 * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
2798 * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2818 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2833 * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
2835 * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2879 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2901 struct drm_gpuva *va; in drm_gpuvm_prefetch_ops_create() local
2911 drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) { in drm_gpuvm_prefetch_ops_create()
2919 op->prefetch.va = va; in drm_gpuvm_prefetch_ops_create()
2954 struct drm_gpuva *va; in drm_gpuvm_bo_unmap_ops_create() local
2965 drm_gpuvm_bo_for_each_va(va, vm_bo) { in drm_gpuvm_bo_unmap_ops_create()
2973 op->unmap.va = va; in drm_gpuvm_bo_unmap_ops_create()
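Both drm_gpuvm_prefetch_ops_create() and drm_gpuvm_bo_unmap_ops_create() hand back a &drm_gpuva_ops list for the driver to walk and free; a sketch of consuming the unmap variant (driver_teardown_mapping() is a hypothetical driver helper):

        struct drm_gpuva_ops *ops;
        struct drm_gpuva_op *op;

        ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
        if (IS_ERR(ops))
                return PTR_ERR(ops);

        drm_gpuva_for_each_op(op, ops) {
                /* every op is DRM_GPUVA_OP_UNMAP here */
                driver_teardown_mapping(op->unmap.va);  /* hypothetical */
        }

        drm_gpuva_ops_free(vm_bo->vm, ops);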