Lines Matching +full:charge +full:- +full:current +full:- +full:limit +full:- +full:mapping
1 /* SPDX-License-Identifier: GPL-2.0-only OR MIT */
9 * Permission is hereby granted, free of charge, to any person obtaining a
28 #include <linux/dma-resv.h>
42 * enum drm_gpuva_flags - flags for struct drm_gpuva
55 * Flag indicating that the &drm_gpuva is a sparse mapping.
66 * struct drm_gpuva - structure to track a GPU VA mapping
68 * This structure represents a GPU VA mapping and is associated with a
86 * @flags: the &drm_gpuva_flags for this mapping
126 * @rb: structure containing data to store &drm_gpuvas in a rb-tree
130 * @rb.node: the rb-tree node
138 * through the rb-tree while doing modifications on the rb-tree
144 * @rb.__subtree_last: needed by the interval tree, holding last-in-subtree
166 va->va.addr = addr; in drm_gpuva_init()
167 va->va.range = range; in drm_gpuva_init()
168 va->gem.obj = obj; in drm_gpuva_init()
169 va->gem.offset = offset; in drm_gpuva_init()
173 * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
181 va->flags |= DRM_GPUVA_INVALIDATED; in drm_gpuva_invalidate()
183 va->flags &= ~DRM_GPUVA_INVALIDATED; in drm_gpuva_invalidate()
187 * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
195 return va->flags & DRM_GPUVA_INVALIDATED; in drm_gpuva_invalidated()
199 * enum drm_gpuvm_flags - flags for struct drm_gpuvm
215 * struct drm_gpuvm - DRM GPU VA Manager
257 * @rb.tree: the rb-tree to track GPU VA mappings
344 * drm_gpuvm_get() - acquire a struct drm_gpuvm reference
355 kref_get(&gpuvm->kref); in drm_gpuvm_get()
369 * drm_gpuvm_resv_protected() - indicates whether &DRM_GPUVM_RESV_PROTECTED is
378 return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED; in drm_gpuvm_resv_protected()
382 * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
387 #define drm_gpuvm_resv(gpuvm__) ((gpuvm__)->r_obj->resv)
390 * drm_gpuvm_resv_obj() - returns the &drm_gem_object holding the &drm_gpuvm's
397 #define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj)
412 * drm_gpuvm_is_extobj() - indicates whether the given &drm_gem_object is an
424 return obj && obj->resv != drm_gpuvm_resv(gpuvm); in drm_gpuvm_is_extobj()
430 if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list)) in __drm_gpuva_next()
437 * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas
448 * of elements. It assumes that @end__ is within (or is the upper limit of) the
453 for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \
454 va__ && (va__->va.addr < (end__)); \
458 * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of
472 * upper limit of) the &drm_gpuvm. This iterator does not skip over the
476 for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \
478 va__ && (va__->va.addr < (end__)); \
482 * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas
490 list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry)
493 * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas
503 list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)
506 * struct drm_gpuvm_exec - &drm_gpuvm abstraction of &drm_exec
575 * drm_gpuvm_exec_unlock() - unlock all dma-resv of all associated BOs
578 * Releases all dma-resv locks of all &drm_gem_objects previously acquired
586 drm_exec_fini(&vm_exec->exec); in drm_gpuvm_exec_unlock()
597 * drm_gpuvm_exec_resv_add_fence() - add fence to private and all extobj
600 * @private_usage: private dma-resv usage
601 * @extobj_usage: extobj dma-resv usage
611 drm_gpuvm_resv_add_fence(vm_exec->vm, &vm_exec->exec, fence, in drm_gpuvm_exec_resv_add_fence()
616 * drm_gpuvm_exec_validate() - validate all BOs marked as evicted
626 return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec); in drm_gpuvm_exec_validate()
630 * struct drm_gpuvm_bo - structure representing a &drm_gpuvm and
638 * Furthermore it is used to cache evicted GEM objects for a certain GPU-VM to
642 * a GEM object is mapped first in a GPU-VM and release the instance once the
643 * last mapping of the GEM object in this GPU-VM is unmapped.
660 * protected by the &drm_gem_object's dma-resv lock.
718 * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference
729 kref_get(&vm_bo->kref); in drm_gpuvm_bo_get()
742 * drm_gpuvm_bo_gem_evict() - add/remove all &drm_gpuvm_bo's in the list
762 * drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva
772 list_for_each_entry(va__, &(vm_bo)->list.gpuva, gem.entry)
775 * drm_gpuvm_bo_for_each_va_safe() - iterator to safely walk over a list of
788 list_for_each_entry_safe(va__, next__, &(vm_bo)->list.gpuva, gem.entry)
791 * enum drm_gpuva_op_type - GPU VA operation type
818 * struct drm_gpuva_op_map - GPU VA map operation
830 * @va.addr: the base address of the new mapping
835 * @va.range: the range of the new mapping
857 * struct drm_gpuva_op_unmap - GPU VA unmap operation
872 * original mapping request.
882 * struct drm_gpuva_op_remap - GPU VA remap operation
887 * by inserting a new GPU VA mapping or by partially unmapping existent
888 * mapping(s), hence it consists of a maximum of two map and one unmap
891 * The @unmap operation takes care of removing the original existing mapping.
894 * If either a new mapping's start address is aligned with the start address
895 * of the old mapping or the new mapping's end address is aligned with the
896 * end address of the old mapping, either @prev or @next is NULL.
906 * @prev: the preceding part of a split mapping
911 * @next: the subsequent part of a split mapping
916 * @unmap: the unmap operation for the original existing mapping
922 * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
935 * struct drm_gpuva_op - GPU VA operation
979 * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
989 * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
995 #define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)
998 * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
1007 list_for_each_entry_safe(op, next, &(ops)->list, entry)
1010 * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
1018 list_for_each_entry_from_reverse(op, &(ops)->list, entry)
1021 * drm_gpuva_for_each_op_reverse - iterator to walk over &drm_gpuva_ops in reverse
1028 list_for_each_entry_reverse(op, &(ops)->list, entry)
1031 * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
1035 list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)
1038 * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
1042 list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)
1045 * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
1046 * @op: the current &drm_gpuva_op
1051 * drm_gpuva_next_op() - next &drm_gpuva_op in the list
1052 * @op: the current &drm_gpuva_op
1077 drm_gpuva_init(va, op->va.addr, op->va.range, in drm_gpuva_init_from_op()
1078 op->gem.obj, op->gem.offset); in drm_gpuva_init_from_op()
1082 * struct drm_gpuvm_ops - callbacks for split/merge steps
1159 * mapping once all previous steps were completed
1170 * &drm_gpuvm_sm_unmap to split up an existent mapping
1172 * This callback is called when an existent mapping needs to be split up
1173 * This is the case when either a newly requested mapping overlaps or
1174 * is enclosed by an existent mapping or a partial unmap of an existent
1175 * mapping is requested.
1187 * &drm_gpuvm_sm_unmap to unmap an existent mapping
1189 * This callback is called when an existent mapping needs to be unmapped.
1190 * This is the case when either a newly requested mapping encloses an
1191 * existent mapping or an unmap of an existent mapping is requested.
1220 * drm_gpuva_op_remap_to_unmap_range() - Helper to get the start and range of
1227 * range of the address space that was previously covered by the mapping being
1228 * re-mapped, but is now empty.
1234 const u64 va_start = op->prev ? in drm_gpuva_op_remap_to_unmap_range()
1235 op->prev->va.addr + op->prev->va.range : in drm_gpuva_op_remap_to_unmap_range()
1236 op->unmap->va->va.addr; in drm_gpuva_op_remap_to_unmap_range()
1237 const u64 va_end = op->next ? in drm_gpuva_op_remap_to_unmap_range()
1238 op->next->va.addr : in drm_gpuva_op_remap_to_unmap_range()
1239 op->unmap->va->va.addr + op->unmap->va->va.range; in drm_gpuva_op_remap_to_unmap_range()
1244 *range = va_end - va_start; in drm_gpuva_op_remap_to_unmap_range()