/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef __I915_VMA_RESOURCE_H__
#define __I915_VMA_RESOURCE_H__

#include <linux/dma-fence.h>
#include <linux/refcount.h>

#include "i915_gem.h"
#include "i915_scatterlist.h"
#include "i915_sw_fence.h"
#include "intel_runtime_pm.h"

struct intel_memory_region;

struct i915_page_sizes {
	/**
	 * The sg mask of the pages sg_table. i.e. the mask of
	 * the lengths for each sg entry.
	 */
	unsigned int phys;

	/**
	 * The gtt page sizes we are allowed to use given the
	 * sg mask and the supported page sizes. This will
	 * express the smallest unit we can use for the whole
	 * object, as well as the larger sizes we may be able
	 * to use opportunistically.
	 */
	unsigned int sg;
};

/**
 * struct i915_vma_resource - Snapshotted unbind information.
 * @unbind_fence: Fence to mark unbinding complete. Note that this fence
 * is not considered published until unbind is scheduled, and as such it
 * is illegal to access this fence before scheduled unbind other than
 * for refcounting.
 * @lock: The @unbind_fence lock.
 * @hold_count: Number of holders blocking the fence from finishing.
 * The vma itself is keeping a hold, which is released when unbind
 * is scheduled.
 * @work: Work struct for deferred unbind work.
 * @chain: Pointer to struct i915_sw_fence used to await dependencies.
 * @rb: Rb node for the vm's pending unbind interval tree.
 * @__subtree_last: Interval tree private member.
 * @vm: non-refcounted pointer to the vm. This is for internal use only and
 * this member is cleared after vm_resource unbind.
 * @wakeref: Runtime-pm wakeref held while the unbind is in progress.
 * @mr: The memory region of the object pointed to by the vma.
 * @ops: Pointer to the backend i915_vma_ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start.
 * @node_size: Size of the allocated range manager node.
 * @vma_size: Bind size.
 * @page_sizes_gtt: Resulting page sizes from the bind operation.
 * @bound_flags: Flags indicating binding status.
 * @allocated: Backend private data. TODO: Should move into @private.
 * @immediate_unbind: Unbind can be done immediately and doesn't need to be
 * deferred to a work item awaiting unsignaled fences. This is a hack.
 * (dma_fence_work uses a fence flag for this, but this seems slightly
 * cleaner).
 * @needs_wakeref: Whether a runtime-pm wakeref is needed during unbind.
 * Since it can't be obtained in the fence signalling critical path, it is
 * taken when the unbind is scheduled.
 *
 * The lifetime of a struct i915_vma_resource extends from the binding
 * request until the possibly asynchronous unbind has completed.
 */
struct i915_vma_resource {
	struct dma_fence unbind_fence;
	/* See above for description of the lock. */
	spinlock_t lock;
	refcount_t hold_count;
	struct work_struct work;
	struct i915_sw_fence chain;
	struct rb_node rb;
	u64 __subtree_last;
	struct i915_address_space *vm;
	intel_wakeref_t wakeref;

	/**
	 * struct i915_vma_bindinfo - Information needed for async bind
	 * only but that can be dropped after the bind has taken place.
	 * Consider making this a separate argument to the bind_vma
	 * op, coalescing with other arguments like vm, stash, cache_level
	 * and flags.
	 * @pages: The pages sg-table.
	 * @page_sizes: Page sizes of the pages.
	 * @pages_rsgt: Refcounted sg-table when delayed object destruction
	 * is supported. May be NULL.
	 * @readonly: Whether the vma should be bound read-only.
	 * @lmem: Whether the vma points to lmem.
	 */
	struct i915_vma_bindinfo {
		struct sg_table *pages;
		struct i915_page_sizes page_sizes;
		struct i915_refct_sgt *pages_rsgt;
		bool readonly:1;
		bool lmem:1;
	} bi;

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	struct intel_memory_region *mr;
#endif
	const struct i915_vma_ops *ops;
	void *private;
	u64 start;
	u64 node_size;
	u64 vma_size;
	u32 page_sizes_gtt;

	u32 bound_flags;
	bool allocated:1;
	bool immediate_unbind:1;
	bool needs_wakeref:1;
};

bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
			    bool *lockdep_cookie);

void i915_vma_resource_unhold(struct i915_vma_resource *vma_res,
			      bool lockdep_cookie);

struct i915_vma_resource *i915_vma_resource_alloc(void);

void i915_vma_resource_free(struct i915_vma_resource *vma_res);

struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res);

void __i915_vma_resource_init(struct i915_vma_resource *vma_res);
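/*
 * Example (illustrative sketch only, not part of this header): the
 * hold/unhold pair above pins @unbind_fence so it cannot signal while the
 * bind information is inspected, e.g. for error capture. While the hold is
 * kept, the unbind is blocked from completing. Everything other than the
 * i915_vma_resource_hold()/_unhold() calls and the lockdep_cookie handshake
 * is hypothetical:
 *
 *	bool lockdep_cookie;
 *
 *	if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
 *		inspect_bind_info(vma_res->start, vma_res->node_size);
 *		i915_vma_resource_unhold(vma_res, lockdep_cookie);
 *	}
 */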
/**
 * i915_vma_resource_get - Take a reference on a vma resource
 * @vma_res: The vma resource on which to take a reference.
 *
 * Return: The @vma_res pointer
 */
static inline struct i915_vma_resource
*i915_vma_resource_get(struct i915_vma_resource *vma_res)
{
	dma_fence_get(&vma_res->unbind_fence);
	return vma_res;
}

/**
 * i915_vma_resource_put - Release a reference to a struct i915_vma_resource
 * @vma_res: The resource
 */
static inline void i915_vma_resource_put(struct i915_vma_resource *vma_res)
{
	dma_fence_put(&vma_res->unbind_fence);
}

/**
 * i915_vma_resource_init - Initialize a vma resource.
 * @vma_res: The vma resource to initialize
 * @vm: Pointer to the vm.
 * @pages: The pages sg-table.
 * @page_sizes: Page sizes of the pages.
 * @pages_rsgt: Pointer to a struct i915_refct_sgt of an object with
 * delayed destruction.
 * @readonly: Whether the vma should be bound read-only.
 * @lmem: Whether the vma points to lmem.
 * @mr: The memory region of the object the vma points to.
 * @ops: The backend ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start.
 * @node_size: Size of the allocated range manager node.
 * @size: Bind size.
 *
 * Initializes a vma resource allocated using i915_vma_resource_alloc().
 * The reason for having separate allocate and initialize functions is that
 * initialization may need to be performed from under a lock where
 * allocation is not allowed.
 */
static inline void i915_vma_resource_init(struct i915_vma_resource *vma_res,
					  struct i915_address_space *vm,
					  struct sg_table *pages,
					  const struct i915_page_sizes *page_sizes,
					  struct i915_refct_sgt *pages_rsgt,
					  bool readonly,
					  bool lmem,
					  struct intel_memory_region *mr,
					  const struct i915_vma_ops *ops,
					  void *private,
					  u64 start,
					  u64 node_size,
					  u64 size)
{
	__i915_vma_resource_init(vma_res);
	vma_res->vm = vm;
	vma_res->bi.pages = pages;
	vma_res->bi.page_sizes = *page_sizes;
	if (pages_rsgt)
		vma_res->bi.pages_rsgt = i915_refct_sgt_get(pages_rsgt);
	vma_res->bi.readonly = readonly;
	vma_res->bi.lmem = lmem;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	vma_res->mr = mr;
#endif
	vma_res->ops = ops;
	vma_res->private = private;
	vma_res->start = start;
	vma_res->node_size = node_size;
	vma_res->vma_size = size;
}
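/*
 * Example (illustrative sketch only, not part of this header): the
 * allocate/initialize split above exists because initialization may have
 * to happen under a lock where allocation is not allowed, so a caller
 * would typically allocate first and initialize later. The lock, the
 * surrounding variables and the argument values are hypothetical; the
 * error check assumes an ERR_PTR-style return from
 * i915_vma_resource_alloc(), as in the i915 callers:
 *
 *	vma_res = i915_vma_resource_alloc();
 *	if (IS_ERR(vma_res))
 *		return PTR_ERR(vma_res);
 *
 *	mutex_lock(&vm->mutex);
 *	i915_vma_resource_init(vma_res, vm, pages, &page_sizes, rsgt,
 *			       readonly, lmem, mr, ops, private,
 *			       node.start, node.size, size);
 *	mutex_unlock(&vm->mutex);
 */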
static inline void i915_vma_resource_fini(struct i915_vma_resource *vma_res)
{
	GEM_BUG_ON(refcount_read(&vma_res->hold_count) != 1);
	if (vma_res->bi.pages_rsgt)
		i915_refct_sgt_put(vma_res->bi.pages_rsgt);
	i915_sw_fence_fini(&vma_res->chain);
}

int i915_vma_resource_bind_dep_sync(struct i915_address_space *vm,
				    u64 first,
				    u64 last,
				    bool intr);

int i915_vma_resource_bind_dep_await(struct i915_address_space *vm,
				     struct i915_sw_fence *sw_fence,
				     u64 first,
				     u64 last,
				     bool intr,
				     gfp_t gfp);

void i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm);

void i915_vma_resource_module_exit(void);

int i915_vma_resource_module_init(void);

#endif