/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef __I915_VMA_RESOURCE_H__
#define __I915_VMA_RESOURCE_H__

#include <linux/dma-fence.h>
#include <linux/refcount.h>

#include "i915_gem.h"
#include "i915_scatterlist.h"
#include "i915_sw_fence.h"
#include "intel_runtime_pm.h"

struct intel_memory_region;

struct i915_page_sizes {
	/**
	 * The sg mask of the pages sg_table, i.e. the mask of
	 * the lengths for each sg entry.
	 */
	unsigned int phys;

	/**
	 * The gtt page sizes we are allowed to use given the
	 * sg mask and the supported page sizes. This will
	 * express the smallest unit we can use for the whole
	 * object, as well as the larger sizes we may be able
	 * to use opportunistically.
	 */
	unsigned int sg;
};
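
/*
 * Illustrative example (values hypothetical, not from this header): for
 * an object backed by one 64K and one 4K sg entry,
 *
 *	page_sizes.phys == SZ_64K | SZ_4K;
 *
 * and, given a vm that supports 4K and 64K GTT pages, page_sizes.sg
 * would then contain SZ_4K as the smallest unit usable for the whole
 * object, plus SZ_64K for ranges that can use it opportunistically.
 */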

/**
 * struct i915_vma_bindinfo - Information needed for async bind only,
 * which can be dropped after the bind has taken place.
 * Consider making this a separate argument to the bind_vma
 * op, coalescing with other arguments like vm, stash, cache_level
 * and flags.
 * @pages: The pages sg-table.
 * @page_sizes: Page sizes of the pages.
 * @pages_rsgt: Refcounted sg-table when delayed object destruction
 * is supported. May be NULL.
 * @readonly: Whether the vma should be bound read-only.
 * @lmem: Whether the vma points to lmem.
 */
struct i915_vma_bindinfo {
	struct sg_table *pages;
	struct i915_page_sizes page_sizes;
	struct i915_refct_sgt *pages_rsgt;
	bool readonly:1;
	bool lmem:1;
};

/**
 * struct i915_vma_resource - Snapshotted unbind information.
 * @unbind_fence: Fence to mark unbinding complete. Note that this fence
 * is not considered published until unbind is scheduled, and as such it
 * is illegal to access this fence before scheduled unbind other than
 * for refcounting.
 * @lock: The @unbind_fence lock.
 * @hold_count: Number of holders blocking the fence from finishing.
 * The vma itself is keeping a hold, which is released when unbind
 * is scheduled.
 * @work: Work struct for deferred unbind work.
 * @chain: Struct i915_sw_fence used to await dependencies.
 * @rb: Rb node for the vm's pending unbind interval tree.
 * @__subtree_last: Interval tree private member.
 * @wakeref: Wakeref taken when the unbind is scheduled, if
 * @needs_wakeref is set.
 * @vm: Non-refcounted pointer to the vm. This is for internal use only and
 * this member is cleared after vm_resource unbind.
 * @mr: The memory region of the object pointed to by the vma.
 * @ops: Pointer to the backend i915_vma_ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start. Note that
 * this is after any padding that might have been allocated.
 * @node_size: Size of the allocated range manager node with padding
 * subtracted.
 * @vma_size: Bind size.
 * @guard: The size of the guard area preceding and trailing the bind.
 * @page_sizes_gtt: Resulting page sizes from the bind operation.
 * @bound_flags: Flags indicating binding status.
 * @allocated: Backend private data. TODO: Should move into @private.
 * @immediate_unbind: Unbind can be done immediately and doesn't need to be
 * deferred to a work item awaiting unsignaled fences. This is a hack.
 * (dma_fence_work uses a fence flag for this, but this seems slightly
 * cleaner).
 * @needs_wakeref: Whether a wakeref is needed during unbind. Since we can't
 * take a wakeref in the dma-fence signalling critical path, it needs to be
 * taken when the unbind is scheduled.
 * @skip_pte_rewrite: During ggtt suspend and vm takedown, pte rewriting
 * needs to be skipped for unbind.
 * @tlb: Pointer to obj->mm.tlb if the unbind is async; otherwise NULL.
 *
 * The lifetime of a struct i915_vma_resource extends from a binding
 * request until the possibly asynchronous unbind has completed.
 */
struct i915_vma_resource {
	struct dma_fence unbind_fence;
	/* See above for description of the lock. */
	spinlock_t lock;
	refcount_t hold_count;
	struct work_struct work;
	struct i915_sw_fence chain;
	struct rb_node rb;
	u64 __subtree_last;
	struct i915_address_space *vm;
	intel_wakeref_t wakeref;

	/**
	 * @bi: Information needed for async bind only but that can be dropped
	 * after the bind has taken place.
	 *
	 * Consider making this a separate argument to the bind_vma op,
	 * coalescing with other arguments like vm, stash, cache_level and flags
	 */
	struct i915_vma_bindinfo bi;

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	struct intel_memory_region *mr;
#endif
	const struct i915_vma_ops *ops;
	void *private;
	u64 start;
	u64 node_size;
	u64 vma_size;
	u32 guard;
	u32 page_sizes_gtt;

	u32 bound_flags;
	bool allocated:1;
	bool immediate_unbind:1;
	bool needs_wakeref:1;
	bool skip_pte_rewrite:1;

	u32 *tlb;
};

bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
			    bool *lockdep_cookie);

void i915_vma_resource_unhold(struct i915_vma_resource *vma_res,
			      bool lockdep_cookie);
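
/*
 * Usage sketch (illustrative, not part of the original header): take a
 * hold to keep @unbind_fence from signalling while touching state that
 * unbind would tear down.
 *
 *	bool lockdep_cookie;
 *
 *	if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
 *		... accesses that must not race with unbind completion ...
 *		i915_vma_resource_unhold(vma_res, lockdep_cookie);
 *	}
 */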

struct i915_vma_resource *i915_vma_resource_alloc(void);

void i915_vma_resource_free(struct i915_vma_resource *vma_res);

struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
					   u32 *tlb);
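
/*
 * Usage sketch (illustrative): schedule an unbind and wait for it to
 * complete. The returned fence carries a reference that the caller is
 * expected to put; passing NULL for @tlb requests no TLB invalidation
 * tracking.
 *
 *	struct dma_fence *fence;
 *
 *	fence = i915_vma_resource_unbind(vma_res, NULL);
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */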

void __i915_vma_resource_init(struct i915_vma_resource *vma_res);

/**
 * i915_vma_resource_get - Take a reference on a vma resource
 * @vma_res: The vma resource on which to take a reference.
 *
 * Return: The @vma_res pointer
 */
static inline struct i915_vma_resource
*i915_vma_resource_get(struct i915_vma_resource *vma_res)
{
	dma_fence_get(&vma_res->unbind_fence);
	return vma_res;
}

/**
 * i915_vma_resource_put - Release a reference to a struct i915_vma_resource
 * @vma_res: The resource
 */
static inline void i915_vma_resource_put(struct i915_vma_resource *vma_res)
{
	dma_fence_put(&vma_res->unbind_fence);
}
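
/*
 * Illustrative sketch: get/put pin and release the embedded
 * @unbind_fence, which is what refcounts the resource, so a resource
 * can be kept alive across asynchronous work.
 *
 *	struct i915_vma_resource *keep = i915_vma_resource_get(vma_res);
 *
 *	... hand @keep over to the async work ...
 *	i915_vma_resource_put(keep);
 */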

/**
 * i915_vma_resource_init - Initialize a vma resource.
 * @vma_res: The vma resource to initialize
 * @vm: Pointer to the vm.
 * @pages: The pages sg-table.
 * @page_sizes: Page sizes of the pages.
 * @pages_rsgt: Pointer to a struct i915_refct_sgt of an object with
 * delayed destruction.
 * @readonly: Whether the vma should be bound read-only.
 * @lmem: Whether the vma points to lmem.
 * @mr: The memory region of the object the vma points to.
 * @ops: The backend ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start after padding.
 * @node_size: Size of the allocated range manager node minus padding.
 * @size: Bind size.
 * @guard: The size of the guard area preceding and trailing the bind.
 *
 * Initializes a vma resource allocated using i915_vma_resource_alloc().
 * The reason for having separate allocate and initialize functions is that
 * initialization may need to be performed from under a lock where
 * allocation is not allowed.
 */
static inline void i915_vma_resource_init(struct i915_vma_resource *vma_res,
					  struct i915_address_space *vm,
					  struct sg_table *pages,
					  const struct i915_page_sizes *page_sizes,
					  struct i915_refct_sgt *pages_rsgt,
					  bool readonly,
					  bool lmem,
					  struct intel_memory_region *mr,
					  const struct i915_vma_ops *ops,
					  void *private,
					  u64 start,
					  u64 node_size,
					  u64 size,
					  u32 guard)
{
	__i915_vma_resource_init(vma_res);
	vma_res->vm = vm;
	vma_res->bi.pages = pages;
	vma_res->bi.page_sizes = *page_sizes;
	if (pages_rsgt)
		vma_res->bi.pages_rsgt = i915_refct_sgt_get(pages_rsgt);
	vma_res->bi.readonly = readonly;
	vma_res->bi.lmem = lmem;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	vma_res->mr = mr;
#endif
	vma_res->ops = ops;
	vma_res->private = private;
	vma_res->start = start;
	vma_res->node_size = node_size;
	vma_res->vma_size = size;
	vma_res->guard = guard;
}
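
/*
 * Allocation/initialization split sketch (illustrative; most arguments
 * and error handling elided, names hypothetical): allocate outside the
 * lock, then initialize under it.
 *
 *	vma_res = i915_vma_resource_alloc();
 *	if (IS_ERR(vma_res))
 *		return PTR_ERR(vma_res);
 *
 *	... take the lock under which allocation is not allowed ...
 *	i915_vma_resource_init(vma_res, vm, pages, &page_sizes, NULL,
 *			       false, false, mr, ops, NULL,
 *			       start, node_size, size, guard);
 */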

/**
 * i915_vma_resource_fini - Finalize a vma resource before freeing.
 * @vma_res: The vma resource to finalize.
 */
static inline void i915_vma_resource_fini(struct i915_vma_resource *vma_res)
{
	GEM_BUG_ON(refcount_read(&vma_res->hold_count) != 1);
	if (vma_res->bi.pages_rsgt)
		i915_refct_sgt_put(vma_res->bi.pages_rsgt);
	i915_sw_fence_fini(&vma_res->chain);
}

int i915_vma_resource_bind_dep_sync(struct i915_address_space *vm,
				    u64 first,
				    u64 last,
				    bool intr);

int i915_vma_resource_bind_dep_await(struct i915_address_space *vm,
				     struct i915_sw_fence *sw_fence,
				     u64 first,
				     u64 last,
				     bool intr,
				     gfp_t gfp);
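
/*
 * Dependency sketch (illustrative, names hypothetical): before binding
 * into the range [first, last], either wait synchronously for pending
 * unbinds in that range:
 *
 *	err = i915_vma_resource_bind_dep_sync(vm, first, last, true);
 *
 * or, for an asynchronous bind, have the bind's sw fence await them:
 *
 *	err = i915_vma_resource_bind_dep_await(vm, sw_fence, first, last,
 *					       true, GFP_NOWAIT);
 */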

void i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm);

void i915_vma_resource_module_exit(void);

int i915_vma_resource_module_init(void);
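
/*
 * Bring-up sketch (illustrative; in practice the driver wires these into
 * its module init/exit sequence rather than calling them ad hoc):
 *
 *	err = i915_vma_resource_module_init();
 *	if (err)
 *		return err;
 *	...
 *	i915_vma_resource_module_exit();
 */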

#endif