/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_assert.h"
#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_printer;
struct drm_file;

struct ttm_buffer_object;

struct dma_fence;

struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
struct xe_svm_range;
struct drm_exec;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}

int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);

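/*
 * Reference counting and locking usage, a minimal sketch only: the helper
 * example_vm_op() and the error handling around it are illustrative, not
 * part of this header. It assumes xe_vm_lookup() returns a referenced vm
 * (or NULL) and that xe_vm_lock() may be taken interruptibly.
 *
 *	struct xe_vm *vm = xe_vm_lookup(xef, args->vm_id);
 *	int err;
 *
 *	if (!vm)
 *		return -ENOENT;
 *
 *	err = xe_vm_lock(vm, true);	// interruptible resv lock
 *	if (err)
 *		goto put;
 *
 *	err = example_vm_op(vm);	// hypothetical work under the lock
 *
 *	xe_vm_unlock(vm);
 * put:
 *	xe_vm_put(vm);			// drop the lookup reference
 *	return err;
 */
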
static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}

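/*
 * A minimal sketch of the expected calling pattern, assuming vm->lock is
 * the rw_semaphore declared in xe_vm_types.h; the surrounding error code
 * is illustrative only. The closed/banned state is stable only while
 * vm->lock is held, hence the lockdep assert above.
 *
 *	down_read(&vm->lock);
 *	if (xe_vm_is_closed_or_banned(vm)) {
 *		up_read(&vm->lock);
 *		return -ENOENT;
 *	}
 *	// ... operate on the vm while the state cannot change ...
 *	up_read(&vm->lock);
 */
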
struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

/**
 * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
 * @vm: The vm
 *
 * Return: whether the vm populates unmapped areas with scratch PTEs
 */
static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}

/**
 * gpuvm_to_vm() - Return the embedding xe_vm from a struct drm_gpuvm pointer
 * @gpuvm: The struct drm_gpuvm pointer
 *
 * Return: Pointer to the embedding struct xe_vm.
 */
static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct xe_vm, gpuvm);
}

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}

/**
 * DOC: Provide accessors for vma members to facilitate easy change of
 * implementation.
 */
static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}

static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}

static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_is_cpu_addr_mirror(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_SYSTEM_ALLOCATOR;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma) &&
		!xe_vma_is_cpu_addr_mirror(vma);
}

/**
 * to_userptr_vma() - Return a pointer to an embedding userptr vma
 * @vma: Pointer to the embedded struct xe_vma
 *
 * Return: Pointer to the embedding userptr vma
 */
static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
{
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	return container_of(vma, struct xe_userptr_vma, vma);
}

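/*
 * A minimal sketch of using the accessors above instead of reaching into
 * struct drm_gpuva directly; the address classification shown is
 * illustrative only.
 *
 *	if (addr >= xe_vma_start(vma) && addr < xe_vma_end(vma)) {
 *		if (xe_vma_is_userptr(vma)) {
 *			struct xe_userptr_vma *uvma = to_userptr_vma(vma);
 *			// userptr state in uvma, CPU address from xe_vma_userptr()
 *		} else if (!xe_vma_has_no_bo(vma)) {
 *			struct xe_bo *bo = xe_vma_bo(vma);
 *			// backing object, mapped at offset xe_vma_bo_offset(vma)
 *		}
 *	}
 */
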
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);

void xe_vm_close_and_put(struct xe_vm *vm);

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask);
struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
				     struct xe_vma *vma,
				     struct xe_svm_range *range,
				     u8 tile_mask);
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
				     struct xe_svm_range *range);

int xe_vm_invalidate_vma(struct xe_vma *vma);

int xe_vm_validate_protected(struct xe_vm *vm);

static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there was
 * nothing to execute, reactivate it and run the rebind worker. This function
 * should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}

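/*
 * A minimal sketch of the intended call site, assuming a submission path
 * that has just queued work on an exec queue bound to the vm; the helper
 * example_submit_batch() is illustrative and not part of this header.
 *
 *	err = example_submit_batch(q, batch_addr);
 *	if (!err)
 *		xe_vm_reactivate_rebind(vm);	// no-op unless rebind was
 *						// deactivated on a
 *						// preempt-fence-mode vm
 */
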
int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);

int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);

bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);

int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);

int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);

struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
				       struct xe_exec_queue *q, u64 addr,
				       enum xe_cache_level cache_lvl);

/**
 * xe_vm_resv() - Return the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

void xe_vm_kill(struct xe_vm *vm, bool unlocked);

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))

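/*
 * A minimal sketch relating xe_vm_resv(), xe_vm_lock() and
 * xe_vm_assert_held(); the plain dma_resv_lock() call shown is one of
 * several valid ways to take the reservation lock and is illustrative only.
 *
 *	dma_resv_lock(xe_vm_resv(vm), NULL);	// or xe_vm_lock(vm, false)
 *	xe_vm_assert_held(vm);			// lockdep-checked here
 *	// ... touch state protected by the vm's resv ...
 *	dma_resv_unlock(xe_vm_resv(vm));
 */
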
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif

struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);

/**
 * xe_vm_set_validating() - Register this task as currently making bos resident
 * @vm: Pointer to the vm or NULL.
 * @allow_res_evict: Allow eviction of buffer objects bound to @vm when
 * validating.
 *
 * Register this task as currently making bos resident for the vm. Intended
 * to avoid eviction by the same task of shared bos bound to the vm.
 * Call with the vm's resv lock held.
 *
 * Return: A pin cookie that should be used for xe_vm_clear_validating().
 */
static inline struct pin_cookie xe_vm_set_validating(struct xe_vm *vm,
						     bool allow_res_evict)
{
	struct pin_cookie cookie = {};

	if (vm && !allow_res_evict) {
		xe_vm_assert_held(vm);
		cookie = lockdep_pin_lock(&xe_vm_resv(vm)->lock.base);
		/* Pairs with READ_ONCE in xe_vm_is_validating() */
		WRITE_ONCE(vm->validating, current);
	}

	return cookie;
}

/**
 * xe_vm_clear_validating() - Unregister this task as currently making bos resident
 * @vm: Pointer to the vm or NULL
 * @allow_res_evict: Eviction from @vm was allowed. Must be set to the same
 * value as for xe_vm_set_validating().
 * @cookie: Cookie obtained from xe_vm_set_validating().
 *
 * Unregister this task as currently making bos resident for the vm. Intended
 * to avoid eviction by the same task of shared bos bound to the vm.
 * Call with the vm's resv lock held.
 */
static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict,
					  struct pin_cookie cookie)
{
	if (vm && !allow_res_evict) {
		lockdep_unpin_lock(&xe_vm_resv(vm)->lock.base, cookie);
		/* Pairs with READ_ONCE in xe_vm_is_validating() */
		WRITE_ONCE(vm->validating, NULL);
	}
}

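/*
 * A minimal sketch of the set/clear pairing around a validation section;
 * the helper example_validate_bos() is illustrative only. The cookie
 * returned by xe_vm_set_validating() must be passed back unchanged, and
 * allow_res_evict must have the same value in both calls.
 *
 *	struct pin_cookie cookie;
 *	int err;
 *
 *	cookie = xe_vm_set_validating(vm, allow_res_evict);
 *	err = example_validate_bos(vm);		// makes bos resident
 *	xe_vm_clear_validating(vm, allow_res_evict, cookie);
 */
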
/**
 * xe_vm_is_validating() - Whether bos bound to the vm are currently being made resident
 * by the current task.
 * @vm: Pointer to the vm.
 *
 * If this function returns %true, we should be in a vm resv locked region, since
 * the current task is the same task that called xe_vm_set_validating(). The
 * function asserts that this is indeed the case.
 *
 * Return: %true if the task is currently making bos resident, %false otherwise.
 */
static inline bool xe_vm_is_validating(struct xe_vm *vm)
{
	/* Pairs with WRITE_ONCE in xe_vm_set_validating() and xe_vm_clear_validating() */
	if (READ_ONCE(vm->validating) == current) {
		xe_vm_assert_held(vm);
		return true;
	}
	return false;
}

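/*
 * A minimal sketch of the intended use in an eviction decision, assuming a
 * hypothetical callback that may see shared bos bound to the vm while the
 * same task is in the middle of validating them:
 *
 *	if (xe_vm_is_validating(vm))
 *		return false;	// don't evict bos the current task is
 *				// currently making resident
 */
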
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
#else
static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
{
}
#endif
#endif