xref: /linux/drivers/gpu/drm/xe/xe_vm.h (revision 6f17ab9a63e670bd62a287f95e3982f99eafd77e)
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_assert.h"
#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_printer;
struct drm_file;

struct ttm_buffer_object;

struct dma_fence;

struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
struct xe_svm_range;
struct drm_exec;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}
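
/*
 * Illustrative sketch (not part of the API): xe_vm_get()/xe_vm_put() follow
 * the usual reference-counting pattern on the embedded drm_gpuvm. A caller
 * that stashes a vm pointer beyond the scope of an existing reference would
 * do something like:
 *
 *	foo->vm = xe_vm_get(vm);
 *	...
 *	xe_vm_put(foo->vm);
 *
 * "foo" above is a hypothetical structure used only for illustration.
 */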

int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);
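
/*
 * Illustrative locking sketch: xe_vm_lock() takes the vm's reservation
 * object, optionally interruptibly, and returns a negative errno (e.g.
 * -EINTR, assuming an interruptible wait was requested and interrupted):
 *
 *	int err = xe_vm_lock(vm, true);
 *
 *	if (err)
 *		return err;
 *	... operate on the vm while its resv is held ...
 *	xe_vm_unlock(vm);
 */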

static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}

struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

bool xe_vma_has_default_mem_attrs(struct xe_vma *vma);

/**
 * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
 * @vm: The vm
 *
 * Return: whether the vm populates unmapped areas with scratch PTEs
 */
static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}

/**
 * gpuvm_to_vm() - Return the embedding xe_vm from a struct drm_gpuvm pointer
 * @gpuvm: The struct drm_gpuvm pointer
 *
 * Return: Pointer to the embedding struct xe_vm.
 */
static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct xe_vm, gpuvm);
}

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}

/**
 * DOC: Provide accessors for vma members to facilitate easy change of
 * implementation.
 */
static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}

static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}

static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_is_cpu_addr_mirror(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_SYSTEM_ALLOCATOR;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma) &&
		!xe_vma_is_cpu_addr_mirror(vma);
}
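
/*
 * Classification sketch (illustrative, derived from the helpers above): a
 * VMA is exactly one of a CPU-address-mirror VMA, a NULL/sparse VMA, a
 * userptr VMA, or a BO-backed VMA.
 *
 *	if (xe_vma_is_cpu_addr_mirror(vma))
 *		... CPU address mirror, no backing object ...
 *	else if (xe_vma_is_null(vma))
 *		... sparse/NULL binding ...
 *	else if (xe_vma_is_userptr(vma))
 *		... userptr-backed, see to_userptr_vma() ...
 *	else
 *		... BO-backed, see xe_vma_bo() ...
 */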

struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);

int xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma, bool is_atomic);

int xe_vm_alloc_madvise_vma(struct xe_vm *vm, u64 addr, u64 size);

int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, u64 addr, u64 size);

/**
 * to_userptr_vma() - Return a pointer to an embedding userptr vma
 * @vma: Pointer to the embedded struct xe_vma
 *
 * Return: Pointer to the embedding userptr vma
 */
static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
{
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	return container_of(vma, struct xe_userptr_vma, vma);
}

u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);
int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
void xe_vm_close_and_put(struct xe_vm *vm);

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}
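
/*
 * Note (assumption based on the VM create ioctl requirements): a fault-mode
 * VM is also created in LR mode, so an LR VM is either in fault mode or in
 * preempt-fence mode, while a non-LR (dma-fence) VM is in neither.
 */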

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask);
struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
				     struct xe_vma *vma,
				     struct xe_svm_range *range,
				     u8 tile_mask);
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
				     struct xe_svm_range *range);

int xe_vm_range_tilemask_tlb_inval(struct xe_vm *vm, u64 start,
				   u64 end, u8 tile_mask);

int xe_vm_invalidate_vma(struct xe_vma *vma);

int xe_vm_validate_protected(struct xe_vm *vm);

static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there was
 * nothing to execute, reactivate it and run the rebind worker.
 * This function should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}
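
/*
 * Illustrative call-site sketch: after submitting work to a compute
 * (preempt-fence mode) vm, a caller would do something like the following
 * (submit_job() is a hypothetical helper used only for illustration):
 *
 *	submit_job(q, job);
 *	xe_vm_reactivate_rebind(vm);
 */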

int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);

int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);

bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);

int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);

int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);

struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
				       struct xe_exec_queue *q, u64 addr,
				       enum xe_cache_level cache_lvl);

void xe_vm_resume_rebind_worker(struct xe_vm *vm);

/**
 * xe_vm_resv() - Return the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

void xe_vm_kill(struct xe_vm *vm, bool unlocked);

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif
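
/*
 * Illustrative use of vm_dbg(), which follows the drm_dbg() calling
 * convention and is only emitted when CONFIG_DRM_XE_DEBUG_VM is enabled:
 *
 *	vm_dbg(&vm->xe->drm, "BIND: addr=0x%016llx, range=0x%016llx",
 *	       xe_vma_start(vma), xe_vma_size(vma));
 */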

struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);

/**
 * xe_vm_set_validating() - Register this task as currently making bos resident
 * @vm: Pointer to the vm or NULL.
 * @allow_res_evict: Allow eviction of buffer objects bound to @vm when
 * validating.
 *
 * Register this task as currently making bos resident for the vm. Intended
 * to avoid eviction by the same task of shared bos bound to the vm.
 * Call with the vm's resv lock held.
 */
static inline void xe_vm_set_validating(struct xe_vm *vm, bool allow_res_evict)
{
	if (vm && !allow_res_evict) {
		xe_vm_assert_held(vm);
		/* Pairs with READ_ONCE in xe_vm_is_validating() */
		WRITE_ONCE(vm->validating, current);
	}
}

/**
 * xe_vm_clear_validating() - Unregister this task as currently making bos resident
 * @vm: Pointer to the vm or NULL
 * @allow_res_evict: Eviction from @vm was allowed. Must be set to the same
 * value as for xe_vm_set_validating().
 *
 * Unregister this task as currently making bos resident for the vm. Must be
 * paired with a previous call to xe_vm_set_validating().
 * Call with the vm's resv lock held.
 */
static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict)
{
	if (vm && !allow_res_evict) {
		/* Pairs with READ_ONCE in xe_vm_is_validating() */
		WRITE_ONCE(vm->validating, NULL);
	}
}

/**
 * xe_vm_is_validating() - Whether bos bound to the vm are currently being made resident
 * by the current task.
 * @vm: Pointer to the vm.
 *
 * If this function returns %true, we should be in a vm resv locked region, since
 * the current process is the same task that called xe_vm_set_validating().
 * The function asserts that that's indeed the case.
 *
 * Return: %true if the task is currently making bos resident, %false otherwise.
 */
static inline bool xe_vm_is_validating(struct xe_vm *vm)
{
	/* Pairs with WRITE_ONCE in xe_vm_set_validating() */
	if (READ_ONCE(vm->validating) == current) {
		xe_vm_assert_held(vm);
		return true;
	}
	return false;
}
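
/*
 * Illustrative pairing sketch for the validation helpers above (error
 * handling elided, "allow_res_evict" as chosen by the caller):
 *
 *	xe_vm_lock(vm, false);
 *	xe_vm_set_validating(vm, allow_res_evict);
 *	... validate / make bos resident ...
 *	xe_vm_clear_validating(vm, allow_res_evict);
 *	xe_vm_unlock(vm);
 *
 * An eviction path may then call xe_vm_is_validating() to opportunistically
 * skip evicting shared bos bound to the vm while the current task is the one
 * performing validation.
 */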

/**
 * xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has
 * a valid GPU mapping
 * @tile: The tile which the GPU mapping belongs to
 * @tile_present: Tile present mask
 * @tile_invalidated: Tile invalidated mask
 *
 * The READ_ONCEs pair with WRITE_ONCEs in either the TLB invalidation paths
 * (xe_vm.c, xe_svm.c) or the binding paths (xe_pt.c). These are not reliable
 * without the notifier lock in userptr or SVM cases, and not reliable without
 * the BO dma-resv lock in the BO case. As such, they should only be used in
 * opportunistic cases (e.g., skipping a page fault fix or not skipping a TLB
 * invalidation) where it is harmless.
 *
 * Return: True if there are valid GPU pages, False otherwise
 */
#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated)	\
	((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
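
/*
 * Illustrative, opportunistic use in a fault path (assumes the VMA's
 * tile_present/tile_invalidated masks as written by the bind and
 * invalidation paths):
 *
 *	if (xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
 *					vma->tile_invalidated))
 *		return 0;	// mapping already valid, nothing to fix
 */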

#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
#else
static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
{
}
#endif
#endif