/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_assert.h"
#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_printer;
struct drm_file;

struct ttm_buffer_object;

struct dma_fence;

struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
struct xe_svm_range;
struct drm_exec;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}
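
/*
 * Example (illustrative sketch, not taken from this file): typical reference
 * handling around a lookup. This assumes xe_vm_lookup() returns a referenced
 * vm or NULL, with the reference dropped via xe_vm_put() when done.
 *
 *	struct xe_vm *vm = xe_vm_lookup(xef, args->vm_id);
 *
 *	if (!vm)
 *		return -ENOENT;
 *	... use vm ...
 *	xe_vm_put(vm);
 */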

int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);
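
/*
 * Example (sketch): xe_vm_lock() returns an int, so an interruptible lock
 * attempt must check for failure; xe_vm_unlock() pairs with a successful
 * lock.
 *
 *	int err = xe_vm_lock(vm, true);
 *
 *	if (err)
 *		return err;
 *	... operate on the vm with its resv held ...
 *	xe_vm_unlock(vm);
 */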

static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}
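
/*
 * Example (sketch, assuming vm->lock is the rw_semaphore declared in
 * xe_vm_types.h): the combined check is only stable while vm->lock is held,
 * as enforced by the lockdep assert above.
 *
 *	down_read(&vm->lock);
 *	if (xe_vm_is_closed_or_banned(vm)) {
 *		up_read(&vm->lock);
 *		return -ENOENT;
 *	}
 *	...
 *	up_read(&vm->lock);
 */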

struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

bool xe_vma_has_default_mem_attrs(struct xe_vma *vma);

void xe_vm_find_cpu_addr_mirror_vma_range(struct xe_vm *vm,
					  u64 *start,
					  u64 *end);
/**
 * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
 * @vm: The vm
 *
 * Return: whether the vm populates unmapped areas with scratch PTEs
 */
static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}

/**
 * gpuvm_to_vm() - Return the embedding xe_vm from a struct drm_gpuvm pointer
 * @gpuvm: The struct drm_gpuvm pointer
 *
 * Return: Pointer to the embedding struct xe_vm.
 */
static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct xe_vm, gpuvm);
}

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}

/**
 * DOC: Provide accessors for vma members to facilitate easy change of
 * implementation.
 */
static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}

static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}

static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_is_cpu_addr_mirror(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_SYSTEM_ALLOCATOR;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma) &&
		!xe_vma_is_cpu_addr_mirror(vma);
}
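
/*
 * Example (sketch): a vma has exactly one backing type, which the helpers
 * above distinguish; the userptr case is what remains when there is no bo,
 * no null/sparse binding and no CPU address mirror.
 *
 *	if (xe_vma_bo(vma))
 *		... bo-backed mapping ...
 *	else if (xe_vma_is_null(vma))
 *		... sparse/NULL binding, no backing store ...
 *	else if (xe_vma_is_cpu_addr_mirror(vma))
 *		... system-allocator (SVM) mirror ...
 *	else if (xe_vma_is_userptr(vma))
 *		... userptr-backed mapping ...
 */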

struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);

int xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma, bool is_atomic);

int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);

int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);

/**
 * to_userptr_vma() - Return a pointer to an embedding userptr vma
 * @vma: Pointer to the embedded struct xe_vma
 *
 * Return: Pointer to the embedding userptr vma
 */
static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
{
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	return container_of(vma, struct xe_userptr_vma, vma);
}
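
/*
 * Example (sketch): the downcast asserts xe_vma_is_userptr(), so callers
 * should check the vma type first.
 *
 *	if (xe_vma_is_userptr(vma)) {
 *		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
 *
 *		... userptr-specific handling of uvma ...
 *	}
 */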

u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);
int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
void xe_vm_close_and_put(struct xe_vm *vm);

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}

static inline bool xe_vm_allow_vm_eviction(struct xe_vm *vm)
{
	return !xe_vm_in_lr_mode(vm) ||
		(xe_vm_in_fault_mode(vm) &&
		 !(vm->flags & XE_VM_FLAG_NO_VM_OVERCOMMIT));
}

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask);
struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
				     struct xe_vma *vma,
				     struct xe_svm_range *range,
				     u8 tile_mask);
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
				     struct xe_svm_range *range);

int xe_vm_range_tilemask_tlb_inval(struct xe_vm *vm, u64 start,
				   u64 end, u8 tile_mask);

int xe_vm_invalidate_vma(struct xe_vma *vma);

int xe_vm_validate_protected(struct xe_vm *vm);

static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there was
 * nothing to execute, reactivate it and run the rebind worker.
 * This function should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}
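
/*
 * Example (sketch, hypothetical call site): after submitting work to a
 * compute (preempt-fence mode) vm, kick the deactivated rebind worker back
 * to life.
 *
 *	submit_batch(q);	// hypothetical submission helper
 *	xe_vm_reactivate_rebind(vm);
 */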

int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);

int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);

struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
				       struct xe_exec_queue *q, u64 addr,
				       enum xe_cache_level cache_lvl);

void xe_vm_resume_rebind_worker(struct xe_vm *vm);

/**
 * xe_vm_resv() - Return the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}
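
/*
 * Example (sketch): taking the vm's resv directly; xe_vm_lock() and
 * xe_vm_unlock() above wrap the same lock and are usually preferred.
 *
 *	int err = dma_resv_lock(xe_vm_resv(vm), NULL);
 *
 *	if (err)
 *		return err;
 *	...
 *	dma_resv_unlock(xe_vm_resv(vm));
 */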

void xe_vm_kill(struct xe_vm *vm, bool unlocked);

void xe_vm_add_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))

int xe_vm_drm_exec_lock(struct xe_vm *vm, struct drm_exec *exec);

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif
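
/*
 * Example (sketch): vm_dbg() takes a struct drm_device pointer and a printf
 * format, and compiles to a no-op unless CONFIG_DRM_XE_DEBUG_VM is enabled.
 *
 *	vm_dbg(&vm->xe->drm, "bind: addr=%#llx size=%#llx",
 *	       xe_vma_start(vma), xe_vma_size(vma));
 */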

struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
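
/*
 * Example (sketch, assuming the usual capture -> delayed capture -> print ->
 * free lifecycle used by coredump-style consumers):
 *
 *	struct xe_vm_snapshot *snap = xe_vm_snapshot_capture(vm);
 *
 *	xe_vm_snapshot_capture_delayed(snap);
 *	xe_vm_snapshot_print(snap, p);
 *	xe_vm_snapshot_free(snap);
 */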

/**
 * xe_vm_set_validating() - Register this task as currently making bos resident
 * @vm: Pointer to the vm or NULL.
 * @allow_res_evict: Allow eviction of buffer objects bound to @vm when
 * validating.
 *
 * Register this task as currently making bos resident for the vm. Intended
 * to avoid eviction by the same task of shared bos bound to the vm.
 * Call with the vm's resv lock held.
 */
static inline void xe_vm_set_validating(struct xe_vm *vm, bool allow_res_evict)
{
	if (vm && !allow_res_evict) {
		xe_vm_assert_held(vm);
		/* Pairs with READ_ONCE in xe_vm_is_validating() */
		WRITE_ONCE(vm->validation.validating, current);
	}
}

/**
 * xe_vm_clear_validating() - Unregister this task as currently making bos resident
 * @vm: Pointer to the vm or NULL
 * @allow_res_evict: Eviction from @vm was allowed. Must be set to the same
 * value as for xe_vm_set_validating().
 *
 * Unregister this task as currently making bos resident for the vm. Pairs
 * with xe_vm_set_validating().
 * Call with the vm's resv lock held.
 */
static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict)
{
	if (vm && !allow_res_evict) {
		/* Pairs with READ_ONCE in xe_vm_is_validating() */
		WRITE_ONCE(vm->validation.validating, NULL);
	}
}

/**
 * xe_vm_is_validating() - Whether bos bound to the vm are currently being made resident
 * by the current task.
 * @vm: Pointer to the vm.
 *
 * If this function returns %true, we should be in a vm resv locked region, since
 * the current process is the same task that called xe_vm_set_validating().
 * The function asserts that this is indeed the case.
 *
 * Return: %true if the task is currently making bos resident, %false otherwise.
 */
static inline bool xe_vm_is_validating(struct xe_vm *vm)
{
	/* Pairs with WRITE_ONCE in xe_vm_set_validating() and xe_vm_clear_validating() */
	if (READ_ONCE(vm->validation.validating) == current) {
		xe_vm_assert_held(vm);
		return true;
	}
	return false;
}
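
/*
 * Example (sketch, with a hypothetical validate_bo() helper): the set/clear
 * pair brackets the validation work so that eviction paths can use
 * xe_vm_is_validating() to detect self-eviction by the same task.
 *
 *	xe_vm_set_validating(vm, allow_res_evict);
 *	err = validate_bo(bo);		// hypothetical; makes the bo resident
 *	xe_vm_clear_validating(vm, allow_res_evict);
 */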

/**
 * xe_vm_set_validation_exec() - Accessor to set the drm_exec object
 * @vm: The vm we want to register a drm_exec object with.
 * @exec: The exec object we want to register.
 *
 * Set the drm_exec object used to lock the vm's resv.
 */
static inline void xe_vm_set_validation_exec(struct xe_vm *vm, struct drm_exec *exec)
{
	xe_vm_assert_held(vm);
	xe_assert(vm->xe, !!exec ^ !!vm->validation._exec);
	vm->validation._exec = exec;
}

/**
 * xe_vm_validation_exec() - Accessor to read the drm_exec object
 * @vm: The vm to read the registered drm_exec object from.
 *
 * Return: The drm_exec object used to lock the vm's resv. The value
 * is a valid pointer, %NULL, or one of the special values defined in
 * xe_validation.h.
 */
static inline struct drm_exec *xe_vm_validation_exec(struct xe_vm *vm)
{
	xe_vm_assert_held(vm);
	return vm->validation._exec;
}

/**
 * xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has
 * a valid GPU mapping
 * @tile: The tile which the GPU mapping belongs to
 * @tile_present: Tile present mask
 * @tile_invalidated: Tile invalidated mask
 *
 * The READ_ONCEs pair with WRITE_ONCEs in either the TLB invalidation paths
 * (xe_vm.c, xe_svm.c) or the binding paths (xe_pt.c). These are not reliable
 * without the notifier lock in userptr or SVM cases, and not reliable without
 * the BO dma-resv lock in the BO case. As such, they should only be used in
 * opportunistic cases (e.g., skipping a page fault fix or not skipping a TLB
 * invalidation) where it is harmless.
 *
 * Return: True if there are valid GPU pages, False otherwise
 */
#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated)	\
	((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
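
/*
 * Example (sketch, assuming vma->tile_present and vma->tile_invalidated are
 * the masks maintained by the bind and invalidation paths): opportunistically
 * skip work when the mapping is already valid on this tile.
 *
 *	if (xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
 *					vma->tile_invalidated))
 *		return 0;	// mapping valid; nothing to do
 */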

void xe_vma_mem_attr_copy(struct xe_vma_mem_attr *to, struct xe_vma_mem_attr *from);
#endif
429