/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_assert.h"
#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_printer;
struct drm_file;

struct ttm_buffer_object;

struct dma_fence;

struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
struct xe_svm_range;
struct drm_exec;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}

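/*
 * Usage sketch (illustrative, not a prescribed pattern): xe_vm_get()/xe_vm_put()
 * forward a plain reference-count hold to the underlying drm_gpuvm, so a caller
 * that needs the vm to outlive the current scope can do:
 *
 *	struct xe_vm *ref = xe_vm_get(vm);
 *	...			// hand @ref to deferred work
 *	xe_vm_put(ref);		// drop the reference when done
 */
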
int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);

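/*
 * Usage sketch (illustrative): xe_vm_lock() takes the vm's reservation object
 * lock, interruptibly when @intr is true, so the return value must be checked
 * before pairing with xe_vm_unlock():
 *
 *	int err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *	...			// work requiring xe_vm_assert_held(vm)
 *	xe_vm_unlock(vm);
 */
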
static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}

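/*
 * Usage sketch (illustrative; assumes vm->lock is the vm's rw_semaphore):
 * callers typically bail out early before starting new work on a dead vm:
 *
 *	down_write(&vm->lock);
 *	if (xe_vm_is_closed_or_banned(vm)) {
 *		up_write(&vm->lock);
 *		return -ENOENT;
 *	}
 */
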
struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

/**
 * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
 * @vm: The vm
 *
 * Return: whether the vm populates unmapped areas with scratch PTEs
 */
static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}

/**
 * gpuvm_to_vm() - Return the embedding xe_vm from a struct drm_gpuvm pointer
 * @gpuvm: The struct drm_gpuvm pointer
 *
 * Return: Pointer to the embedding struct xe_vm.
 */
static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct xe_vm, gpuvm);
}

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}

/**
 * DOC: Provide accessors for vma members to facilitate easy change of
 * implementation.
 */
static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}

static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}

static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_is_cpu_addr_mirror(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_SYSTEM_ALLOCATOR;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma) &&
		!xe_vma_is_cpu_addr_mirror(vma);
}

struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);

/**
 * to_userptr_vma() - Return a pointer to an embedding userptr vma
 * @vma: Pointer to the embedded struct xe_vma
 *
 * Return: Pointer to the embedding userptr vma
 */
static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
{
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	return container_of(vma, struct xe_userptr_vma, vma);
}

u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);

void xe_vm_close_and_put(struct xe_vm *vm);

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask);
struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
				     struct xe_vma *vma,
				     struct xe_svm_range *range,
				     u8 tile_mask);
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
				     struct xe_svm_range *range);

int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
					  u64 end, u8 tile_mask);

int xe_vm_invalidate_vma(struct xe_vma *vma);

int xe_vm_validate_protected(struct xe_vm *vm);

static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there was
 * nothing to execute, reactivate it and run the rebind worker.
 * This function should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}

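/*
 * Usage sketch (illustrative): per the kernel-doc above, a submission path on
 * a preempt-fence vm would reactivate rebinds right after queuing a batch:
 *
 *	... submit batch on an exec queue bound to @vm ...
 *	xe_vm_reactivate_rebind(vm);
 */
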
int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);

int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);

bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);

int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);

int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);

struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
				       struct xe_exec_queue *q, u64 addr,
				       enum xe_cache_level cache_lvl);

/**
 * xe_vm_resv() - Return the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

void xe_vm_kill(struct xe_vm *vm, bool unlocked);

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif

struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);

/**
 * xe_vm_set_validating() - Register this task as currently making bos resident
 * @vm: Pointer to the vm or NULL.
 * @allow_res_evict: Allow eviction of buffer objects bound to @vm when
 * validating.
 *
 * Register this task as currently making bos resident for the vm. Intended
 * to avoid eviction by the same task of shared bos bound to the vm.
 * Call with the vm's resv lock held.
 *
 * Return: A pin cookie that should be used for xe_vm_clear_validating().
 */
static inline struct pin_cookie xe_vm_set_validating(struct xe_vm *vm,
						     bool allow_res_evict)
{
	struct pin_cookie cookie = {};

	if (vm && !allow_res_evict) {
		xe_vm_assert_held(vm);
		cookie = lockdep_pin_lock(&xe_vm_resv(vm)->lock.base);
		/* Pairs with READ_ONCE in xe_vm_is_validating() */
		WRITE_ONCE(vm->validating, current);
	}

	return cookie;
}

/**
 * xe_vm_clear_validating() - Unregister this task as currently making bos resident
 * @vm: Pointer to the vm or NULL
 * @allow_res_evict: Eviction from @vm was allowed. Must be set to the same
 * value as for xe_vm_set_validating().
 * @cookie: Cookie obtained from xe_vm_set_validating().
 *
 * Unregister this task as currently making bos resident for the vm. Intended
 * to avoid eviction by the same task of shared bos bound to the vm.
 * Call with the vm's resv lock held.
 */
static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict,
					  struct pin_cookie cookie)
{
	if (vm && !allow_res_evict) {
		lockdep_unpin_lock(&xe_vm_resv(vm)->lock.base, cookie);
		/* Pairs with READ_ONCE in xe_vm_is_validating() */
		WRITE_ONCE(vm->validating, NULL);
	}
}

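/*
 * Usage sketch (illustrative) of the pairing above: the cookie returned by
 * xe_vm_set_validating() is handed back to xe_vm_clear_validating(), and
 * @allow_res_evict must be the same value in both calls:
 *
 *	struct pin_cookie cookie;
 *
 *	cookie = xe_vm_set_validating(vm, allow_res_evict);
 *	...			// make bos resident, vm resv held throughout
 *	xe_vm_clear_validating(vm, allow_res_evict, cookie);
 */
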
/**
 * xe_vm_is_validating() - Whether bos bound to the vm are currently being made resident
 * by the current task.
 * @vm: Pointer to the vm.
 *
 * If this function returns %true, we should be in a vm resv locked region, since
 * the current process is the same task that called xe_vm_set_validating().
 * The function asserts that this is indeed the case.
 *
 * Return: %true if the task is currently making bos resident, %false otherwise.
 */
static inline bool xe_vm_is_validating(struct xe_vm *vm)
{
	/* Pairs with WRITE_ONCE in xe_vm_set_validating() and xe_vm_clear_validating() */
	if (READ_ONCE(vm->validating) == current) {
		xe_vm_assert_held(vm);
		return true;
	}
	return false;
}

/**
 * xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has
 * a valid GPU mapping
 * @tile: The tile which the GPU mapping belongs to
 * @tile_present: Tile present mask
 * @tile_invalidated: Tile invalidated mask
 *
 * The READ_ONCEs pair with WRITE_ONCEs in either the TLB invalidation paths
 * (xe_vm.c, xe_svm.c) or the binding paths (xe_pt.c). These are not reliable
 * without the notifier lock in userptr or SVM cases, and not reliable without
 * the BO dma-resv lock in the BO case. As such, they should only be used in
 * opportunistic cases (e.g., skipping a page fault fix or not skipping a TLB
 * invalidation) where it is harmless.
 *
 * Return: True if there are valid GPU pages, False otherwise
 */
#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated) \
	((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))

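/*
 * Usage sketch (illustrative; @vma's mask field names are assumptions here):
 * since the check is only advisory, it is suited to opportunistically skipping
 * work that would be redone anyway, e.g. in a page-fault path:
 *
 *	if (xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
 *					vma->tile_invalidated))
 *		return 0;	// mapping already valid, nothing to fix
 */
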
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
#else
static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
{
}
#endif
#endif