/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_assert.h"
#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_printer;
struct drm_file;

struct ttm_buffer_object;

struct dma_fence;

struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
struct drm_exec;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

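/**
 * xe_vm_get() - Take a reference on a vm
 * @vm: The vm
 *
 * Takes a reference on the vm's underlying &struct drm_gpuvm.
 *
 * Return: The vm, for convenient call chaining.
 */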
static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

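/**
 * xe_vm_put() - Drop a reference on a vm
 * @vm: The vm
 *
 * Drops a reference on the vm's underlying &struct drm_gpuvm.
 */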
static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}

int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);

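/**
 * xe_vm_is_closed() - Whether the vm has been closed
 * @vm: The vm
 *
 * Return: true if the vm has been closed, false otherwise.
 */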
static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

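/**
 * xe_vm_is_banned() - Whether the vm has been banned
 * @vm: The vm
 *
 * Return: true if XE_VM_FLAG_BANNED is set on the vm, false otherwise.
 */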
static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

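/**
 * xe_vm_is_closed_or_banned() - Whether the vm is closed or banned
 * @vm: The vm
 *
 * The caller must hold vm->lock.
 *
 * Return: true if the vm is closed or banned, false otherwise.
 */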
static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}

struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

/**
 * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
 * @vm: The vm
 *
 * Return: whether the vm populates unmapped areas with scratch PTEs
 */
static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}

/**
 * gpuvm_to_vm() - Return the embedding xe_vm from a struct drm_gpuvm pointer
 * @gpuvm: The struct drm_gpuvm pointer
 *
 * Return: Pointer to the embedding struct xe_vm.
 */
static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct xe_vm, gpuvm);
}

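/**
 * gpuva_to_vm() - Return the xe_vm a struct drm_gpuva belongs to
 * @gpuva: The struct drm_gpuva pointer
 *
 * Return: Pointer to the xe_vm of @gpuva.
 */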
static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

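/**
 * gpuva_to_vma() - Return the embedding xe_vma from a struct drm_gpuva pointer
 * @gpuva: The struct drm_gpuva pointer
 *
 * Return: Pointer to the embedding struct xe_vma.
 */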
static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

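/**
 * gpuva_op_to_vma_op() - Return the embedding xe_vma_op from a drm_gpuva_op
 * @op: The struct drm_gpuva_op pointer
 *
 * Return: Pointer to the embedding struct xe_vma_op.
 */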
static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}

/**
 * DOC: Provide accessors for vma members to facilitate easy change of
 * implementation.
 */
static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}

static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}

static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
}

/**
 * to_userptr_vma() - Return a pointer to an embedding userptr vma
 * @vma: Pointer to the embedded struct xe_vma
 *
 * Return: Pointer to the embedding userptr vma
 */
static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
{
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	return container_of(vma, struct xe_userptr_vma, vma);
}

u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);

void xe_vm_close_and_put(struct xe_vm *vm);

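/**
 * xe_vm_in_fault_mode() - Whether the vm is configured for page faults
 * @vm: The vm
 *
 * Return: true if the vm was created with XE_VM_FLAG_FAULT_MODE set,
 * false otherwise.
 */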
static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

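/**
 * xe_vm_in_lr_mode() - Whether the vm is in long-running (LR) mode
 * @vm: The vm
 *
 * Return: true if XE_VM_FLAG_LR_MODE is set on the vm, false otherwise.
 */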
static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

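/**
 * xe_vm_in_preempt_fence_mode() - Whether the vm uses preempt fences
 * @vm: The vm
 *
 * Return: true if the vm is in long-running mode but not in fault mode,
 * false otherwise.
 */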
static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask);

int xe_vm_invalidate_vma(struct xe_vma *vma);

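/**
 * xe_vm_queue_rebind_worker() - Queue the vm's rebind worker
 * @vm: The vm
 *
 * Queues the preempt rebind worker on the device's ordered workqueue.
 * May only be called for vms in preempt fence mode.
 */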
static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there was
 * nothing to execute, reactivate it and run the rebind worker.
 * This function should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}

int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);

int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);

bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);

int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);

int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);

struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
				       struct xe_exec_queue *q, u64 addr,
				       enum xe_cache_level cache_lvl);

/**
 * xe_vm_resv() - Return the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

void xe_vm_kill(struct xe_vm *vm, bool unlocked);

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif

struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);

#endif