/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_assert.h"
#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_printer;
struct drm_file;

struct ttm_buffer_object;

struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
struct drm_exec;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}

int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);

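/*
 * Illustrative sketch (not part of the driver): a caller that already has a
 * struct xe_vm pointer typically takes its own reference and locks the vm's
 * reservation object around the work it does, assuming the interruptible
 * lock variant is wanted. "other_vm" is a hypothetical source pointer.
 *
 *	struct xe_vm *vm = xe_vm_get(other_vm);
 *	int err = xe_vm_lock(vm, true);
 *
 *	if (!err) {
 *		// ... operate on the vm under its reservation object ...
 *		xe_vm_unlock(vm);
 *	}
 *	xe_vm_put(vm);
 */
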
static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}

struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

/**
 * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
 * @vm: The vm
 *
 * Return: whether the vm populates unmapped areas with scratch PTEs
 */
static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}

/**
 * gpuvm_to_vm() - Return the embedding xe_vm from a struct drm_gpuvm pointer
 * @gpuvm: The struct drm_gpuvm pointer
 *
 * Return: Pointer to the embedding struct xe_vm.
 */
static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct xe_vm, gpuvm);
}

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}

/**
 * DOC: Provide accessors for vma members to facilitate easy change of
 * implementation.
 */
static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}

static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}

static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
}

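/*
 * Illustrative sketch (not part of the driver): the accessors above are the
 * intended way to query a vma's placement and backing, e.g. when checking
 * whether a GPU address falls inside a vma and classifying its backing store.
 * "addr" is a hypothetical address to test against.
 *
 *	bool hit = addr >= xe_vma_start(vma) && addr < xe_vma_end(vma);
 *	bool sparse = xe_vma_is_null(vma);	// NULL/sparse mapping
 *	bool userptr = xe_vma_is_userptr(vma);	// no bo and not sparse
 *	struct xe_bo *bo = xe_vma_bo(vma);	// NULL for the two cases above
 */
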
/**
 * to_userptr_vma() - Return a pointer to an embedding userptr vma
 * @vma: Pointer to the embedded struct xe_vma
 *
 * Return: Pointer to the embedding userptr vma
 */
static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
{
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	return container_of(vma, struct xe_userptr_vma, vma);
}

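/*
 * Illustrative sketch (not part of the driver): to_userptr_vma() asserts that
 * the vma really is a userptr vma, so callers are expected to check the
 * classification first, e.g. before (re)pinning its pages:
 *
 *	if (xe_vma_is_userptr(vma)) {
 *		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
 *
 *		err = xe_vma_userptr_pin_pages(uvma);
 *	}
 */
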
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);

void xe_vm_close_and_put(struct xe_vm *vm);

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask);

int xe_vm_invalidate_vma(struct xe_vma *vma);

static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there was
 * nothing to execute, reactivate it and run the rebind worker.
 * This function should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}

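/*
 * Illustrative sketch (hypothetical submit helper, not part of this header):
 * a compute/LR submission path would call the reactivation helper once new
 * work has actually been queued on the vm:
 *
 *	err = submit_batch(q, batch);	// hypothetical
 *	if (!err)
 *		xe_vm_reactivate_rebind(vm);
 */
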
int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);

int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);

bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);

int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);

int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);

/**
 * xe_vm_resv() - Return the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

void xe_vm_kill(struct xe_vm *vm, bool unlocked);

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif

struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);

#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
#else
static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
{
}
#endif
#endif