/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_assert.h"
#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_printer;
struct drm_file;

struct ttm_buffer_object;
struct ttm_validate_buffer;

struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
struct drm_exec;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}

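/*
 * Example (illustrative): take a vm reference before handing the vm to
 * asynchronous work, and drop it when the work is done:
 *
 *	xe_vm_get(vm);
 *	queue_work(wq, &work->worker);	(hypothetical deferred user of vm)
 *	...
 *	xe_vm_put(vm);			(from the work item when finished)
 */
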
int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);

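/*
 * Example (illustrative): xe_vm_lock() locks the vm's reservation object;
 * with @intr set the lock is interruptible and may fail, e.g. with -EINTR,
 * so the caller must handle an error return:
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *	... access state protected by the vm's resv ...
 *	xe_vm_unlock(vm);
 */
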
static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}

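/*
 * Example (illustrative): callers test this under vm->lock before starting
 * new work against the vm, along the lines of:
 *
 *	down_write(&vm->lock);
 *	if (xe_vm_is_closed_or_banned(vm)) {
 *		up_write(&vm->lock);
 *		return -ENOENT;
 *	}
 */
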
struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

/**
 * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
 * @vm: The vm
 *
 * Return: whether the vm populates unmapped areas with scratch PTEs
 */
static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}

/**
 * gpuvm_to_vm() - Return the embedding xe_vm from a struct drm_gpuvm pointer
 * @gpuvm: The struct drm_gpuvm pointer
 *
 * Return: Pointer to the embedding struct xe_vm.
 */
static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct xe_vm, gpuvm);
}

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}

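/*
 * Example (illustrative): these helpers let code holding generic GPUVM
 * pointers navigate back to the embedding xe structures:
 *
 *	struct drm_gpuva *gpuva = op->unmap.va;	(e.g. from a GPUVM unmap op)
 *	struct xe_vma *vma = gpuva_to_vma(gpuva);
 *	struct xe_vm *vm = gpuva_to_vm(gpuva);
 */
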
/**
 * DOC: Provide accessors for vma members to facilitate easy change of
 * implementation.
 */
static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}

static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}

static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
}

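/*
 * Example (illustrative): a vma has exactly one of three backing kinds,
 * which the helpers above distinguish:
 *
 *	if (xe_vma_is_null(vma))
 *		...		(sparse mapping, no backing store)
 *	else if (xe_vma_is_userptr(vma))
 *		...		(backed by user memory, see to_userptr_vma())
 *	else
 *		...		(backed by a BO, see xe_vma_bo())
 */
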
/**
 * to_userptr_vma() - Return a pointer to an embedding userptr vma
 * @vma: Pointer to the embedded struct xe_vma
 *
 * Return: Pointer to the embedding userptr vma
 */
static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
{
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	return container_of(vma, struct xe_userptr_vma, vma);
}

u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);

void xe_vm_close_and_put(struct xe_vm *vm);

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}

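/*
 * Note (illustrative): fault mode implies long-running (LR) mode, so an LR
 * vm services either recoverable page faults or preempt fences, never both:
 *
 *	if (xe_vm_in_fault_mode(vm))
 *		...		(faultable vm; xe_vm_in_lr_mode() is also true)
 *	else if (xe_vm_in_preempt_fence_mode(vm))
 *		...		(LR vm driven by preempt fences)
 *	else
 *		...		(dma-fence mode)
 */
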
int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask);

int xe_vm_invalidate_vma(struct xe_vma *vma);

static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there was
 * nothing to execute, reactivate it and run the rebind worker.
 * This function should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}
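
/*
 * Example (illustrative): a submission path on a preempt-fence mode vm would
 * call this right after queuing a batch:
 *
 *	... submit batch ...
 *	xe_vm_reactivate_rebind(vm);
 */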

int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);

int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);

bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);

int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);

int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);

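/*
 * Example (a minimal sketch; validate() is a hypothetical caller-owned step):
 * xe_vm_validate_should_retry() tells whether a failed validation is worth
 * retrying, using @end to bound the total time spent across attempts:
 *
 *	ktime_t end = 0;
 *	int err;
 *
 *	do {
 *		err = validate(vm, exec);
 *	} while (err && xe_vm_validate_should_retry(exec, err, &end));
 */
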
/**
 * xe_vm_resv() - Returns the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

void xe_vm_kill(struct xe_vm *vm, bool unlocked);

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))

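/*
 * Example (illustrative): functions requiring the vm's resv locked can state
 * the precondition up front so lockdep can verify it:
 *
 *	static void modify_vm_state(struct xe_vm *vm)	(hypothetical)
 *	{
 *		xe_vm_assert_held(vm);
 *		...
 *	}
 */
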
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif

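/*
 * Example (illustrative): vm_dbg() compiles to a no-op unless
 * CONFIG_DRM_XE_DEBUG_VM is enabled, so callers may log verbosely:
 *
 *	vm_dbg(&vm->xe->drm, "VMA map: addr=0x%016llx, range=0x%016llx",
 *	       xe_vma_start(vma), xe_vma_size(vma));
 */
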
struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
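
/*
 * Example (a minimal sketch of the snapshot lifecycle; capture is split so
 * the delayed part can run where copying buffer contents is permissible):
 *
 *	struct xe_vm_snapshot *snap = xe_vm_snapshot_capture(vm);
 *
 *	xe_vm_snapshot_capture_delayed(snap);
 *	xe_vm_snapshot_print(snap, p);
 *	xe_vm_snapshot_free(snap);
 */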

#endif