xref: /linux/drivers/gpu/drm/xe/xe_vm.h (revision 8cdcef1c2f82d207aa8b2a02298fbc17191c6261)
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_printer;
struct drm_file;

struct ttm_buffer_object;
struct ttm_validate_buffer;

struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
struct drm_exec;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}

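/*
 * Example (illustrative sketch, not upstream code): keep a vm alive across
 * deferred work by taking an extra reference with xe_vm_get() and dropping
 * it with xe_vm_put() once the work is done. "q" and defer_work_using()
 * are hypothetical stand-ins for the surrounding context.
 *
 *	struct xe_vm *vm = xe_vm_get(q->vm);
 *
 *	defer_work_using(vm);
 *	...
 *	xe_vm_put(vm);
 */
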
int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);

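/*
 * Example (illustrative sketch): locking the vm's reservation object around
 * an operation that requires it. With intr == true the lock is taken
 * interruptibly, so a nonzero return (e.g. on a pending signal) must be
 * handled; do_something_locked() is a hypothetical placeholder.
 *
 *	int err;
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *
 *	do_something_locked(vm);
 *	xe_vm_unlock(vm);
 */
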
static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}

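/*
 * Example (illustrative sketch): the closed/banned state is only stable
 * while vm->lock is held, so a typical ioctl-style path takes the lock
 * first and bails out early. The surrounding error handling is assumed.
 *
 *	down_write(&vm->lock);
 *	if (xe_vm_is_closed_or_banned(vm)) {
 *		up_write(&vm->lock);
 *		return -ENOENT;
 *	}
 *	...
 *	up_write(&vm->lock);
 */
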
struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return container_of(gpuva->vm, struct xe_vm, gpuvm);
}

static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}

/**
 * DOC: Provide accessors for vma members to facilitate easy change of
 * implementation.
 */
static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}

static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}

static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
}

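/*
 * Example (illustrative sketch): computing the GPU virtual address range
 * covered by a vma and classifying its backing, using only the accessors
 * above. print_vma() stands in for whatever the caller does with the data.
 *
 *	u64 start = xe_vma_start(vma);
 *	u64 end = xe_vma_end(vma);
 *	bool is_userptr = xe_vma_is_userptr(vma);
 *	bool is_null = xe_vma_is_null(vma);
 *
 *	print_vma(start, end - start, is_userptr, is_null);
 */
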
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);

void xe_vm_close_and_put(struct xe_vm *vm);

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);

int xe_vm_invalidate_vma(struct xe_vma *vma);

extern struct ttm_device_funcs xe_ttm_funcs;

static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there was
 * nothing to execute, reactivate it and queue the rebind worker. This function
 * should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}

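/*
 * Example (illustrative sketch): as the kernel-doc above suggests, a
 * submission path on a preempt-fence (compute) vm re-arms the rebind
 * worker after queuing a batch. submit_batch() is a hypothetical helper;
 * the real submission flow lives in the exec/exec_queue code.
 *
 *	err = submit_batch(q, batch);
 *	if (!err)
 *		xe_vm_reactivate_rebind(q->vm);
 */
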
int xe_vma_userptr_pin_pages(struct xe_vma *vma);

int xe_vma_userptr_check_repin(struct xe_vma *vma);

bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);

int xe_vm_lock_dma_resv(struct xe_vm *vm, struct drm_exec *exec,
			unsigned int num_shared, bool lock_vm);

void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
			     enum dma_resv_usage usage);

int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id);

int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
		      unsigned int num_shared);

/**
 * xe_vm_resv() - Return the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))

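/*
 * Example (illustrative sketch): a helper that requires the vm's
 * reservation object to be locked can document and enforce that with
 * xe_vm_assert_held(). The caller shown here uses xe_vm_lock()/unlock()
 * from above; modify_page_tables() is a hypothetical placeholder.
 *
 *	static void modify_page_tables(struct xe_vm *vm)
 *	{
 *		xe_vm_assert_held(vm);
 *		...
 *	}
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *	modify_page_tables(vm);
 *	xe_vm_unlock(vm);
 */
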
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif
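/*
 * Example (illustrative sketch): vm_dbg() takes the drm_device like
 * drm_dbg() and becomes a no-op unless CONFIG_DRM_XE_DEBUG_VM is enabled,
 * so callers can log freely in hot paths. The message text below is
 * arbitrary.
 *
 *	vm_dbg(&vm->xe->drm, "BIND 0x%016llx+0x%016llx",
 *	       xe_vma_start(vma), xe_vma_size(vma));
 */
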
#endif