// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include "xe_userptr.h"

#include <linux/mm.h>

#include "xe_trace_bo.h"

/**
 * xe_vma_userptr_check_repin() - Advisory check for repin needed
 * @uvma: The userptr vma
 *
 * Check if the userptr vma has been invalidated since last successful
 * repin. The check is advisory only and the function can be called
 * without the vm->svm.gpusvm.notifier_lock held. There is no guarantee that
 * the vma userptr will remain valid after a lockless check, so typically
 * the call needs to be followed by a proper check under the notifier_lock.
 *
 * Return: 0 if the userptr vma is still valid, -EAGAIN if a repin is
 * recommended.
 */
int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
{
	return mmu_interval_check_retry(&uvma->userptr.notifier,
					uvma->userptr.pages.notifier_seq) ?
		-EAGAIN : 0;
}

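/*
 * A rough usage sketch (an assumed caller pattern, not lifted from an
 * existing caller): do the cheap lockless check first, then take the
 * notifier lock for the authoritative per-VM check before committing:
 *
 *	if (xe_vma_userptr_check_repin(uvma))
 *		... schedule a repin or bail out with -EAGAIN ...
 *
 *	down_read(&vm->svm.gpusvm.notifier_lock);
 *	err = __xe_vm_userptr_needs_repin(vm);
 *	up_read(&vm->svm.gpusvm.notifier_lock);
 */
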
/**
 * __xe_vm_userptr_needs_repin() - Check whether the VM has userptrs
 * that need repinning.
 * @vm: The VM.
 *
 * This function checks whether the VM has userptrs that need repinning,
 * and provides a release-type barrier on the svm.gpusvm.notifier_lock after
 * checking.
 *
 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
 */
int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
{
	lockdep_assert_held_read(&vm->svm.gpusvm.notifier_lock);

	return (list_empty(&vm->userptr.repin_list) &&
		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}

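/**
 * xe_vma_userptr_pin_pages() - Pin the backing pages of a userptr vma
 * @uvma: The userptr vma
 *
 * Acquire the current backing pages of the userptr range through
 * drm_gpusvm_get_pages(). Must be called with vm->lock held. VMAs that are
 * already marked destroyed are skipped.
 *
 * Return: 0 on success or if the vma is marked destroyed, negative error
 * code on failure.
 */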
int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
{
	struct xe_vma *vma = &uvma->vma;
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_device *xe = vm->xe;
	struct drm_gpusvm_ctx ctx = {
		.read_only = xe_vma_read_only(vma),
		.device_private_page_owner = NULL,
	};

	lockdep_assert_held(&vm->lock);
	xe_assert(xe, xe_vma_is_userptr(vma));

	if (vma->gpuva.flags & XE_VMA_DESTROYED)
		return 0;

	return drm_gpusvm_get_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
				    uvma->userptr.notifier.mm,
				    &uvma->userptr.notifier,
				    xe_vma_userptr(vma),
				    xe_vma_userptr(vma) + xe_vma_size(vma),
				    &ctx);
}

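/*
 * Common invalidation path: outside fault mode, move the userptr to the VM's
 * invalidated list so the exec and rebind paths know to repin it; wait for
 * pending GPU work on the VM's reservation object; in fault mode, zap the
 * GPU page tables of an already-bound vma; finally unmap the gpusvm pages.
 * Called with the gpusvm notifier_lock held.
 */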
static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
{
	struct xe_userptr *userptr = &uvma->userptr;
	struct xe_vma *vma = &uvma->vma;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	struct drm_gpusvm_ctx ctx = {
		.in_notifier = true,
		.read_only = xe_vma_read_only(vma),
	};
	long err;

	/*
	 * Tell exec and rebind worker they need to repin and rebind this
	 * userptr.
	 */
	if (!xe_vm_in_fault_mode(vm) &&
	    !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
		spin_lock(&vm->userptr.invalidated_lock);
		list_move_tail(&userptr->invalidate_link,
			       &vm->userptr.invalidated);
		spin_unlock(&vm->userptr.invalidated_lock);
	}

	/*
	 * Preempt fences turn into schedule disables, pipeline these.
	 * Note that even in fault mode, we need to wait for binds and
	 * unbinds to complete, and those are attached as BOOKKEEP fences
	 * to the vm.
	 */
	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
			    DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence)
		dma_fence_enable_sw_signaling(fence);
	dma_resv_iter_end(&cursor);

	err = dma_resv_wait_timeout(xe_vm_resv(vm),
				    DMA_RESV_USAGE_BOOKKEEP,
				    false, MAX_SCHEDULE_TIMEOUT);
	XE_WARN_ON(err <= 0);

	if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
		err = xe_vm_invalidate_vma(vma);
		XE_WARN_ON(err);
	}

	drm_gpusvm_unmap_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
			       xe_vma_size(vma) >> PAGE_SHIFT, &ctx);
}

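/*
 * MMU interval notifier callback, invoked by the core MM when the CPU
 * mapping backing the userptr changes. Bumps the notifier sequence under the
 * gpusvm notifier_lock and hands off to __vma_userptr_invalidate(). Only
 * blockable invalidations are handled.
 */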
static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
				   const struct mmu_notifier_range *range,
				   unsigned long cur_seq)
{
	struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
	struct xe_vma *vma = &uvma->vma;
	struct xe_vm *vm = xe_vma_vm(vma);

	xe_assert(vm->xe, xe_vma_is_userptr(vma));
	trace_xe_vma_userptr_invalidate(vma);

	if (!mmu_notifier_range_blockable(range))
		return false;

	vm_dbg(&xe_vma_vm(vma)->xe->drm,
	       "NOTIFIER: addr=0x%016llx, range=0x%016llx",
	       xe_vma_start(vma), xe_vma_size(vma));

	down_write(&vm->svm.gpusvm.notifier_lock);
	mmu_interval_set_seq(mni, cur_seq);

	__vma_userptr_invalidate(vm, uvma);
	up_write(&vm->svm.gpusvm.notifier_lock);
	trace_xe_vma_userptr_invalidate_complete(vma);

	return true;
}

static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
	.invalidate = vma_userptr_invalidate,
};

#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
/**
 * xe_vma_userptr_force_invalidate() - force invalidate a userptr
 * @uvma: The userptr vma to invalidate
 *
 * Perform a forced userptr invalidation for testing purposes.
 */
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
{
	struct xe_vm *vm = xe_vma_vm(&uvma->vma);

	/* Protect against concurrent userptr pinning */
	lockdep_assert_held(&vm->lock);
	/* Protect against concurrent notifiers */
	lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
	/*
	 * Protect against concurrent instances of this function and
	 * the critical exec sections
	 */
	xe_vm_assert_held(vm);

	if (!mmu_interval_read_retry(&uvma->userptr.notifier,
				     uvma->userptr.pages.notifier_seq))
		uvma->userptr.pages.notifier_seq -= 2;
	__vma_userptr_invalidate(vm, uvma);
}
#endif

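/**
 * xe_vm_userptr_pin() - Pin all invalidated userptrs on a VM
 * @vm: The VM.
 *
 * Collect the userptr vmas that have been invalidated since their last
 * successful pin, repin their pages and move them to the VM's rebind list.
 * The caller must hold vm->lock in write mode. On error the collected
 * userptrs are moved back to the invalidated list so that a later call can
 * retry them.
 *
 * Return: 0 on success, negative error code on failure.
 */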
int xe_vm_userptr_pin(struct xe_vm *vm)
{
	struct xe_userptr_vma *uvma, *next;
	int err = 0;

	xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
	lockdep_assert_held_write(&vm->lock);

	/* Collect invalidated userptrs */
	spin_lock(&vm->userptr.invalidated_lock);
	xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
				 userptr.invalidate_link) {
		list_del_init(&uvma->userptr.invalidate_link);
		list_add_tail(&uvma->userptr.repin_link,
			      &vm->userptr.repin_list);
	}
	spin_unlock(&vm->userptr.invalidated_lock);

	/* Pin and move to bind list */
	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
				 userptr.repin_link) {
		err = xe_vma_userptr_pin_pages(uvma);
		if (err == -EFAULT) {
			list_del_init(&uvma->userptr.repin_link);
			/*
			 * We might have already done the pin once, but then
			 * had to retry before the re-bind happened, due to
			 * some other condition in the caller. In the meantime
			 * the userptr got dinged by the notifier such that we
			 * need to revalidate here, but this time we hit the
			 * EFAULT. In such a case make sure we remove ourselves
			 * from the rebind list to avoid going down in flames.
			 */
			if (!list_empty(&uvma->vma.combined_links.rebind))
				list_del_init(&uvma->vma.combined_links.rebind);

			/* Wait for pending binds */
			xe_vm_lock(vm, false);
			dma_resv_wait_timeout(xe_vm_resv(vm),
					      DMA_RESV_USAGE_BOOKKEEP,
					      false, MAX_SCHEDULE_TIMEOUT);

			down_read(&vm->svm.gpusvm.notifier_lock);
			err = xe_vm_invalidate_vma(&uvma->vma);
			up_read(&vm->svm.gpusvm.notifier_lock);
			xe_vm_unlock(vm);
			if (err)
				break;
		} else {
			if (err)
				break;

			list_del_init(&uvma->userptr.repin_link);
			list_move_tail(&uvma->vma.combined_links.rebind,
				       &vm->rebind_list);
		}
	}

	if (err) {
		down_write(&vm->svm.gpusvm.notifier_lock);
		spin_lock(&vm->userptr.invalidated_lock);
		list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
					 userptr.repin_link) {
			list_del_init(&uvma->userptr.repin_link);
			list_move_tail(&uvma->userptr.invalidate_link,
				       &vm->userptr.invalidated);
		}
		spin_unlock(&vm->userptr.invalidated_lock);
		up_write(&vm->svm.gpusvm.notifier_lock);
	}
	return err;
}

/**
 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
 * that need repinning.
 * @vm: The VM.
 *
 * This function does an advisory check for whether the VM has userptrs that
 * need repinning.
 *
 * Return: 0 if there are no indications of userptrs needing repinning,
 * -EAGAIN if there are.
 */
int xe_vm_userptr_check_repin(struct xe_vm *vm)
{
	return (list_empty_careful(&vm->userptr.repin_list) &&
		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}

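/**
 * xe_userptr_setup() - Initialize the userptr state of a vma
 * @uvma: The userptr vma
 * @start: Start of the userptr range in CPU virtual address space
 * @range: Size of the userptr range in bytes
 *
 * Initialize the userptr list links and register an MMU interval notifier
 * covering [@start, @start + @range) on the current process' mm. The pages
 * notifier sequence is initialized to a sentinel value so the vma initially
 * reads as needing a pin.
 *
 * Return: 0 on success, negative error code from
 * mmu_interval_notifier_insert() on failure.
 */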
int xe_userptr_setup(struct xe_userptr_vma *uvma, unsigned long start,
		     unsigned long range)
{
	struct xe_userptr *userptr = &uvma->userptr;
	int err;

	INIT_LIST_HEAD(&userptr->invalidate_link);
	INIT_LIST_HEAD(&userptr->repin_link);

	err = mmu_interval_notifier_insert(&userptr->notifier, current->mm,
					   start, range,
					   &vma_userptr_notifier_ops);
	if (err)
		return err;

	userptr->pages.notifier_seq = LONG_MAX;

	return 0;
}

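/**
 * xe_userptr_remove() - Tear down the page and notifier state of a userptr
 * @uvma: The userptr vma
 *
 * Free the gpusvm pages backing the userptr and remove the MMU interval
 * notifier. Since userptr pages are not pinned, the notifier must stay
 * registered until the GPU can no longer access the pages.
 */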
void xe_userptr_remove(struct xe_userptr_vma *uvma)
{
	struct xe_vm *vm = xe_vma_vm(&uvma->vma);
	struct xe_userptr *userptr = &uvma->userptr;

	drm_gpusvm_free_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
			      xe_vma_size(&uvma->vma) >> PAGE_SHIFT);

	/*
	 * Since userptr pages are not pinned, we can't remove
	 * the notifier until we're sure the GPU is not accessing
	 * them anymore
	 */
	mmu_interval_notifier_remove(&userptr->notifier);
}

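/**
 * xe_userptr_destroy() - Drop a userptr from the VM's bookkeeping lists
 * @uvma: The userptr vma
 *
 * Remove the userptr from the VM's invalidated list under the
 * invalidated_lock as part of vma destruction. The userptr must no longer
 * be on the repin list.
 */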
void xe_userptr_destroy(struct xe_userptr_vma *uvma)
{
	struct xe_vm *vm = xe_vma_vm(&uvma->vma);

	spin_lock(&vm->userptr.invalidated_lock);
	xe_assert(vm->xe, list_empty(&uvma->userptr.repin_link));
	list_del(&uvma->userptr.invalidate_link);
	spin_unlock(&vm->userptr.invalidated_lock);
}