1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2021 Intel Corporation
4 */
5
6 #include "xe_vm.h"
7
8 #include <linux/dma-fence-array.h>
9 #include <linux/nospec.h>
10
11 #include <drm/drm_drv.h>
12 #include <drm/drm_exec.h>
13 #include <drm/drm_print.h>
14 #include <drm/ttm/ttm_tt.h>
15 #include <uapi/drm/xe_drm.h>
16 #include <linux/ascii85.h>
17 #include <linux/delay.h>
18 #include <linux/kthread.h>
19 #include <linux/mm.h>
20 #include <linux/swap.h>
21
22 #include <generated/xe_wa_oob.h>
23
24 #include "regs/xe_gtt_defs.h"
25 #include "xe_assert.h"
26 #include "xe_bo.h"
27 #include "xe_device.h"
28 #include "xe_drm_client.h"
29 #include "xe_exec_queue.h"
30 #include "xe_gt_pagefault.h"
31 #include "xe_gt_tlb_invalidation.h"
32 #include "xe_migrate.h"
33 #include "xe_pat.h"
34 #include "xe_pm.h"
35 #include "xe_preempt_fence.h"
36 #include "xe_pt.h"
37 #include "xe_pxp.h"
38 #include "xe_res_cursor.h"
39 #include "xe_svm.h"
40 #include "xe_sync.h"
41 #include "xe_trace_bo.h"
42 #include "xe_wa.h"
43 #include "xe_hmm.h"
44
static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
46 {
47 return vm->gpuvm.r_obj;
48 }
49
50 /**
51 * xe_vma_userptr_check_repin() - Advisory check for repin needed
52 * @uvma: The userptr vma
53 *
54 * Check if the userptr vma has been invalidated since last successful
* repin. The check is advisory only and the function can be called
56 * without the vm->userptr.notifier_lock held. There is no guarantee that the
57 * vma userptr will remain valid after a lockless check, so typically
58 * the call needs to be followed by a proper check under the notifier_lock.
59 *
60 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
61 */
int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
63 {
64 return mmu_interval_check_retry(&uvma->userptr.notifier,
65 uvma->userptr.notifier_seq) ?
66 -EAGAIN : 0;
67 }
68
int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
70 {
71 struct xe_vma *vma = &uvma->vma;
72 struct xe_vm *vm = xe_vma_vm(vma);
73 struct xe_device *xe = vm->xe;
74
75 lockdep_assert_held(&vm->lock);
76 xe_assert(xe, xe_vma_is_userptr(vma));
77
78 return xe_hmm_userptr_populate_range(uvma, false);
79 }
80
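/*
 * Return true if any exec queue on the VM either has no preempt fence
 * installed or has one whose signaling has already been enabled, i.e. a
 * preemption is pending and fresh fences need to be armed.
 */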
static bool preempt_fences_waiting(struct xe_vm *vm)
82 {
83 struct xe_exec_queue *q;
84
85 lockdep_assert_held(&vm->lock);
86 xe_vm_assert_held(vm);
87
88 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
89 if (!q->lr.pfence ||
90 test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
91 &q->lr.pfence->flags)) {
92 return true;
93 }
94 }
95
96 return false;
97 }
98
static void free_preempt_fences(struct list_head *list)
100 {
101 struct list_head *link, *next;
102
103 list_for_each_safe(link, next, list)
104 xe_preempt_fence_free(to_preempt_fence_from_link(link));
105 }
106
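/*
 * Top up @list with preempt fences until there is one per exec queue on
 * the VM. @count tracks how many fences are already on the list so that
 * allocations are not repeated on retries.
 */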
static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
108 unsigned int *count)
109 {
110 lockdep_assert_held(&vm->lock);
111 xe_vm_assert_held(vm);
112
113 if (*count >= vm->preempt.num_exec_queues)
114 return 0;
115
116 for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
117 struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
118
119 if (IS_ERR(pfence))
120 return PTR_ERR(pfence);
121
122 list_move_tail(xe_preempt_fence_link(pfence), list);
123 }
124
125 return 0;
126 }
127
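/*
 * Wait for the preempt fences currently installed on the VM's exec
 * queues to signal, then drop them. Returns -ETIME if a wait fails or a
 * fence signaled with -ETIME, in which case the VM needs to be killed.
 */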
static int wait_for_existing_preempt_fences(struct xe_vm *vm)
129 {
130 struct xe_exec_queue *q;
131
132 xe_vm_assert_held(vm);
133
134 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
135 if (q->lr.pfence) {
136 long timeout = dma_fence_wait(q->lr.pfence, false);
137
138 /* Only -ETIME on fence indicates VM needs to be killed */
139 if (timeout < 0 || q->lr.pfence->error == -ETIME)
140 return -ETIME;
141
142 dma_fence_put(q->lr.pfence);
143 q->lr.pfence = NULL;
144 }
145 }
146
147 return 0;
148 }
149
static bool xe_vm_is_idle(struct xe_vm *vm)
151 {
152 struct xe_exec_queue *q;
153
154 xe_vm_assert_held(vm);
155 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
156 if (!xe_exec_queue_is_idle(q))
157 return false;
158 }
159
160 return true;
161 }
162
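/*
 * Hand out one pre-allocated fence from @list to each exec queue on the
 * VM, arming it with the queue's preempt fence context and next seqno
 * and replacing the queue's previous preempt fence.
 */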
static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
164 {
165 struct list_head *link;
166 struct xe_exec_queue *q;
167
168 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
169 struct dma_fence *fence;
170
171 link = list->next;
172 xe_assert(vm->xe, link != list);
173
174 fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
175 q, q->lr.context,
176 ++q->lr.seqno);
177 dma_fence_put(q->lr.pfence);
178 q->lr.pfence = fence;
179 }
180 }
181
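/*
 * Add the VM's currently installed preempt fences to @bo's reservation
 * object as BOOKKEEP fences, reserving fence slots first.
 */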
static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
183 {
184 struct xe_exec_queue *q;
185 int err;
186
187 xe_bo_assert_held(bo);
188
189 if (!vm->preempt.num_exec_queues)
190 return 0;
191
192 err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
193 if (err)
194 return err;
195
196 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
197 if (q->lr.pfence) {
198 dma_resv_add_fence(bo->ttm.base.resv,
199 q->lr.pfence,
200 DMA_RESV_USAGE_BOOKKEEP);
201 }
202
203 return 0;
204 }
205
static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
207 struct drm_exec *exec)
208 {
209 struct xe_exec_queue *q;
210
211 lockdep_assert_held(&vm->lock);
212 xe_vm_assert_held(vm);
213
214 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
215 q->ops->resume(q);
216
217 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence,
218 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
219 }
220 }
221
int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
223 {
224 struct drm_gpuvm_exec vm_exec = {
225 .vm = &vm->gpuvm,
226 .flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
227 .num_fences = 1,
228 };
229 struct drm_exec *exec = &vm_exec.exec;
230 struct dma_fence *pfence;
231 int err;
232 bool wait;
233
234 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
235
236 down_write(&vm->lock);
237 err = drm_gpuvm_exec_lock(&vm_exec);
238 if (err)
239 goto out_up_write;
240
241 pfence = xe_preempt_fence_create(q, q->lr.context,
242 ++q->lr.seqno);
243 if (!pfence) {
244 err = -ENOMEM;
245 goto out_fini;
246 }
247
248 list_add(&q->lr.link, &vm->preempt.exec_queues);
249 ++vm->preempt.num_exec_queues;
250 q->lr.pfence = pfence;
251
252 down_read(&vm->userptr.notifier_lock);
253
254 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
255 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
256
257 /*
* Check to see if a preemption on the VM or a userptr invalidation is
* in flight; if so, trigger this preempt fence to sync state with the
* other preempt fences on the VM.
261 */
262 wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
263 if (wait)
264 dma_fence_enable_sw_signaling(pfence);
265
266 up_read(&vm->userptr.notifier_lock);
267
268 out_fini:
269 drm_exec_fini(exec);
270 out_up_write:
271 up_write(&vm->lock);
272
273 return err;
274 }
275 ALLOW_ERROR_INJECTION(xe_vm_add_compute_exec_queue, ERRNO);
276
277 /**
278 * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
279 * @vm: The VM.
280 * @q: The exec_queue
281 *
282 * Note that this function might be called multiple times on the same queue.
283 */
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
285 {
286 if (!xe_vm_in_preempt_fence_mode(vm))
287 return;
288
289 down_write(&vm->lock);
290 if (!list_empty(&q->lr.link)) {
291 list_del_init(&q->lr.link);
292 --vm->preempt.num_exec_queues;
293 }
294 if (q->lr.pfence) {
295 dma_fence_enable_sw_signaling(q->lr.pfence);
296 dma_fence_put(q->lr.pfence);
297 q->lr.pfence = NULL;
298 }
299 up_write(&vm->lock);
300 }
301
302 /**
303 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
304 * that need repinning.
305 * @vm: The VM.
306 *
307 * This function checks for whether the VM has userptrs that need repinning,
308 * and provides a release-type barrier on the userptr.notifier_lock after
309 * checking.
310 *
311 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
312 */
int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
314 {
315 lockdep_assert_held_read(&vm->userptr.notifier_lock);
316
317 return (list_empty(&vm->userptr.repin_list) &&
318 list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
319 }
320
321 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
322
323 /**
324 * xe_vm_kill() - VM Kill
325 * @vm: The VM.
326 * @unlocked: Flag indicates the VM's dma-resv is not held
327 *
* Kill the VM by setting the banned flag, indicating the VM is no longer
* available for use. If in preempt fence mode, also kill all exec queues
* attached to the VM.
330 */
void xe_vm_kill(struct xe_vm *vm, bool unlocked)
332 {
333 struct xe_exec_queue *q;
334
335 lockdep_assert_held(&vm->lock);
336
337 if (unlocked)
338 xe_vm_lock(vm, false);
339
340 vm->flags |= XE_VM_FLAG_BANNED;
341 trace_xe_vm_kill(vm);
342
343 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
344 q->ops->kill(q);
345
346 if (unlocked)
347 xe_vm_unlock(vm);
348
349 /* TODO: Inform user the VM is banned */
350 }
351
352 /**
353 * xe_vm_validate_should_retry() - Whether to retry after a validate error.
354 * @exec: The drm_exec object used for locking before validation.
355 * @err: The error returned from ttm_bo_validate().
356 * @end: A ktime_t cookie that should be set to 0 before first use and
357 * that should be reused on subsequent calls.
358 *
359 * With multiple active VMs, under memory pressure, it is possible that
* ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
361 * Until ttm properly handles locking in such scenarios, best thing the
362 * driver can do is retry with a timeout. Check if that is necessary, and
363 * if so unlock the drm_exec's objects while keeping the ticket to prepare
364 * for a rerun.
365 *
366 * Return: true if a retry after drm_exec_init() is recommended;
367 * false otherwise.
368 */
bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
370 {
371 ktime_t cur;
372
373 if (err != -ENOMEM)
374 return false;
375
376 cur = ktime_get();
377 *end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
378 if (!ktime_before(cur, *end))
379 return false;
380
381 msleep(20);
382 return true;
383 }
384
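/*
 * drm_gpuvm vm_bo_validate() callback: move all VMAs of the evicted
 * vm_bo to the VM's rebind list and validate the backing BO.
 */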
static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
386 {
387 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
388 struct drm_gpuva *gpuva;
389 int ret;
390
391 lockdep_assert_held(&vm->lock);
392 drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
393 list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
394 &vm->rebind_list);
395
396 ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
397 if (ret)
398 return ret;
399
400 vm_bo->evicted = false;
401 return 0;
402 }
403
404 /**
405 * xe_vm_validate_rebind() - Validate buffer objects and rebind vmas
406 * @vm: The vm for which we are rebinding.
407 * @exec: The struct drm_exec with the locked GEM objects.
408 * @num_fences: The number of fences to reserve for the operation, not
409 * including rebinds and validations.
410 *
411 * Validates all evicted gem objects and rebinds their vmas. Note that
412 * rebindings may cause evictions and hence the validation-rebind
413 * sequence is rerun until there are no more objects to validate.
414 *
415 * Return: 0 on success, negative error code on error. In particular,
416 * may return -EINTR or -ERESTARTSYS if interrupted, and -EDEADLK if
417 * the drm_exec transaction needs to be restarted.
418 */
int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
420 unsigned int num_fences)
421 {
422 struct drm_gem_object *obj;
423 unsigned long index;
424 int ret;
425
426 do {
427 ret = drm_gpuvm_validate(&vm->gpuvm, exec);
428 if (ret)
429 return ret;
430
431 ret = xe_vm_rebind(vm, false);
432 if (ret)
433 return ret;
434 } while (!list_empty(&vm->gpuvm.evict.list));
435
436 drm_exec_for_each_locked_object(exec, index, obj) {
437 ret = dma_resv_reserve_fences(obj->resv, num_fences);
438 if (ret)
439 return ret;
440 }
441
442 return 0;
443 }
444
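/*
 * Locking step of the rebind worker: lock the VM resv, bail out early
 * (*done = true) if the VM is idle or no preempt fence has been
 * triggered, otherwise lock the external objects, wait for the existing
 * preempt fences and validate/rebind any evicted objects.
 */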
static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
446 bool *done)
447 {
448 int err;
449
450 err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0);
451 if (err)
452 return err;
453
454 if (xe_vm_is_idle(vm)) {
455 vm->preempt.rebind_deactivated = true;
456 *done = true;
457 return 0;
458 }
459
460 if (!preempt_fences_waiting(vm)) {
461 *done = true;
462 return 0;
463 }
464
465 err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0);
466 if (err)
467 return err;
468
469 err = wait_for_existing_preempt_fences(vm);
470 if (err)
471 return err;
472
473 /*
474 * Add validation and rebinding to the locking loop since both can
* cause evictions which may require blocking dma_resv locks.
476 * The fence reservation here is intended for the new preempt fences
477 * we attach at the end of the rebind work.
478 */
479 return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
480 }
481
static void preempt_rebind_work_func(struct work_struct *w)
483 {
484 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
485 struct drm_exec exec;
486 unsigned int fence_count = 0;
487 LIST_HEAD(preempt_fences);
488 ktime_t end = 0;
489 int err = 0;
490 long wait;
491 int __maybe_unused tries = 0;
492
493 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
494 trace_xe_vm_rebind_worker_enter(vm);
495
496 down_write(&vm->lock);
497
498 if (xe_vm_is_closed_or_banned(vm)) {
499 up_write(&vm->lock);
500 trace_xe_vm_rebind_worker_exit(vm);
501 return;
502 }
503
504 retry:
505 if (xe_vm_userptr_check_repin(vm)) {
506 err = xe_vm_userptr_pin(vm);
507 if (err)
508 goto out_unlock_outer;
509 }
510
511 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
512
513 drm_exec_until_all_locked(&exec) {
514 bool done = false;
515
516 err = xe_preempt_work_begin(&exec, vm, &done);
517 drm_exec_retry_on_contention(&exec);
518 if (err || done) {
519 drm_exec_fini(&exec);
520 if (err && xe_vm_validate_should_retry(&exec, err, &end))
521 err = -EAGAIN;
522
523 goto out_unlock_outer;
524 }
525 }
526
527 err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
528 if (err)
529 goto out_unlock;
530
531 err = xe_vm_rebind(vm, true);
532 if (err)
533 goto out_unlock;
534
535 /* Wait on rebinds and munmap style VM unbinds */
536 wait = dma_resv_wait_timeout(xe_vm_resv(vm),
537 DMA_RESV_USAGE_KERNEL,
538 false, MAX_SCHEDULE_TIMEOUT);
539 if (wait <= 0) {
540 err = -ETIME;
541 goto out_unlock;
542 }
543
544 #define retry_required(__tries, __vm) \
545 (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
546 (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
547 __xe_vm_userptr_needs_repin(__vm))
548
549 down_read(&vm->userptr.notifier_lock);
550 if (retry_required(tries, vm)) {
551 up_read(&vm->userptr.notifier_lock);
552 err = -EAGAIN;
553 goto out_unlock;
554 }
555
556 #undef retry_required
557
558 spin_lock(&vm->xe->ttm.lru_lock);
559 ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
560 spin_unlock(&vm->xe->ttm.lru_lock);
561
562 /* Point of no return. */
563 arm_preempt_fences(vm, &preempt_fences);
564 resume_and_reinstall_preempt_fences(vm, &exec);
565 up_read(&vm->userptr.notifier_lock);
566
567 out_unlock:
568 drm_exec_fini(&exec);
569 out_unlock_outer:
570 if (err == -EAGAIN) {
571 trace_xe_vm_rebind_worker_retry(vm);
572 goto retry;
573 }
574
575 if (err) {
576 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
577 xe_vm_kill(vm, true);
578 }
579 up_write(&vm->lock);
580
581 free_preempt_fences(&preempt_fences);
582
583 trace_xe_vm_rebind_worker_exit(vm);
584 }
585
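/*
 * Common userptr invalidation path, called under the userptr notifier
 * lock: queue the VMA for repin unless in fault mode or already
 * destroyed, force preempt fences to signal and wait for pending work on
 * the VM resv, zap the GPU page tables when in fault mode, and finally
 * unmap the userptr range via xe_hmm_userptr_unmap().
 */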
static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
587 {
588 struct xe_userptr *userptr = &uvma->userptr;
589 struct xe_vma *vma = &uvma->vma;
590 struct dma_resv_iter cursor;
591 struct dma_fence *fence;
592 long err;
593
594 /*
595 * Tell exec and rebind worker they need to repin and rebind this
596 * userptr.
597 */
598 if (!xe_vm_in_fault_mode(vm) &&
599 !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
600 spin_lock(&vm->userptr.invalidated_lock);
601 list_move_tail(&userptr->invalidate_link,
602 &vm->userptr.invalidated);
603 spin_unlock(&vm->userptr.invalidated_lock);
604 }
605
606 /*
607 * Preempt fences turn into schedule disables, pipeline these.
608 * Note that even in fault mode, we need to wait for binds and
* unbinds to complete, and those are attached as BOOKKEEP fences
610 * to the vm.
611 */
612 dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
613 DMA_RESV_USAGE_BOOKKEEP);
614 dma_resv_for_each_fence_unlocked(&cursor, fence)
615 dma_fence_enable_sw_signaling(fence);
616 dma_resv_iter_end(&cursor);
617
618 err = dma_resv_wait_timeout(xe_vm_resv(vm),
619 DMA_RESV_USAGE_BOOKKEEP,
620 false, MAX_SCHEDULE_TIMEOUT);
621 XE_WARN_ON(err <= 0);
622
623 if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
624 err = xe_vm_invalidate_vma(vma);
625 XE_WARN_ON(err);
626 }
627
628 xe_hmm_userptr_unmap(uvma);
629 }
630
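/*
 * MMU interval notifier callback for userptr VMAs: bump the notifier
 * sequence number under the notifier lock and invalidate the VMA.
 */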
static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
632 const struct mmu_notifier_range *range,
633 unsigned long cur_seq)
634 {
635 struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
636 struct xe_vma *vma = &uvma->vma;
637 struct xe_vm *vm = xe_vma_vm(vma);
638
639 xe_assert(vm->xe, xe_vma_is_userptr(vma));
640 trace_xe_vma_userptr_invalidate(vma);
641
642 if (!mmu_notifier_range_blockable(range))
643 return false;
644
645 vm_dbg(&xe_vma_vm(vma)->xe->drm,
646 "NOTIFIER: addr=0x%016llx, range=0x%016llx",
647 xe_vma_start(vma), xe_vma_size(vma));
648
649 down_write(&vm->userptr.notifier_lock);
650 mmu_interval_set_seq(mni, cur_seq);
651
652 __vma_userptr_invalidate(vm, uvma);
653 up_write(&vm->userptr.notifier_lock);
654 trace_xe_vma_userptr_invalidate_complete(vma);
655
656 return true;
657 }
658
659 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
660 .invalidate = vma_userptr_invalidate,
661 };
662
663 #if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
664 /**
665 * xe_vma_userptr_force_invalidate() - force invalidate a userptr
666 * @uvma: The userptr vma to invalidate
667 *
668 * Perform a forced userptr invalidation for testing purposes.
669 */
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
671 {
672 struct xe_vm *vm = xe_vma_vm(&uvma->vma);
673
674 /* Protect against concurrent userptr pinning */
675 lockdep_assert_held(&vm->lock);
676 /* Protect against concurrent notifiers */
677 lockdep_assert_held(&vm->userptr.notifier_lock);
678 /*
679 * Protect against concurrent instances of this function and
680 * the critical exec sections
681 */
682 xe_vm_assert_held(vm);
683
684 if (!mmu_interval_read_retry(&uvma->userptr.notifier,
685 uvma->userptr.notifier_seq))
686 uvma->userptr.notifier_seq -= 2;
687 __vma_userptr_invalidate(vm, uvma);
688 }
689 #endif
690
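/*
 * Repin all invalidated userptr VMAs on the VM and move them to the
 * rebind list. A VMA that faults (-EFAULT) is instead removed from the
 * rebind list and has its GPU mappings invalidated; on any other error
 * the collected VMAs are returned to the invalidated list for a later
 * retry.
 */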
int xe_vm_userptr_pin(struct xe_vm *vm)
692 {
693 struct xe_userptr_vma *uvma, *next;
694 int err = 0;
695
696 xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
697 lockdep_assert_held_write(&vm->lock);
698
699 /* Collect invalidated userptrs */
700 spin_lock(&vm->userptr.invalidated_lock);
701 xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
702 list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
703 userptr.invalidate_link) {
704 list_del_init(&uvma->userptr.invalidate_link);
705 list_add_tail(&uvma->userptr.repin_link,
706 &vm->userptr.repin_list);
707 }
708 spin_unlock(&vm->userptr.invalidated_lock);
709
710 /* Pin and move to bind list */
711 list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
712 userptr.repin_link) {
713 err = xe_vma_userptr_pin_pages(uvma);
714 if (err == -EFAULT) {
715 list_del_init(&uvma->userptr.repin_link);
716 /*
* We might have already done the pin once, but then had to
* retry before the re-bind happened, due to some other
* condition in the caller, but in the meantime the userptr got
* dinged by the notifier such that we need to revalidate here,
* but this time we hit the EFAULT. In such a case make sure we
* remove ourselves from the rebind list to avoid going down in
* flames.
725 */
726 if (!list_empty(&uvma->vma.combined_links.rebind))
727 list_del_init(&uvma->vma.combined_links.rebind);
728
729 /* Wait for pending binds */
730 xe_vm_lock(vm, false);
731 dma_resv_wait_timeout(xe_vm_resv(vm),
732 DMA_RESV_USAGE_BOOKKEEP,
733 false, MAX_SCHEDULE_TIMEOUT);
734
735 err = xe_vm_invalidate_vma(&uvma->vma);
736 xe_vm_unlock(vm);
737 if (err)
738 break;
739 } else {
740 if (err)
741 break;
742
743 list_del_init(&uvma->userptr.repin_link);
744 list_move_tail(&uvma->vma.combined_links.rebind,
745 &vm->rebind_list);
746 }
747 }
748
749 if (err) {
750 down_write(&vm->userptr.notifier_lock);
751 spin_lock(&vm->userptr.invalidated_lock);
752 list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
753 userptr.repin_link) {
754 list_del_init(&uvma->userptr.repin_link);
755 list_move_tail(&uvma->userptr.invalidate_link,
756 &vm->userptr.invalidated);
757 }
758 spin_unlock(&vm->userptr.invalidated_lock);
759 up_write(&vm->userptr.notifier_lock);
760 }
761 return err;
762 }
763
764 /**
765 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
766 * that need repinning.
767 * @vm: The VM.
768 *
769 * This function does an advisory check for whether the VM has userptrs that
770 * need repinning.
771 *
772 * Return: 0 if there are no indications of userptrs needing repinning,
773 * -EAGAIN if there are.
774 */
int xe_vm_userptr_check_repin(struct xe_vm *vm)
776 {
777 return (list_empty_careful(&vm->userptr.repin_list) &&
778 list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
779 }
780
static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
782 {
783 int i;
784
785 for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) {
786 if (!vops->pt_update_ops[i].num_ops)
787 continue;
788
789 vops->pt_update_ops[i].ops =
790 kmalloc_array(vops->pt_update_ops[i].num_ops,
791 sizeof(*vops->pt_update_ops[i].ops),
792 GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
793 if (!vops->pt_update_ops[i].ops)
794 return array_of_binds ? -ENOBUFS : -ENOMEM;
795 }
796
797 return 0;
798 }
799 ALLOW_ERROR_INJECTION(xe_vma_ops_alloc, ERRNO);
800
static void xe_vma_ops_fini(struct xe_vma_ops *vops)
802 {
803 int i;
804
805 for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
806 kfree(vops->pt_update_ops[i].ops);
807 }
808
static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask)
810 {
811 int i;
812
813 for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
814 if (BIT(i) & tile_mask)
815 ++vops->pt_update_ops[i].num_ops;
816 }
817
static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
819 u8 tile_mask)
820 {
821 INIT_LIST_HEAD(&op->link);
822 op->tile_mask = tile_mask;
823 op->base.op = DRM_GPUVA_OP_MAP;
824 op->base.map.va.addr = vma->gpuva.va.addr;
825 op->base.map.va.range = vma->gpuva.va.range;
826 op->base.map.gem.obj = vma->gpuva.gem.obj;
827 op->base.map.gem.offset = vma->gpuva.gem.offset;
828 op->map.vma = vma;
829 op->map.immediate = true;
830 op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
831 op->map.is_null = xe_vma_is_null(vma);
832 }
833
static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
835 u8 tile_mask)
836 {
837 struct xe_vma_op *op;
838
839 op = kzalloc(sizeof(*op), GFP_KERNEL);
840 if (!op)
841 return -ENOMEM;
842
843 xe_vm_populate_rebind(op, vma, tile_mask);
844 list_add_tail(&op->link, &vops->list);
845 xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
846
847 return 0;
848 }
849
850 static struct dma_fence *ops_execute(struct xe_vm *vm,
851 struct xe_vma_ops *vops);
852 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
853 struct xe_exec_queue *q,
854 struct xe_sync_entry *syncs, u32 num_syncs);
855
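/*
 * Rebind all VMAs on the VM's rebind list by building a list of MAP ops
 * and executing them. Entries are only removed from the rebind list once
 * the ops have been successfully submitted.
 */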
int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
857 {
858 struct dma_fence *fence;
859 struct xe_vma *vma, *next;
860 struct xe_vma_ops vops;
861 struct xe_vma_op *op, *next_op;
862 int err, i;
863
864 lockdep_assert_held(&vm->lock);
865 if ((xe_vm_in_lr_mode(vm) && !rebind_worker) ||
866 list_empty(&vm->rebind_list))
867 return 0;
868
869 xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
870 for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
871 vops.pt_update_ops[i].wait_vm_bookkeep = true;
872
873 xe_vm_assert_held(vm);
874 list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
875 xe_assert(vm->xe, vma->tile_present);
876
877 if (rebind_worker)
878 trace_xe_vma_rebind_worker(vma);
879 else
880 trace_xe_vma_rebind_exec(vma);
881
882 err = xe_vm_ops_add_rebind(&vops, vma,
883 vma->tile_present);
884 if (err)
885 goto free_ops;
886 }
887
888 err = xe_vma_ops_alloc(&vops, false);
889 if (err)
890 goto free_ops;
891
892 fence = ops_execute(vm, &vops);
893 if (IS_ERR(fence)) {
894 err = PTR_ERR(fence);
895 } else {
896 dma_fence_put(fence);
897 list_for_each_entry_safe(vma, next, &vm->rebind_list,
898 combined_links.rebind)
899 list_del_init(&vma->combined_links.rebind);
900 }
901 free_ops:
902 list_for_each_entry_safe(op, next_op, &vops.list, link) {
903 list_del(&op->link);
904 kfree(op);
905 }
906 xe_vma_ops_fini(&vops);
907
908 return err;
909 }
910
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_mask)
912 {
913 struct dma_fence *fence = NULL;
914 struct xe_vma_ops vops;
915 struct xe_vma_op *op, *next_op;
916 struct xe_tile *tile;
917 u8 id;
918 int err;
919
920 lockdep_assert_held(&vm->lock);
921 xe_vm_assert_held(vm);
922 xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
923
924 xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
925 for_each_tile(tile, vm->xe, id) {
926 vops.pt_update_ops[id].wait_vm_bookkeep = true;
927 vops.pt_update_ops[tile->id].q =
928 xe_tile_migrate_exec_queue(tile);
929 }
930
931 err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
932 if (err)
933 return ERR_PTR(err);
934
935 err = xe_vma_ops_alloc(&vops, false);
936 if (err) {
937 fence = ERR_PTR(err);
938 goto free_ops;
939 }
940
941 fence = ops_execute(vm, &vops);
942
943 free_ops:
944 list_for_each_entry_safe(op, next_op, &vops.list, link) {
945 list_del(&op->link);
946 kfree(op);
947 }
948 xe_vma_ops_fini(&vops);
949
950 return fence;
951 }
952
static void xe_vm_populate_range_rebind(struct xe_vma_op *op,
954 struct xe_vma *vma,
955 struct xe_svm_range *range,
956 u8 tile_mask)
957 {
958 INIT_LIST_HEAD(&op->link);
959 op->tile_mask = tile_mask;
960 op->base.op = DRM_GPUVA_OP_DRIVER;
961 op->subop = XE_VMA_SUBOP_MAP_RANGE;
962 op->map_range.vma = vma;
963 op->map_range.range = range;
964 }
965
966 static int
xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops,
968 struct xe_vma *vma,
969 struct xe_svm_range *range,
970 u8 tile_mask)
971 {
972 struct xe_vma_op *op;
973
974 op = kzalloc(sizeof(*op), GFP_KERNEL);
975 if (!op)
976 return -ENOMEM;
977
978 xe_vm_populate_range_rebind(op, vma, range, tile_mask);
979 list_add_tail(&op->link, &vops->list);
980 xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
981
982 return 0;
983 }
984
985 /**
986 * xe_vm_range_rebind() - VM range (re)bind
987 * @vm: The VM which the range belongs to.
988 * @vma: The VMA which the range belongs to.
989 * @range: SVM range to rebind.
990 * @tile_mask: Tile mask to bind the range to.
991 *
992 * (re)bind SVM range setting up GPU page tables for the range.
993 *
* Return: dma fence for rebind to signal completion on success, ERR_PTR on
995 * failure
996 */
struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
998 struct xe_vma *vma,
999 struct xe_svm_range *range,
1000 u8 tile_mask)
1001 {
1002 struct dma_fence *fence = NULL;
1003 struct xe_vma_ops vops;
1004 struct xe_vma_op *op, *next_op;
1005 struct xe_tile *tile;
1006 u8 id;
1007 int err;
1008
1009 lockdep_assert_held(&vm->lock);
1010 xe_vm_assert_held(vm);
1011 xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1012 xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
1013
1014 xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
1015 for_each_tile(tile, vm->xe, id) {
1016 vops.pt_update_ops[id].wait_vm_bookkeep = true;
1017 vops.pt_update_ops[tile->id].q =
1018 xe_tile_migrate_exec_queue(tile);
1019 }
1020
1021 err = xe_vm_ops_add_range_rebind(&vops, vma, range, tile_mask);
1022 if (err)
1023 return ERR_PTR(err);
1024
1025 err = xe_vma_ops_alloc(&vops, false);
1026 if (err) {
1027 fence = ERR_PTR(err);
1028 goto free_ops;
1029 }
1030
1031 fence = ops_execute(vm, &vops);
1032
1033 free_ops:
1034 list_for_each_entry_safe(op, next_op, &vops.list, link) {
1035 list_del(&op->link);
1036 kfree(op);
1037 }
1038 xe_vma_ops_fini(&vops);
1039
1040 return fence;
1041 }
1042
static void xe_vm_populate_range_unbind(struct xe_vma_op *op,
1044 struct xe_svm_range *range)
1045 {
1046 INIT_LIST_HEAD(&op->link);
1047 op->tile_mask = range->tile_present;
1048 op->base.op = DRM_GPUVA_OP_DRIVER;
1049 op->subop = XE_VMA_SUBOP_UNMAP_RANGE;
1050 op->unmap_range.range = range;
1051 }
1052
1053 static int
xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops,
1055 struct xe_svm_range *range)
1056 {
1057 struct xe_vma_op *op;
1058
1059 op = kzalloc(sizeof(*op), GFP_KERNEL);
1060 if (!op)
1061 return -ENOMEM;
1062
1063 xe_vm_populate_range_unbind(op, range);
1064 list_add_tail(&op->link, &vops->list);
1065 xe_vma_ops_incr_pt_update_ops(vops, range->tile_present);
1066
1067 return 0;
1068 }
1069
1070 /**
1071 * xe_vm_range_unbind() - VM range unbind
1072 * @vm: The VM which the range belongs to.
* @range: SVM range to unbind.
1074 *
1075 * Unbind SVM range removing the GPU page tables for the range.
1076 *
* Return: dma fence for unbind to signal completion on success, ERR_PTR on
1078 * failure
1079 */
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
1081 struct xe_svm_range *range)
1082 {
1083 struct dma_fence *fence = NULL;
1084 struct xe_vma_ops vops;
1085 struct xe_vma_op *op, *next_op;
1086 struct xe_tile *tile;
1087 u8 id;
1088 int err;
1089
1090 lockdep_assert_held(&vm->lock);
1091 xe_vm_assert_held(vm);
1092 xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1093
1094 if (!range->tile_present)
1095 return dma_fence_get_stub();
1096
1097 xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
1098 for_each_tile(tile, vm->xe, id) {
1099 vops.pt_update_ops[id].wait_vm_bookkeep = true;
1100 vops.pt_update_ops[tile->id].q =
1101 xe_tile_migrate_exec_queue(tile);
1102 }
1103
1104 err = xe_vm_ops_add_range_unbind(&vops, range);
1105 if (err)
1106 return ERR_PTR(err);
1107
1108 err = xe_vma_ops_alloc(&vops, false);
1109 if (err) {
1110 fence = ERR_PTR(err);
1111 goto free_ops;
1112 }
1113
1114 fence = ops_execute(vm, &vops);
1115
1116 free_ops:
1117 list_for_each_entry_safe(op, next_op, &vops.list, link) {
1118 list_del(&op->link);
1119 kfree(op);
1120 }
1121 xe_vma_ops_fini(&vops);
1122
1123 return fence;
1124 }
1125
static void xe_vma_free(struct xe_vma *vma)
1127 {
1128 if (xe_vma_is_userptr(vma))
1129 kfree(to_userptr_vma(vma));
1130 else
1131 kfree(vma);
1132 }
1133
1134 #define VMA_CREATE_FLAG_READ_ONLY BIT(0)
1135 #define VMA_CREATE_FLAG_IS_NULL BIT(1)
1136 #define VMA_CREATE_FLAG_DUMPABLE BIT(2)
1137 #define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR BIT(3)
1138
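/*
 * Allocate and initialize a VMA. A userptr VMA (no BO, not NULL and not
 * a CPU address mirror) gets an MMU interval notifier registered for its
 * range, while a BO-backed VMA takes a GEM reference and is linked to
 * the BO's drm_gpuvm_bo.
 */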
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
1140 struct xe_bo *bo,
1141 u64 bo_offset_or_userptr,
1142 u64 start, u64 end,
1143 u16 pat_index, unsigned int flags)
1144 {
1145 struct xe_vma *vma;
1146 struct xe_tile *tile;
1147 u8 id;
1148 bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
1149 bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
1150 bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
1151 bool is_cpu_addr_mirror =
1152 (flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR);
1153
1154 xe_assert(vm->xe, start < end);
1155 xe_assert(vm->xe, end < vm->size);
1156
1157 /*
1158 * Allocate and ensure that the xe_vma_is_userptr() return
1159 * matches what was allocated.
1160 */
1161 if (!bo && !is_null && !is_cpu_addr_mirror) {
1162 struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
1163
1164 if (!uvma)
1165 return ERR_PTR(-ENOMEM);
1166
1167 vma = &uvma->vma;
1168 } else {
1169 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
1170 if (!vma)
1171 return ERR_PTR(-ENOMEM);
1172
1173 if (is_cpu_addr_mirror)
1174 vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR;
1175 if (is_null)
1176 vma->gpuva.flags |= DRM_GPUVA_SPARSE;
1177 if (bo)
1178 vma->gpuva.gem.obj = &bo->ttm.base;
1179 }
1180
1181 INIT_LIST_HEAD(&vma->combined_links.rebind);
1182
1183 INIT_LIST_HEAD(&vma->gpuva.gem.entry);
1184 vma->gpuva.vm = &vm->gpuvm;
1185 vma->gpuva.va.addr = start;
1186 vma->gpuva.va.range = end - start + 1;
1187 if (read_only)
1188 vma->gpuva.flags |= XE_VMA_READ_ONLY;
1189 if (dumpable)
1190 vma->gpuva.flags |= XE_VMA_DUMPABLE;
1191
1192 for_each_tile(tile, vm->xe, id)
1193 vma->tile_mask |= 0x1 << id;
1194
1195 if (vm->xe->info.has_atomic_enable_pte_bit)
1196 vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
1197
1198 vma->pat_index = pat_index;
1199
1200 if (bo) {
1201 struct drm_gpuvm_bo *vm_bo;
1202
1203 xe_bo_assert_held(bo);
1204
1205 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
1206 if (IS_ERR(vm_bo)) {
1207 xe_vma_free(vma);
1208 return ERR_CAST(vm_bo);
1209 }
1210
1211 drm_gpuvm_bo_extobj_add(vm_bo);
1212 drm_gem_object_get(&bo->ttm.base);
1213 vma->gpuva.gem.offset = bo_offset_or_userptr;
1214 drm_gpuva_link(&vma->gpuva, vm_bo);
1215 drm_gpuvm_bo_put(vm_bo);
1216 } else /* userptr or null */ {
1217 if (!is_null && !is_cpu_addr_mirror) {
1218 struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
1219 u64 size = end - start + 1;
1220 int err;
1221
1222 INIT_LIST_HEAD(&userptr->invalidate_link);
1223 INIT_LIST_HEAD(&userptr->repin_link);
1224 vma->gpuva.gem.offset = bo_offset_or_userptr;
1225 mutex_init(&userptr->unmap_mutex);
1226
1227 err = mmu_interval_notifier_insert(&userptr->notifier,
1228 current->mm,
1229 xe_vma_userptr(vma), size,
1230 &vma_userptr_notifier_ops);
1231 if (err) {
1232 xe_vma_free(vma);
1233 return ERR_PTR(err);
1234 }
1235
1236 userptr->notifier_seq = LONG_MAX;
1237 }
1238
1239 xe_vm_get(vm);
1240 }
1241
1242 return vma;
1243 }
1244
static void xe_vma_destroy_late(struct xe_vma *vma)
1246 {
1247 struct xe_vm *vm = xe_vma_vm(vma);
1248
1249 if (vma->ufence) {
1250 xe_sync_ufence_put(vma->ufence);
1251 vma->ufence = NULL;
1252 }
1253
1254 if (xe_vma_is_userptr(vma)) {
1255 struct xe_userptr_vma *uvma = to_userptr_vma(vma);
1256 struct xe_userptr *userptr = &uvma->userptr;
1257
1258 if (userptr->sg)
1259 xe_hmm_userptr_free_sg(uvma);
1260
1261 /*
1262 * Since userptr pages are not pinned, we can't remove
1263 * the notifier until we're sure the GPU is not accessing
1264 * them anymore
1265 */
1266 mmu_interval_notifier_remove(&userptr->notifier);
1267 mutex_destroy(&userptr->unmap_mutex);
1268 xe_vm_put(vm);
1269 } else if (xe_vma_is_null(vma) || xe_vma_is_cpu_addr_mirror(vma)) {
1270 xe_vm_put(vm);
1271 } else {
1272 xe_bo_put(xe_vma_bo(vma));
1273 }
1274
1275 xe_vma_free(vma);
1276 }
1277
static void vma_destroy_work_func(struct work_struct *w)
1279 {
1280 struct xe_vma *vma =
1281 container_of(w, struct xe_vma, destroy_work);
1282
1283 xe_vma_destroy_late(vma);
1284 }
1285
static void vma_destroy_cb(struct dma_fence *fence,
1287 struct dma_fence_cb *cb)
1288 {
1289 struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1290
1291 INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1292 queue_work(system_unbound_wq, &vma->destroy_work);
1293 }
1294
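/*
 * Tear down a VMA: drop it from the userptr invalidation list or unlink
 * it from its BO, then free it, deferring the final cleanup to a worker
 * once @fence signals if a fence is provided.
 */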
static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1296 {
1297 struct xe_vm *vm = xe_vma_vm(vma);
1298
1299 lockdep_assert_held_write(&vm->lock);
1300 xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
1301
1302 if (xe_vma_is_userptr(vma)) {
1303 xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
1304
1305 spin_lock(&vm->userptr.invalidated_lock);
1306 xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link));
1307 list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
1308 spin_unlock(&vm->userptr.invalidated_lock);
1309 } else if (!xe_vma_is_null(vma) && !xe_vma_is_cpu_addr_mirror(vma)) {
1310 xe_bo_assert_held(xe_vma_bo(vma));
1311
1312 drm_gpuva_unlink(&vma->gpuva);
1313 }
1314
1315 xe_vm_assert_held(vm);
1316 if (fence) {
1317 int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1318 vma_destroy_cb);
1319
1320 if (ret) {
1321 XE_WARN_ON(ret != -ENOENT);
1322 xe_vma_destroy_late(vma);
1323 }
1324 } else {
1325 xe_vma_destroy_late(vma);
1326 }
1327 }
1328
1329 /**
1330 * xe_vm_lock_vma() - drm_exec utility to lock a vma
1331 * @exec: The drm_exec object we're currently locking for.
* @vma: The vma for which we want to lock the vm resv and any attached
1333 * object's resv.
1334 *
1335 * Return: 0 on success, negative error code on error. In particular
1336 * may return -EDEADLK on WW transaction contention and -EINTR if
1337 * an interruptible wait is terminated by a signal.
1338 */
int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
1340 {
1341 struct xe_vm *vm = xe_vma_vm(vma);
1342 struct xe_bo *bo = xe_vma_bo(vma);
1343 int err;
1344
1345 XE_WARN_ON(!vm);
1346
1347 err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
1348 if (!err && bo && !bo->vm)
1349 err = drm_exec_lock_obj(exec, &bo->ttm.base);
1350
1351 return err;
1352 }
1353
static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1355 {
1356 struct drm_exec exec;
1357 int err;
1358
1359 drm_exec_init(&exec, 0, 0);
1360 drm_exec_until_all_locked(&exec) {
1361 err = xe_vm_lock_vma(&exec, vma);
1362 drm_exec_retry_on_contention(&exec);
1363 if (XE_WARN_ON(err))
1364 break;
1365 }
1366
1367 xe_vma_destroy(vma, NULL);
1368
1369 drm_exec_fini(&exec);
1370 }
1371
1372 struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1374 {
1375 struct drm_gpuva *gpuva;
1376
1377 lockdep_assert_held(&vm->lock);
1378
1379 if (xe_vm_is_closed_or_banned(vm))
1380 return NULL;
1381
1382 xe_assert(vm->xe, start + range <= vm->size);
1383
1384 gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1385
1386 return gpuva ? gpuva_to_vma(gpuva) : NULL;
1387 }
1388
static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1390 {
1391 int err;
1392
1393 xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1394 lockdep_assert_held(&vm->lock);
1395
1396 mutex_lock(&vm->snap_mutex);
1397 err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1398 mutex_unlock(&vm->snap_mutex);
1399 XE_WARN_ON(err); /* Shouldn't be possible */
1400
1401 return err;
1402 }
1403
static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1405 {
1406 xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1407 lockdep_assert_held(&vm->lock);
1408
1409 mutex_lock(&vm->snap_mutex);
1410 drm_gpuva_remove(&vma->gpuva);
1411 mutex_unlock(&vm->snap_mutex);
1412 if (vm->usm.last_fault_vma == vma)
1413 vm->usm.last_fault_vma = NULL;
1414 }
1415
static struct drm_gpuva_op *xe_vm_op_alloc(void)
1417 {
1418 struct xe_vma_op *op;
1419
1420 op = kzalloc(sizeof(*op), GFP_KERNEL);
1421
1422 if (unlikely(!op))
1423 return NULL;
1424
1425 return &op->base;
1426 }
1427
1428 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1429
1430 static const struct drm_gpuvm_ops gpuvm_ops = {
1431 .op_alloc = xe_vm_op_alloc,
1432 .vm_bo_validate = xe_gpuvm_validate,
1433 .vm_free = xe_vm_free,
1434 };
1435
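/*
 * Encode a PAT index into the PAT bits of a page-directory entry; only
 * the two low PAT bits exist at this level.
 */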
static u64 pde_encode_pat_index(u16 pat_index)
1437 {
1438 u64 pte = 0;
1439
1440 if (pat_index & BIT(0))
1441 pte |= XE_PPGTT_PTE_PAT0;
1442
1443 if (pat_index & BIT(1))
1444 pte |= XE_PPGTT_PTE_PAT1;
1445
1446 return pte;
1447 }
1448
static u64 pte_encode_pat_index(u16 pat_index, u32 pt_level)
1450 {
1451 u64 pte = 0;
1452
1453 if (pat_index & BIT(0))
1454 pte |= XE_PPGTT_PTE_PAT0;
1455
1456 if (pat_index & BIT(1))
1457 pte |= XE_PPGTT_PTE_PAT1;
1458
1459 if (pat_index & BIT(2)) {
1460 if (pt_level)
1461 pte |= XE_PPGTT_PDE_PDPE_PAT2;
1462 else
1463 pte |= XE_PPGTT_PTE_PAT2;
1464 }
1465
1466 if (pat_index & BIT(3))
1467 pte |= XELPG_PPGTT_PTE_PAT3;
1468
1469 if (pat_index & (BIT(4)))
1470 pte |= XE2_PPGTT_PTE_PAT4;
1471
1472 return pte;
1473 }
1474
static u64 pte_encode_ps(u32 pt_level)
1476 {
1477 XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
1478
1479 if (pt_level == 1)
1480 return XE_PDE_PS_2M;
1481 else if (pt_level == 2)
1482 return XE_PDPE_PS_1G;
1483
1484 return 0;
1485 }
1486
static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
1488 const u16 pat_index)
1489 {
1490 u64 pde;
1491
1492 pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1493 pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1494 pde |= pde_encode_pat_index(pat_index);
1495
1496 return pde;
1497 }
1498
static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1500 u16 pat_index, u32 pt_level)
1501 {
1502 u64 pte;
1503
1504 pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1505 pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1506 pte |= pte_encode_pat_index(pat_index, pt_level);
1507 pte |= pte_encode_ps(pt_level);
1508
1509 if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1510 pte |= XE_PPGTT_PTE_DM;
1511
1512 return pte;
1513 }
1514
static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1516 u16 pat_index, u32 pt_level)
1517 {
1518 pte |= XE_PAGE_PRESENT;
1519
1520 if (likely(!xe_vma_read_only(vma)))
1521 pte |= XE_PAGE_RW;
1522
1523 pte |= pte_encode_pat_index(pat_index, pt_level);
1524 pte |= pte_encode_ps(pt_level);
1525
1526 if (unlikely(xe_vma_is_null(vma)))
1527 pte |= XE_PTE_NULL;
1528
1529 return pte;
1530 }
1531
static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1533 u16 pat_index,
1534 u32 pt_level, bool devmem, u64 flags)
1535 {
1536 u64 pte;
1537
1538 /* Avoid passing random bits directly as flags */
1539 xe_assert(xe, !(flags & ~XE_PTE_PS64));
1540
1541 pte = addr;
1542 pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1543 pte |= pte_encode_pat_index(pat_index, pt_level);
1544 pte |= pte_encode_ps(pt_level);
1545
1546 if (devmem)
1547 pte |= XE_PPGTT_PTE_DM;
1548
1549 pte |= flags;
1550
1551 return pte;
1552 }
1553
1554 static const struct xe_pt_ops xelp_pt_ops = {
1555 .pte_encode_bo = xelp_pte_encode_bo,
1556 .pte_encode_vma = xelp_pte_encode_vma,
1557 .pte_encode_addr = xelp_pte_encode_addr,
1558 .pde_encode_bo = xelp_pde_encode_bo,
1559 };
1560
1561 static void vm_destroy_work_func(struct work_struct *w);
1562
1563 /**
1564 * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
1565 * given tile and vm.
1566 * @xe: xe device.
1567 * @tile: tile to set up for.
1568 * @vm: vm to set up for.
1569 *
1570 * Sets up a pagetable tree with one page-table per level and a single
1571 * leaf PTE. All pagetable entries point to the single page-table or,
1572 * for MAX_HUGEPTE_LEVEL, a NULL huge PTE returning 0 on read and
1573 * writes become NOPs.
1574 *
1575 * Return: 0 on success, negative error code on error.
1576 */
static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1578 struct xe_vm *vm)
1579 {
1580 u8 id = tile->id;
1581 int i;
1582
1583 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
1584 vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
1585 if (IS_ERR(vm->scratch_pt[id][i]))
1586 return PTR_ERR(vm->scratch_pt[id][i]);
1587
1588 xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
1589 }
1590
1591 return 0;
1592 }
1593 ALLOW_ERROR_INJECTION(xe_vm_create_scratch, ERRNO);
1594
static void xe_vm_free_scratch(struct xe_vm *vm)
1596 {
1597 struct xe_tile *tile;
1598 u8 id;
1599
1600 if (!xe_vm_has_scratch(vm))
1601 return;
1602
1603 for_each_tile(tile, vm->xe, id) {
1604 u32 i;
1605
1606 if (!vm->pt_root[id])
1607 continue;
1608
1609 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
1610 if (vm->scratch_pt[id][i])
1611 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
1612 }
1613 }
1614
struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1616 {
1617 struct drm_gem_object *vm_resv_obj;
1618 struct xe_vm *vm;
1619 int err, number_tiles = 0;
1620 struct xe_tile *tile;
1621 u8 id;
1622
1623 /*
1624 * Since the GSCCS is not user-accessible, we don't expect a GSC VM to
1625 * ever be in faulting mode.
1626 */
1627 xe_assert(xe, !((flags & XE_VM_FLAG_GSC) && (flags & XE_VM_FLAG_FAULT_MODE)));
1628
1629 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1630 if (!vm)
1631 return ERR_PTR(-ENOMEM);
1632
1633 vm->xe = xe;
1634
1635 vm->size = 1ull << xe->info.va_bits;
1636
1637 vm->flags = flags;
1638
1639 /**
1640 * GSC VMs are kernel-owned, only used for PXP ops and can sometimes be
1641 * manipulated under the PXP mutex. However, the PXP mutex can be taken
1642 * under a user-VM lock when the PXP session is started at exec_queue
1643 * creation time. Those are different VMs and therefore there is no risk
1644 * of deadlock, but we need to tell lockdep that this is the case or it
1645 * will print a warning.
1646 */
1647 if (flags & XE_VM_FLAG_GSC) {
1648 static struct lock_class_key gsc_vm_key;
1649
1650 __init_rwsem(&vm->lock, "gsc_vm", &gsc_vm_key);
1651 } else {
1652 init_rwsem(&vm->lock);
1653 }
1654 mutex_init(&vm->snap_mutex);
1655
1656 INIT_LIST_HEAD(&vm->rebind_list);
1657
1658 INIT_LIST_HEAD(&vm->userptr.repin_list);
1659 INIT_LIST_HEAD(&vm->userptr.invalidated);
1660 init_rwsem(&vm->userptr.notifier_lock);
1661 spin_lock_init(&vm->userptr.invalidated_lock);
1662
1663 ttm_lru_bulk_move_init(&vm->lru_bulk_move);
1664
1665 INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1666
1667 INIT_LIST_HEAD(&vm->preempt.exec_queues);
1668 vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
1669
1670 for_each_tile(tile, xe, id)
1671 xe_range_fence_tree_init(&vm->rftree[id]);
1672
1673 vm->pt_ops = &xelp_pt_ops;
1674
1675 /*
1676 * Long-running workloads are not protected by the scheduler references.
1677 * By design, run_job for long-running workloads returns NULL and the
1678 * scheduler drops all the references of it, hence protecting the VM
1679 * for this case is necessary.
1680 */
1681 if (flags & XE_VM_FLAG_LR_MODE)
1682 xe_pm_runtime_get_noresume(xe);
1683
1684 vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1685 if (!vm_resv_obj) {
1686 err = -ENOMEM;
1687 goto err_no_resv;
1688 }
1689
1690 drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1691 vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
1692
1693 drm_gem_object_put(vm_resv_obj);
1694
1695 err = xe_vm_lock(vm, true);
1696 if (err)
1697 goto err_close;
1698
1699 if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1700 vm->flags |= XE_VM_FLAG_64K;
1701
1702 for_each_tile(tile, xe, id) {
1703 if (flags & XE_VM_FLAG_MIGRATION &&
1704 tile->id != XE_VM_FLAG_TILE_ID(flags))
1705 continue;
1706
1707 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1708 if (IS_ERR(vm->pt_root[id])) {
1709 err = PTR_ERR(vm->pt_root[id]);
1710 vm->pt_root[id] = NULL;
1711 goto err_unlock_close;
1712 }
1713 }
1714
1715 if (xe_vm_has_scratch(vm)) {
1716 for_each_tile(tile, xe, id) {
1717 if (!vm->pt_root[id])
1718 continue;
1719
1720 err = xe_vm_create_scratch(xe, tile, vm);
1721 if (err)
1722 goto err_unlock_close;
1723 }
1724 vm->batch_invalidate_tlb = true;
1725 }
1726
1727 if (vm->flags & XE_VM_FLAG_LR_MODE) {
1728 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1729 vm->batch_invalidate_tlb = false;
1730 }
1731
1732 /* Fill pt_root after allocating scratch tables */
1733 for_each_tile(tile, xe, id) {
1734 if (!vm->pt_root[id])
1735 continue;
1736
1737 xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1738 }
1739 xe_vm_unlock(vm);
1740
1741 /* Kernel migration VM shouldn't have a circular loop.. */
1742 if (!(flags & XE_VM_FLAG_MIGRATION)) {
1743 for_each_tile(tile, xe, id) {
1744 struct xe_exec_queue *q;
1745 u32 create_flags = EXEC_QUEUE_FLAG_VM;
1746
1747 if (!vm->pt_root[id])
1748 continue;
1749
1750 q = xe_exec_queue_create_bind(xe, tile, create_flags, 0);
1751 if (IS_ERR(q)) {
1752 err = PTR_ERR(q);
1753 goto err_close;
1754 }
1755 vm->q[id] = q;
1756 number_tiles++;
1757 }
1758 }
1759
1760 if (flags & XE_VM_FLAG_FAULT_MODE) {
1761 err = xe_svm_init(vm);
1762 if (err)
1763 goto err_close;
1764 }
1765
1766 if (number_tiles > 1)
1767 vm->composite_fence_ctx = dma_fence_context_alloc(1);
1768
1769 trace_xe_vm_create(vm);
1770
1771 return vm;
1772
1773 err_unlock_close:
1774 xe_vm_unlock(vm);
1775 err_close:
1776 xe_vm_close_and_put(vm);
1777 return ERR_PTR(err);
1778
1779 err_no_resv:
1780 mutex_destroy(&vm->snap_mutex);
1781 for_each_tile(tile, xe, id)
1782 xe_range_fence_tree_fini(&vm->rftree[id]);
1783 ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
1784 kfree(vm);
1785 if (flags & XE_VM_FLAG_LR_MODE)
1786 xe_pm_runtime_put(xe);
1787 return ERR_PTR(err);
1788 }
1789
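/*
 * Mark the VM as closed by zeroing its size, then, unless this is a
 * migration VM, wait for pending binds and (if the device is still
 * present) clear the page-table roots and invalidate the TLBs.
 */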
static void xe_vm_close(struct xe_vm *vm)
1791 {
1792 struct xe_device *xe = vm->xe;
1793 bool bound;
1794 int idx;
1795
1796 bound = drm_dev_enter(&xe->drm, &idx);
1797
1798 down_write(&vm->lock);
1799 if (xe_vm_in_fault_mode(vm))
1800 xe_svm_notifier_lock(vm);
1801
1802 vm->size = 0;
1803
1804 if (!((vm->flags & XE_VM_FLAG_MIGRATION))) {
1805 struct xe_tile *tile;
1806 struct xe_gt *gt;
1807 u8 id;
1808
1809 /* Wait for pending binds */
1810 dma_resv_wait_timeout(xe_vm_resv(vm),
1811 DMA_RESV_USAGE_BOOKKEEP,
1812 false, MAX_SCHEDULE_TIMEOUT);
1813
1814 if (bound) {
1815 for_each_tile(tile, xe, id)
1816 if (vm->pt_root[id])
1817 xe_pt_clear(xe, vm->pt_root[id]);
1818
1819 for_each_gt(gt, xe, id)
1820 xe_gt_tlb_invalidation_vm(gt, vm);
1821 }
1822 }
1823
1824 if (xe_vm_in_fault_mode(vm))
1825 xe_svm_notifier_unlock(vm);
1826 up_write(&vm->lock);
1827
1828 if (bound)
1829 drm_dev_exit(idx);
1830 }
1831
void xe_vm_close_and_put(struct xe_vm *vm)
1833 {
1834 LIST_HEAD(contested);
1835 struct xe_device *xe = vm->xe;
1836 struct xe_tile *tile;
1837 struct xe_vma *vma, *next_vma;
1838 struct drm_gpuva *gpuva, *next;
1839 u8 id;
1840
1841 xe_assert(xe, !vm->preempt.num_exec_queues);
1842
1843 xe_vm_close(vm);
1844 if (xe_vm_in_preempt_fence_mode(vm))
1845 flush_work(&vm->preempt.rebind_work);
1846 if (xe_vm_in_fault_mode(vm))
1847 xe_svm_close(vm);
1848
1849 down_write(&vm->lock);
1850 for_each_tile(tile, xe, id) {
1851 if (vm->q[id])
1852 xe_exec_queue_last_fence_put(vm->q[id], vm);
1853 }
1854 up_write(&vm->lock);
1855
1856 for_each_tile(tile, xe, id) {
1857 if (vm->q[id]) {
1858 xe_exec_queue_kill(vm->q[id]);
1859 xe_exec_queue_put(vm->q[id]);
1860 vm->q[id] = NULL;
1861 }
1862 }
1863
1864 down_write(&vm->lock);
1865 xe_vm_lock(vm, false);
1866 drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1867 vma = gpuva_to_vma(gpuva);
1868
1869 if (xe_vma_has_no_bo(vma)) {
1870 down_read(&vm->userptr.notifier_lock);
1871 vma->gpuva.flags |= XE_VMA_DESTROYED;
1872 up_read(&vm->userptr.notifier_lock);
1873 }
1874
1875 xe_vm_remove_vma(vm, vma);
1876
1877 /* easy case, remove from VMA? */
1878 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1879 list_del_init(&vma->combined_links.rebind);
1880 xe_vma_destroy(vma, NULL);
1881 continue;
1882 }
1883
1884 list_move_tail(&vma->combined_links.destroy, &contested);
1885 vma->gpuva.flags |= XE_VMA_DESTROYED;
1886 }
1887
1888 /*
1889 * All vm operations will add shared fences to resv.
1890 * The only exception is eviction for a shared object,
1891 * but even so, the unbind when evicted would still
1892 * install a fence to resv. Hence it's safe to
1893 * destroy the pagetables immediately.
1894 */
1895 xe_vm_free_scratch(vm);
1896
1897 for_each_tile(tile, xe, id) {
1898 if (vm->pt_root[id]) {
1899 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1900 vm->pt_root[id] = NULL;
1901 }
1902 }
1903 xe_vm_unlock(vm);
1904
1905 /*
1906 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1907 * Since we hold a refcount to the bo, we can remove and free
1908 * the members safely without locking.
1909 */
1910 list_for_each_entry_safe(vma, next_vma, &contested,
1911 combined_links.destroy) {
1912 list_del_init(&vma->combined_links.destroy);
1913 xe_vma_destroy_unlocked(vma);
1914 }
1915
1916 if (xe_vm_in_fault_mode(vm))
1917 xe_svm_fini(vm);
1918
1919 up_write(&vm->lock);
1920
1921 down_write(&xe->usm.lock);
1922 if (vm->usm.asid) {
1923 void *lookup;
1924
1925 xe_assert(xe, xe->info.has_asid);
1926 xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
1927
1928 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1929 xe_assert(xe, lookup == vm);
1930 }
1931 up_write(&xe->usm.lock);
1932
1933 for_each_tile(tile, xe, id)
1934 xe_range_fence_tree_fini(&vm->rftree[id]);
1935
1936 xe_vm_put(vm);
1937 }
1938
static void vm_destroy_work_func(struct work_struct *w)
1940 {
1941 struct xe_vm *vm =
1942 container_of(w, struct xe_vm, destroy_work);
1943 struct xe_device *xe = vm->xe;
1944 struct xe_tile *tile;
1945 u8 id;
1946
1947 /* xe_vm_close_and_put was not called? */
1948 xe_assert(xe, !vm->size);
1949
1950 if (xe_vm_in_preempt_fence_mode(vm))
1951 flush_work(&vm->preempt.rebind_work);
1952
1953 mutex_destroy(&vm->snap_mutex);
1954
1955 if (vm->flags & XE_VM_FLAG_LR_MODE)
1956 xe_pm_runtime_put(xe);
1957
1958 for_each_tile(tile, xe, id)
1959 XE_WARN_ON(vm->pt_root[id]);
1960
1961 trace_xe_vm_free(vm);
1962
1963 ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
1964
1965 if (vm->xef)
1966 xe_file_put(vm->xef);
1967
1968 kfree(vm);
1969 }
1970
1971 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1972 {
1973 struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1974
1975 /* To destroy the VM we need to be able to sleep */
1976 queue_work(system_unbound_wq, &vm->destroy_work);
1977 }
1978
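/**
 * xe_vm_lookup() - Look up a VM by its per-file id
 * @xef: xe file private from which the id was allocated
 * @id: The VM id
 *
 * Takes a reference on the VM if found; the caller is expected to drop it
 * with xe_vm_put() when done.
 *
 * Return: Pointer to the VM on success, NULL if no VM with @id exists.
 */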
1979 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1980 {
1981 struct xe_vm *vm;
1982
1983 mutex_lock(&xef->vm.lock);
1984 vm = xa_load(&xef->vm.xa, id);
1985 if (vm)
1986 xe_vm_get(vm);
1987 mutex_unlock(&xef->vm.lock);
1988
1989 return vm;
1990 }
1991
1992 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1993 {
1994 return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
1995 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
1996 }
1997
1998 static struct xe_exec_queue *
1999 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
2000 {
2001 return q ? q : vm->q[0];
2002 }
2003
2004 static struct xe_user_fence *
2005 find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
2006 {
2007 unsigned int i;
2008
2009 for (i = 0; i < num_syncs; i++) {
2010 struct xe_sync_entry *e = &syncs[i];
2011
2012 if (xe_sync_is_ufence(e))
2013 return xe_sync_ufence_get(e);
2014 }
2015
2016 return NULL;
2017 }
2018
2019 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
2020 DRM_XE_VM_CREATE_FLAG_LR_MODE | \
2021 DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
2022
2023 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
2024 struct drm_file *file)
2025 {
2026 struct xe_device *xe = to_xe_device(dev);
2027 struct xe_file *xef = to_xe_file(file);
2028 struct drm_xe_vm_create *args = data;
2029 struct xe_tile *tile;
2030 struct xe_vm *vm;
2031 u32 id, asid;
2032 int err;
2033 u32 flags = 0;
2034
2035 if (XE_IOCTL_DBG(xe, args->extensions))
2036 return -EINVAL;
2037
2038 if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
2039 args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
2040
2041 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
2042 !xe->info.has_usm))
2043 return -EINVAL;
2044
2045 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2046 return -EINVAL;
2047
2048 if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
2049 return -EINVAL;
2050
2051 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
2052 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
2053 return -EINVAL;
2054
2055 if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
2056 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
2057 return -EINVAL;
2058
2059 if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
2060 flags |= XE_VM_FLAG_SCRATCH_PAGE;
2061 if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
2062 flags |= XE_VM_FLAG_LR_MODE;
2063 if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
2064 flags |= XE_VM_FLAG_FAULT_MODE;
2065
2066 vm = xe_vm_create(xe, flags);
2067 if (IS_ERR(vm))
2068 return PTR_ERR(vm);
2069
2070 if (xe->info.has_asid) {
2071 down_write(&xe->usm.lock);
2072 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
2073 XA_LIMIT(1, XE_MAX_ASID - 1),
2074 &xe->usm.next_asid, GFP_KERNEL);
2075 up_write(&xe->usm.lock);
2076 if (err < 0)
2077 goto err_close_and_put;
2078
2079 vm->usm.asid = asid;
2080 }
2081
2082 vm->xef = xe_file_get(xef);
2083
2084 /* Record the VM pagetable BOs against the client for memory accounting */
2085 for_each_tile(tile, xe, id)
2086 if (vm->pt_root[id])
2087 xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
2088
2089 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
2090 /* Warning: Security issue - never enable by default */
2091 args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
2092 #endif
2093
2094 /* user id alloc must always be last in ioctl to prevent UAF */
2095 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
2096 if (err)
2097 goto err_close_and_put;
2098
2099 args->vm_id = id;
2100
2101 return 0;
2102
2103 err_close_and_put:
2104 xe_vm_close_and_put(vm);
2105
2106 return err;
2107 }
2108
2109 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
2110 struct drm_file *file)
2111 {
2112 struct xe_device *xe = to_xe_device(dev);
2113 struct xe_file *xef = to_xe_file(file);
2114 struct drm_xe_vm_destroy *args = data;
2115 struct xe_vm *vm;
2116 int err = 0;
2117
2118 if (XE_IOCTL_DBG(xe, args->pad) ||
2119 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2120 return -EINVAL;
2121
2122 mutex_lock(&xef->vm.lock);
2123 vm = xa_load(&xef->vm.xa, args->vm_id);
2124 if (XE_IOCTL_DBG(xe, !vm))
2125 err = -ENOENT;
2126 else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
2127 err = -EBUSY;
2128 else
2129 xa_erase(&xef->vm.xa, args->vm_id);
2130 mutex_unlock(&xef->vm.lock);
2131
2132 if (!err)
2133 xe_vm_close_and_put(vm);
2134
2135 return err;
2136 }
2137
2138 static const u32 region_to_mem_type[] = {
2139 XE_PL_TT,
2140 XE_PL_VRAM0,
2141 XE_PL_VRAM1,
2142 };
2143
2144 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
2145 bool post_commit)
2146 {
2147 down_read(&vm->userptr.notifier_lock);
2148 vma->gpuva.flags |= XE_VMA_DESTROYED;
2149 up_read(&vm->userptr.notifier_lock);
2150 if (post_commit)
2151 xe_vm_remove_vma(vm, vma);
2152 }
2153
2154 #undef ULL
2155 #define ULL unsigned long long
2156
2157 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2158 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2159 {
2160 struct xe_vma *vma;
2161
2162 switch (op->op) {
2163 case DRM_GPUVA_OP_MAP:
2164 vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2165 (ULL)op->map.va.addr, (ULL)op->map.va.range);
2166 break;
2167 case DRM_GPUVA_OP_REMAP:
2168 vma = gpuva_to_vma(op->remap.unmap->va);
2169 vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2170 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2171 op->remap.unmap->keep ? 1 : 0);
2172 if (op->remap.prev)
2173 vm_dbg(&xe->drm,
2174 "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2175 (ULL)op->remap.prev->va.addr,
2176 (ULL)op->remap.prev->va.range);
2177 if (op->remap.next)
2178 vm_dbg(&xe->drm,
2179 "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2180 (ULL)op->remap.next->va.addr,
2181 (ULL)op->remap.next->va.range);
2182 break;
2183 case DRM_GPUVA_OP_UNMAP:
2184 vma = gpuva_to_vma(op->unmap.va);
2185 vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2186 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2187 op->unmap.keep ? 1 : 0);
2188 break;
2189 case DRM_GPUVA_OP_PREFETCH:
2190 vma = gpuva_to_vma(op->prefetch.va);
2191 vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2192 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2193 break;
2194 default:
2195 drm_warn(&xe->drm, "NOT POSSIBLE");
2196 }
2197 }
2198 #else
2199 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2200 {
2201 }
2202 #endif
2203
2204 /*
2205 * Create the operations list from the IOCTL arguments and set up the operation
2206 * fields so that the parse and commit steps are decoupled from the IOCTL arguments. This step can fail.
2207 */
2208 static struct drm_gpuva_ops *
2209 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2210 u64 bo_offset_or_userptr, u64 addr, u64 range,
2211 u32 operation, u32 flags,
2212 u32 prefetch_region, u16 pat_index)
2213 {
2214 struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2215 struct drm_gpuva_ops *ops;
2216 struct drm_gpuva_op *__op;
2217 struct drm_gpuvm_bo *vm_bo;
2218 int err;
2219
2220 lockdep_assert_held_write(&vm->lock);
2221
2222 vm_dbg(&vm->xe->drm,
2223 "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2224 operation, (ULL)addr, (ULL)range,
2225 (ULL)bo_offset_or_userptr);
2226
2227 switch (operation) {
2228 case DRM_XE_VM_BIND_OP_MAP:
2229 case DRM_XE_VM_BIND_OP_MAP_USERPTR:
2230 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2231 obj, bo_offset_or_userptr);
2232 break;
2233 case DRM_XE_VM_BIND_OP_UNMAP:
2234 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2235 break;
2236 case DRM_XE_VM_BIND_OP_PREFETCH:
2237 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2238 break;
2239 case DRM_XE_VM_BIND_OP_UNMAP_ALL:
2240 xe_assert(vm->xe, bo);
2241
2242 err = xe_bo_lock(bo, true);
2243 if (err)
2244 return ERR_PTR(err);
2245
2246 vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
2247 if (IS_ERR(vm_bo)) {
2248 xe_bo_unlock(bo);
2249 return ERR_CAST(vm_bo);
2250 }
2251
2252 ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2253 drm_gpuvm_bo_put(vm_bo);
2254 xe_bo_unlock(bo);
2255 break;
2256 default:
2257 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2258 ops = ERR_PTR(-EINVAL);
2259 }
2260 if (IS_ERR(ops))
2261 return ops;
2262
2263 drm_gpuva_for_each_op(__op, ops) {
2264 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2265
2266 if (__op->op == DRM_GPUVA_OP_MAP) {
2267 op->map.immediate =
2268 flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
2269 op->map.read_only =
2270 flags & DRM_XE_VM_BIND_FLAG_READONLY;
2271 op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2272 op->map.is_cpu_addr_mirror = flags &
2273 DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
2274 op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
2275 op->map.pat_index = pat_index;
2276 } else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
2277 op->prefetch.region = prefetch_region;
2278 }
2279
2280 print_op(vm->xe, __op);
2281 }
2282
2283 return ops;
2284 }
2285 ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
2286
2287 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2288 u16 pat_index, unsigned int flags)
2289 {
2290 struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2291 struct drm_exec exec;
2292 struct xe_vma *vma;
2293 int err = 0;
2294
2295 lockdep_assert_held_write(&vm->lock);
2296
2297 if (bo) {
2298 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2299 drm_exec_until_all_locked(&exec) {
2300 err = 0;
2301 if (!bo->vm) {
2302 err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
2303 drm_exec_retry_on_contention(&exec);
2304 }
2305 if (!err) {
2306 err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2307 drm_exec_retry_on_contention(&exec);
2308 }
2309 if (err) {
2310 drm_exec_fini(&exec);
2311 return ERR_PTR(err);
2312 }
2313 }
2314 }
2315 vma = xe_vma_create(vm, bo, op->gem.offset,
2316 op->va.addr, op->va.addr +
2317 op->va.range - 1, pat_index, flags);
2318 if (IS_ERR(vma))
2319 goto err_unlock;
2320
2321 if (xe_vma_is_userptr(vma))
2322 err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2323 else if (!xe_vma_has_no_bo(vma) && !bo->vm)
2324 err = add_preempt_fences(vm, bo);
2325
2326 err_unlock:
2327 if (bo)
2328 drm_exec_fini(&exec);
2329
2330 if (err) {
2331 prep_vma_destroy(vm, vma, false);
2332 xe_vma_destroy_unlocked(vma);
2333 vma = ERR_PTR(err);
2334 }
2335
2336 return vma;
2337 }
2338
2339 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2340 {
2341 if (vma->gpuva.flags & XE_VMA_PTE_1G)
2342 return SZ_1G;
2343 else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
2344 return SZ_2M;
2345 else if (vma->gpuva.flags & XE_VMA_PTE_64K)
2346 return SZ_64K;
2347 else if (vma->gpuva.flags & XE_VMA_PTE_4K)
2348 return SZ_4K;
2349
2350 return SZ_1G; /* Uninitialized, use max size */
2351 }
2352
2353 static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2354 {
2355 switch (size) {
2356 case SZ_1G:
2357 vma->gpuva.flags |= XE_VMA_PTE_1G;
2358 break;
2359 case SZ_2M:
2360 vma->gpuva.flags |= XE_VMA_PTE_2M;
2361 break;
2362 case SZ_64K:
2363 vma->gpuva.flags |= XE_VMA_PTE_64K;
2364 break;
2365 case SZ_4K:
2366 vma->gpuva.flags |= XE_VMA_PTE_4K;
2367 break;
2368 }
2369 }
2370
2371 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2372 {
2373 int err = 0;
2374
2375 lockdep_assert_held_write(&vm->lock);
2376
2377 switch (op->base.op) {
2378 case DRM_GPUVA_OP_MAP:
2379 err |= xe_vm_insert_vma(vm, op->map.vma);
2380 if (!err)
2381 op->flags |= XE_VMA_OP_COMMITTED;
2382 break;
2383 case DRM_GPUVA_OP_REMAP:
2384 {
2385 u8 tile_present =
2386 gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2387
2388 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2389 true);
2390 op->flags |= XE_VMA_OP_COMMITTED;
2391
2392 if (op->remap.prev) {
2393 err |= xe_vm_insert_vma(vm, op->remap.prev);
2394 if (!err)
2395 op->flags |= XE_VMA_OP_PREV_COMMITTED;
2396 if (!err && op->remap.skip_prev) {
2397 op->remap.prev->tile_present =
2398 tile_present;
2399 op->remap.prev = NULL;
2400 }
2401 }
2402 if (op->remap.next) {
2403 err |= xe_vm_insert_vma(vm, op->remap.next);
2404 if (!err)
2405 op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2406 if (!err && op->remap.skip_next) {
2407 op->remap.next->tile_present =
2408 tile_present;
2409 op->remap.next = NULL;
2410 }
2411 }
2412
2413 /* Adjust for partial unbind after removing VMA from VM */
2414 if (!err) {
2415 op->base.remap.unmap->va->va.addr = op->remap.start;
2416 op->base.remap.unmap->va->va.range = op->remap.range;
2417 }
2418 break;
2419 }
2420 case DRM_GPUVA_OP_UNMAP:
2421 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2422 op->flags |= XE_VMA_OP_COMMITTED;
2423 break;
2424 case DRM_GPUVA_OP_PREFETCH:
2425 op->flags |= XE_VMA_OP_COMMITTED;
2426 break;
2427 default:
2428 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2429 }
2430
2431 return err;
2432 }
2433
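/*
 * Parse a list of GPUVA operations: create new VMAs for MAP and REMAP ops,
 * record which tiles need page-table updates and commit each operation to the
 * GPUVA tree. On failure the caller is expected to unwind with
 * vm_bind_ioctl_ops_unwind().
 */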
2434 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
2435 struct xe_vma_ops *vops)
2436 {
2437 struct xe_device *xe = vm->xe;
2438 struct drm_gpuva_op *__op;
2439 struct xe_tile *tile;
2440 u8 id, tile_mask = 0;
2441 int err = 0;
2442
2443 lockdep_assert_held_write(&vm->lock);
2444
2445 for_each_tile(tile, vm->xe, id)
2446 tile_mask |= 0x1 << id;
2447
2448 drm_gpuva_for_each_op(__op, ops) {
2449 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2450 struct xe_vma *vma;
2451 unsigned int flags = 0;
2452
2453 INIT_LIST_HEAD(&op->link);
2454 list_add_tail(&op->link, &vops->list);
2455 op->tile_mask = tile_mask;
2456
2457 switch (op->base.op) {
2458 case DRM_GPUVA_OP_MAP:
2459 {
2460 flags |= op->map.read_only ?
2461 VMA_CREATE_FLAG_READ_ONLY : 0;
2462 flags |= op->map.is_null ?
2463 VMA_CREATE_FLAG_IS_NULL : 0;
2464 flags |= op->map.dumpable ?
2465 VMA_CREATE_FLAG_DUMPABLE : 0;
2466 flags |= op->map.is_cpu_addr_mirror ?
2467 VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
2468
2469 vma = new_vma(vm, &op->base.map, op->map.pat_index,
2470 flags);
2471 if (IS_ERR(vma))
2472 return PTR_ERR(vma);
2473
2474 op->map.vma = vma;
2475 if ((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
2476 !op->map.is_cpu_addr_mirror)
2477 xe_vma_ops_incr_pt_update_ops(vops,
2478 op->tile_mask);
2479 break;
2480 }
2481 case DRM_GPUVA_OP_REMAP:
2482 {
2483 struct xe_vma *old =
2484 gpuva_to_vma(op->base.remap.unmap->va);
2485 bool skip = xe_vma_is_cpu_addr_mirror(old);
2486 u64 start = xe_vma_start(old), end = xe_vma_end(old);
2487
2488 if (op->base.remap.prev)
2489 start = op->base.remap.prev->va.addr +
2490 op->base.remap.prev->va.range;
2491 if (op->base.remap.next)
2492 end = op->base.remap.next->va.addr;
2493
2494 if (xe_vma_is_cpu_addr_mirror(old) &&
2495 xe_svm_has_mapping(vm, start, end))
2496 return -EBUSY;
2497
2498 op->remap.start = xe_vma_start(old);
2499 op->remap.range = xe_vma_size(old);
2500
2501 flags |= op->base.remap.unmap->va->flags &
2502 XE_VMA_READ_ONLY ?
2503 VMA_CREATE_FLAG_READ_ONLY : 0;
2504 flags |= op->base.remap.unmap->va->flags &
2505 DRM_GPUVA_SPARSE ?
2506 VMA_CREATE_FLAG_IS_NULL : 0;
2507 flags |= op->base.remap.unmap->va->flags &
2508 XE_VMA_DUMPABLE ?
2509 VMA_CREATE_FLAG_DUMPABLE : 0;
2510 flags |= xe_vma_is_cpu_addr_mirror(old) ?
2511 VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
2512
2513 if (op->base.remap.prev) {
2514 vma = new_vma(vm, op->base.remap.prev,
2515 old->pat_index, flags);
2516 if (IS_ERR(vma))
2517 return PTR_ERR(vma);
2518
2519 op->remap.prev = vma;
2520
2521 /*
2522 * Userptr creates a new SG mapping so
2523 * we must also rebind.
2524 */
2525 op->remap.skip_prev = skip ||
2526 (!xe_vma_is_userptr(old) &&
2527 IS_ALIGNED(xe_vma_end(vma),
2528 xe_vma_max_pte_size(old)));
2529 if (op->remap.skip_prev) {
2530 xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2531 op->remap.range -=
2532 xe_vma_end(vma) -
2533 xe_vma_start(old);
2534 op->remap.start = xe_vma_end(vma);
2535 vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
2536 (ULL)op->remap.start,
2537 (ULL)op->remap.range);
2538 } else {
2539 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2540 }
2541 }
2542
2543 if (op->base.remap.next) {
2544 vma = new_vma(vm, op->base.remap.next,
2545 old->pat_index, flags);
2546 if (IS_ERR(vma))
2547 return PTR_ERR(vma);
2548
2549 op->remap.next = vma;
2550
2551 /*
2552 * Userptr creates a new SG mapping so
2553 * we must also rebind.
2554 */
2555 op->remap.skip_next = skip ||
2556 (!xe_vma_is_userptr(old) &&
2557 IS_ALIGNED(xe_vma_start(vma),
2558 xe_vma_max_pte_size(old)));
2559 if (op->remap.skip_next) {
2560 xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2561 op->remap.range -=
2562 xe_vma_end(old) -
2563 xe_vma_start(vma);
2564 vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
2565 (ULL)op->remap.start,
2566 (ULL)op->remap.range);
2567 } else {
2568 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2569 }
2570 }
2571 if (!skip)
2572 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2573 break;
2574 }
2575 case DRM_GPUVA_OP_UNMAP:
2576 vma = gpuva_to_vma(op->base.unmap.va);
2577
2578 if (xe_vma_is_cpu_addr_mirror(vma) &&
2579 xe_svm_has_mapping(vm, xe_vma_start(vma),
2580 xe_vma_end(vma)))
2581 return -EBUSY;
2582
2583 if (!xe_vma_is_cpu_addr_mirror(vma))
2584 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2585 break;
2586 case DRM_GPUVA_OP_PREFETCH:
2587 vma = gpuva_to_vma(op->base.prefetch.va);
2588
2589 if (xe_vma_is_userptr(vma)) {
2590 err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2591 if (err)
2592 return err;
2593 }
2594
2595 if (!xe_vma_is_cpu_addr_mirror(vma))
2596 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2597 break;
2598 default:
2599 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2600 }
2601
2602 err = xe_vma_op_commit(vm, op);
2603 if (err)
2604 return err;
2605 }
2606
2607 return 0;
2608 }
2609
2610 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2611 bool post_commit, bool prev_post_commit,
2612 bool next_post_commit)
2613 {
2614 lockdep_assert_held_write(&vm->lock);
2615
2616 switch (op->base.op) {
2617 case DRM_GPUVA_OP_MAP:
2618 if (op->map.vma) {
2619 prep_vma_destroy(vm, op->map.vma, post_commit);
2620 xe_vma_destroy_unlocked(op->map.vma);
2621 }
2622 break;
2623 case DRM_GPUVA_OP_UNMAP:
2624 {
2625 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2626
2627 if (vma) {
2628 down_read(&vm->userptr.notifier_lock);
2629 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2630 up_read(&vm->userptr.notifier_lock);
2631 if (post_commit)
2632 xe_vm_insert_vma(vm, vma);
2633 }
2634 break;
2635 }
2636 case DRM_GPUVA_OP_REMAP:
2637 {
2638 struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2639
2640 if (op->remap.prev) {
2641 prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2642 xe_vma_destroy_unlocked(op->remap.prev);
2643 }
2644 if (op->remap.next) {
2645 prep_vma_destroy(vm, op->remap.next, next_post_commit);
2646 xe_vma_destroy_unlocked(op->remap.next);
2647 }
2648 if (vma) {
2649 down_read(&vm->userptr.notifier_lock);
2650 vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2651 up_read(&vm->userptr.notifier_lock);
2652 if (post_commit)
2653 xe_vm_insert_vma(vm, vma);
2654 }
2655 break;
2656 }
2657 case DRM_GPUVA_OP_PREFETCH:
2658 /* Nothing to do */
2659 break;
2660 default:
2661 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2662 }
2663 }
2664
2665 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2666 struct drm_gpuva_ops **ops,
2667 int num_ops_list)
2668 {
2669 int i;
2670
2671 for (i = num_ops_list - 1; i >= 0; --i) {
2672 struct drm_gpuva_ops *__ops = ops[i];
2673 struct drm_gpuva_op *__op;
2674
2675 if (!__ops)
2676 continue;
2677
2678 drm_gpuva_for_each_op_reverse(__op, __ops) {
2679 struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2680
2681 xe_vma_op_unwind(vm, op,
2682 op->flags & XE_VMA_OP_COMMITTED,
2683 op->flags & XE_VMA_OP_PREV_COMMITTED,
2684 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2685 }
2686 }
2687 }
2688
2689 static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
2690 bool validate)
2691 {
2692 struct xe_bo *bo = xe_vma_bo(vma);
2693 struct xe_vm *vm = xe_vma_vm(vma);
2694 int err = 0;
2695
2696 if (bo) {
2697 if (!bo->vm)
2698 err = drm_exec_lock_obj(exec, &bo->ttm.base);
2699 if (!err && validate)
2700 err = xe_bo_validate(bo, vm,
2701 !xe_vm_in_preempt_fence_mode(vm));
2702 }
2703
2704 return err;
2705 }
2706
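/*
 * A VMA with an attached user fence may not be unbound before that fence has
 * signalled: return -EBUSY if it hasn't, otherwise drop the VMA's reference
 * to the fence.
 */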
2707 static int check_ufence(struct xe_vma *vma)
2708 {
2709 if (vma->ufence) {
2710 struct xe_user_fence * const f = vma->ufence;
2711
2712 if (!xe_sync_ufence_get_status(f))
2713 return -EBUSY;
2714
2715 vma->ufence = NULL;
2716 xe_sync_ufence_put(f);
2717 }
2718
2719 return 0;
2720 }
2721
2722 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
2723 struct xe_vma_op *op)
2724 {
2725 int err = 0;
2726
2727 switch (op->base.op) {
2728 case DRM_GPUVA_OP_MAP:
2729 err = vma_lock_and_validate(exec, op->map.vma,
2730 !xe_vm_in_fault_mode(vm) ||
2731 op->map.immediate);
2732 break;
2733 case DRM_GPUVA_OP_REMAP:
2734 err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
2735 if (err)
2736 break;
2737
2738 err = vma_lock_and_validate(exec,
2739 gpuva_to_vma(op->base.remap.unmap->va),
2740 false);
2741 if (!err && op->remap.prev)
2742 err = vma_lock_and_validate(exec, op->remap.prev, true);
2743 if (!err && op->remap.next)
2744 err = vma_lock_and_validate(exec, op->remap.next, true);
2745 break;
2746 case DRM_GPUVA_OP_UNMAP:
2747 err = check_ufence(gpuva_to_vma(op->base.unmap.va));
2748 if (err)
2749 break;
2750
2751 err = vma_lock_and_validate(exec,
2752 gpuva_to_vma(op->base.unmap.va),
2753 false);
2754 break;
2755 case DRM_GPUVA_OP_PREFETCH:
2756 {
2757 struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
2758 u32 region = op->prefetch.region;
2759
2760 xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
2761
2762 err = vma_lock_and_validate(exec,
2763 gpuva_to_vma(op->base.prefetch.va),
2764 false);
2765 if (!err && !xe_vma_has_no_bo(vma))
2766 err = xe_bo_migrate(xe_vma_bo(vma),
2767 region_to_mem_type[region]);
2768 break;
2769 }
2770 default:
2771 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2772 }
2773
2774 return err;
2775 }
2776
2777 static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
2778 struct xe_vm *vm,
2779 struct xe_vma_ops *vops)
2780 {
2781 struct xe_vma_op *op;
2782 int err;
2783
2784 err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
2785 if (err)
2786 return err;
2787
2788 list_for_each_entry(op, &vops->list, link) {
2789 err = op_lock_and_prep(exec, vm, op);
2790 if (err)
2791 return err;
2792 }
2793
2794 #ifdef TEST_VM_OPS_ERROR
2795 if (vops->inject_error &&
2796 vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK)
2797 return -ENOSPC;
2798 #endif
2799
2800 return 0;
2801 }
2802
2803 static void op_trace(struct xe_vma_op *op)
2804 {
2805 switch (op->base.op) {
2806 case DRM_GPUVA_OP_MAP:
2807 trace_xe_vma_bind(op->map.vma);
2808 break;
2809 case DRM_GPUVA_OP_REMAP:
2810 trace_xe_vma_unbind(gpuva_to_vma(op->base.remap.unmap->va));
2811 if (op->remap.prev)
2812 trace_xe_vma_bind(op->remap.prev);
2813 if (op->remap.next)
2814 trace_xe_vma_bind(op->remap.next);
2815 break;
2816 case DRM_GPUVA_OP_UNMAP:
2817 trace_xe_vma_unbind(gpuva_to_vma(op->base.unmap.va));
2818 break;
2819 case DRM_GPUVA_OP_PREFETCH:
2820 trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va));
2821 break;
2822 case DRM_GPUVA_OP_DRIVER:
2823 break;
2824 default:
2825 XE_WARN_ON("NOT POSSIBLE");
2826 }
2827 }
2828
2829 static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops)
2830 {
2831 struct xe_vma_op *op;
2832
2833 list_for_each_entry(op, &vops->list, link)
2834 op_trace(op);
2835 }
2836
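/*
 * Assign a bind exec queue to the page-table update ops of each tile with
 * work queued, walking the supplied queue's multi-GT list or falling back to
 * the VM's default queues. Returns the number of tiles that have pending ops.
 */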
2837 static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops)
2838 {
2839 struct xe_exec_queue *q = vops->q;
2840 struct xe_tile *tile;
2841 int number_tiles = 0;
2842 u8 id;
2843
2844 for_each_tile(tile, vm->xe, id) {
2845 if (vops->pt_update_ops[id].num_ops)
2846 ++number_tiles;
2847
2848 if (vops->pt_update_ops[id].q)
2849 continue;
2850
2851 if (q) {
2852 vops->pt_update_ops[id].q = q;
2853 if (vm->pt_root[id] && !list_empty(&q->multi_gt_list))
2854 q = list_next_entry(q, multi_gt_list);
2855 } else {
2856 vops->pt_update_ops[id].q = vm->q[id];
2857 }
2858 }
2859
2860 return number_tiles;
2861 }
2862
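/*
 * Prepare and run the page-table update ops for every tile with work queued.
 * When more than one tile is involved, the per-tile fences are combined into
 * a dma_fence_array. Returns the (composite) fence on success, an error
 * pointer on failure, or -ENODATA if there was nothing to do.
 */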
2863 static struct dma_fence *ops_execute(struct xe_vm *vm,
2864 struct xe_vma_ops *vops)
2865 {
2866 struct xe_tile *tile;
2867 struct dma_fence *fence = NULL;
2868 struct dma_fence **fences = NULL;
2869 struct dma_fence_array *cf = NULL;
2870 int number_tiles = 0, current_fence = 0, err;
2871 u8 id;
2872
2873 number_tiles = vm_ops_setup_tile_args(vm, vops);
2874 if (number_tiles == 0)
2875 return ERR_PTR(-ENODATA);
2876
2877 if (number_tiles > 1) {
2878 fences = kmalloc_array(number_tiles, sizeof(*fences),
2879 GFP_KERNEL);
2880 if (!fences) {
2881 fence = ERR_PTR(-ENOMEM);
2882 goto err_trace;
2883 }
2884 }
2885
2886 for_each_tile(tile, vm->xe, id) {
2887 if (!vops->pt_update_ops[id].num_ops)
2888 continue;
2889
2890 err = xe_pt_update_ops_prepare(tile, vops);
2891 if (err) {
2892 fence = ERR_PTR(err);
2893 goto err_out;
2894 }
2895 }
2896
2897 trace_xe_vm_ops_execute(vops);
2898
2899 for_each_tile(tile, vm->xe, id) {
2900 if (!vops->pt_update_ops[id].num_ops)
2901 continue;
2902
2903 fence = xe_pt_update_ops_run(tile, vops);
2904 if (IS_ERR(fence))
2905 goto err_out;
2906
2907 if (fences)
2908 fences[current_fence++] = fence;
2909 }
2910
2911 if (fences) {
2912 cf = dma_fence_array_create(number_tiles, fences,
2913 vm->composite_fence_ctx,
2914 vm->composite_fence_seqno++,
2915 false);
2916 if (!cf) {
2917 --vm->composite_fence_seqno;
2918 fence = ERR_PTR(-ENOMEM);
2919 goto err_out;
2920 }
2921 fence = &cf->base;
2922 }
2923
2924 for_each_tile(tile, vm->xe, id) {
2925 if (!vops->pt_update_ops[id].num_ops)
2926 continue;
2927
2928 xe_pt_update_ops_fini(tile, vops);
2929 }
2930
2931 return fence;
2932
2933 err_out:
2934 for_each_tile(tile, vm->xe, id) {
2935 if (!vops->pt_update_ops[id].num_ops)
2936 continue;
2937
2938 xe_pt_update_ops_abort(tile, vops);
2939 }
2940 while (current_fence)
2941 dma_fence_put(fences[--current_fence]);
2942 kfree(fences);
2943 kfree(cf);
2944
2945 err_trace:
2946 trace_xe_vm_ops_fail(vm);
2947 return fence;
2948 }
2949
2950 static void vma_add_ufence(struct xe_vma *vma, struct xe_user_fence *ufence)
2951 {
2952 if (vma->ufence)
2953 xe_sync_ufence_put(vma->ufence);
2954 vma->ufence = __xe_sync_ufence_get(ufence);
2955 }
2956
2957 static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op,
2958 struct xe_user_fence *ufence)
2959 {
2960 switch (op->base.op) {
2961 case DRM_GPUVA_OP_MAP:
2962 vma_add_ufence(op->map.vma, ufence);
2963 break;
2964 case DRM_GPUVA_OP_REMAP:
2965 if (op->remap.prev)
2966 vma_add_ufence(op->remap.prev, ufence);
2967 if (op->remap.next)
2968 vma_add_ufence(op->remap.next, ufence);
2969 break;
2970 case DRM_GPUVA_OP_UNMAP:
2971 break;
2972 case DRM_GPUVA_OP_PREFETCH:
2973 vma_add_ufence(gpuva_to_vma(op->base.prefetch.va), ufence);
2974 break;
2975 default:
2976 drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2977 }
2978 }
2979
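/*
 * Finish a set of bind operations: attach the user fence (if any) to the
 * affected VMAs, queue destruction of unmapped VMAs against @fence, signal
 * the sync entries and record @fence as the last fence of the wait exec
 * queue.
 */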
2980 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
2981 struct dma_fence *fence)
2982 {
2983 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
2984 struct xe_user_fence *ufence;
2985 struct xe_vma_op *op;
2986 int i;
2987
2988 ufence = find_ufence_get(vops->syncs, vops->num_syncs);
2989 list_for_each_entry(op, &vops->list, link) {
2990 if (ufence)
2991 op_add_ufence(vm, op, ufence);
2992
2993 if (op->base.op == DRM_GPUVA_OP_UNMAP)
2994 xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
2995 else if (op->base.op == DRM_GPUVA_OP_REMAP)
2996 xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
2997 fence);
2998 }
2999 if (ufence)
3000 xe_sync_ufence_put(ufence);
3001 if (fence) {
3002 for (i = 0; i < vops->num_syncs; i++)
3003 xe_sync_entry_signal(vops->syncs + i, fence);
3004 xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
3005 }
3006 }
3007
3008 static struct dma_fence *vm_bind_ioctl_ops_execute(struct xe_vm *vm,
3009 struct xe_vma_ops *vops)
3010 {
3011 struct drm_exec exec;
3012 struct dma_fence *fence;
3013 int err;
3014
3015 lockdep_assert_held_write(&vm->lock);
3016
3017 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
3018 DRM_EXEC_IGNORE_DUPLICATES, 0);
3019 drm_exec_until_all_locked(&exec) {
3020 err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
3021 drm_exec_retry_on_contention(&exec);
3022 if (err) {
3023 fence = ERR_PTR(err);
3024 goto unlock;
3025 }
3026
3027 fence = ops_execute(vm, vops);
3028 if (IS_ERR(fence)) {
3029 if (PTR_ERR(fence) == -ENODATA)
3030 vm_bind_ioctl_ops_fini(vm, vops, NULL);
3031 goto unlock;
3032 }
3033
3034 vm_bind_ioctl_ops_fini(vm, vops, fence);
3035 }
3036
3037 unlock:
3038 drm_exec_fini(&exec);
3039 return fence;
3040 }
3041 ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
3042
3043 #define SUPPORTED_FLAGS_STUB \
3044 (DRM_XE_VM_BIND_FLAG_READONLY | \
3045 DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
3046 DRM_XE_VM_BIND_FLAG_NULL | \
3047 DRM_XE_VM_BIND_FLAG_DUMPABLE | \
3048 DRM_XE_VM_BIND_FLAG_CHECK_PXP | \
3049 DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
3050
3051 #ifdef TEST_VM_OPS_ERROR
3052 #define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
3053 #else
3054 #define SUPPORTED_FLAGS SUPPORTED_FLAGS_STUB
3055 #endif
3056
3057 #define XE_64K_PAGE_MASK 0xffffull
3058 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
3059
3060 static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
3061 struct drm_xe_vm_bind *args,
3062 struct drm_xe_vm_bind_op **bind_ops)
3063 {
3064 int err;
3065 int i;
3066
3067 if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
3068 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
3069 return -EINVAL;
3070
3071 if (XE_IOCTL_DBG(xe, args->extensions))
3072 return -EINVAL;
3073
3074 if (args->num_binds > 1) {
3075 u64 __user *bind_user =
3076 u64_to_user_ptr(args->vector_of_binds);
3077
3078 *bind_ops = kvmalloc_array(args->num_binds,
3079 sizeof(struct drm_xe_vm_bind_op),
3080 GFP_KERNEL | __GFP_ACCOUNT |
3081 __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
3082 if (!*bind_ops)
3083 return args->num_binds > 1 ? -ENOBUFS : -ENOMEM;
3084
3085 err = __copy_from_user(*bind_ops, bind_user,
3086 sizeof(struct drm_xe_vm_bind_op) *
3087 args->num_binds);
3088 if (XE_IOCTL_DBG(xe, err)) {
3089 err = -EFAULT;
3090 goto free_bind_ops;
3091 }
3092 } else {
3093 *bind_ops = &args->bind;
3094 }
3095
3096 for (i = 0; i < args->num_binds; ++i) {
3097 u64 range = (*bind_ops)[i].range;
3098 u64 addr = (*bind_ops)[i].addr;
3099 u32 op = (*bind_ops)[i].op;
3100 u32 flags = (*bind_ops)[i].flags;
3101 u32 obj = (*bind_ops)[i].obj;
3102 u64 obj_offset = (*bind_ops)[i].obj_offset;
3103 u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
3104 bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
3105 bool is_cpu_addr_mirror = flags &
3106 DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
3107 u16 pat_index = (*bind_ops)[i].pat_index;
3108 u16 coh_mode;
3109
3110 if (XE_IOCTL_DBG(xe, is_cpu_addr_mirror &&
3111 (!xe_vm_in_fault_mode(vm) ||
3112 !IS_ENABLED(CONFIG_DRM_GPUSVM)))) {
3113 err = -EINVAL;
3114 goto free_bind_ops;
3115 }
3116
3117 if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
3118 err = -EINVAL;
3119 goto free_bind_ops;
3120 }
3121
3122 pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
3123 (*bind_ops)[i].pat_index = pat_index;
3124 coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
3125 if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
3126 err = -EINVAL;
3127 goto free_bind_ops;
3128 }
3129
3130 if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
3131 err = -EINVAL;
3132 goto free_bind_ops;
3133 }
3134
3135 if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
3136 XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
3137 XE_IOCTL_DBG(xe, obj && (is_null || is_cpu_addr_mirror)) ||
3138 XE_IOCTL_DBG(xe, obj_offset && (is_null ||
3139 is_cpu_addr_mirror)) ||
3140 XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
3141 (is_null || is_cpu_addr_mirror)) ||
3142 XE_IOCTL_DBG(xe, !obj &&
3143 op == DRM_XE_VM_BIND_OP_MAP &&
3144 !is_null && !is_cpu_addr_mirror) ||
3145 XE_IOCTL_DBG(xe, !obj &&
3146 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
3147 XE_IOCTL_DBG(xe, addr &&
3148 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
3149 XE_IOCTL_DBG(xe, range &&
3150 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
3151 XE_IOCTL_DBG(xe, obj &&
3152 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
3153 XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
3154 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
3155 XE_IOCTL_DBG(xe, obj &&
3156 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
3157 XE_IOCTL_DBG(xe, prefetch_region &&
3158 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
3159 XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
3160 xe->info.mem_region_mask)) ||
3161 XE_IOCTL_DBG(xe, obj &&
3162 op == DRM_XE_VM_BIND_OP_UNMAP)) {
3163 err = -EINVAL;
3164 goto free_bind_ops;
3165 }
3166
3167 if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
3168 XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
3169 XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
3170 XE_IOCTL_DBG(xe, !range &&
3171 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
3172 err = -EINVAL;
3173 goto free_bind_ops;
3174 }
3175 }
3176
3177 return 0;
3178
3179 free_bind_ops:
3180 if (args->num_binds > 1)
3181 kvfree(*bind_ops);
3182 return err;
3183 }
3184
3185 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
3186 struct xe_exec_queue *q,
3187 struct xe_sync_entry *syncs,
3188 int num_syncs)
3189 {
3190 struct dma_fence *fence;
3191 int i, err = 0;
3192
3193 fence = xe_sync_in_fence_get(syncs, num_syncs,
3194 to_wait_exec_queue(vm, q), vm);
3195 if (IS_ERR(fence))
3196 return PTR_ERR(fence);
3197
3198 for (i = 0; i < num_syncs; i++)
3199 xe_sync_entry_signal(&syncs[i], fence);
3200
3201 xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
3202 fence);
3203 dma_fence_put(fence);
3204
3205 return err;
3206 }
3207
3208 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
3209 struct xe_exec_queue *q,
3210 struct xe_sync_entry *syncs, u32 num_syncs)
3211 {
3212 memset(vops, 0, sizeof(*vops));
3213 INIT_LIST_HEAD(&vops->list);
3214 vops->vm = vm;
3215 vops->q = q;
3216 vops->syncs = syncs;
3217 vops->num_syncs = num_syncs;
3218 }
3219
3220 static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
3221 u64 addr, u64 range, u64 obj_offset,
3222 u16 pat_index, u32 op, u32 bind_flags)
3223 {
3224 u16 coh_mode;
3225
3226 if (XE_IOCTL_DBG(xe, range > bo->size) ||
3227 XE_IOCTL_DBG(xe, obj_offset >
3228 bo->size - range)) {
3229 return -EINVAL;
3230 }
3231
3232 /*
3233 * Some platforms require 64k VM_BIND alignment,
3234 * specifically those with XE_VRAM_FLAGS_NEED64K.
3235 *
3236 * Other platforms may have BOs set to 64k physical placement,
3237 * but can be mapped at 4k offsets anyway. This check is only
3238 * there for the former case.
3239 */
3240 if ((bo->flags & XE_BO_FLAG_INTERNAL_64K) &&
3241 (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)) {
3242 if (XE_IOCTL_DBG(xe, obj_offset &
3243 XE_64K_PAGE_MASK) ||
3244 XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3245 XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3246 return -EINVAL;
3247 }
3248 }
3249
3250 coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
3251 if (bo->cpu_caching) {
3252 if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
3253 bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
3254 return -EINVAL;
3255 }
3256 } else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
3257 /*
3258 * Imported dma-buf from a different device should
3259 * require 1-way or 2-way coherency since we don't know
3260 * how it was mapped on the CPU. Just assume it is
3261 * potentially cached on the CPU side.
3262 */
3263 return -EINVAL;
3264 }
3265
3266 /* If a BO is protected it can only be mapped if the key is still valid */
3267 if ((bind_flags & DRM_XE_VM_BIND_FLAG_CHECK_PXP) && xe_bo_is_protected(bo) &&
3268 op != DRM_XE_VM_BIND_OP_UNMAP && op != DRM_XE_VM_BIND_OP_UNMAP_ALL)
3269 if (XE_IOCTL_DBG(xe, xe_pxp_bo_key_check(xe->pxp, bo) != 0))
3270 return -ENOEXEC;
3271
3272 return 0;
3273 }
3274
3275 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3276 {
3277 struct xe_device *xe = to_xe_device(dev);
3278 struct xe_file *xef = to_xe_file(file);
3279 struct drm_xe_vm_bind *args = data;
3280 struct drm_xe_sync __user *syncs_user;
3281 struct xe_bo **bos = NULL;
3282 struct drm_gpuva_ops **ops = NULL;
3283 struct xe_vm *vm;
3284 struct xe_exec_queue *q = NULL;
3285 u32 num_syncs, num_ufence = 0;
3286 struct xe_sync_entry *syncs = NULL;
3287 struct drm_xe_vm_bind_op *bind_ops;
3288 struct xe_vma_ops vops;
3289 struct dma_fence *fence;
3290 int err;
3291 int i;
3292
3293 vm = xe_vm_lookup(xef, args->vm_id);
3294 if (XE_IOCTL_DBG(xe, !vm))
3295 return -EINVAL;
3296
3297 err = vm_bind_ioctl_check_args(xe, vm, args, &bind_ops);
3298 if (err)
3299 goto put_vm;
3300
3301 if (args->exec_queue_id) {
3302 q = xe_exec_queue_lookup(xef, args->exec_queue_id);
3303 if (XE_IOCTL_DBG(xe, !q)) {
3304 err = -ENOENT;
3305 goto put_vm;
3306 }
3307
3308 if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
3309 err = -EINVAL;
3310 goto put_exec_queue;
3311 }
3312 }
3313
3314 /* Ensure all UNMAPs are visible */
3315 if (xe_vm_in_fault_mode(vm))
3316 flush_work(&vm->svm.garbage_collector.work);
3317
3318 err = down_write_killable(&vm->lock);
3319 if (err)
3320 goto put_exec_queue;
3321
3322 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
3323 err = -ENOENT;
3324 goto release_vm_lock;
3325 }
3326
3327 for (i = 0; i < args->num_binds; ++i) {
3328 u64 range = bind_ops[i].range;
3329 u64 addr = bind_ops[i].addr;
3330
3331 if (XE_IOCTL_DBG(xe, range > vm->size) ||
3332 XE_IOCTL_DBG(xe, addr > vm->size - range)) {
3333 err = -EINVAL;
3334 goto release_vm_lock;
3335 }
3336 }
3337
3338 if (args->num_binds) {
3339 bos = kvcalloc(args->num_binds, sizeof(*bos),
3340 GFP_KERNEL | __GFP_ACCOUNT |
3341 __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
3342 if (!bos) {
3343 err = -ENOMEM;
3344 goto release_vm_lock;
3345 }
3346
3347 ops = kvcalloc(args->num_binds, sizeof(*ops),
3348 GFP_KERNEL | __GFP_ACCOUNT |
3349 __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
3350 if (!ops) {
3351 err = -ENOMEM;
3352 goto release_vm_lock;
3353 }
3354 }
3355
3356 for (i = 0; i < args->num_binds; ++i) {
3357 struct drm_gem_object *gem_obj;
3358 u64 range = bind_ops[i].range;
3359 u64 addr = bind_ops[i].addr;
3360 u32 obj = bind_ops[i].obj;
3361 u64 obj_offset = bind_ops[i].obj_offset;
3362 u16 pat_index = bind_ops[i].pat_index;
3363 u32 op = bind_ops[i].op;
3364 u32 bind_flags = bind_ops[i].flags;
3365
3366 if (!obj)
3367 continue;
3368
3369 gem_obj = drm_gem_object_lookup(file, obj);
3370 if (XE_IOCTL_DBG(xe, !gem_obj)) {
3371 err = -ENOENT;
3372 goto put_obj;
3373 }
3374 bos[i] = gem_to_xe_bo(gem_obj);
3375
3376 err = xe_vm_bind_ioctl_validate_bo(xe, bos[i], addr, range,
3377 obj_offset, pat_index, op,
3378 bind_flags);
3379 if (err)
3380 goto put_obj;
3381 }
3382
3383 if (args->num_syncs) {
3384 syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3385 if (!syncs) {
3386 err = -ENOMEM;
3387 goto put_obj;
3388 }
3389 }
3390
3391 syncs_user = u64_to_user_ptr(args->syncs);
3392 for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3393 err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3394 &syncs_user[num_syncs],
3395 (xe_vm_in_lr_mode(vm) ?
3396 SYNC_PARSE_FLAG_LR_MODE : 0) |
3397 (!args->num_binds ?
3398 SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
3399 if (err)
3400 goto free_syncs;
3401
3402 if (xe_sync_is_ufence(&syncs[num_syncs]))
3403 num_ufence++;
3404 }
3405
3406 if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
3407 err = -EINVAL;
3408 goto free_syncs;
3409 }
3410
3411 if (!args->num_binds) {
3412 err = -ENODATA;
3413 goto free_syncs;
3414 }
3415
3416 xe_vma_ops_init(&vops, vm, q, syncs, num_syncs);
3417 for (i = 0; i < args->num_binds; ++i) {
3418 u64 range = bind_ops[i].range;
3419 u64 addr = bind_ops[i].addr;
3420 u32 op = bind_ops[i].op;
3421 u32 flags = bind_ops[i].flags;
3422 u64 obj_offset = bind_ops[i].obj_offset;
3423 u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
3424 u16 pat_index = bind_ops[i].pat_index;
3425
3426 ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3427 addr, range, op, flags,
3428 prefetch_region, pat_index);
3429 if (IS_ERR(ops[i])) {
3430 err = PTR_ERR(ops[i]);
3431 ops[i] = NULL;
3432 goto unwind_ops;
3433 }
3434
3435 err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops);
3436 if (err)
3437 goto unwind_ops;
3438
3439 #ifdef TEST_VM_OPS_ERROR
3440 if (flags & FORCE_OP_ERROR) {
3441 vops.inject_error = true;
3442 vm->xe->vm_inject_error_position =
3443 (vm->xe->vm_inject_error_position + 1) %
3444 FORCE_OP_ERROR_COUNT;
3445 }
3446 #endif
3447 }
3448
3449 /* Nothing to do */
3450 if (list_empty(&vops.list)) {
3451 err = -ENODATA;
3452 goto unwind_ops;
3453 }
3454
3455 err = xe_vma_ops_alloc(&vops, args->num_binds > 1);
3456 if (err)
3457 goto unwind_ops;
3458
3459 fence = vm_bind_ioctl_ops_execute(vm, &vops);
3460 if (IS_ERR(fence))
3461 err = PTR_ERR(fence);
3462 else
3463 dma_fence_put(fence);
3464
3465 unwind_ops:
3466 if (err && err != -ENODATA)
3467 vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3468 xe_vma_ops_fini(&vops);
3469 for (i = args->num_binds - 1; i >= 0; --i)
3470 if (ops[i])
3471 drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
3472 free_syncs:
3473 if (err == -ENODATA)
3474 err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3475 while (num_syncs--)
3476 xe_sync_entry_cleanup(&syncs[num_syncs]);
3477
3478 kfree(syncs);
3479 put_obj:
3480 for (i = 0; i < args->num_binds; ++i)
3481 xe_bo_put(bos[i]);
3482 release_vm_lock:
3483 up_write(&vm->lock);
3484 put_exec_queue:
3485 if (q)
3486 xe_exec_queue_put(q);
3487 put_vm:
3488 xe_vm_put(vm);
3489 kvfree(bos);
3490 kvfree(ops);
3491 if (args->num_binds > 1)
3492 kvfree(bind_ops);
3493 return err;
3494 }
3495
3496 /**
3497 * xe_vm_bind_kernel_bo - bind a kernel BO to a VM
3498 * @vm: VM to bind the BO to
3499 * @bo: BO to bind
3500 * @q: exec queue to use for the bind (optional)
3501 * @addr: address at which to bind the BO
3502 * @cache_lvl: PAT cache level to use
3503 *
3504 * Execute a VM bind map operation on a kernel-owned BO to bind it into a
3505 * kernel-owned VM.
3506 *
3507 * Return: a dma_fence to track completion of the bind if the job was
3508 * successfully submitted, an error pointer otherwise.
3509 */
3510 struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
3511 struct xe_exec_queue *q, u64 addr,
3512 enum xe_cache_level cache_lvl)
3513 {
3514 struct xe_vma_ops vops;
3515 struct drm_gpuva_ops *ops = NULL;
3516 struct dma_fence *fence;
3517 int err;
3518
3519 xe_bo_get(bo);
3520 xe_vm_get(vm);
3521 if (q)
3522 xe_exec_queue_get(q);
3523
3524 down_write(&vm->lock);
3525
3526 xe_vma_ops_init(&vops, vm, q, NULL, 0);
3527
3528 ops = vm_bind_ioctl_ops_create(vm, bo, 0, addr, bo->size,
3529 DRM_XE_VM_BIND_OP_MAP, 0, 0,
3530 vm->xe->pat.idx[cache_lvl]);
3531 if (IS_ERR(ops)) {
3532 err = PTR_ERR(ops);
3533 goto release_vm_lock;
3534 }
3535
3536 err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
3537 if (err)
3538 goto release_vm_lock;
3539
3540 xe_assert(vm->xe, !list_empty(&vops.list));
3541
3542 err = xe_vma_ops_alloc(&vops, false);
3543 if (err)
3544 goto unwind_ops;
3545
3546 fence = vm_bind_ioctl_ops_execute(vm, &vops);
3547 if (IS_ERR(fence))
3548 err = PTR_ERR(fence);
3549
3550 unwind_ops:
3551 if (err && err != -ENODATA)
3552 vm_bind_ioctl_ops_unwind(vm, &ops, 1);
3553
3554 xe_vma_ops_fini(&vops);
3555 drm_gpuva_ops_free(&vm->gpuvm, ops);
3556
3557 release_vm_lock:
3558 up_write(&vm->lock);
3559
3560 if (q)
3561 xe_exec_queue_put(q);
3562 xe_vm_put(vm);
3563 xe_bo_put(bo);
3564
3565 if (err)
3566 fence = ERR_PTR(err);
3567
3568 return fence;
3569 }
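
/*
 * Illustrative sketch only (not taken from an in-tree caller): bind a
 * kernel-owned BO and wait for the bind to complete. "vm", "bo" and "addr"
 * are assumed to have been set up elsewhere.
 *
 *	struct dma_fence *fence;
 *
 *	fence = xe_vm_bind_kernel_bo(vm, bo, NULL, addr, XE_CACHE_WB);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */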
3570
3571 /**
3572 * xe_vm_lock() - Lock the vm's dma_resv object
3573 * @vm: The struct xe_vm whose lock is to be locked
3574 * @intr: Whether to perform any waits interruptibly
3575 *
3576 * Return: 0 on success, -EINTR if @intr is true and the wait for a
3577 * contended lock was interrupted. If @intr is false, the function
3578 * always returns 0.
3579 */
3580 int xe_vm_lock(struct xe_vm *vm, bool intr)
3581 {
3582 if (intr)
3583 return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3584
3585 return dma_resv_lock(xe_vm_resv(vm), NULL);
3586 }
3587
3588 /**
3589 * xe_vm_unlock() - Unlock the vm's dma_resv object
3590 * @vm: The struct xe_vm whose lock is to be released.
3591 *
3592 * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
3593 */
3594 void xe_vm_unlock(struct xe_vm *vm)
3595 {
3596 dma_resv_unlock(xe_vm_resv(vm));
3597 }
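
/*
 * Illustrative sketch of the expected xe_vm_lock()/xe_vm_unlock() pairing
 * (not taken from a real caller); state protected by the VM's dma_resv may
 * be touched between the two calls:
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *	... operate on state protected by xe_vm_resv(vm) ...
 *	xe_vm_unlock(vm);
 */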
3598
3599 /**
3600 * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3601 * @vma: VMA to invalidate
3602 *
3603 * Walks the page-table leaves and zeroes the entries owned by this VMA,
3604 * invalidates the TLBs, and blocks until the TLB invalidation is
3605 * complete.
3606 *
3607 * Returns 0 for success, negative error code otherwise.
3608 */
3609 int xe_vm_invalidate_vma(struct xe_vma *vma)
3610 {
3611 struct xe_device *xe = xe_vma_vm(vma)->xe;
3612 struct xe_tile *tile;
3613 struct xe_gt_tlb_invalidation_fence
3614 fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
3615 u8 id;
3616 u32 fence_id = 0;
3617 int ret = 0;
3618
3619 xe_assert(xe, !xe_vma_is_null(vma));
3620 xe_assert(xe, !xe_vma_is_cpu_addr_mirror(vma));
3621 trace_xe_vma_invalidate(vma);
3622
3623 vm_dbg(&xe_vma_vm(vma)->xe->drm,
3624 "INVALIDATE: addr=0x%016llx, range=0x%016llx",
3625 xe_vma_start(vma), xe_vma_size(vma));
3626
3627 /* Check that we don't race with page-table updates */
3628 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3629 if (xe_vma_is_userptr(vma)) {
3630 WARN_ON_ONCE(!mmu_interval_check_retry
3631 (&to_userptr_vma(vma)->userptr.notifier,
3632 to_userptr_vma(vma)->userptr.notifier_seq));
3633 WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3634 DMA_RESV_USAGE_BOOKKEEP));
3635
3636 } else {
3637 xe_bo_assert_held(xe_vma_bo(vma));
3638 }
3639 }
3640
3641 for_each_tile(tile, xe, id) {
3642 if (xe_pt_zap_ptes(tile, vma)) {
3643 xe_device_wmb(xe);
3644 xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
3645 &fence[fence_id],
3646 true);
3647
3648 ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
3649 &fence[fence_id], vma);
3650 if (ret)
3651 goto wait;
3652 ++fence_id;
3653
3654 if (!tile->media_gt)
3655 continue;
3656
3657 xe_gt_tlb_invalidation_fence_init(tile->media_gt,
3658 &fence[fence_id],
3659 true);
3660
3661 ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
3662 &fence[fence_id], vma);
3663 if (ret)
3664 goto wait;
3665 ++fence_id;
3666 }
3667 }
3668
3669 wait:
3670 for (id = 0; id < fence_id; ++id)
3671 xe_gt_tlb_invalidation_fence_wait(&fence[id]);
3672
3673 vma->tile_invalidated = vma->tile_mask;
3674
3675 return ret;
3676 }
3677
3678 int xe_vm_validate_protected(struct xe_vm *vm)
3679 {
3680 struct drm_gpuva *gpuva;
3681 int err = 0;
3682
3683 if (!vm)
3684 return -ENODEV;
3685
3686 mutex_lock(&vm->snap_mutex);
3687
3688 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3689 struct xe_vma *vma = gpuva_to_vma(gpuva);
3690 struct xe_bo *bo = vma->gpuva.gem.obj ?
3691 gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
3692
3693 if (!bo)
3694 continue;
3695
3696 if (xe_bo_is_protected(bo)) {
3697 err = xe_pxp_bo_key_check(vm->xe->pxp, bo);
3698 if (err)
3699 break;
3700 }
3701 }
3702
3703 mutex_unlock(&vm->snap_mutex);
3704 return err;
3705 }
3706
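/*
 * Snapshot of all dumpable VMAs in a VM. xe_vm_snapshot_capture() records the
 * layout (and grabs BO/mm references) with GFP_NOWAIT so it can run from the
 * capture path, while xe_vm_snapshot_capture_delayed() later copies the
 * actual contents from process context.
 */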
3707 struct xe_vm_snapshot {
3708 unsigned long num_snaps;
3709 struct {
3710 u64 ofs, bo_ofs;
3711 unsigned long len;
3712 struct xe_bo *bo;
3713 void *data;
3714 struct mm_struct *mm;
3715 } snap[];
3716 };
3717
3718 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
3719 {
3720 unsigned long num_snaps = 0, i;
3721 struct xe_vm_snapshot *snap = NULL;
3722 struct drm_gpuva *gpuva;
3723
3724 if (!vm)
3725 return NULL;
3726
3727 mutex_lock(&vm->snap_mutex);
3728 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3729 if (gpuva->flags & XE_VMA_DUMPABLE)
3730 num_snaps++;
3731 }
3732
3733 if (num_snaps)
3734 snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
3735 if (!snap) {
3736 snap = num_snaps ? ERR_PTR(-ENOMEM) : ERR_PTR(-ENODEV);
3737 goto out_unlock;
3738 }
3739
3740 snap->num_snaps = num_snaps;
3741 i = 0;
3742 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3743 struct xe_vma *vma = gpuva_to_vma(gpuva);
3744 struct xe_bo *bo = vma->gpuva.gem.obj ?
3745 gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
3746
3747 if (!(gpuva->flags & XE_VMA_DUMPABLE))
3748 continue;
3749
3750 snap->snap[i].ofs = xe_vma_start(vma);
3751 snap->snap[i].len = xe_vma_size(vma);
3752 if (bo) {
3753 snap->snap[i].bo = xe_bo_get(bo);
3754 snap->snap[i].bo_ofs = xe_vma_bo_offset(vma);
3755 } else if (xe_vma_is_userptr(vma)) {
3756 struct mm_struct *mm =
3757 to_userptr_vma(vma)->userptr.notifier.mm;
3758
3759 if (mmget_not_zero(mm))
3760 snap->snap[i].mm = mm;
3761 else
3762 snap->snap[i].data = ERR_PTR(-EFAULT);
3763
3764 snap->snap[i].bo_ofs = xe_vma_userptr(vma);
3765 } else {
3766 snap->snap[i].data = ERR_PTR(-ENOENT);
3767 }
3768 i++;
3769 }
3770
3771 out_unlock:
3772 mutex_unlock(&vm->snap_mutex);
3773 return snap;
3774 }
3775
3776 void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
3777 {
3778 if (IS_ERR_OR_NULL(snap))
3779 return;
3780
3781 for (int i = 0; i < snap->num_snaps; i++) {
3782 struct xe_bo *bo = snap->snap[i].bo;
3783 int err;
3784
3785 if (IS_ERR(snap->snap[i].data))
3786 continue;
3787
3788 snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER);
3789 if (!snap->snap[i].data) {
3790 snap->snap[i].data = ERR_PTR(-ENOMEM);
3791 goto cleanup_bo;
3792 }
3793
3794 if (bo) {
3795 err = xe_bo_read(bo, snap->snap[i].bo_ofs,
3796 snap->snap[i].data, snap->snap[i].len);
3797 } else {
3798 void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
3799
3800 kthread_use_mm(snap->snap[i].mm);
3801 if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len))
3802 err = 0;
3803 else
3804 err = -EFAULT;
3805 kthread_unuse_mm(snap->snap[i].mm);
3806
3807 mmput(snap->snap[i].mm);
3808 snap->snap[i].mm = NULL;
3809 }
3810
3811 if (err) {
3812 kvfree(snap->snap[i].data);
3813 snap->snap[i].data = ERR_PTR(err);
3814 }
3815
3816 cleanup_bo:
3817 xe_bo_put(bo);
3818 snap->snap[i].bo = NULL;
3819 }
3820 }
3821
3822 void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
3823 {
3824 unsigned long i, j;
3825
3826 if (IS_ERR_OR_NULL(snap)) {
3827 drm_printf(p, "[0].error: %li\n", PTR_ERR(snap));
3828 return;
3829 }
3830
3831 for (i = 0; i < snap->num_snaps; i++) {
3832 drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
3833
3834 if (IS_ERR(snap->snap[i].data)) {
3835 drm_printf(p, "[%llx].error: %li\n", snap->snap[i].ofs,
3836 PTR_ERR(snap->snap[i].data));
3837 continue;
3838 }
3839
3840 drm_printf(p, "[%llx].data: ", snap->snap[i].ofs);
3841
3842 for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
3843 u32 *val = snap->snap[i].data + j;
3844 char dumped[ASCII85_BUFSZ];
3845
3846 drm_puts(p, ascii85_encode(*val, dumped));
3847 }
3848
3849 drm_puts(p, "\n");
3850 }
3851 }
3852
3853 void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
3854 {
3855 unsigned long i;
3856
3857 if (IS_ERR_OR_NULL(snap))
3858 return;
3859
3860 for (i = 0; i < snap->num_snaps; i++) {
3861 if (!IS_ERR(snap->snap[i].data))
3862 kvfree(snap->snap[i].data);
3863 xe_bo_put(snap->snap[i].bo);
3864 if (snap->snap[i].mm)
3865 mmput(snap->snap[i].mm);
3866 }
3867 kvfree(snap);
3868 }
3869