xref: /linux/drivers/gpu/drm/xe/xe_vm.c (revision 322a00efec6a7f44a9204fad4e15d7f83e0e1ed2)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_vm.h"
7 
8 #include <linux/dma-fence-array.h>
9 #include <linux/nospec.h>
10 
11 #include <drm/drm_exec.h>
12 #include <drm/drm_print.h>
13 #include <drm/ttm/ttm_tt.h>
14 #include <uapi/drm/xe_drm.h>
15 #include <linux/ascii85.h>
16 #include <linux/delay.h>
17 #include <linux/kthread.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 
21 #include <generated/xe_wa_oob.h>
22 
23 #include "regs/xe_gtt_defs.h"
24 #include "xe_assert.h"
25 #include "xe_bo.h"
26 #include "xe_device.h"
27 #include "xe_drm_client.h"
28 #include "xe_exec_queue.h"
29 #include "xe_gt_pagefault.h"
30 #include "xe_gt_tlb_invalidation.h"
31 #include "xe_migrate.h"
32 #include "xe_pat.h"
33 #include "xe_pm.h"
34 #include "xe_preempt_fence.h"
35 #include "xe_pt.h"
36 #include "xe_res_cursor.h"
37 #include "xe_sync.h"
38 #include "xe_trace_bo.h"
39 #include "xe_wa.h"
40 #include "xe_hmm.h"
41 
42 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
43 {
44 	return vm->gpuvm.r_obj;
45 }
46 
47 /**
48  * xe_vma_userptr_check_repin() - Advisory check for repin needed
49  * @uvma: The userptr vma
50  *
51  * Check if the userptr vma has been invalidated since last successful
52  * repin. The check is advisory only and the function can be called
53  * without the vm->userptr.notifier_lock held. There is no guarantee that the
54  * userptr vma will remain valid after a lockless check, so typically
55  * the call needs to be followed by a proper check under the notifier_lock.
56  *
57  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
58  */
59 int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
60 {
61 	return mmu_interval_check_retry(&uvma->userptr.notifier,
62 					uvma->userptr.notifier_seq) ?
63 		-EAGAIN : 0;
64 }
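/*
 * Example (illustrative sketch, not driver code), assuming the caller
 * already holds vm->lock: the advisory check is performed locklessly
 * and, on -EAGAIN, followed by a repin plus a proper re-check under
 * the notifier_lock:
 *
 *	if (xe_vma_userptr_check_repin(uvma)) {
 *		err = xe_vma_userptr_pin_pages(uvma);
 *		if (err)
 *			return err;
 *
 *		down_read(&vm->userptr.notifier_lock);
 *		err = __xe_vm_userptr_needs_repin(vm);
 *		up_read(&vm->userptr.notifier_lock);
 *		if (err)
 *			return err;
 *	}
 */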
65 
66 int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
67 {
68 	struct xe_vma *vma = &uvma->vma;
69 	struct xe_vm *vm = xe_vma_vm(vma);
70 	struct xe_device *xe = vm->xe;
71 
72 	lockdep_assert_held(&vm->lock);
73 	xe_assert(xe, xe_vma_is_userptr(vma));
74 
75 	return xe_hmm_userptr_populate_range(uvma, false);
76 }
77 
78 static bool preempt_fences_waiting(struct xe_vm *vm)
79 {
80 	struct xe_exec_queue *q;
81 
82 	lockdep_assert_held(&vm->lock);
83 	xe_vm_assert_held(vm);
84 
85 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
86 		if (!q->lr.pfence ||
87 		    test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
88 			     &q->lr.pfence->flags)) {
89 			return true;
90 		}
91 	}
92 
93 	return false;
94 }
95 
96 static void free_preempt_fences(struct list_head *list)
97 {
98 	struct list_head *link, *next;
99 
100 	list_for_each_safe(link, next, list)
101 		xe_preempt_fence_free(to_preempt_fence_from_link(link));
102 }
103 
104 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
105 				unsigned int *count)
106 {
107 	lockdep_assert_held(&vm->lock);
108 	xe_vm_assert_held(vm);
109 
110 	if (*count >= vm->preempt.num_exec_queues)
111 		return 0;
112 
113 	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
114 		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
115 
116 		if (IS_ERR(pfence))
117 			return PTR_ERR(pfence);
118 
119 		list_move_tail(xe_preempt_fence_link(pfence), list);
120 	}
121 
122 	return 0;
123 }
124 
125 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
126 {
127 	struct xe_exec_queue *q;
128 
129 	xe_vm_assert_held(vm);
130 
131 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
132 		if (q->lr.pfence) {
133 			long timeout = dma_fence_wait(q->lr.pfence, false);
134 
135 			/* Only -ETIME on fence indicates VM needs to be killed */
136 			if (timeout < 0 || q->lr.pfence->error == -ETIME)
137 				return -ETIME;
138 
139 			dma_fence_put(q->lr.pfence);
140 			q->lr.pfence = NULL;
141 		}
142 	}
143 
144 	return 0;
145 }
146 
147 static bool xe_vm_is_idle(struct xe_vm *vm)
148 {
149 	struct xe_exec_queue *q;
150 
151 	xe_vm_assert_held(vm);
152 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
153 		if (!xe_exec_queue_is_idle(q))
154 			return false;
155 	}
156 
157 	return true;
158 }
159 
160 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
161 {
162 	struct list_head *link;
163 	struct xe_exec_queue *q;
164 
165 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
166 		struct dma_fence *fence;
167 
168 		link = list->next;
169 		xe_assert(vm->xe, link != list);
170 
171 		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
172 					     q, q->lr.context,
173 					     ++q->lr.seqno);
174 		dma_fence_put(q->lr.pfence);
175 		q->lr.pfence = fence;
176 	}
177 }
178 
179 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
180 {
181 	struct xe_exec_queue *q;
182 	int err;
183 
184 	xe_bo_assert_held(bo);
185 
186 	if (!vm->preempt.num_exec_queues)
187 		return 0;
188 
189 	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
190 	if (err)
191 		return err;
192 
193 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
194 		if (q->lr.pfence) {
195 			dma_resv_add_fence(bo->ttm.base.resv,
196 					   q->lr.pfence,
197 					   DMA_RESV_USAGE_BOOKKEEP);
198 		}
199 
200 	return 0;
201 }
202 
203 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
204 						struct drm_exec *exec)
205 {
206 	struct xe_exec_queue *q;
207 
208 	lockdep_assert_held(&vm->lock);
209 	xe_vm_assert_held(vm);
210 
211 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
212 		q->ops->resume(q);
213 
214 		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence,
215 					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
216 	}
217 }
218 
219 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
220 {
221 	struct drm_gpuvm_exec vm_exec = {
222 		.vm = &vm->gpuvm,
223 		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
224 		.num_fences = 1,
225 	};
226 	struct drm_exec *exec = &vm_exec.exec;
227 	struct dma_fence *pfence;
228 	int err;
229 	bool wait;
230 
231 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
232 
233 	down_write(&vm->lock);
234 	err = drm_gpuvm_exec_lock(&vm_exec);
235 	if (err)
236 		goto out_up_write;
237 
238 	pfence = xe_preempt_fence_create(q, q->lr.context,
239 					 ++q->lr.seqno);
240 	if (!pfence) {
241 		err = -ENOMEM;
242 		goto out_fini;
243 	}
244 
245 	list_add(&q->lr.link, &vm->preempt.exec_queues);
246 	++vm->preempt.num_exec_queues;
247 	q->lr.pfence = pfence;
248 
249 	down_read(&vm->userptr.notifier_lock);
250 
251 	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
252 				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
253 
254 	/*
255 	 * Check to see if a preemption on the VM or a userptr invalidation
256 	 * is in flight; if so, trigger this preempt fence to sync state with the
257 	 * other preempt fences on the VM.
258 	 */
259 	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
260 	if (wait)
261 		dma_fence_enable_sw_signaling(pfence);
262 
263 	up_read(&vm->userptr.notifier_lock);
264 
265 out_fini:
266 	drm_exec_fini(exec);
267 out_up_write:
268 	up_write(&vm->lock);
269 
270 	return err;
271 }
272 
273 /**
274  * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
275  * @vm: The VM.
276  * @q: The exec_queue
277  *
278  * Note that this function might be called multiple times on the same queue.
279  */
280 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
281 {
282 	if (!xe_vm_in_preempt_fence_mode(vm))
283 		return;
284 
285 	down_write(&vm->lock);
286 	if (!list_empty(&q->lr.link)) {
287 		list_del_init(&q->lr.link);
288 		--vm->preempt.num_exec_queues;
289 	}
290 	if (q->lr.pfence) {
291 		dma_fence_enable_sw_signaling(q->lr.pfence);
292 		dma_fence_put(q->lr.pfence);
293 		q->lr.pfence = NULL;
294 	}
295 	up_write(&vm->lock);
296 }
297 
298 /**
299  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
300  * that need repinning.
301  * @vm: The VM.
302  *
303  * This function checks whether the VM has userptrs that need repinning,
304  * and provides a release-type barrier on the userptr.notifier_lock after
305  * checking.
306  *
307  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
308  */
309 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
310 {
311 	lockdep_assert_held_read(&vm->userptr.notifier_lock);
312 
313 	return (list_empty(&vm->userptr.repin_list) &&
314 		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
315 }
316 
317 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
318 
319 /**
320  * xe_vm_kill() - VM Kill
321  * @vm: The VM.
322  * @unlocked: Flag indicating the VM's dma-resv is not held
323  *
324  * Kill the VM by setting the banned flag to mark it unavailable for use. If
325  * in preempt fence mode, also kill all exec queues attached to the VM.
326  */
327 void xe_vm_kill(struct xe_vm *vm, bool unlocked)
328 {
329 	struct xe_exec_queue *q;
330 
331 	lockdep_assert_held(&vm->lock);
332 
333 	if (unlocked)
334 		xe_vm_lock(vm, false);
335 
336 	vm->flags |= XE_VM_FLAG_BANNED;
337 	trace_xe_vm_kill(vm);
338 
339 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
340 		q->ops->kill(q);
341 
342 	if (unlocked)
343 		xe_vm_unlock(vm);
344 
345 	/* TODO: Inform user the VM is banned */
346 }
347 
348 /**
349  * xe_vm_validate_should_retry() - Whether to retry after a validate error.
350  * @exec: The drm_exec object used for locking before validation.
351  * @err: The error returned from ttm_bo_validate().
352  * @end: A ktime_t cookie that should be set to 0 before first use and
353  * that should be reused on subsequent calls.
354  *
355  * With multiple active VMs, under memory pressure, it is possible that
356  * ttm_bo_validate() run into -EDEADLK and in such case returns -ENOMEM.
357  * Until ttm properly handles locking in such scenarios, best thing the
358  * driver can do is retry with a timeout. Check if that is necessary, and
359  * if so unlock the drm_exec's objects while keeping the ticket to prepare
360  * for a rerun.
361  *
362  * Return: true if a retry after drm_exec_init() is recommended;
363  * false otherwise.
364  */
365 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
366 {
367 	ktime_t cur;
368 
369 	if (err != -ENOMEM)
370 		return false;
371 
372 	cur = ktime_get();
373 	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
374 	if (!ktime_before(cur, *end))
375 		return false;
376 
377 	msleep(20);
378 	return true;
379 }
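/*
 * Example (illustrative sketch, not driver code) of the intended calling
 * pattern; lock_and_validate() is a hypothetical stand-in for the caller's
 * locking/validation step, and the ktime_t cookie is zero-initialized once
 * and then reused across retries:
 *
 *	struct drm_exec exec;
 *	ktime_t end = 0;
 *	int err;
 *
 * retry:
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		err = lock_and_validate(&exec);
 *		drm_exec_retry_on_contention(&exec);
 *	}
 *	drm_exec_fini(&exec);
 *	if (err && xe_vm_validate_should_retry(&exec, err, &end))
 *		goto retry;
 */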
380 
381 static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
382 {
383 	struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
384 	struct drm_gpuva *gpuva;
385 	int ret;
386 
387 	lockdep_assert_held(&vm->lock);
388 	drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
389 		list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
390 			       &vm->rebind_list);
391 
392 	ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
393 	if (ret)
394 		return ret;
395 
396 	vm_bo->evicted = false;
397 	return 0;
398 }
399 
400 /**
401  * xe_vm_validate_rebind() - Validate buffer objects and rebind vmas
402  * @vm: The vm for which we are rebinding.
403  * @exec: The struct drm_exec with the locked GEM objects.
404  * @num_fences: The number of fences to reserve for the operation, not
405  * including rebinds and validations.
406  *
407  * Validates all evicted gem objects and rebinds their vmas. Note that
408  * rebindings may cause evictions and hence the validation-rebind
409  * sequence is rerun until there are no more objects to validate.
410  *
411  * Return: 0 on success, negative error code on error. In particular,
412  * may return -EINTR or -ERESTARTSYS if interrupted, and -EDEADLK if
413  * the drm_exec transaction needs to be restarted.
414  */
415 int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
416 			  unsigned int num_fences)
417 {
418 	struct drm_gem_object *obj;
419 	unsigned long index;
420 	int ret;
421 
422 	do {
423 		ret = drm_gpuvm_validate(&vm->gpuvm, exec);
424 		if (ret)
425 			return ret;
426 
427 		ret = xe_vm_rebind(vm, false);
428 		if (ret)
429 			return ret;
430 	} while (!list_empty(&vm->gpuvm.evict.list));
431 
432 	drm_exec_for_each_locked_object(exec, index, obj) {
433 		ret = dma_resv_reserve_fences(obj->resv, num_fences);
434 		if (ret)
435 			return ret;
436 	}
437 
438 	return 0;
439 }
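/*
 * Illustrative sketch (not driver code): this function is intended to run
 * inside a drm_exec locking loop, so that a -EDEADLK hit while taking the
 * blocking dma_resv locks restarts the whole transaction:
 *
 *	drm_exec_until_all_locked(&exec) {
 *		err = drm_gpuvm_prepare_objects(&vm->gpuvm, &exec, 0);
 *		if (!err)
 *			err = xe_vm_validate_rebind(vm, &exec, num_fences);
 *		drm_exec_retry_on_contention(&exec);
 *	}
 */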
440 
441 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
442 				 bool *done)
443 {
444 	int err;
445 
446 	err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0);
447 	if (err)
448 		return err;
449 
450 	if (xe_vm_is_idle(vm)) {
451 		vm->preempt.rebind_deactivated = true;
452 		*done = true;
453 		return 0;
454 	}
455 
456 	if (!preempt_fences_waiting(vm)) {
457 		*done = true;
458 		return 0;
459 	}
460 
461 	err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0);
462 	if (err)
463 		return err;
464 
465 	err = wait_for_existing_preempt_fences(vm);
466 	if (err)
467 		return err;
468 
469 	/*
470 	 * Add validation and rebinding to the locking loop since both can
471 	 * cause evictions which may require blocking dma_resv locks.
472 	 * The fence reservation here is intended for the new preempt fences
473 	 * we attach at the end of the rebind work.
474 	 */
475 	return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
476 }
477 
478 static void preempt_rebind_work_func(struct work_struct *w)
479 {
480 	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
481 	struct drm_exec exec;
482 	unsigned int fence_count = 0;
483 	LIST_HEAD(preempt_fences);
484 	ktime_t end = 0;
485 	int err = 0;
486 	long wait;
487 	int __maybe_unused tries = 0;
488 
489 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
490 	trace_xe_vm_rebind_worker_enter(vm);
491 
492 	down_write(&vm->lock);
493 
494 	if (xe_vm_is_closed_or_banned(vm)) {
495 		up_write(&vm->lock);
496 		trace_xe_vm_rebind_worker_exit(vm);
497 		return;
498 	}
499 
500 retry:
501 	if (xe_vm_userptr_check_repin(vm)) {
502 		err = xe_vm_userptr_pin(vm);
503 		if (err)
504 			goto out_unlock_outer;
505 	}
506 
507 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
508 
509 	drm_exec_until_all_locked(&exec) {
510 		bool done = false;
511 
512 		err = xe_preempt_work_begin(&exec, vm, &done);
513 		drm_exec_retry_on_contention(&exec);
514 		if (err || done) {
515 			drm_exec_fini(&exec);
516 			if (err && xe_vm_validate_should_retry(&exec, err, &end))
517 				err = -EAGAIN;
518 
519 			goto out_unlock_outer;
520 		}
521 	}
522 
523 	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
524 	if (err)
525 		goto out_unlock;
526 
527 	err = xe_vm_rebind(vm, true);
528 	if (err)
529 		goto out_unlock;
530 
531 	/* Wait on rebinds and munmap style VM unbinds */
532 	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
533 				     DMA_RESV_USAGE_KERNEL,
534 				     false, MAX_SCHEDULE_TIMEOUT);
535 	if (wait <= 0) {
536 		err = -ETIME;
537 		goto out_unlock;
538 	}
539 
540 #define retry_required(__tries, __vm) \
541 	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
542 	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
543 	__xe_vm_userptr_needs_repin(__vm))
544 
545 	down_read(&vm->userptr.notifier_lock);
546 	if (retry_required(tries, vm)) {
547 		up_read(&vm->userptr.notifier_lock);
548 		err = -EAGAIN;
549 		goto out_unlock;
550 	}
551 
552 #undef retry_required
553 
554 	spin_lock(&vm->xe->ttm.lru_lock);
555 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
556 	spin_unlock(&vm->xe->ttm.lru_lock);
557 
558 	/* Point of no return. */
559 	arm_preempt_fences(vm, &preempt_fences);
560 	resume_and_reinstall_preempt_fences(vm, &exec);
561 	up_read(&vm->userptr.notifier_lock);
562 
563 out_unlock:
564 	drm_exec_fini(&exec);
565 out_unlock_outer:
566 	if (err == -EAGAIN) {
567 		trace_xe_vm_rebind_worker_retry(vm);
568 		goto retry;
569 	}
570 
571 	if (err) {
572 		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
573 		xe_vm_kill(vm, true);
574 	}
575 	up_write(&vm->lock);
576 
577 	free_preempt_fences(&preempt_fences);
578 
579 	trace_xe_vm_rebind_worker_exit(vm);
580 }
581 
582 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
583 				   const struct mmu_notifier_range *range,
584 				   unsigned long cur_seq)
585 {
586 	struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
587 	struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
588 	struct xe_vma *vma = &uvma->vma;
589 	struct xe_vm *vm = xe_vma_vm(vma);
590 	struct dma_resv_iter cursor;
591 	struct dma_fence *fence;
592 	long err;
593 
594 	xe_assert(vm->xe, xe_vma_is_userptr(vma));
595 	trace_xe_vma_userptr_invalidate(vma);
596 
597 	if (!mmu_notifier_range_blockable(range))
598 		return false;
599 
600 	vm_dbg(&xe_vma_vm(vma)->xe->drm,
601 	       "NOTIFIER: addr=0x%016llx, range=0x%016llx",
602 	       xe_vma_start(vma), xe_vma_size(vma));
603 
604 	down_write(&vm->userptr.notifier_lock);
605 	mmu_interval_set_seq(mni, cur_seq);
606 
607 	/* No need to stop gpu access if the userptr is not yet bound. */
608 	if (!userptr->initial_bind) {
609 		up_write(&vm->userptr.notifier_lock);
610 		return true;
611 	}
612 
613 	/*
614 	 * Tell exec and rebind worker they need to repin and rebind this
615 	 * userptr.
616 	 */
617 	if (!xe_vm_in_fault_mode(vm) &&
618 	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
619 		spin_lock(&vm->userptr.invalidated_lock);
620 		list_move_tail(&userptr->invalidate_link,
621 			       &vm->userptr.invalidated);
622 		spin_unlock(&vm->userptr.invalidated_lock);
623 	}
624 
625 	up_write(&vm->userptr.notifier_lock);
626 
627 	/*
628 	 * Preempt fences turn into schedule disables, pipeline these.
629 	 * Note that even in fault mode, we need to wait for binds and
630 	 * unbinds to complete, and those are attached as BOOKKEEP fences
631 	 * to the vm.
632 	 */
633 	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
634 			    DMA_RESV_USAGE_BOOKKEEP);
635 	dma_resv_for_each_fence_unlocked(&cursor, fence)
636 		dma_fence_enable_sw_signaling(fence);
637 	dma_resv_iter_end(&cursor);
638 
639 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
640 				    DMA_RESV_USAGE_BOOKKEEP,
641 				    false, MAX_SCHEDULE_TIMEOUT);
642 	XE_WARN_ON(err <= 0);
643 
644 	if (xe_vm_in_fault_mode(vm)) {
645 		err = xe_vm_invalidate_vma(vma);
646 		XE_WARN_ON(err);
647 	}
648 
649 	trace_xe_vma_userptr_invalidate_complete(vma);
650 
651 	return true;
652 }
653 
654 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
655 	.invalidate = vma_userptr_invalidate,
656 };
657 
658 int xe_vm_userptr_pin(struct xe_vm *vm)
659 {
660 	struct xe_userptr_vma *uvma, *next;
661 	int err = 0;
662 	LIST_HEAD(tmp_evict);
663 
664 	xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
665 	lockdep_assert_held_write(&vm->lock);
666 
667 	/* Collect invalidated userptrs */
668 	spin_lock(&vm->userptr.invalidated_lock);
669 	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
670 				 userptr.invalidate_link) {
671 		list_del_init(&uvma->userptr.invalidate_link);
672 		list_move_tail(&uvma->userptr.repin_link,
673 			       &vm->userptr.repin_list);
674 	}
675 	spin_unlock(&vm->userptr.invalidated_lock);
676 
677 	/* Pin and move to temporary list */
678 	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
679 				 userptr.repin_link) {
680 		err = xe_vma_userptr_pin_pages(uvma);
681 		if (err == -EFAULT) {
682 			list_del_init(&uvma->userptr.repin_link);
683 
684 			/* Wait for pending binds */
685 			xe_vm_lock(vm, false);
686 			dma_resv_wait_timeout(xe_vm_resv(vm),
687 					      DMA_RESV_USAGE_BOOKKEEP,
688 					      false, MAX_SCHEDULE_TIMEOUT);
689 
690 			err = xe_vm_invalidate_vma(&uvma->vma);
691 			xe_vm_unlock(vm);
692 			if (err)
693 				return err;
694 		} else {
695 			if (err < 0)
696 				return err;
697 
698 			list_del_init(&uvma->userptr.repin_link);
699 			list_move_tail(&uvma->vma.combined_links.rebind,
700 				       &vm->rebind_list);
701 		}
702 	}
703 
704 	return 0;
705 }
706 
707 /**
708  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
709  * that need repinning.
710  * @vm: The VM.
711  *
712  * This function does an advisory check for whether the VM has userptrs that
713  * need repinning.
714  *
715  * Return: 0 if there are no indications of userptrs needing repinning,
716  * -EAGAIN if there are.
717  */
718 int xe_vm_userptr_check_repin(struct xe_vm *vm)
719 {
720 	return (list_empty_careful(&vm->userptr.repin_list) &&
721 		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
722 }
723 
724 static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
725 {
726 	int i;
727 
728 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) {
729 		if (!vops->pt_update_ops[i].num_ops)
730 			continue;
731 
732 		vops->pt_update_ops[i].ops =
733 			kmalloc_array(vops->pt_update_ops[i].num_ops,
734 				      sizeof(*vops->pt_update_ops[i].ops),
735 				      GFP_KERNEL);
736 		if (!vops->pt_update_ops[i].ops)
737 			return array_of_binds ? -ENOBUFS : -ENOMEM;
738 	}
739 
740 	return 0;
741 }
742 
743 static void xe_vma_ops_fini(struct xe_vma_ops *vops)
744 {
745 	int i;
746 
747 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
748 		kfree(vops->pt_update_ops[i].ops);
749 }
750 
751 static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask)
752 {
753 	int i;
754 
755 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
756 		if (BIT(i) & tile_mask)
757 			++vops->pt_update_ops[i].num_ops;
758 }
759 
760 static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
761 				  u8 tile_mask)
762 {
763 	INIT_LIST_HEAD(&op->link);
764 	op->tile_mask = tile_mask;
765 	op->base.op = DRM_GPUVA_OP_MAP;
766 	op->base.map.va.addr = vma->gpuva.va.addr;
767 	op->base.map.va.range = vma->gpuva.va.range;
768 	op->base.map.gem.obj = vma->gpuva.gem.obj;
769 	op->base.map.gem.offset = vma->gpuva.gem.offset;
770 	op->map.vma = vma;
771 	op->map.immediate = true;
772 	op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
773 	op->map.is_null = xe_vma_is_null(vma);
774 }
775 
776 static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
777 				u8 tile_mask)
778 {
779 	struct xe_vma_op *op;
780 
781 	op = kzalloc(sizeof(*op), GFP_KERNEL);
782 	if (!op)
783 		return -ENOMEM;
784 
785 	xe_vm_populate_rebind(op, vma, tile_mask);
786 	list_add_tail(&op->link, &vops->list);
787 	xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
788 
789 	return 0;
790 }
791 
792 static struct dma_fence *ops_execute(struct xe_vm *vm,
793 				     struct xe_vma_ops *vops);
794 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
795 			    struct xe_exec_queue *q,
796 			    struct xe_sync_entry *syncs, u32 num_syncs);
797 
798 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
799 {
800 	struct dma_fence *fence;
801 	struct xe_vma *vma, *next;
802 	struct xe_vma_ops vops;
803 	struct xe_vma_op *op, *next_op;
804 	int err, i;
805 
806 	lockdep_assert_held(&vm->lock);
807 	if ((xe_vm_in_lr_mode(vm) && !rebind_worker) ||
808 	    list_empty(&vm->rebind_list))
809 		return 0;
810 
811 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
812 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
813 		vops.pt_update_ops[i].wait_vm_bookkeep = true;
814 
815 	xe_vm_assert_held(vm);
816 	list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
817 		xe_assert(vm->xe, vma->tile_present);
818 
819 		if (rebind_worker)
820 			trace_xe_vma_rebind_worker(vma);
821 		else
822 			trace_xe_vma_rebind_exec(vma);
823 
824 		err = xe_vm_ops_add_rebind(&vops, vma,
825 					   vma->tile_present);
826 		if (err)
827 			goto free_ops;
828 	}
829 
830 	err = xe_vma_ops_alloc(&vops, false);
831 	if (err)
832 		goto free_ops;
833 
834 	fence = ops_execute(vm, &vops);
835 	if (IS_ERR(fence)) {
836 		err = PTR_ERR(fence);
837 	} else {
838 		dma_fence_put(fence);
839 		list_for_each_entry_safe(vma, next, &vm->rebind_list,
840 					 combined_links.rebind)
841 			list_del_init(&vma->combined_links.rebind);
842 	}
843 free_ops:
844 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
845 		list_del(&op->link);
846 		kfree(op);
847 	}
848 	xe_vma_ops_fini(&vops);
849 
850 	return err;
851 }
852 
853 struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_mask)
854 {
855 	struct dma_fence *fence = NULL;
856 	struct xe_vma_ops vops;
857 	struct xe_vma_op *op, *next_op;
858 	struct xe_tile *tile;
859 	u8 id;
860 	int err;
861 
862 	lockdep_assert_held(&vm->lock);
863 	xe_vm_assert_held(vm);
864 	xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
865 
866 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
867 	for_each_tile(tile, vm->xe, id) {
868 		vops.pt_update_ops[id].wait_vm_bookkeep = true;
869 		vops.pt_update_ops[tile->id].q =
870 			xe_tile_migrate_exec_queue(tile);
871 	}
872 
873 	err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
874 	if (err)
875 		return ERR_PTR(err);
876 
877 	err = xe_vma_ops_alloc(&vops, false);
878 	if (err) {
879 		fence = ERR_PTR(err);
880 		goto free_ops;
881 	}
882 
883 	fence = ops_execute(vm, &vops);
884 
885 free_ops:
886 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
887 		list_del(&op->link);
888 		kfree(op);
889 	}
890 	xe_vma_ops_fini(&vops);
891 
892 	return fence;
893 }
894 
895 static void xe_vma_free(struct xe_vma *vma)
896 {
897 	if (xe_vma_is_userptr(vma))
898 		kfree(to_userptr_vma(vma));
899 	else
900 		kfree(vma);
901 }
902 
903 #define VMA_CREATE_FLAG_READ_ONLY	BIT(0)
904 #define VMA_CREATE_FLAG_IS_NULL		BIT(1)
905 #define VMA_CREATE_FLAG_DUMPABLE	BIT(2)
906 
907 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
908 				    struct xe_bo *bo,
909 				    u64 bo_offset_or_userptr,
910 				    u64 start, u64 end,
911 				    u16 pat_index, unsigned int flags)
912 {
913 	struct xe_vma *vma;
914 	struct xe_tile *tile;
915 	u8 id;
916 	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
917 	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
918 	bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
919 
920 	xe_assert(vm->xe, start < end);
921 	xe_assert(vm->xe, end < vm->size);
922 
923 	/*
924 	 * Allocate and ensure that the xe_vma_is_userptr() return value
925 	 * matches what was allocated.
926 	 */
927 	if (!bo && !is_null) {
928 		struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
929 
930 		if (!uvma)
931 			return ERR_PTR(-ENOMEM);
932 
933 		vma = &uvma->vma;
934 	} else {
935 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
936 		if (!vma)
937 			return ERR_PTR(-ENOMEM);
938 
939 		if (is_null)
940 			vma->gpuva.flags |= DRM_GPUVA_SPARSE;
941 		if (bo)
942 			vma->gpuva.gem.obj = &bo->ttm.base;
943 	}
944 
945 	INIT_LIST_HEAD(&vma->combined_links.rebind);
946 
947 	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
948 	vma->gpuva.vm = &vm->gpuvm;
949 	vma->gpuva.va.addr = start;
950 	vma->gpuva.va.range = end - start + 1;
951 	if (read_only)
952 		vma->gpuva.flags |= XE_VMA_READ_ONLY;
953 	if (dumpable)
954 		vma->gpuva.flags |= XE_VMA_DUMPABLE;
955 
956 	for_each_tile(tile, vm->xe, id)
957 		vma->tile_mask |= 0x1 << id;
958 
959 	if (vm->xe->info.has_atomic_enable_pte_bit)
960 		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
961 
962 	vma->pat_index = pat_index;
963 
964 	if (bo) {
965 		struct drm_gpuvm_bo *vm_bo;
966 
967 		xe_bo_assert_held(bo);
968 
969 		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
970 		if (IS_ERR(vm_bo)) {
971 			xe_vma_free(vma);
972 			return ERR_CAST(vm_bo);
973 		}
974 
975 		drm_gpuvm_bo_extobj_add(vm_bo);
976 		drm_gem_object_get(&bo->ttm.base);
977 		vma->gpuva.gem.offset = bo_offset_or_userptr;
978 		drm_gpuva_link(&vma->gpuva, vm_bo);
979 		drm_gpuvm_bo_put(vm_bo);
980 	} else /* userptr or null */ {
981 		if (!is_null) {
982 			struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
983 			u64 size = end - start + 1;
984 			int err;
985 
986 			INIT_LIST_HEAD(&userptr->invalidate_link);
987 			INIT_LIST_HEAD(&userptr->repin_link);
988 			vma->gpuva.gem.offset = bo_offset_or_userptr;
989 
990 			err = mmu_interval_notifier_insert(&userptr->notifier,
991 							   current->mm,
992 							   xe_vma_userptr(vma), size,
993 							   &vma_userptr_notifier_ops);
994 			if (err) {
995 				xe_vma_free(vma);
996 				return ERR_PTR(err);
997 			}
998 
999 			userptr->notifier_seq = LONG_MAX;
1000 		}
1001 
1002 		xe_vm_get(vm);
1003 	}
1004 
1005 	return vma;
1006 }
1007 
1008 static void xe_vma_destroy_late(struct xe_vma *vma)
1009 {
1010 	struct xe_vm *vm = xe_vma_vm(vma);
1011 
1012 	if (vma->ufence) {
1013 		xe_sync_ufence_put(vma->ufence);
1014 		vma->ufence = NULL;
1015 	}
1016 
1017 	if (xe_vma_is_userptr(vma)) {
1018 		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
1019 		struct xe_userptr *userptr = &uvma->userptr;
1020 
1021 		if (userptr->sg)
1022 			xe_hmm_userptr_free_sg(uvma);
1023 
1024 		/*
1025 		 * Since userptr pages are not pinned, we can't remove
1026 		 * the notifier until we're sure the GPU is not accessing
1027 		 * them anymore.
1028 		 */
1029 		mmu_interval_notifier_remove(&userptr->notifier);
1030 		xe_vm_put(vm);
1031 	} else if (xe_vma_is_null(vma)) {
1032 		xe_vm_put(vm);
1033 	} else {
1034 		xe_bo_put(xe_vma_bo(vma));
1035 	}
1036 
1037 	xe_vma_free(vma);
1038 }
1039 
1040 static void vma_destroy_work_func(struct work_struct *w)
1041 {
1042 	struct xe_vma *vma =
1043 		container_of(w, struct xe_vma, destroy_work);
1044 
1045 	xe_vma_destroy_late(vma);
1046 }
1047 
1048 static void vma_destroy_cb(struct dma_fence *fence,
1049 			   struct dma_fence_cb *cb)
1050 {
1051 	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1052 
1053 	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1054 	queue_work(system_unbound_wq, &vma->destroy_work);
1055 }
1056 
1057 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1058 {
1059 	struct xe_vm *vm = xe_vma_vm(vma);
1060 
1061 	lockdep_assert_held_write(&vm->lock);
1062 	xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
1063 
1064 	if (xe_vma_is_userptr(vma)) {
1065 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
1066 
1067 		spin_lock(&vm->userptr.invalidated_lock);
1068 		list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
1069 		spin_unlock(&vm->userptr.invalidated_lock);
1070 	} else if (!xe_vma_is_null(vma)) {
1071 		xe_bo_assert_held(xe_vma_bo(vma));
1072 
1073 		drm_gpuva_unlink(&vma->gpuva);
1074 	}
1075 
1076 	xe_vm_assert_held(vm);
1077 	if (fence) {
1078 		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1079 						 vma_destroy_cb);
1080 
1081 		if (ret) {
1082 			XE_WARN_ON(ret != -ENOENT);
1083 			xe_vma_destroy_late(vma);
1084 		}
1085 	} else {
1086 		xe_vma_destroy_late(vma);
1087 	}
1088 }
1089 
1090 /**
1091  * xe_vm_lock_vma() - drm_exec utility to lock a vma
1092  * @exec: The drm_exec object we're currently locking for.
1093  * @vma: The vma for which we want to lock the vm resv and any attached
1094  * object's resv.
1095  *
1096  * Return: 0 on success, negative error code on error. In particular
1097  * may return -EDEADLK on WW transaction contention and -EINTR if
1098  * an interruptible wait is terminated by a signal.
1099  */
1100 int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
1101 {
1102 	struct xe_vm *vm = xe_vma_vm(vma);
1103 	struct xe_bo *bo = xe_vma_bo(vma);
1104 	int err;
1105 
1106 	XE_WARN_ON(!vm);
1107 
1108 	err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
1109 	if (!err && bo && !bo->vm)
1110 		err = drm_exec_lock_obj(exec, &bo->ttm.base);
1111 
1112 	return err;
1113 }
1114 
1115 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1116 {
1117 	struct drm_exec exec;
1118 	int err;
1119 
1120 	drm_exec_init(&exec, 0, 0);
1121 	drm_exec_until_all_locked(&exec) {
1122 		err = xe_vm_lock_vma(&exec, vma);
1123 		drm_exec_retry_on_contention(&exec);
1124 		if (XE_WARN_ON(err))
1125 			break;
1126 	}
1127 
1128 	xe_vma_destroy(vma, NULL);
1129 
1130 	drm_exec_fini(&exec);
1131 }
1132 
1133 struct xe_vma *
1134 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1135 {
1136 	struct drm_gpuva *gpuva;
1137 
1138 	lockdep_assert_held(&vm->lock);
1139 
1140 	if (xe_vm_is_closed_or_banned(vm))
1141 		return NULL;
1142 
1143 	xe_assert(vm->xe, start + range <= vm->size);
1144 
1145 	gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1146 
1147 	return gpuva ? gpuva_to_vma(gpuva) : NULL;
1148 }
1149 
1150 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1151 {
1152 	int err;
1153 
1154 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1155 	lockdep_assert_held(&vm->lock);
1156 
1157 	mutex_lock(&vm->snap_mutex);
1158 	err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1159 	mutex_unlock(&vm->snap_mutex);
1160 	XE_WARN_ON(err);	/* Shouldn't be possible */
1161 
1162 	return err;
1163 }
1164 
1165 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1166 {
1167 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1168 	lockdep_assert_held(&vm->lock);
1169 
1170 	mutex_lock(&vm->snap_mutex);
1171 	drm_gpuva_remove(&vma->gpuva);
1172 	mutex_unlock(&vm->snap_mutex);
1173 	if (vm->usm.last_fault_vma == vma)
1174 		vm->usm.last_fault_vma = NULL;
1175 }
1176 
1177 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1178 {
1179 	struct xe_vma_op *op;
1180 
1181 	op = kzalloc(sizeof(*op), GFP_KERNEL);
1182 
1183 	if (unlikely(!op))
1184 		return NULL;
1185 
1186 	return &op->base;
1187 }
1188 
1189 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1190 
1191 static const struct drm_gpuvm_ops gpuvm_ops = {
1192 	.op_alloc = xe_vm_op_alloc,
1193 	.vm_bo_validate = xe_gpuvm_validate,
1194 	.vm_free = xe_vm_free,
1195 };
1196 
1197 static u64 pde_encode_pat_index(u16 pat_index)
1198 {
1199 	u64 pte = 0;
1200 
1201 	if (pat_index & BIT(0))
1202 		pte |= XE_PPGTT_PTE_PAT0;
1203 
1204 	if (pat_index & BIT(1))
1205 		pte |= XE_PPGTT_PTE_PAT1;
1206 
1207 	return pte;
1208 }
1209 
1210 static u64 pte_encode_pat_index(u16 pat_index, u32 pt_level)
1211 {
1212 	u64 pte = 0;
1213 
1214 	if (pat_index & BIT(0))
1215 		pte |= XE_PPGTT_PTE_PAT0;
1216 
1217 	if (pat_index & BIT(1))
1218 		pte |= XE_PPGTT_PTE_PAT1;
1219 
1220 	if (pat_index & BIT(2)) {
1221 		if (pt_level)
1222 			pte |= XE_PPGTT_PDE_PDPE_PAT2;
1223 		else
1224 			pte |= XE_PPGTT_PTE_PAT2;
1225 	}
1226 
1227 	if (pat_index & BIT(3))
1228 		pte |= XELPG_PPGTT_PTE_PAT3;
1229 
1230 	if (pat_index & BIT(4))
1231 		pte |= XE2_PPGTT_PTE_PAT4;
1232 
1233 	return pte;
1234 }
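/*
 * Worked example: pat_index 6 (0b00110) sets XE_PPGTT_PTE_PAT1 plus the
 * PAT2 bit, where PAT2 occupies a different bit position in directory
 * entries (pt_level != 0) than in leaf PTEs, hence the pt_level check
 * above. BIT(3) and BIT(4) map to XELPG_PPGTT_PTE_PAT3 and
 * XE2_PPGTT_PTE_PAT4 for platforms with wider PAT indices.
 */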
1235 
1236 static u64 pte_encode_ps(u32 pt_level)
1237 {
1238 	XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
1239 
1240 	if (pt_level == 1)
1241 		return XE_PDE_PS_2M;
1242 	else if (pt_level == 2)
1243 		return XE_PDPE_PS_1G;
1244 
1245 	return 0;
1246 }
1247 
1248 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
1249 			      const u16 pat_index)
1250 {
1251 	u64 pde;
1252 
1253 	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1254 	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1255 	pde |= pde_encode_pat_index(pat_index);
1256 
1257 	return pde;
1258 }
1259 
1260 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1261 			      u16 pat_index, u32 pt_level)
1262 {
1263 	u64 pte;
1264 
1265 	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1266 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1267 	pte |= pte_encode_pat_index(pat_index, pt_level);
1268 	pte |= pte_encode_ps(pt_level);
1269 
1270 	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1271 		pte |= XE_PPGTT_PTE_DM;
1272 
1273 	return pte;
1274 }
1275 
1276 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1277 			       u16 pat_index, u32 pt_level)
1278 {
1279 	pte |= XE_PAGE_PRESENT;
1280 
1281 	if (likely(!xe_vma_read_only(vma)))
1282 		pte |= XE_PAGE_RW;
1283 
1284 	pte |= pte_encode_pat_index(pat_index, pt_level);
1285 	pte |= pte_encode_ps(pt_level);
1286 
1287 	if (unlikely(xe_vma_is_null(vma)))
1288 		pte |= XE_PTE_NULL;
1289 
1290 	return pte;
1291 }
1292 
1293 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1294 				u16 pat_index,
1295 				u32 pt_level, bool devmem, u64 flags)
1296 {
1297 	u64 pte;
1298 
1299 	/* Avoid passing random bits directly as flags */
1300 	xe_assert(xe, !(flags & ~XE_PTE_PS64));
1301 
1302 	pte = addr;
1303 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1304 	pte |= pte_encode_pat_index(pat_index, pt_level);
1305 	pte |= pte_encode_ps(pt_level);
1306 
1307 	if (devmem)
1308 		pte |= XE_PPGTT_PTE_DM;
1309 
1310 	pte |= flags;
1311 
1312 	return pte;
1313 }
1314 
1315 static const struct xe_pt_ops xelp_pt_ops = {
1316 	.pte_encode_bo = xelp_pte_encode_bo,
1317 	.pte_encode_vma = xelp_pte_encode_vma,
1318 	.pte_encode_addr = xelp_pte_encode_addr,
1319 	.pde_encode_bo = xelp_pde_encode_bo,
1320 };
1321 
1322 static void vm_destroy_work_func(struct work_struct *w);
1323 
1324 /**
1325  * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
1326  * given tile and vm.
1327  * @xe: xe device.
1328  * @tile: tile to set up for.
1329  * @vm: vm to set up for.
1330  *
1331  * Sets up a pagetable tree with one page-table per level and a single
1332  * leaf PTE. All pagetable entries point to the single page-table or,
1333  * for MAX_HUGEPTE_LEVEL, a NULL huge PTE that returns 0 on reads and
1334  * turns writes into NOPs.
1335  *
1336  * Return: 0 on success, negative error code on error.
1337  */
1338 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1339 				struct xe_vm *vm)
1340 {
1341 	u8 id = tile->id;
1342 	int i;
1343 
1344 	for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
1345 		vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
1346 		if (IS_ERR(vm->scratch_pt[id][i]))
1347 			return PTR_ERR(vm->scratch_pt[id][i]);
1348 
1349 		xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
1350 	}
1351 
1352 	return 0;
1353 }
1354 
1355 static void xe_vm_free_scratch(struct xe_vm *vm)
1356 {
1357 	struct xe_tile *tile;
1358 	u8 id;
1359 
1360 	if (!xe_vm_has_scratch(vm))
1361 		return;
1362 
1363 	for_each_tile(tile, vm->xe, id) {
1364 		u32 i;
1365 
1366 		if (!vm->pt_root[id])
1367 			continue;
1368 
1369 		for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
1370 			if (vm->scratch_pt[id][i])
1371 				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
1372 	}
1373 }
1374 
1375 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1376 {
1377 	struct drm_gem_object *vm_resv_obj;
1378 	struct xe_vm *vm;
1379 	int err, number_tiles = 0;
1380 	struct xe_tile *tile;
1381 	u8 id;
1382 
1383 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1384 	if (!vm)
1385 		return ERR_PTR(-ENOMEM);
1386 
1387 	vm->xe = xe;
1388 
1389 	vm->size = 1ull << xe->info.va_bits;
1390 
1391 	vm->flags = flags;
1392 
1393 	init_rwsem(&vm->lock);
1394 	mutex_init(&vm->snap_mutex);
1395 
1396 	INIT_LIST_HEAD(&vm->rebind_list);
1397 
1398 	INIT_LIST_HEAD(&vm->userptr.repin_list);
1399 	INIT_LIST_HEAD(&vm->userptr.invalidated);
1400 	init_rwsem(&vm->userptr.notifier_lock);
1401 	spin_lock_init(&vm->userptr.invalidated_lock);
1402 
1403 	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
1404 
1405 	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1406 
1407 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
1408 	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
1409 
1410 	for_each_tile(tile, xe, id)
1411 		xe_range_fence_tree_init(&vm->rftree[id]);
1412 
1413 	vm->pt_ops = &xelp_pt_ops;
1414 
1415 	/*
1416 	 * Long-running workloads are not protected by the scheduler references.
1417 	 * By design, run_job for long-running workloads returns NULL and the
1418  * scheduler drops all references to it, hence protecting the VM
1419 	 * for this case is necessary.
1420 	 */
1421 	if (flags & XE_VM_FLAG_LR_MODE)
1422 		xe_pm_runtime_get_noresume(xe);
1423 
1424 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1425 	if (!vm_resv_obj) {
1426 		err = -ENOMEM;
1427 		goto err_no_resv;
1428 	}
1429 
1430 	drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1431 		       vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
1432 
1433 	drm_gem_object_put(vm_resv_obj);
1434 
1435 	err = xe_vm_lock(vm, true);
1436 	if (err)
1437 		goto err_close;
1438 
1439 	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1440 		vm->flags |= XE_VM_FLAG_64K;
1441 
1442 	for_each_tile(tile, xe, id) {
1443 		if (flags & XE_VM_FLAG_MIGRATION &&
1444 		    tile->id != XE_VM_FLAG_TILE_ID(flags))
1445 			continue;
1446 
1447 		vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1448 		if (IS_ERR(vm->pt_root[id])) {
1449 			err = PTR_ERR(vm->pt_root[id]);
1450 			vm->pt_root[id] = NULL;
1451 			goto err_unlock_close;
1452 		}
1453 	}
1454 
1455 	if (xe_vm_has_scratch(vm)) {
1456 		for_each_tile(tile, xe, id) {
1457 			if (!vm->pt_root[id])
1458 				continue;
1459 
1460 			err = xe_vm_create_scratch(xe, tile, vm);
1461 			if (err)
1462 				goto err_unlock_close;
1463 		}
1464 		vm->batch_invalidate_tlb = true;
1465 	}
1466 
1467 	if (vm->flags & XE_VM_FLAG_LR_MODE) {
1468 		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1469 		vm->batch_invalidate_tlb = false;
1470 	}
1471 
1472 	/* Fill pt_root after allocating scratch tables */
1473 	for_each_tile(tile, xe, id) {
1474 		if (!vm->pt_root[id])
1475 			continue;
1476 
1477 		xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1478 	}
1479 	xe_vm_unlock(vm);
1480 
1481 	/* Kernel migration VM shouldn't have a circular loop... */
1482 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
1483 		for_each_tile(tile, xe, id) {
1484 			struct xe_exec_queue *q;
1485 			u32 create_flags = EXEC_QUEUE_FLAG_VM;
1486 
1487 			if (!vm->pt_root[id])
1488 				continue;
1489 
1490 			q = xe_exec_queue_create_bind(xe, tile, create_flags, 0);
1491 			if (IS_ERR(q)) {
1492 				err = PTR_ERR(q);
1493 				goto err_close;
1494 			}
1495 			vm->q[id] = q;
1496 			number_tiles++;
1497 		}
1498 	}
1499 
1500 	if (number_tiles > 1)
1501 		vm->composite_fence_ctx = dma_fence_context_alloc(1);
1502 
1503 	trace_xe_vm_create(vm);
1504 
1505 	return vm;
1506 
1507 err_unlock_close:
1508 	xe_vm_unlock(vm);
1509 err_close:
1510 	xe_vm_close_and_put(vm);
1511 	return ERR_PTR(err);
1512 
1513 err_no_resv:
1514 	mutex_destroy(&vm->snap_mutex);
1515 	for_each_tile(tile, xe, id)
1516 		xe_range_fence_tree_fini(&vm->rftree[id]);
1517 	ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
1518 	kfree(vm);
1519 	if (flags & XE_VM_FLAG_LR_MODE)
1520 		xe_pm_runtime_put(xe);
1521 	return ERR_PTR(err);
1522 }
1523 
1524 static void xe_vm_close(struct xe_vm *vm)
1525 {
1526 	down_write(&vm->lock);
1527 	vm->size = 0;
1528 	up_write(&vm->lock);
1529 }
1530 
1531 void xe_vm_close_and_put(struct xe_vm *vm)
1532 {
1533 	LIST_HEAD(contested);
1534 	struct xe_device *xe = vm->xe;
1535 	struct xe_tile *tile;
1536 	struct xe_vma *vma, *next_vma;
1537 	struct drm_gpuva *gpuva, *next;
1538 	u8 id;
1539 
1540 	xe_assert(xe, !vm->preempt.num_exec_queues);
1541 
1542 	xe_vm_close(vm);
1543 	if (xe_vm_in_preempt_fence_mode(vm))
1544 		flush_work(&vm->preempt.rebind_work);
1545 
1546 	down_write(&vm->lock);
1547 	for_each_tile(tile, xe, id) {
1548 		if (vm->q[id])
1549 			xe_exec_queue_last_fence_put(vm->q[id], vm);
1550 	}
1551 	up_write(&vm->lock);
1552 
1553 	for_each_tile(tile, xe, id) {
1554 		if (vm->q[id]) {
1555 			xe_exec_queue_kill(vm->q[id]);
1556 			xe_exec_queue_put(vm->q[id]);
1557 			vm->q[id] = NULL;
1558 		}
1559 	}
1560 
1561 	down_write(&vm->lock);
1562 	xe_vm_lock(vm, false);
1563 	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1564 		vma = gpuva_to_vma(gpuva);
1565 
1566 		if (xe_vma_has_no_bo(vma)) {
1567 			down_read(&vm->userptr.notifier_lock);
1568 			vma->gpuva.flags |= XE_VMA_DESTROYED;
1569 			up_read(&vm->userptr.notifier_lock);
1570 		}
1571 
1572 		xe_vm_remove_vma(vm, vma);
1573 
1574 		/* easy case, remove from VMA? */
1575 		if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1576 			list_del_init(&vma->combined_links.rebind);
1577 			xe_vma_destroy(vma, NULL);
1578 			continue;
1579 		}
1580 
1581 		list_move_tail(&vma->combined_links.destroy, &contested);
1582 		vma->gpuva.flags |= XE_VMA_DESTROYED;
1583 	}
1584 
1585 	/*
1586 	 * All vm operations will add shared fences to resv.
1587 	 * The only exception is eviction for a shared object,
1588 	 * but even so, the unbind when evicted would still
1589 	 * install a fence to resv. Hence it's safe to
1590 	 * destroy the pagetables immediately.
1591 	 */
1592 	xe_vm_free_scratch(vm);
1593 
1594 	for_each_tile(tile, xe, id) {
1595 		if (vm->pt_root[id]) {
1596 			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1597 			vm->pt_root[id] = NULL;
1598 		}
1599 	}
1600 	xe_vm_unlock(vm);
1601 
1602 	/*
1603 	 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1604 	 * Since we hold a refcount to the bo, we can remove and free
1605 	 * the members safely without locking.
1606 	 */
1607 	list_for_each_entry_safe(vma, next_vma, &contested,
1608 				 combined_links.destroy) {
1609 		list_del_init(&vma->combined_links.destroy);
1610 		xe_vma_destroy_unlocked(vma);
1611 	}
1612 
1613 	up_write(&vm->lock);
1614 
1615 	down_write(&xe->usm.lock);
1616 	if (vm->usm.asid) {
1617 		void *lookup;
1618 
1619 		xe_assert(xe, xe->info.has_asid);
1620 		xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
1621 
1622 		lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1623 		xe_assert(xe, lookup == vm);
1624 	}
1625 	up_write(&xe->usm.lock);
1626 
1627 	for_each_tile(tile, xe, id)
1628 		xe_range_fence_tree_fini(&vm->rftree[id]);
1629 
1630 	xe_vm_put(vm);
1631 }
1632 
1633 static void vm_destroy_work_func(struct work_struct *w)
1634 {
1635 	struct xe_vm *vm =
1636 		container_of(w, struct xe_vm, destroy_work);
1637 	struct xe_device *xe = vm->xe;
1638 	struct xe_tile *tile;
1639 	u8 id;
1640 
1641 	/* xe_vm_close_and_put was not called? */
1642 	xe_assert(xe, !vm->size);
1643 
1644 	if (xe_vm_in_preempt_fence_mode(vm))
1645 		flush_work(&vm->preempt.rebind_work);
1646 
1647 	mutex_destroy(&vm->snap_mutex);
1648 
1649 	if (vm->flags & XE_VM_FLAG_LR_MODE)
1650 		xe_pm_runtime_put(xe);
1651 
1652 	for_each_tile(tile, xe, id)
1653 		XE_WARN_ON(vm->pt_root[id]);
1654 
1655 	trace_xe_vm_free(vm);
1656 
1657 	ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
1658 
1659 	if (vm->xef)
1660 		xe_file_put(vm->xef);
1661 
1662 	kfree(vm);
1663 }
1664 
1665 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1666 {
1667 	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1668 
1669 	/* To destroy the VM we need to be able to sleep */
1670 	queue_work(system_unbound_wq, &vm->destroy_work);
1671 }
1672 
1673 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1674 {
1675 	struct xe_vm *vm;
1676 
1677 	mutex_lock(&xef->vm.lock);
1678 	vm = xa_load(&xef->vm.xa, id);
1679 	if (vm)
1680 		xe_vm_get(vm);
1681 	mutex_unlock(&xef->vm.lock);
1682 
1683 	return vm;
1684 }
1685 
1686 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1687 {
1688 	return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
1689 					 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
1690 }
1691 
1692 static struct xe_exec_queue *
1693 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
1694 {
1695 	return q ? q : vm->q[0];
1696 }
1697 
1698 static struct xe_user_fence *
1699 find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
1700 {
1701 	unsigned int i;
1702 
1703 	for (i = 0; i < num_syncs; i++) {
1704 		struct xe_sync_entry *e = &syncs[i];
1705 
1706 		if (xe_sync_is_ufence(e))
1707 			return xe_sync_ufence_get(e);
1708 	}
1709 
1710 	return NULL;
1711 }
1712 
1713 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
1714 				    DRM_XE_VM_CREATE_FLAG_LR_MODE | \
1715 				    DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1716 
1717 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1718 		       struct drm_file *file)
1719 {
1720 	struct xe_device *xe = to_xe_device(dev);
1721 	struct xe_file *xef = to_xe_file(file);
1722 	struct drm_xe_vm_create *args = data;
1723 	struct xe_tile *tile;
1724 	struct xe_vm *vm;
1725 	u32 id, asid;
1726 	int err;
1727 	u32 flags = 0;
1728 
1729 	if (XE_IOCTL_DBG(xe, args->extensions))
1730 		return -EINVAL;
1731 
1732 	if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1733 		args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
1734 
1735 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1736 			 !xe->info.has_usm))
1737 		return -EINVAL;
1738 
1739 	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1740 		return -EINVAL;
1741 
1742 	if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1743 		return -EINVAL;
1744 
1745 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
1746 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1747 		return -EINVAL;
1748 
1749 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
1750 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1751 		return -EINVAL;
1752 
1755 
1756 	if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
1757 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
1758 	if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
1759 		flags |= XE_VM_FLAG_LR_MODE;
1760 	if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1761 		flags |= XE_VM_FLAG_FAULT_MODE;
1762 
1763 	vm = xe_vm_create(xe, flags);
1764 	if (IS_ERR(vm))
1765 		return PTR_ERR(vm);
1766 
1767 	if (xe->info.has_asid) {
1768 		down_write(&xe->usm.lock);
1769 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1770 				      XA_LIMIT(1, XE_MAX_ASID - 1),
1771 				      &xe->usm.next_asid, GFP_KERNEL);
1772 		up_write(&xe->usm.lock);
1773 		if (err < 0)
1774 			goto err_close_and_put;
1775 
1776 		vm->usm.asid = asid;
1777 	}
1778 
1779 	vm->xef = xe_file_get(xef);
1780 
1781 	/* Record BO memory for VM pagetable created against client */
1782 	for_each_tile(tile, xe, id)
1783 		if (vm->pt_root[id])
1784 			xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
1785 
1786 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1787 	/* Warning: Security issue - never enable by default */
1788 	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
1789 #endif
1790 
1791 	/* user id alloc must always be last in ioctl to prevent UAF */
1792 	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1793 	if (err)
1794 		goto err_close_and_put;
1795 
1796 	args->vm_id = id;
1797 
1798 	return 0;
1799 
1800 err_close_and_put:
1801 	xe_vm_close_and_put(vm);
1802 
1803 	return err;
1804 }
1805 
1806 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1807 			struct drm_file *file)
1808 {
1809 	struct xe_device *xe = to_xe_device(dev);
1810 	struct xe_file *xef = to_xe_file(file);
1811 	struct drm_xe_vm_destroy *args = data;
1812 	struct xe_vm *vm;
1813 	int err = 0;
1814 
1815 	if (XE_IOCTL_DBG(xe, args->pad) ||
1816 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1817 		return -EINVAL;
1818 
1819 	mutex_lock(&xef->vm.lock);
1820 	vm = xa_load(&xef->vm.xa, args->vm_id);
1821 	if (XE_IOCTL_DBG(xe, !vm))
1822 		err = -ENOENT;
1823 	else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
1824 		err = -EBUSY;
1825 	else
1826 		xa_erase(&xef->vm.xa, args->vm_id);
1827 	mutex_unlock(&xef->vm.lock);
1828 
1829 	if (!err)
1830 		xe_vm_close_and_put(vm);
1831 
1832 	return err;
1833 }
1834 
1835 static const u32 region_to_mem_type[] = {
1836 	XE_PL_TT,
1837 	XE_PL_VRAM0,
1838 	XE_PL_VRAM1,
1839 };
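/*
 * The uAPI prefetch region value indexes this table: 0 selects system
 * memory (TT) and 1/2 select the two possible VRAM instances.
 */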
1840 
1841 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
1842 			     bool post_commit)
1843 {
1844 	down_read(&vm->userptr.notifier_lock);
1845 	vma->gpuva.flags |= XE_VMA_DESTROYED;
1846 	up_read(&vm->userptr.notifier_lock);
1847 	if (post_commit)
1848 		xe_vm_remove_vma(vm, vma);
1849 }
1850 
1851 #undef ULL
1852 #define ULL	unsigned long long
1853 
1854 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
1855 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
1856 {
1857 	struct xe_vma *vma;
1858 
1859 	switch (op->op) {
1860 	case DRM_GPUVA_OP_MAP:
1861 		vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
1862 		       (ULL)op->map.va.addr, (ULL)op->map.va.range);
1863 		break;
1864 	case DRM_GPUVA_OP_REMAP:
1865 		vma = gpuva_to_vma(op->remap.unmap->va);
1866 		vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
1867 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
1868 		       op->remap.unmap->keep ? 1 : 0);
1869 		if (op->remap.prev)
1870 			vm_dbg(&xe->drm,
1871 			       "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
1872 			       (ULL)op->remap.prev->va.addr,
1873 			       (ULL)op->remap.prev->va.range);
1874 		if (op->remap.next)
1875 			vm_dbg(&xe->drm,
1876 			       "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
1877 			       (ULL)op->remap.next->va.addr,
1878 			       (ULL)op->remap.next->va.range);
1879 		break;
1880 	case DRM_GPUVA_OP_UNMAP:
1881 		vma = gpuva_to_vma(op->unmap.va);
1882 		vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
1883 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
1884 		       op->unmap.keep ? 1 : 0);
1885 		break;
1886 	case DRM_GPUVA_OP_PREFETCH:
1887 		vma = gpuva_to_vma(op->prefetch.va);
1888 		vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
1889 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
1890 		break;
1891 	default:
1892 		drm_warn(&xe->drm, "NOT POSSIBLE");
1893 	}
1894 }
1895 #else
1896 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
1897 {
1898 }
1899 #endif
1900 
1901 /*
1902  * Create operations list from IOCTL arguments, setup operations fields so parse
1903  * and commit steps are decoupled from IOCTL arguments. This step can fail.
1904  */
1905 static struct drm_gpuva_ops *
1906 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
1907 			 u64 bo_offset_or_userptr, u64 addr, u64 range,
1908 			 u32 operation, u32 flags,
1909 			 u32 prefetch_region, u16 pat_index)
1910 {
1911 	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
1912 	struct drm_gpuva_ops *ops;
1913 	struct drm_gpuva_op *__op;
1914 	struct drm_gpuvm_bo *vm_bo;
1915 	int err;
1916 
1917 	lockdep_assert_held_write(&vm->lock);
1918 
1919 	vm_dbg(&vm->xe->drm,
1920 	       "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
1921 	       operation, (ULL)addr, (ULL)range,
1922 	       (ULL)bo_offset_or_userptr);
1923 
1924 	switch (operation) {
1925 	case DRM_XE_VM_BIND_OP_MAP:
1926 	case DRM_XE_VM_BIND_OP_MAP_USERPTR:
1927 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
1928 						  obj, bo_offset_or_userptr);
1929 		break;
1930 	case DRM_XE_VM_BIND_OP_UNMAP:
1931 		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
1932 		break;
1933 	case DRM_XE_VM_BIND_OP_PREFETCH:
1934 		ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
1935 		break;
1936 	case DRM_XE_VM_BIND_OP_UNMAP_ALL:
1937 		xe_assert(vm->xe, bo);
1938 
1939 		err = xe_bo_lock(bo, true);
1940 		if (err)
1941 			return ERR_PTR(err);
1942 
1943 		vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
1944 		if (IS_ERR(vm_bo)) {
1945 			xe_bo_unlock(bo);
1946 			return ERR_CAST(vm_bo);
1947 		}
1948 
1949 		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
1950 		drm_gpuvm_bo_put(vm_bo);
1951 		xe_bo_unlock(bo);
1952 		break;
1953 	default:
1954 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
1955 		ops = ERR_PTR(-EINVAL);
1956 	}
1957 	if (IS_ERR(ops))
1958 		return ops;
1959 
1960 	drm_gpuva_for_each_op(__op, ops) {
1961 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
1962 
1963 		if (__op->op == DRM_GPUVA_OP_MAP) {
1964 			op->map.immediate =
1965 				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
1966 			op->map.read_only =
1967 				flags & DRM_XE_VM_BIND_FLAG_READONLY;
1968 			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
1969 			op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
1970 			op->map.pat_index = pat_index;
1971 		} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
1972 			op->prefetch.region = prefetch_region;
1973 		}
1974 
1975 		print_op(vm->xe, __op);
1976 	}
1977 
1978 	return ops;
1979 }
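/*
 * Example (hypothetical addresses): a MAP of [0x2000, 0x3000) on top of an
 * existing vma covering [0x1000, 0x4000) makes drm_gpuvm emit a
 * DRM_GPUVA_OP_REMAP whose prev part keeps [0x1000, 0x2000) and whose next
 * part keeps [0x3000, 0x4000), followed by the DRM_GPUVA_OP_MAP for the
 * new range; the parse and commit steps below then walk that list.
 */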
1980 
1981 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
1982 			      u16 pat_index, unsigned int flags)
1983 {
1984 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
1985 	struct drm_exec exec;
1986 	struct xe_vma *vma;
1987 	int err = 0;
1988 
1989 	lockdep_assert_held_write(&vm->lock);
1990 
1991 	if (bo) {
1992 		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
1993 		drm_exec_until_all_locked(&exec) {
1994 			err = 0;
1995 			if (!bo->vm) {
1996 				err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
1997 				drm_exec_retry_on_contention(&exec);
1998 			}
1999 			if (!err) {
2000 				err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2001 				drm_exec_retry_on_contention(&exec);
2002 			}
2003 			if (err) {
2004 				drm_exec_fini(&exec);
2005 				return ERR_PTR(err);
2006 			}
2007 		}
2008 	}
2009 	vma = xe_vma_create(vm, bo, op->gem.offset,
2010 			    op->va.addr, op->va.addr +
2011 			    op->va.range - 1, pat_index, flags);
2012 	if (IS_ERR(vma))
2013 		goto err_unlock;
2014 
2015 	if (xe_vma_is_userptr(vma))
2016 		err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2017 	else if (!xe_vma_has_no_bo(vma) && !bo->vm)
2018 		err = add_preempt_fences(vm, bo);
2019 
2020 err_unlock:
2021 	if (bo)
2022 		drm_exec_fini(&exec);
2023 
2024 	if (err) {
2025 		prep_vma_destroy(vm, vma, false);
2026 		xe_vma_destroy_unlocked(vma);
2027 		vma = ERR_PTR(err);
2028 	}
2029 
2030 	return vma;
2031 }
2032 
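/*
 * The XE_VMA_PTE_* flags record the largest page size with which a VMA
 * was bound. xe_vma_max_pte_size() reads that back so REMAP parsing can
 * tell whether a split piece still lines up with the old huge page-table
 * entries, in which case rebinding it can be skipped.
 */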
2033 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2034 {
2035 	if (vma->gpuva.flags & XE_VMA_PTE_1G)
2036 		return SZ_1G;
2037 	else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
2038 		return SZ_2M;
2039 	else if (vma->gpuva.flags & XE_VMA_PTE_64K)
2040 		return SZ_64K;
2041 	else if (vma->gpuva.flags & XE_VMA_PTE_4K)
2042 		return SZ_4K;
2043 
2044 	return SZ_1G;	/* Uninitialized, use max size */
2045 }
2046 
2047 static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2048 {
2049 	switch (size) {
2050 	case SZ_1G:
2051 		vma->gpuva.flags |= XE_VMA_PTE_1G;
2052 		break;
2053 	case SZ_2M:
2054 		vma->gpuva.flags |= XE_VMA_PTE_2M;
2055 		break;
2056 	case SZ_64K:
2057 		vma->gpuva.flags |= XE_VMA_PTE_64K;
2058 		break;
2059 	case SZ_4K:
2060 		vma->gpuva.flags |= XE_VMA_PTE_4K;
2061 		break;
2062 	}
2063 }
2064 
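/*
 * Commit an operation to the VM's GPUVA state: insert newly created VMAs,
 * mark unmapped VMAs for destruction, and record the XE_VMA_OP_*COMMITTED
 * flags so that a later failure can be rolled back by xe_vma_op_unwind().
 */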
2065 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2066 {
2067 	int err = 0;
2068 
2069 	lockdep_assert_held_write(&vm->lock);
2070 
2071 	switch (op->base.op) {
2072 	case DRM_GPUVA_OP_MAP:
2073 		err |= xe_vm_insert_vma(vm, op->map.vma);
2074 		if (!err)
2075 			op->flags |= XE_VMA_OP_COMMITTED;
2076 		break;
2077 	case DRM_GPUVA_OP_REMAP:
2078 	{
2079 		u8 tile_present =
2080 			gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2081 
2082 		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2083 				 true);
2084 		op->flags |= XE_VMA_OP_COMMITTED;
2085 
2086 		if (op->remap.prev) {
2087 			err |= xe_vm_insert_vma(vm, op->remap.prev);
2088 			if (!err)
2089 				op->flags |= XE_VMA_OP_PREV_COMMITTED;
2090 			if (!err && op->remap.skip_prev) {
2091 				op->remap.prev->tile_present =
2092 					tile_present;
2093 				op->remap.prev = NULL;
2094 			}
2095 		}
2096 		if (op->remap.next) {
2097 			err |= xe_vm_insert_vma(vm, op->remap.next);
2098 			if (!err)
2099 				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2100 			if (!err && op->remap.skip_next) {
2101 				op->remap.next->tile_present =
2102 					tile_present;
2103 				op->remap.next = NULL;
2104 			}
2105 		}
2106 
2107 		/* Adjust for partial unbind after removing VMA from VM */
2108 		if (!err) {
2109 			op->base.remap.unmap->va->va.addr = op->remap.start;
2110 			op->base.remap.unmap->va->va.range = op->remap.range;
2111 		}
2112 		break;
2113 	}
2114 	case DRM_GPUVA_OP_UNMAP:
2115 		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2116 		op->flags |= XE_VMA_OP_COMMITTED;
2117 		break;
2118 	case DRM_GPUVA_OP_PREFETCH:
2119 		op->flags |= XE_VMA_OP_COMMITTED;
2120 		break;
2121 	default:
2122 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2123 	}
2124 
2125 	return err;
2126 }
2127 
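/*
 * Turn a drm_gpuva_ops list into driver xe_vma_ops, creating VMAs for MAP
 * ops and for the prev/next pieces of a REMAP, and committing each op as
 * it is parsed. Illustrative example: with [0x0000, 0x4000) already
 * mapped, binding [0x1000, 0x2000) produces a REMAP whose prev piece
 * re-creates [0x0000, 0x1000) and whose next piece re-creates
 * [0x2000, 0x4000), followed by a MAP of the new range itself.
 */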
2128 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
2129 				   struct xe_vma_ops *vops)
2130 {
2131 	struct xe_device *xe = vm->xe;
2132 	struct drm_gpuva_op *__op;
2133 	struct xe_tile *tile;
2134 	u8 id, tile_mask = 0;
2135 	int err = 0;
2136 
2137 	lockdep_assert_held_write(&vm->lock);
2138 
2139 	for_each_tile(tile, vm->xe, id)
2140 		tile_mask |= 0x1 << id;
2141 
2142 	drm_gpuva_for_each_op(__op, ops) {
2143 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2144 		struct xe_vma *vma;
2145 		unsigned int flags = 0;
2146 
2147 		INIT_LIST_HEAD(&op->link);
2148 		list_add_tail(&op->link, &vops->list);
2149 		op->tile_mask = tile_mask;
2150 
2151 		switch (op->base.op) {
2152 		case DRM_GPUVA_OP_MAP:
2153 		{
2154 			flags |= op->map.read_only ?
2155 				VMA_CREATE_FLAG_READ_ONLY : 0;
2156 			flags |= op->map.is_null ?
2157 				VMA_CREATE_FLAG_IS_NULL : 0;
2158 			flags |= op->map.dumpable ?
2159 				VMA_CREATE_FLAG_DUMPABLE : 0;
2160 
2161 			vma = new_vma(vm, &op->base.map, op->map.pat_index,
2162 				      flags);
2163 			if (IS_ERR(vma))
2164 				return PTR_ERR(vma);
2165 
2166 			op->map.vma = vma;
2167 			if (op->map.immediate || !xe_vm_in_fault_mode(vm))
2168 				xe_vma_ops_incr_pt_update_ops(vops,
2169 							      op->tile_mask);
2170 			break;
2171 		}
2172 		case DRM_GPUVA_OP_REMAP:
2173 		{
2174 			struct xe_vma *old =
2175 				gpuva_to_vma(op->base.remap.unmap->va);
2176 
2177 			op->remap.start = xe_vma_start(old);
2178 			op->remap.range = xe_vma_size(old);
2179 
2180 			if (op->base.remap.prev) {
2181 				flags |= op->base.remap.unmap->va->flags &
2182 					XE_VMA_READ_ONLY ?
2183 					VMA_CREATE_FLAG_READ_ONLY : 0;
2184 				flags |= op->base.remap.unmap->va->flags &
2185 					DRM_GPUVA_SPARSE ?
2186 					VMA_CREATE_FLAG_IS_NULL : 0;
2187 				flags |= op->base.remap.unmap->va->flags &
2188 					XE_VMA_DUMPABLE ?
2189 					VMA_CREATE_FLAG_DUMPABLE : 0;
2190 
2191 				vma = new_vma(vm, op->base.remap.prev,
2192 					      old->pat_index, flags);
2193 				if (IS_ERR(vma))
2194 					return PTR_ERR(vma);
2195 
2196 				op->remap.prev = vma;
2197 
2198 				/*
2199 				 * Userptr creates a new SG mapping so
2200 				 * we must also rebind.
2201 				 */
2202 				op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2203 					IS_ALIGNED(xe_vma_end(vma),
2204 						   xe_vma_max_pte_size(old));
2205 				if (op->remap.skip_prev) {
2206 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2207 					op->remap.range -=
2208 						xe_vma_end(vma) -
2209 						xe_vma_start(old);
2210 					op->remap.start = xe_vma_end(vma);
2211 					vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
2212 					       (ULL)op->remap.start,
2213 					       (ULL)op->remap.range);
2214 				} else {
2215 					xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2216 				}
2217 			}
2218 
2219 			if (op->base.remap.next) {
2220 				flags |= op->base.remap.unmap->va->flags &
2221 					XE_VMA_READ_ONLY ?
2222 					VMA_CREATE_FLAG_READ_ONLY : 0;
2223 				flags |= op->base.remap.unmap->va->flags &
2224 					DRM_GPUVA_SPARSE ?
2225 					VMA_CREATE_FLAG_IS_NULL : 0;
2226 				flags |= op->base.remap.unmap->va->flags &
2227 					XE_VMA_DUMPABLE ?
2228 					VMA_CREATE_FLAG_DUMPABLE : 0;
2229 
2230 				vma = new_vma(vm, op->base.remap.next,
2231 					      old->pat_index, flags);
2232 				if (IS_ERR(vma))
2233 					return PTR_ERR(vma);
2234 
2235 				op->remap.next = vma;
2236 
2237 				/*
2238 				 * Userptr creates a new SG mapping so
2239 				 * we must also rebind.
2240 				 */
2241 				op->remap.skip_next = !xe_vma_is_userptr(old) &&
2242 					IS_ALIGNED(xe_vma_start(vma),
2243 						   xe_vma_max_pte_size(old));
2244 				if (op->remap.skip_next) {
2245 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2246 					op->remap.range -=
2247 						xe_vma_end(old) -
2248 						xe_vma_start(vma);
2249 					vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
2250 					       (ULL)op->remap.start,
2251 					       (ULL)op->remap.range);
2252 				} else {
2253 					xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2254 				}
2255 			}
2256 			xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2257 			break;
2258 		}
2259 		case DRM_GPUVA_OP_UNMAP:
2260 		case DRM_GPUVA_OP_PREFETCH:
2261 			/* FIXME: Need to skip some prefetch ops */
2262 			xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2263 			break;
2264 		default:
2265 			drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2266 		}
2267 
2268 		err = xe_vma_op_commit(vm, op);
2269 		if (err)
2270 			return err;
2271 	}
2272 
2273 	return 0;
2274 }
2275 
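/*
 * Undo a single committed operation, restoring the GPUVA tree to its
 * pre-op state: destroy the VMAs created for MAP/REMAP ops and re-insert
 * the VMAs that the op had marked as destroyed.
 */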
2276 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2277 			     bool post_commit, bool prev_post_commit,
2278 			     bool next_post_commit)
2279 {
2280 	lockdep_assert_held_write(&vm->lock);
2281 
2282 	switch (op->base.op) {
2283 	case DRM_GPUVA_OP_MAP:
2284 		if (op->map.vma) {
2285 			prep_vma_destroy(vm, op->map.vma, post_commit);
2286 			xe_vma_destroy_unlocked(op->map.vma);
2287 		}
2288 		break;
2289 	case DRM_GPUVA_OP_UNMAP:
2290 	{
2291 		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2292 
2293 		if (vma) {
2294 			down_read(&vm->userptr.notifier_lock);
2295 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2296 			up_read(&vm->userptr.notifier_lock);
2297 			if (post_commit)
2298 				xe_vm_insert_vma(vm, vma);
2299 		}
2300 		break;
2301 	}
2302 	case DRM_GPUVA_OP_REMAP:
2303 	{
2304 		struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2305 
2306 		if (op->remap.prev) {
2307 			prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2308 			xe_vma_destroy_unlocked(op->remap.prev);
2309 		}
2310 		if (op->remap.next) {
2311 			prep_vma_destroy(vm, op->remap.next, next_post_commit);
2312 			xe_vma_destroy_unlocked(op->remap.next);
2313 		}
2314 		if (vma) {
2315 			down_read(&vm->userptr.notifier_lock);
2316 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2317 			up_read(&vm->userptr.notifier_lock);
2318 			if (post_commit)
2319 				xe_vm_insert_vma(vm, vma);
2320 		}
2321 		break;
2322 	}
2323 	case DRM_GPUVA_OP_PREFETCH:
2324 		/* Nothing to do */
2325 		break;
2326 	default:
2327 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2328 	}
2329 }
2330 
2331 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2332 				     struct drm_gpuva_ops **ops,
2333 				     int num_ops_list)
2334 {
2335 	int i;
2336 
2337 	for (i = num_ops_list - 1; i >= 0; --i) {
2338 		struct drm_gpuva_ops *__ops = ops[i];
2339 		struct drm_gpuva_op *__op;
2340 
2341 		if (!__ops)
2342 			continue;
2343 
2344 		drm_gpuva_for_each_op_reverse(__op, __ops) {
2345 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2346 
2347 			xe_vma_op_unwind(vm, op,
2348 					 op->flags & XE_VMA_OP_COMMITTED,
2349 					 op->flags & XE_VMA_OP_PREV_COMMITTED,
2350 					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2351 		}
2352 	}
2353 }
2354 
2355 static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
2356 				 bool validate)
2357 {
2358 	struct xe_bo *bo = xe_vma_bo(vma);
2359 	int err = 0;
2360 
2361 	if (bo) {
2362 		if (!bo->vm)
2363 			err = drm_exec_lock_obj(exec, &bo->ttm.base);
2364 		if (!err && validate)
2365 			err = xe_bo_validate(bo, xe_vma_vm(vma), true);
2366 	}
2367 
2368 	return err;
2369 }
2370 
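/*
 * A VMA with a pending user fence may not be unbound or remapped until
 * the fence has signalled; report -EBUSY back to userspace otherwise.
 */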
2371 static int check_ufence(struct xe_vma *vma)
2372 {
2373 	if (vma->ufence) {
2374 		struct xe_user_fence * const f = vma->ufence;
2375 
2376 		if (!xe_sync_ufence_get_status(f))
2377 			return -EBUSY;
2378 
2379 		vma->ufence = NULL;
2380 		xe_sync_ufence_put(f);
2381 	}
2382 
2383 	return 0;
2384 }
2385 
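/*
 * Lock, and where required validate, the BOs an operation touches. New
 * mappings are validated (made resident) unless the VM is in fault mode
 * and the bind is not immediate; PREFETCH additionally migrates the BO
 * to the requested memory region.
 */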
2386 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
2387 			    struct xe_vma_op *op)
2388 {
2389 	int err = 0;
2390 
2391 	switch (op->base.op) {
2392 	case DRM_GPUVA_OP_MAP:
2393 		err = vma_lock_and_validate(exec, op->map.vma,
2394 					    !xe_vm_in_fault_mode(vm) ||
2395 					    op->map.immediate);
2396 		break;
2397 	case DRM_GPUVA_OP_REMAP:
2398 		err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
2399 		if (err)
2400 			break;
2401 
2402 		err = vma_lock_and_validate(exec,
2403 					    gpuva_to_vma(op->base.remap.unmap->va),
2404 					    false);
2405 		if (!err && op->remap.prev)
2406 			err = vma_lock_and_validate(exec, op->remap.prev, true);
2407 		if (!err && op->remap.next)
2408 			err = vma_lock_and_validate(exec, op->remap.next, true);
2409 		break;
2410 	case DRM_GPUVA_OP_UNMAP:
2411 		err = check_ufence(gpuva_to_vma(op->base.unmap.va));
2412 		if (err)
2413 			break;
2414 
2415 		err = vma_lock_and_validate(exec,
2416 					    gpuva_to_vma(op->base.unmap.va),
2417 					    false);
2418 		break;
2419 	case DRM_GPUVA_OP_PREFETCH:
2420 	{
2421 		struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
2422 		u32 region = op->prefetch.region;
2423 
2424 		xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
2425 
2426 		err = vma_lock_and_validate(exec,
2427 					    gpuva_to_vma(op->base.prefetch.va),
2428 					    false);
2429 		if (!err && !xe_vma_has_no_bo(vma))
2430 			err = xe_bo_migrate(xe_vma_bo(vma),
2431 					    region_to_mem_type[region]);
2432 		break;
2433 	}
2434 	default:
2435 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2436 	}
2437 
2438 	return err;
2439 }
2440 
2441 static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
2442 					   struct xe_vm *vm,
2443 					   struct xe_vma_ops *vops)
2444 {
2445 	struct xe_vma_op *op;
2446 	int err;
2447 
2448 	err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
2449 	if (err)
2450 		return err;
2451 
2452 	list_for_each_entry(op, &vops->list, link) {
2453 		err = op_lock_and_prep(exec, vm, op);
2454 		if (err)
2455 			return err;
2456 	}
2457 
2458 #ifdef TEST_VM_OPS_ERROR
2459 	if (vops->inject_error &&
2460 	    vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK)
2461 		return -ENOSPC;
2462 #endif
2463 
2464 	return 0;
2465 }
2466 
2467 static void op_trace(struct xe_vma_op *op)
2468 {
2469 	switch (op->base.op) {
2470 	case DRM_GPUVA_OP_MAP:
2471 		trace_xe_vma_bind(op->map.vma);
2472 		break;
2473 	case DRM_GPUVA_OP_REMAP:
2474 		trace_xe_vma_unbind(gpuva_to_vma(op->base.remap.unmap->va));
2475 		if (op->remap.prev)
2476 			trace_xe_vma_bind(op->remap.prev);
2477 		if (op->remap.next)
2478 			trace_xe_vma_bind(op->remap.next);
2479 		break;
2480 	case DRM_GPUVA_OP_UNMAP:
2481 		trace_xe_vma_unbind(gpuva_to_vma(op->base.unmap.va));
2482 		break;
2483 	case DRM_GPUVA_OP_PREFETCH:
2484 		trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va));
2485 		break;
2486 	default:
2487 		XE_WARN_ON("NOT POSSIBLE");
2488 	}
2489 }
2490 
2491 static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops)
2492 {
2493 	struct xe_vma_op *op;
2494 
2495 	list_for_each_entry(op, &vops->list, link)
2496 		op_trace(op);
2497 }
2498 
2499 static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops)
2500 {
2501 	struct xe_exec_queue *q = vops->q;
2502 	struct xe_tile *tile;
2503 	int number_tiles = 0;
2504 	u8 id;
2505 
2506 	for_each_tile(tile, vm->xe, id) {
2507 		if (vops->pt_update_ops[id].num_ops)
2508 			++number_tiles;
2509 
2510 		if (vops->pt_update_ops[id].q)
2511 			continue;
2512 
2513 		if (q) {
2514 			vops->pt_update_ops[id].q = q;
2515 			if (vm->pt_root[id] && !list_empty(&q->multi_gt_list))
2516 				q = list_next_entry(q, multi_gt_list);
2517 		} else {
2518 			vops->pt_update_ops[id].q = vm->q[id];
2519 		}
2520 	}
2521 
2522 	return number_tiles;
2523 }
2524 
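/*
 * Execute the page-table updates in three phases on every tile with
 * pending ops: prepare (allocate and reserve resources), run (emit the
 * updates, yielding one fence per tile), and fini. Fences from multiple
 * tiles are tied together with a dma_fence_array; any failure aborts the
 * prepared updates on all tiles.
 */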
2525 static struct dma_fence *ops_execute(struct xe_vm *vm,
2526 				     struct xe_vma_ops *vops)
2527 {
2528 	struct xe_tile *tile;
2529 	struct dma_fence *fence = NULL;
2530 	struct dma_fence **fences = NULL;
2531 	struct dma_fence_array *cf = NULL;
2532 	int number_tiles = 0, current_fence = 0, err;
2533 	u8 id;
2534 
2535 	number_tiles = vm_ops_setup_tile_args(vm, vops);
2536 	if (number_tiles == 0)
2537 		return ERR_PTR(-ENODATA);
2538 
2539 	if (number_tiles > 1) {
2540 		fences = kmalloc_array(number_tiles, sizeof(*fences),
2541 				       GFP_KERNEL);
2542 		if (!fences) {
2543 			fence = ERR_PTR(-ENOMEM);
2544 			goto err_trace;
2545 		}
2546 	}
2547 
2548 	for_each_tile(tile, vm->xe, id) {
2549 		if (!vops->pt_update_ops[id].num_ops)
2550 			continue;
2551 
2552 		err = xe_pt_update_ops_prepare(tile, vops);
2553 		if (err) {
2554 			fence = ERR_PTR(err);
2555 			goto err_out;
2556 		}
2557 	}
2558 
2559 	trace_xe_vm_ops_execute(vops);
2560 
2561 	for_each_tile(tile, vm->xe, id) {
2562 		if (!vops->pt_update_ops[id].num_ops)
2563 			continue;
2564 
2565 		fence = xe_pt_update_ops_run(tile, vops);
2566 		if (IS_ERR(fence))
2567 			goto err_out;
2568 
2569 		if (fences)
2570 			fences[current_fence++] = fence;
2571 	}
2572 
2573 	if (fences) {
2574 		cf = dma_fence_array_create(number_tiles, fences,
2575 					    vm->composite_fence_ctx,
2576 					    vm->composite_fence_seqno++,
2577 					    false);
2578 		if (!cf) {
2579 			--vm->composite_fence_seqno;
2580 			fence = ERR_PTR(-ENOMEM);
2581 			goto err_out;
2582 		}
2583 		fence = &cf->base;
2584 	}
2585 
2586 	for_each_tile(tile, vm->xe, id) {
2587 		if (!vops->pt_update_ops[id].num_ops)
2588 			continue;
2589 
2590 		xe_pt_update_ops_fini(tile, vops);
2591 	}
2592 
2593 	return fence;
2594 
2595 err_out:
2596 	for_each_tile(tile, vm->xe, id) {
2597 		if (!vops->pt_update_ops[id].num_ops)
2598 			continue;
2599 
2600 		xe_pt_update_ops_abort(tile, vops);
2601 	}
2602 	while (current_fence)
2603 		dma_fence_put(fences[--current_fence]);
2604 	kfree(fences);
2605 	kfree(cf);
2606 
2607 err_trace:
2608 	trace_xe_vm_ops_fail(vm);
2609 	return fence;
2610 }
2611 
2612 static void vma_add_ufence(struct xe_vma *vma, struct xe_user_fence *ufence)
2613 {
2614 	if (vma->ufence)
2615 		xe_sync_ufence_put(vma->ufence);
2616 	vma->ufence = __xe_sync_ufence_get(ufence);
2617 }
2618 
2619 static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op,
2620 			  struct xe_user_fence *ufence)
2621 {
2622 	switch (op->base.op) {
2623 	case DRM_GPUVA_OP_MAP:
2624 		vma_add_ufence(op->map.vma, ufence);
2625 		break;
2626 	case DRM_GPUVA_OP_REMAP:
2627 		if (op->remap.prev)
2628 			vma_add_ufence(op->remap.prev, ufence);
2629 		if (op->remap.next)
2630 			vma_add_ufence(op->remap.next, ufence);
2631 		break;
2632 	case DRM_GPUVA_OP_UNMAP:
2633 		break;
2634 	case DRM_GPUVA_OP_PREFETCH:
2635 		vma_add_ufence(gpuva_to_vma(op->base.prefetch.va), ufence);
2636 		break;
2637 	default:
2638 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2639 	}
2640 }
2641 
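/*
 * Final fixups once the bind fence exists: attach any user fence to the
 * touched VMAs, arrange for unmapped VMAs to be destroyed once the fence
 * signals, signal the sync entries, and publish the fence as the wait
 * exec queue's last fence.
 */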
2642 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
2643 				   struct dma_fence *fence)
2644 {
2645 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
2646 	struct xe_user_fence *ufence;
2647 	struct xe_vma_op *op;
2648 	int i;
2649 
2650 	ufence = find_ufence_get(vops->syncs, vops->num_syncs);
2651 	list_for_each_entry(op, &vops->list, link) {
2652 		if (ufence)
2653 			op_add_ufence(vm, op, ufence);
2654 
2655 		if (op->base.op == DRM_GPUVA_OP_UNMAP)
2656 			xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
2657 		else if (op->base.op == DRM_GPUVA_OP_REMAP)
2658 			xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
2659 				       fence);
2660 	}
2661 	if (ufence)
2662 		xe_sync_ufence_put(ufence);
2663 	for (i = 0; i < vops->num_syncs; i++)
2664 		xe_sync_entry_signal(vops->syncs + i, fence);
2665 	xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
2666 	dma_fence_put(fence);
2667 }
2668 
2669 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2670 				     struct xe_vma_ops *vops)
2671 {
2672 	struct drm_exec exec;
2673 	struct dma_fence *fence;
2674 	int err;
2675 
2676 	lockdep_assert_held_write(&vm->lock);
2677 
2678 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
2679 		      DRM_EXEC_IGNORE_DUPLICATES, 0);
2680 	drm_exec_until_all_locked(&exec) {
2681 		err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
2682 		drm_exec_retry_on_contention(&exec);
2683 		if (err)
2684 			goto unlock;
2685 
2686 		fence = ops_execute(vm, vops);
2687 		if (IS_ERR(fence)) {
2688 			err = PTR_ERR(fence);
2689 			goto unlock;
2690 		}
2691 
2692 		vm_bind_ioctl_ops_fini(vm, vops, fence);
2693 	}
2694 
2695 unlock:
2696 	drm_exec_fini(&exec);
2697 	return err;
2698 }
2699 
2700 #define SUPPORTED_FLAGS_STUB  \
2701 	(DRM_XE_VM_BIND_FLAG_READONLY | \
2702 	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
2703 	 DRM_XE_VM_BIND_FLAG_NULL | \
2704 	 DRM_XE_VM_BIND_FLAG_DUMPABLE)
2705 
2706 #ifdef TEST_VM_OPS_ERROR
2707 #define SUPPORTED_FLAGS	(SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
2708 #else
2709 #define SUPPORTED_FLAGS	SUPPORTED_FLAGS_STUB
2710 #endif
2711 
2712 #define XE_64K_PAGE_MASK 0xffffull
2713 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
2714 
2715 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2716 				    struct drm_xe_vm_bind *args,
2717 				    struct drm_xe_vm_bind_op **bind_ops)
2718 {
2719 	int err;
2720 	int i;
2721 
2722 	if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
2723 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2724 		return -EINVAL;
2725 
2726 	if (XE_IOCTL_DBG(xe, args->extensions))
2727 		return -EINVAL;
2728 
2729 	if (args->num_binds > 1) {
2730 		u64 __user *bind_user =
2731 			u64_to_user_ptr(args->vector_of_binds);
2732 
2733 		*bind_ops = kvmalloc_array(args->num_binds,
2734 					   sizeof(struct drm_xe_vm_bind_op),
2735 					   GFP_KERNEL | __GFP_ACCOUNT);
2736 		if (!*bind_ops)
2737 			return -ENOBUFS;
2738 
2739 		err = copy_from_user(*bind_ops, bind_user,
2740 				       sizeof(struct drm_xe_vm_bind_op) *
2741 				       args->num_binds);
2742 		if (XE_IOCTL_DBG(xe, err)) {
2743 			err = -EFAULT;
2744 			goto free_bind_ops;
2745 		}
2746 	} else {
2747 		*bind_ops = &args->bind;
2748 	}
2749 
2750 	for (i = 0; i < args->num_binds; ++i) {
2751 		u64 range = (*bind_ops)[i].range;
2752 		u64 addr = (*bind_ops)[i].addr;
2753 		u32 op = (*bind_ops)[i].op;
2754 		u32 flags = (*bind_ops)[i].flags;
2755 		u32 obj = (*bind_ops)[i].obj;
2756 		u64 obj_offset = (*bind_ops)[i].obj_offset;
2757 		u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
2758 		bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2759 		u16 pat_index = (*bind_ops)[i].pat_index;
2760 		u16 coh_mode;
2761 
2762 		if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
2763 			err = -EINVAL;
2764 			goto free_bind_ops;
2765 		}
2766 
2767 		pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
2768 		(*bind_ops)[i].pat_index = pat_index;
2769 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2770 		if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
2771 			err = -EINVAL;
2772 			goto free_bind_ops;
2773 		}
2774 
2775 		if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
2776 			err = -EINVAL;
2777 			goto free_bind_ops;
2778 		}
2779 
2780 		if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
2781 		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
2782 		    XE_IOCTL_DBG(xe, obj && is_null) ||
2783 		    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
2784 		    XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
2785 				 is_null) ||
2786 		    XE_IOCTL_DBG(xe, !obj &&
2787 				 op == DRM_XE_VM_BIND_OP_MAP &&
2788 				 !is_null) ||
2789 		    XE_IOCTL_DBG(xe, !obj &&
2790 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2791 		    XE_IOCTL_DBG(xe, addr &&
2792 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2793 		    XE_IOCTL_DBG(xe, range &&
2794 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2795 		    XE_IOCTL_DBG(xe, obj &&
2796 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2797 		    XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2798 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2799 		    XE_IOCTL_DBG(xe, obj &&
2800 				 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
2801 		    XE_IOCTL_DBG(xe, prefetch_region &&
2802 				 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
2803 		    XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
2804 				       xe->info.mem_region_mask)) ||
2805 		    XE_IOCTL_DBG(xe, obj &&
2806 				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
2807 			err = -EINVAL;
2808 			goto free_bind_ops;
2809 		}
2810 
2811 		if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
2812 		    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
2813 		    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
2814 		    XE_IOCTL_DBG(xe, !range &&
2815 				 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
2816 			err = -EINVAL;
2817 			goto free_bind_ops;
2818 		}
2819 	}
2820 
2821 	return 0;
2822 
2823 free_bind_ops:
2824 	if (args->num_binds > 1)
2825 		kvfree(*bind_ops);
2826 	return err;
2827 }
2828 
2829 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
2830 				       struct xe_exec_queue *q,
2831 				       struct xe_sync_entry *syncs,
2832 				       int num_syncs)
2833 {
2834 	struct dma_fence *fence;
2835 	int i, err = 0;
2836 
2837 	fence = xe_sync_in_fence_get(syncs, num_syncs,
2838 				     to_wait_exec_queue(vm, q), vm);
2839 	if (IS_ERR(fence))
2840 		return PTR_ERR(fence);
2841 
2842 	for (i = 0; i < num_syncs; i++)
2843 		xe_sync_entry_signal(&syncs[i], fence);
2844 
2845 	xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
2846 				     fence);
2847 	dma_fence_put(fence);
2848 
2849 	return err;
2850 }
2851 
2852 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
2853 			    struct xe_exec_queue *q,
2854 			    struct xe_sync_entry *syncs, u32 num_syncs)
2855 {
2856 	memset(vops, 0, sizeof(*vops));
2857 	INIT_LIST_HEAD(&vops->list);
2858 	vops->vm = vm;
2859 	vops->q = q;
2860 	vops->syncs = syncs;
2861 	vops->num_syncs = num_syncs;
2862 }
2863 
2864 static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
2865 					u64 addr, u64 range, u64 obj_offset,
2866 					u16 pat_index)
2867 {
2868 	u16 coh_mode;
2869 
2870 	if (XE_IOCTL_DBG(xe, range > bo->size) ||
2871 	    XE_IOCTL_DBG(xe, obj_offset >
2872 			 bo->size - range)) {
2873 		return -EINVAL;
2874 	}
2875 
2876 	/*
2877 	 * Some platforms require 64k VM_BIND alignment,
2878 	 * specifically those with XE_VRAM_FLAGS_NEED64K.
2879 	 *
2880 	 * Other platforms may have BOs set to 64k physical placement,
2881 	 * but can be mapped at 4k offsets anyway. This check is only
2882 	 * there for the former case.
2883 	 */
2884 	if ((bo->flags & XE_BO_FLAG_INTERNAL_64K) &&
2885 	    (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)) {
2886 		if (XE_IOCTL_DBG(xe, obj_offset &
2887 				 XE_64K_PAGE_MASK) ||
2888 		    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
2889 		    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
2890 			return -EINVAL;
2891 		}
2892 	}
2893 
2894 	coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2895 	if (bo->cpu_caching) {
2896 		if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2897 				 bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
2898 			return -EINVAL;
2899 		}
2900 	} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
2901 		/*
2902 		 * An imported dma-buf from a different device should
2903 		 * require 1way or 2way coherency since we don't know
2904 		 * how it was mapped on the CPU. Just assume it is
2905 		 * potentially cached on the CPU side.
2906 		 */
2907 		return -EINVAL;
2908 	}
2909 
2910 	return 0;
2911 }
2912 
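/**
 * xe_vm_bind_ioctl() - Entry point for the DRM_XE_VM_BIND ioctl
 * @dev: DRM device
 * @data: Pointer to struct drm_xe_vm_bind
 * @file: DRM file
 *
 * Validates the arguments, builds a drm_gpuva_ops list for each bind op,
 * parses it into xe_vma_ops and executes the whole batch, unwinding the
 * committed ops on failure. A minimal userspace sketch of a single MAP
 * (illustrative only; addr and range must be page aligned, and
 * uapi/drm/xe_drm.h is the authoritative reference for the layout):
 *
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm_id,
 *		.num_binds = 1,
 *		.bind = {
 *			.obj = bo_handle,
 *			.range = size,
 *			.addr = gpu_addr,
 *			.op = DRM_XE_VM_BIND_OP_MAP,
 *			.pat_index = pat_index,
 *		},
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 *
 * Return: 0 on success, negative error code on failure.
 */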
2913 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2914 {
2915 	struct xe_device *xe = to_xe_device(dev);
2916 	struct xe_file *xef = to_xe_file(file);
2917 	struct drm_xe_vm_bind *args = data;
2918 	struct drm_xe_sync __user *syncs_user;
2919 	struct xe_bo **bos = NULL;
2920 	struct drm_gpuva_ops **ops = NULL;
2921 	struct xe_vm *vm;
2922 	struct xe_exec_queue *q = NULL;
2923 	u32 num_syncs, num_ufence = 0;
2924 	struct xe_sync_entry *syncs = NULL;
2925 	struct drm_xe_vm_bind_op *bind_ops;
2926 	struct xe_vma_ops vops;
2927 	int err;
2928 	int i;
2929 
2930 	err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
2931 	if (err)
2932 		return err;
2933 
2934 	if (args->exec_queue_id) {
2935 		q = xe_exec_queue_lookup(xef, args->exec_queue_id);
2936 		if (XE_IOCTL_DBG(xe, !q)) {
2937 			err = -ENOENT;
2938 			goto free_objs;
2939 		}
2940 
2941 		if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
2942 			err = -EINVAL;
2943 			goto put_exec_queue;
2944 		}
2945 	}
2946 
2947 	vm = xe_vm_lookup(xef, args->vm_id);
2948 	if (XE_IOCTL_DBG(xe, !vm)) {
2949 		err = -EINVAL;
2950 		goto put_exec_queue;
2951 	}
2952 
2953 	err = down_write_killable(&vm->lock);
2954 	if (err)
2955 		goto put_vm;
2956 
2957 	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
2958 		err = -ENOENT;
2959 		goto release_vm_lock;
2960 	}
2961 
2962 	for (i = 0; i < args->num_binds; ++i) {
2963 		u64 range = bind_ops[i].range;
2964 		u64 addr = bind_ops[i].addr;
2965 
2966 		if (XE_IOCTL_DBG(xe, range > vm->size) ||
2967 		    XE_IOCTL_DBG(xe, addr > vm->size - range)) {
2968 			err = -EINVAL;
2969 			goto release_vm_lock;
2970 		}
2971 	}
2972 
2973 	if (args->num_binds) {
2974 		bos = kvcalloc(args->num_binds, sizeof(*bos),
2975 			       GFP_KERNEL | __GFP_ACCOUNT);
2976 		if (!bos) {
2977 			err = -ENOMEM;
2978 			goto release_vm_lock;
2979 		}
2980 
2981 		ops = kvcalloc(args->num_binds, sizeof(*ops),
2982 			       GFP_KERNEL | __GFP_ACCOUNT);
2983 		if (!ops) {
2984 			err = -ENOMEM;
2985 			goto release_vm_lock;
2986 		}
2987 	}
2988 
2989 	for (i = 0; i < args->num_binds; ++i) {
2990 		struct drm_gem_object *gem_obj;
2991 		u64 range = bind_ops[i].range;
2992 		u64 addr = bind_ops[i].addr;
2993 		u32 obj = bind_ops[i].obj;
2994 		u64 obj_offset = bind_ops[i].obj_offset;
2995 		u16 pat_index = bind_ops[i].pat_index;
2996 
2997 		if (!obj)
2998 			continue;
2999 
3000 		gem_obj = drm_gem_object_lookup(file, obj);
3001 		if (XE_IOCTL_DBG(xe, !gem_obj)) {
3002 			err = -ENOENT;
3003 			goto put_obj;
3004 		}
3005 		bos[i] = gem_to_xe_bo(gem_obj);
3006 
3007 		err = xe_vm_bind_ioctl_validate_bo(xe, bos[i], addr, range,
3008 						   obj_offset, pat_index);
3009 		if (err)
3010 			goto put_obj;
3011 	}
3012 
3013 	if (args->num_syncs) {
3014 		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3015 		if (!syncs) {
3016 			err = -ENOMEM;
3017 			goto put_obj;
3018 		}
3019 	}
3020 
3021 	syncs_user = u64_to_user_ptr(args->syncs);
3022 	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3023 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3024 					  &syncs_user[num_syncs],
3025 					  (xe_vm_in_lr_mode(vm) ?
3026 					   SYNC_PARSE_FLAG_LR_MODE : 0) |
3027 					  (!args->num_binds ?
3028 					   SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
3029 		if (err)
3030 			goto free_syncs;
3031 
3032 		if (xe_sync_is_ufence(&syncs[num_syncs]))
3033 			num_ufence++;
3034 	}
3035 
3036 	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
3037 		err = -EINVAL;
3038 		goto free_syncs;
3039 	}
3040 
3041 	if (!args->num_binds) {
3042 		err = -ENODATA;
3043 		goto free_syncs;
3044 	}
3045 
3046 	xe_vma_ops_init(&vops, vm, q, syncs, num_syncs);
3047 	for (i = 0; i < args->num_binds; ++i) {
3048 		u64 range = bind_ops[i].range;
3049 		u64 addr = bind_ops[i].addr;
3050 		u32 op = bind_ops[i].op;
3051 		u32 flags = bind_ops[i].flags;
3052 		u64 obj_offset = bind_ops[i].obj_offset;
3053 		u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
3054 		u16 pat_index = bind_ops[i].pat_index;
3055 
3056 		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3057 						  addr, range, op, flags,
3058 						  prefetch_region, pat_index);
3059 		if (IS_ERR(ops[i])) {
3060 			err = PTR_ERR(ops[i]);
3061 			ops[i] = NULL;
3062 			goto unwind_ops;
3063 		}
3064 
3065 		err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops);
3066 		if (err)
3067 			goto unwind_ops;
3068 
3069 #ifdef TEST_VM_OPS_ERROR
3070 		if (flags & FORCE_OP_ERROR) {
3071 			vops.inject_error = true;
3072 			vm->xe->vm_inject_error_position =
3073 				(vm->xe->vm_inject_error_position + 1) %
3074 				FORCE_OP_ERROR_COUNT;
3075 		}
3076 #endif
3077 	}
3078 
3079 	/* Nothing to do */
3080 	if (list_empty(&vops.list)) {
3081 		err = -ENODATA;
3082 		goto unwind_ops;
3083 	}
3084 
3085 	err = xe_vma_ops_alloc(&vops, args->num_binds > 1);
3086 	if (err)
3087 		goto unwind_ops;
3088 
3089 	err = vm_bind_ioctl_ops_execute(vm, &vops);
3090 
3091 unwind_ops:
3092 	if (err && err != -ENODATA)
3093 		vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3094 	xe_vma_ops_fini(&vops);
3095 	for (i = args->num_binds - 1; i >= 0; --i)
3096 		if (ops[i])
3097 			drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
3098 free_syncs:
3099 	if (err == -ENODATA)
3100 		err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3101 	while (num_syncs--)
3102 		xe_sync_entry_cleanup(&syncs[num_syncs]);
3103 
3104 	kfree(syncs);
3105 put_obj:
3106 	for (i = 0; i < args->num_binds; ++i)
3107 		xe_bo_put(bos[i]);
3108 release_vm_lock:
3109 	up_write(&vm->lock);
3110 put_vm:
3111 	xe_vm_put(vm);
3112 put_exec_queue:
3113 	if (q)
3114 		xe_exec_queue_put(q);
3115 free_objs:
3116 	kvfree(bos);
3117 	kvfree(ops);
3118 	if (args->num_binds > 1)
3119 		kvfree(bind_ops);
3120 	return err;
3121 }
3122 
3123 /**
3124  * xe_vm_lock() - Lock the vm's dma_resv object
3125  * @vm: The struct xe_vm whose lock is to be locked
3126  * @intr: Whether to perform any wait interruptible
3127  *
3128  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3129  * contended lock was interrupted. If @intr is false, the function
3130  * always returns 0.
3131  */
3132 int xe_vm_lock(struct xe_vm *vm, bool intr)
3133 {
3134 	if (intr)
3135 		return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3136 
3137 	return dma_resv_lock(xe_vm_resv(vm), NULL);
3138 }
3139 
3140 /**
3141  * xe_vm_unlock() - Unlock the vm's dma_resv object
3142  * @vm: The struct xe_vm whose lock is to be released.
3143  *
3144  * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
3145  */
3146 void xe_vm_unlock(struct xe_vm *vm)
3147 {
3148 	dma_resv_unlock(xe_vm_resv(vm));
3149 }
3150 
3151 /**
3152  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3153  * @vma: VMA to invalidate
3154  *
3155  * Walks the page-table leaves, zeroing the entries owned by this VMA,
3156  * invalidates the TLBs, and blocks until the TLB invalidation is
3157  * complete.
3158  *
3159  * Return: 0 on success, negative error code otherwise.
3160  */
3161 int xe_vm_invalidate_vma(struct xe_vma *vma)
3162 {
3163 	struct xe_device *xe = xe_vma_vm(vma)->xe;
3164 	struct xe_tile *tile;
3165 	struct xe_gt_tlb_invalidation_fence
3166 		fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
3167 	u8 id;
3168 	u32 fence_id = 0;
3169 	int ret = 0;
3170 
3171 	xe_assert(xe, !xe_vma_is_null(vma));
3172 	trace_xe_vma_invalidate(vma);
3173 
3174 	vm_dbg(&xe->drm,
3175 	       "INVALIDATE: addr=0x%016llx, range=0x%016llx",
3176 	       xe_vma_start(vma), xe_vma_size(vma));
3177 
3178 	/* Check that we don't race with page-table updates */
3179 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3180 		if (xe_vma_is_userptr(vma)) {
3181 			WARN_ON_ONCE(!mmu_interval_check_retry
3182 				     (&to_userptr_vma(vma)->userptr.notifier,
3183 				      to_userptr_vma(vma)->userptr.notifier_seq));
3184 			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3185 							     DMA_RESV_USAGE_BOOKKEEP));
3186 
3187 		} else {
3188 			xe_bo_assert_held(xe_vma_bo(vma));
3189 		}
3190 	}
3191 
3192 	for_each_tile(tile, xe, id) {
3193 		if (xe_pt_zap_ptes(tile, vma)) {
3194 			xe_device_wmb(xe);
3195 			xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
3196 							  &fence[fence_id],
3197 							  true);
3198 
3199 			ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
3200 							 &fence[fence_id], vma);
3201 			if (ret)
3202 				goto wait;
3203 			++fence_id;
3204 
3205 			if (!tile->media_gt)
3206 				continue;
3207 
3208 			xe_gt_tlb_invalidation_fence_init(tile->media_gt,
3209 							  &fence[fence_id],
3210 							  true);
3211 
3212 			ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
3213 							 &fence[fence_id], vma);
3214 			if (ret)
3215 				goto wait;
3216 			++fence_id;
3217 		}
3218 	}
3219 
3220 wait:
3221 	for (id = 0; id < fence_id; ++id)
3222 		xe_gt_tlb_invalidation_fence_wait(&fence[id]);
3223 
3224 	vma->tile_invalidated = vma->tile_mask;
3225 
3226 	return ret;
3227 }
3228 
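/*
 * VM snapshots are captured in two stages: xe_vm_snapshot_capture() runs
 * under snap_mutex with GFP_NOWAIT and only records which ranges to dump,
 * taking BO/mm references, while xe_vm_snapshot_capture_delayed() later
 * copies the actual contents from a context that may sleep.
 */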
3229 struct xe_vm_snapshot {
3230 	unsigned long num_snaps;
3231 	struct {
3232 		u64 ofs, bo_ofs;
3233 		unsigned long len;
3234 		struct xe_bo *bo;
3235 		void *data;
3236 		struct mm_struct *mm;
3237 	} snap[];
3238 };
3239 
3240 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
3241 {
3242 	unsigned long num_snaps = 0, i;
3243 	struct xe_vm_snapshot *snap = NULL;
3244 	struct drm_gpuva *gpuva;
3245 
3246 	if (!vm)
3247 		return NULL;
3248 
3249 	mutex_lock(&vm->snap_mutex);
3250 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3251 		if (gpuva->flags & XE_VMA_DUMPABLE)
3252 			num_snaps++;
3253 	}
3254 
3255 	if (num_snaps)
3256 		snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
3257 	if (!snap) {
3258 		snap = num_snaps ? ERR_PTR(-ENOMEM) : ERR_PTR(-ENODEV);
3259 		goto out_unlock;
3260 	}
3261 
3262 	snap->num_snaps = num_snaps;
3263 	i = 0;
3264 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3265 		struct xe_vma *vma = gpuva_to_vma(gpuva);
3266 		struct xe_bo *bo = vma->gpuva.gem.obj ?
3267 			gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
3268 
3269 		if (!(gpuva->flags & XE_VMA_DUMPABLE))
3270 			continue;
3271 
3272 		snap->snap[i].ofs = xe_vma_start(vma);
3273 		snap->snap[i].len = xe_vma_size(vma);
3274 		if (bo) {
3275 			snap->snap[i].bo = xe_bo_get(bo);
3276 			snap->snap[i].bo_ofs = xe_vma_bo_offset(vma);
3277 		} else if (xe_vma_is_userptr(vma)) {
3278 			struct mm_struct *mm =
3279 				to_userptr_vma(vma)->userptr.notifier.mm;
3280 
3281 			if (mmget_not_zero(mm))
3282 				snap->snap[i].mm = mm;
3283 			else
3284 				snap->snap[i].data = ERR_PTR(-EFAULT);
3285 
3286 			snap->snap[i].bo_ofs = xe_vma_userptr(vma);
3287 		} else {
3288 			snap->snap[i].data = ERR_PTR(-ENOENT);
3289 		}
3290 		i++;
3291 	}
3292 
3293 out_unlock:
3294 	mutex_unlock(&vm->snap_mutex);
3295 	return snap;
3296 }
3297 
3298 void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
3299 {
3300 	if (IS_ERR_OR_NULL(snap))
3301 		return;
3302 
3303 	for (int i = 0; i < snap->num_snaps; i++) {
3304 		struct xe_bo *bo = snap->snap[i].bo;
3305 		struct iosys_map src;
3306 		int err;
3307 
3308 		if (IS_ERR(snap->snap[i].data))
3309 			continue;
3310 
3311 		snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER);
3312 		if (!snap->snap[i].data) {
3313 			snap->snap[i].data = ERR_PTR(-ENOMEM);
3314 			goto cleanup_bo;
3315 		}
3316 
3317 		if (bo) {
3318 			xe_bo_lock(bo, false);
3319 			err = ttm_bo_vmap(&bo->ttm, &src);
3320 			if (!err) {
3321 				xe_map_memcpy_from(xe_bo_device(bo),
3322 						   snap->snap[i].data,
3323 						   &src, snap->snap[i].bo_ofs,
3324 						   snap->snap[i].len);
3325 				ttm_bo_vunmap(&bo->ttm, &src);
3326 			}
3327 			xe_bo_unlock(bo);
3328 		} else {
3329 			void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
3330 
3331 			kthread_use_mm(snap->snap[i].mm);
3332 			if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len))
3333 				err = 0;
3334 			else
3335 				err = -EFAULT;
3336 			kthread_unuse_mm(snap->snap[i].mm);
3337 
3338 			mmput(snap->snap[i].mm);
3339 			snap->snap[i].mm = NULL;
3340 		}
3341 
3342 		if (err) {
3343 			kvfree(snap->snap[i].data);
3344 			snap->snap[i].data = ERR_PTR(err);
3345 		}
3346 
3347 cleanup_bo:
3348 		xe_bo_put(bo);
3349 		snap->snap[i].bo = NULL;
3350 	}
3351 }
3352 
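/*
 * Emit a captured snapshot through a drm_printer: for each range a
 * "[<GPU address>].length" line, followed by the contents ascii85-encoded
 * on a ".data" line, or an ".error" line if the copy failed.
 */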
3353 void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
3354 {
3355 	unsigned long i, j;
3356 
3357 	if (IS_ERR_OR_NULL(snap)) {
3358 		drm_printf(p, "[0].error: %li\n", PTR_ERR(snap));
3359 		return;
3360 	}
3361 
3362 	for (i = 0; i < snap->num_snaps; i++) {
3363 		drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
3364 
3365 		if (IS_ERR(snap->snap[i].data)) {
3366 			drm_printf(p, "[%llx].error: %li\n", snap->snap[i].ofs,
3367 				   PTR_ERR(snap->snap[i].data));
3368 			continue;
3369 		}
3370 
3371 		drm_printf(p, "[%llx].data: ", snap->snap[i].ofs);
3372 
3373 		for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
3374 			u32 *val = snap->snap[i].data + j;
3375 			char dumped[ASCII85_BUFSZ];
3376 
3377 			drm_puts(p, ascii85_encode(*val, dumped));
3378 		}
3379 
3380 		drm_puts(p, "\n");
3381 	}
3382 }
3383 
3384 void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
3385 {
3386 	unsigned long i;
3387 
3388 	if (IS_ERR_OR_NULL(snap))
3389 		return;
3390 
3391 	for (i = 0; i < snap->num_snaps; i++) {
3392 		if (!IS_ERR(snap->snap[i].data))
3393 			kvfree(snap->snap[i].data);
3394 		xe_bo_put(snap->snap[i].bo);
3395 		if (snap->snap[i].mm)
3396 			mmput(snap->snap[i].mm);
3397 	}
3398 	kvfree(snap);
3399 }
3400