xref: /linux/drivers/gpu/drm/xe/xe_vm.c (revision 75fd04f276de31cc59419fda169232d097fbf291)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_vm.h"
7 
8 #include <linux/dma-fence-array.h>
9 #include <linux/nospec.h>
10 
11 #include <drm/drm_exec.h>
12 #include <drm/drm_print.h>
13 #include <drm/ttm/ttm_execbuf_util.h>
14 #include <drm/ttm/ttm_tt.h>
15 #include <uapi/drm/xe_drm.h>
16 #include <linux/ascii85.h>
17 #include <linux/delay.h>
18 #include <linux/kthread.h>
19 #include <linux/mm.h>
20 #include <linux/swap.h>
21 
22 #include <generated/xe_wa_oob.h>
23 
24 #include "regs/xe_gtt_defs.h"
25 #include "xe_assert.h"
26 #include "xe_bo.h"
27 #include "xe_device.h"
28 #include "xe_drm_client.h"
29 #include "xe_exec_queue.h"
30 #include "xe_gt_pagefault.h"
31 #include "xe_gt_tlb_invalidation.h"
32 #include "xe_migrate.h"
33 #include "xe_pat.h"
34 #include "xe_pm.h"
35 #include "xe_preempt_fence.h"
36 #include "xe_pt.h"
37 #include "xe_res_cursor.h"
38 #include "xe_sync.h"
39 #include "xe_trace_bo.h"
40 #include "xe_wa.h"
41 #include "xe_hmm.h"
42 
43 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
44 {
45 	return vm->gpuvm.r_obj;
46 }
47 
48 /**
49  * xe_vma_userptr_check_repin() - Advisory check for repin needed
50  * @uvma: The userptr vma
51  *
52  * Check if the userptr vma has been invalidated since last successful
53  * repin. The check is advisory only and the function can be called
54  * without the vm->userptr.notifier_lock held. There is no guarantee that the
55  * vma userptr will remain valid after a lockless check, so typically
56  * the call needs to be followed by a proper check under the notifier_lock.
57  *
58  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
59  */
60 int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
61 {
62 	return mmu_interval_check_retry(&uvma->userptr.notifier,
63 					uvma->userptr.notifier_seq) ?
64 		-EAGAIN : 0;
65 }
66 
67 int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
68 {
69 	struct xe_vma *vma = &uvma->vma;
70 	struct xe_vm *vm = xe_vma_vm(vma);
71 	struct xe_device *xe = vm->xe;
72 
73 	lockdep_assert_held(&vm->lock);
74 	xe_assert(xe, xe_vma_is_userptr(vma));
75 
76 	return xe_hmm_userptr_populate_range(uvma, false);
77 }
78 
79 static bool preempt_fences_waiting(struct xe_vm *vm)
80 {
81 	struct xe_exec_queue *q;
82 
83 	lockdep_assert_held(&vm->lock);
84 	xe_vm_assert_held(vm);
85 
86 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
87 		if (!q->lr.pfence ||
88 		    test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
89 			     &q->lr.pfence->flags)) {
90 			return true;
91 		}
92 	}
93 
94 	return false;
95 }
96 
97 static void free_preempt_fences(struct list_head *list)
98 {
99 	struct list_head *link, *next;
100 
101 	list_for_each_safe(link, next, list)
102 		xe_preempt_fence_free(to_preempt_fence_from_link(link));
103 }
104 
105 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
106 				unsigned int *count)
107 {
108 	lockdep_assert_held(&vm->lock);
109 	xe_vm_assert_held(vm);
110 
111 	if (*count >= vm->preempt.num_exec_queues)
112 		return 0;
113 
114 	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
115 		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
116 
117 		if (IS_ERR(pfence))
118 			return PTR_ERR(pfence);
119 
120 		list_move_tail(xe_preempt_fence_link(pfence), list);
121 	}
122 
123 	return 0;
124 }
125 
126 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
127 {
128 	struct xe_exec_queue *q;
129 
130 	xe_vm_assert_held(vm);
131 
132 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
133 		if (q->lr.pfence) {
134 			long timeout = dma_fence_wait(q->lr.pfence, false);
135 
136 			/* Only -ETIME on fence indicates VM needs to be killed */
137 			if (timeout < 0 || q->lr.pfence->error == -ETIME)
138 				return -ETIME;
139 
140 			dma_fence_put(q->lr.pfence);
141 			q->lr.pfence = NULL;
142 		}
143 	}
144 
145 	return 0;
146 }
147 
148 static bool xe_vm_is_idle(struct xe_vm *vm)
149 {
150 	struct xe_exec_queue *q;
151 
152 	xe_vm_assert_held(vm);
153 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
154 		if (!xe_exec_queue_is_idle(q))
155 			return false;
156 	}
157 
158 	return true;
159 }
160 
161 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
162 {
163 	struct list_head *link;
164 	struct xe_exec_queue *q;
165 
166 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
167 		struct dma_fence *fence;
168 
169 		link = list->next;
170 		xe_assert(vm->xe, link != list);
171 
172 		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
173 					     q, q->lr.context,
174 					     ++q->lr.seqno);
175 		dma_fence_put(q->lr.pfence);
176 		q->lr.pfence = fence;
177 	}
178 }
179 
180 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
181 {
182 	struct xe_exec_queue *q;
183 	int err;
184 
185 	xe_bo_assert_held(bo);
186 
187 	if (!vm->preempt.num_exec_queues)
188 		return 0;
189 
190 	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
191 	if (err)
192 		return err;
193 
194 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
195 		if (q->lr.pfence) {
196 			dma_resv_add_fence(bo->ttm.base.resv,
197 					   q->lr.pfence,
198 					   DMA_RESV_USAGE_BOOKKEEP);
199 		}
200 
201 	return 0;
202 }
203 
204 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
205 						struct drm_exec *exec)
206 {
207 	struct xe_exec_queue *q;
208 
209 	lockdep_assert_held(&vm->lock);
210 	xe_vm_assert_held(vm);
211 
212 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
213 		q->ops->resume(q);
214 
215 		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence,
216 					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
217 	}
218 }
219 
220 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
221 {
222 	struct drm_gpuvm_exec vm_exec = {
223 		.vm = &vm->gpuvm,
224 		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
225 		.num_fences = 1,
226 	};
227 	struct drm_exec *exec = &vm_exec.exec;
228 	struct dma_fence *pfence;
229 	int err;
230 	bool wait;
231 
232 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
233 
234 	down_write(&vm->lock);
235 	err = drm_gpuvm_exec_lock(&vm_exec);
236 	if (err)
237 		goto out_up_write;
238 
239 	pfence = xe_preempt_fence_create(q, q->lr.context,
240 					 ++q->lr.seqno);
241 	if (!pfence) {
242 		err = -ENOMEM;
243 		goto out_fini;
244 	}
245 
246 	list_add(&q->lr.link, &vm->preempt.exec_queues);
247 	++vm->preempt.num_exec_queues;
248 	q->lr.pfence = pfence;
249 
250 	down_read(&vm->userptr.notifier_lock);
251 
252 	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
253 				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
254 
255 	/*
256 	 * Check to see if a preemption on the VM or a userptr invalidation is
257 	 * in flight; if so, trigger this preempt fence to sync state with the
258 	 * other preempt fences on the VM.
259 	 */
260 	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
261 	if (wait)
262 		dma_fence_enable_sw_signaling(pfence);
263 
264 	up_read(&vm->userptr.notifier_lock);
265 
266 out_fini:
267 	drm_exec_fini(exec);
268 out_up_write:
269 	up_write(&vm->lock);
270 
271 	return err;
272 }
273 
274 /**
275  * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
276  * @vm: The VM.
277  * @q: The exec_queue
278  *
279  * Note that this function might be called multiple times on the same queue.
280  */
281 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
282 {
283 	if (!xe_vm_in_preempt_fence_mode(vm))
284 		return;
285 
286 	down_write(&vm->lock);
287 	if (!list_empty(&q->lr.link)) {
288 		list_del_init(&q->lr.link);
289 		--vm->preempt.num_exec_queues;
290 	}
291 	if (q->lr.pfence) {
292 		dma_fence_enable_sw_signaling(q->lr.pfence);
293 		dma_fence_put(q->lr.pfence);
294 		q->lr.pfence = NULL;
295 	}
296 	up_write(&vm->lock);
297 }
298 
299 /**
300  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
301  * that need repinning.
302  * @vm: The VM.
303  *
304  * This function checks for whether the VM has userptrs that need repinning,
305  * and provides a release-type barrier on the userptr.notifier_lock after
306  * checking.
307  *
308  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
309  */
310 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
311 {
312 	lockdep_assert_held_read(&vm->userptr.notifier_lock);
313 
314 	return (list_empty(&vm->userptr.repin_list) &&
315 		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
316 }
317 
318 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
319 
320 /**
321  * xe_vm_kill() - VM Kill
322  * @vm: The VM.
323  * @unlocked: Flag indicates the VM's dma-resv is not held
324  *
325  * Kill the VM by setting the banned flag, indicating the VM is no longer available
326  * for use. If in preempt fence mode, also kill all exec queues attached to the VM.
327  */
328 void xe_vm_kill(struct xe_vm *vm, bool unlocked)
329 {
330 	struct xe_exec_queue *q;
331 
332 	lockdep_assert_held(&vm->lock);
333 
334 	if (unlocked)
335 		xe_vm_lock(vm, false);
336 
337 	vm->flags |= XE_VM_FLAG_BANNED;
338 	trace_xe_vm_kill(vm);
339 
340 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
341 		q->ops->kill(q);
342 
343 	if (unlocked)
344 		xe_vm_unlock(vm);
345 
346 	/* TODO: Inform user the VM is banned */
347 }
348 
349 /**
350  * xe_vm_validate_should_retry() - Whether to retry after a validate error.
351  * @exec: The drm_exec object used for locking before validation.
352  * @err: The error returned from ttm_bo_validate().
353  * @end: A ktime_t cookie that should be set to 0 before first use and
354  * that should be reused on subsequent calls.
355  *
356  * With multiple active VMs, under memory pressure, it is possible that
357  * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
358  * Until ttm properly handles locking in such scenarios, the best thing the
359  * driver can do is retry with a timeout. Check if that is necessary, and
360  * if so unlock the drm_exec's objects while keeping the ticket to prepare
361  * for a rerun.
362  *
363  * Return: true if a retry after drm_exec_init() is recommended;
364  * false otherwise.
365  */
366 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
367 {
368 	ktime_t cur;
369 
370 	if (err != -ENOMEM)
371 		return false;
372 
373 	cur = ktime_get();
374 	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
375 	if (!ktime_before(cur, *end))
376 		return false;
377 
378 	msleep(20);
379 	return true;
380 }
381 
382 static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
383 {
384 	struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
385 	struct drm_gpuva *gpuva;
386 	int ret;
387 
388 	lockdep_assert_held(&vm->lock);
389 	drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
390 		list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
391 			       &vm->rebind_list);
392 
393 	ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
394 	if (ret)
395 		return ret;
396 
397 	vm_bo->evicted = false;
398 	return 0;
399 }
400 
401 /**
402  * xe_vm_validate_rebind() - Validate buffer objects and rebind vmas
403  * @vm: The vm for which we are rebinding.
404  * @exec: The struct drm_exec with the locked GEM objects.
405  * @num_fences: The number of fences to reserve for the operation, not
406  * including rebinds and validations.
407  *
408  * Validates all evicted gem objects and rebinds their vmas. Note that
409  * rebindings may cause evictions and hence the validation-rebind
410  * sequence is rerun until there are no more objects to validate.
411  *
412  * Return: 0 on success, negative error code on error. In particular,
413  * may return -EINTR or -ERESTARTSYS if interrupted, and -EDEADLK if
414  * the drm_exec transaction needs to be restarted.
415  */
416 int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
417 			  unsigned int num_fences)
418 {
419 	struct drm_gem_object *obj;
420 	unsigned long index;
421 	int ret;
422 
423 	do {
424 		ret = drm_gpuvm_validate(&vm->gpuvm, exec);
425 		if (ret)
426 			return ret;
427 
428 		ret = xe_vm_rebind(vm, false);
429 		if (ret)
430 			return ret;
431 	} while (!list_empty(&vm->gpuvm.evict.list));
432 
433 	drm_exec_for_each_locked_object(exec, index, obj) {
434 		ret = dma_resv_reserve_fences(obj->resv, num_fences);
435 		if (ret)
436 			return ret;
437 	}
438 
439 	return 0;
440 }
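
/*
 * Illustrative sketch only (not part of the driver): one way a caller could
 * combine xe_vm_validate_should_retry() with a drm_exec locking loop around
 * xe_vm_validate_rebind(). The helper name example_validate_with_retry() and
 * the choice of one reserved fence are assumptions for illustration; the
 * caller is assumed to hold vm->lock.
 */
static int __maybe_unused example_validate_with_retry(struct xe_vm *vm)
{
	struct drm_exec exec;
	ktime_t end = 0;	/* retry cookie: start at 0, reuse on reruns */
	int err;

retry:
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		err = drm_gpuvm_prepare_vm(&vm->gpuvm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (!err)
			err = xe_vm_validate_rebind(vm, &exec, 1);
		drm_exec_retry_on_contention(&exec);
	}
	drm_exec_fini(&exec);

	if (err && xe_vm_validate_should_retry(&exec, err, &end))
		goto retry;

	return err;
}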
441 
442 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
443 				 bool *done)
444 {
445 	int err;
446 
447 	err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0);
448 	if (err)
449 		return err;
450 
451 	if (xe_vm_is_idle(vm)) {
452 		vm->preempt.rebind_deactivated = true;
453 		*done = true;
454 		return 0;
455 	}
456 
457 	if (!preempt_fences_waiting(vm)) {
458 		*done = true;
459 		return 0;
460 	}
461 
462 	err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0);
463 	if (err)
464 		return err;
465 
466 	err = wait_for_existing_preempt_fences(vm);
467 	if (err)
468 		return err;
469 
470 	/*
471 	 * Add validation and rebinding to the locking loop since both can
472 	 * cause evictions which may require blocking dma_resv locks.
473 	 * The fence reservation here is intended for the new preempt fences
474 	 * we attach at the end of the rebind work.
475 	 */
476 	return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
477 }
478 
479 static void preempt_rebind_work_func(struct work_struct *w)
480 {
481 	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
482 	struct drm_exec exec;
483 	unsigned int fence_count = 0;
484 	LIST_HEAD(preempt_fences);
485 	ktime_t end = 0;
486 	int err = 0;
487 	long wait;
488 	int __maybe_unused tries = 0;
489 
490 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
491 	trace_xe_vm_rebind_worker_enter(vm);
492 
493 	down_write(&vm->lock);
494 
495 	if (xe_vm_is_closed_or_banned(vm)) {
496 		up_write(&vm->lock);
497 		trace_xe_vm_rebind_worker_exit(vm);
498 		return;
499 	}
500 
501 retry:
502 	if (xe_vm_userptr_check_repin(vm)) {
503 		err = xe_vm_userptr_pin(vm);
504 		if (err)
505 			goto out_unlock_outer;
506 	}
507 
508 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
509 
510 	drm_exec_until_all_locked(&exec) {
511 		bool done = false;
512 
513 		err = xe_preempt_work_begin(&exec, vm, &done);
514 		drm_exec_retry_on_contention(&exec);
515 		if (err || done) {
516 			drm_exec_fini(&exec);
517 			if (err && xe_vm_validate_should_retry(&exec, err, &end))
518 				err = -EAGAIN;
519 
520 			goto out_unlock_outer;
521 		}
522 	}
523 
524 	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
525 	if (err)
526 		goto out_unlock;
527 
528 	err = xe_vm_rebind(vm, true);
529 	if (err)
530 		goto out_unlock;
531 
532 	/* Wait on rebinds and munmap style VM unbinds */
533 	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
534 				     DMA_RESV_USAGE_KERNEL,
535 				     false, MAX_SCHEDULE_TIMEOUT);
536 	if (wait <= 0) {
537 		err = -ETIME;
538 		goto out_unlock;
539 	}
540 
541 #define retry_required(__tries, __vm) \
542 	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
543 	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
544 	__xe_vm_userptr_needs_repin(__vm))
545 
546 	down_read(&vm->userptr.notifier_lock);
547 	if (retry_required(tries, vm)) {
548 		up_read(&vm->userptr.notifier_lock);
549 		err = -EAGAIN;
550 		goto out_unlock;
551 	}
552 
553 #undef retry_required
554 
555 	spin_lock(&vm->xe->ttm.lru_lock);
556 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
557 	spin_unlock(&vm->xe->ttm.lru_lock);
558 
559 	/* Point of no return. */
560 	arm_preempt_fences(vm, &preempt_fences);
561 	resume_and_reinstall_preempt_fences(vm, &exec);
562 	up_read(&vm->userptr.notifier_lock);
563 
564 out_unlock:
565 	drm_exec_fini(&exec);
566 out_unlock_outer:
567 	if (err == -EAGAIN) {
568 		trace_xe_vm_rebind_worker_retry(vm);
569 		goto retry;
570 	}
571 
572 	if (err) {
573 		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
574 		xe_vm_kill(vm, true);
575 	}
576 	up_write(&vm->lock);
577 
578 	free_preempt_fences(&preempt_fences);
579 
580 	trace_xe_vm_rebind_worker_exit(vm);
581 }
582 
583 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
584 				   const struct mmu_notifier_range *range,
585 				   unsigned long cur_seq)
586 {
587 	struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
588 	struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
589 	struct xe_vma *vma = &uvma->vma;
590 	struct xe_vm *vm = xe_vma_vm(vma);
591 	struct dma_resv_iter cursor;
592 	struct dma_fence *fence;
593 	long err;
594 
595 	xe_assert(vm->xe, xe_vma_is_userptr(vma));
596 	trace_xe_vma_userptr_invalidate(vma);
597 
598 	if (!mmu_notifier_range_blockable(range))
599 		return false;
600 
601 	vm_dbg(&xe_vma_vm(vma)->xe->drm,
602 	       "NOTIFIER: addr=0x%016llx, range=0x%016llx",
603 		xe_vma_start(vma), xe_vma_size(vma));
604 
605 	down_write(&vm->userptr.notifier_lock);
606 	mmu_interval_set_seq(mni, cur_seq);
607 
608 	/* No need to stop gpu access if the userptr is not yet bound. */
609 	if (!userptr->initial_bind) {
610 		up_write(&vm->userptr.notifier_lock);
611 		return true;
612 	}
613 
614 	/*
615 	 * Tell exec and rebind worker they need to repin and rebind this
616 	 * userptr.
617 	 */
618 	if (!xe_vm_in_fault_mode(vm) &&
619 	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
620 		spin_lock(&vm->userptr.invalidated_lock);
621 		list_move_tail(&userptr->invalidate_link,
622 			       &vm->userptr.invalidated);
623 		spin_unlock(&vm->userptr.invalidated_lock);
624 	}
625 
626 	up_write(&vm->userptr.notifier_lock);
627 
628 	/*
629 	 * Preempt fences turn into schedule disables, pipeline these.
630 	 * Note that even in fault mode, we need to wait for binds and
631 	 * unbinds to complete, and those are attached as BOOKKEEP fences
632 	 * to the vm.
633 	 */
634 	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
635 			    DMA_RESV_USAGE_BOOKKEEP);
636 	dma_resv_for_each_fence_unlocked(&cursor, fence)
637 		dma_fence_enable_sw_signaling(fence);
638 	dma_resv_iter_end(&cursor);
639 
640 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
641 				    DMA_RESV_USAGE_BOOKKEEP,
642 				    false, MAX_SCHEDULE_TIMEOUT);
643 	XE_WARN_ON(err <= 0);
644 
645 	if (xe_vm_in_fault_mode(vm)) {
646 		err = xe_vm_invalidate_vma(vma);
647 		XE_WARN_ON(err);
648 	}
649 
650 	trace_xe_vma_userptr_invalidate_complete(vma);
651 
652 	return true;
653 }
654 
655 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
656 	.invalidate = vma_userptr_invalidate,
657 };
658 
659 int xe_vm_userptr_pin(struct xe_vm *vm)
660 {
661 	struct xe_userptr_vma *uvma, *next;
662 	int err = 0;
663 	LIST_HEAD(tmp_evict);
664 
665 	xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
666 	lockdep_assert_held_write(&vm->lock);
667 
668 	/* Collect invalidated userptrs */
669 	spin_lock(&vm->userptr.invalidated_lock);
670 	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
671 				 userptr.invalidate_link) {
672 		list_del_init(&uvma->userptr.invalidate_link);
673 		list_move_tail(&uvma->userptr.repin_link,
674 			       &vm->userptr.repin_list);
675 	}
676 	spin_unlock(&vm->userptr.invalidated_lock);
677 
678 	/* Pin and move to temporary list */
679 	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
680 				 userptr.repin_link) {
681 		err = xe_vma_userptr_pin_pages(uvma);
682 		if (err == -EFAULT) {
683 			list_del_init(&uvma->userptr.repin_link);
684 
685 			/* Wait for pending binds */
686 			xe_vm_lock(vm, false);
687 			dma_resv_wait_timeout(xe_vm_resv(vm),
688 					      DMA_RESV_USAGE_BOOKKEEP,
689 					      false, MAX_SCHEDULE_TIMEOUT);
690 
691 			err = xe_vm_invalidate_vma(&uvma->vma);
692 			xe_vm_unlock(vm);
693 			if (err)
694 				return err;
695 		} else {
696 			if (err < 0)
697 				return err;
698 
699 			list_del_init(&uvma->userptr.repin_link);
700 			list_move_tail(&uvma->vma.combined_links.rebind,
701 				       &vm->rebind_list);
702 		}
703 	}
704 
705 	return 0;
706 }
707 
708 /**
709  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
710  * that need repinning.
711  * @vm: The VM.
712  *
713  * This function does an advisory check for whether the VM has userptrs that
714  * need repinning.
715  *
716  * Return: 0 if there are no indications of userptrs needing repinning,
717  * -EAGAIN if there are.
718  */
719 int xe_vm_userptr_check_repin(struct xe_vm *vm)
720 {
721 	return (list_empty_careful(&vm->userptr.repin_list) &&
722 		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
723 }
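
/*
 * Illustrative sketch only (not part of the driver): the advisory checks
 * above are typically paired with a repin and a final, authoritative check
 * under the notifier lock, roughly as below. The helper name
 * example_userptr_repin_flow() is an assumption; the subsequent rebind and
 * error handling are elided. Assumes vm->lock is held for write and the VM
 * is not in fault mode.
 */
static int __maybe_unused example_userptr_repin_flow(struct xe_vm *vm)
{
	int err;

	/* Lockless, advisory check; may race with a new invalidation */
	if (xe_vm_userptr_check_repin(vm)) {
		err = xe_vm_userptr_pin(vm);
		if (err)
			return err;
	}

	/* Authoritative check under the notifier lock */
	down_read(&vm->userptr.notifier_lock);
	err = __xe_vm_userptr_needs_repin(vm);
	up_read(&vm->userptr.notifier_lock);

	return err;	/* -EAGAIN: the caller should restart the sequence */
}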
724 
725 static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
726 {
727 	int i;
728 
729 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) {
730 		if (!vops->pt_update_ops[i].num_ops)
731 			continue;
732 
733 		vops->pt_update_ops[i].ops =
734 			kmalloc_array(vops->pt_update_ops[i].num_ops,
735 				      sizeof(*vops->pt_update_ops[i].ops),
736 				      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
737 		if (!vops->pt_update_ops[i].ops)
738 			return array_of_binds ? -ENOBUFS : -ENOMEM;
739 	}
740 
741 	return 0;
742 }
743 ALLOW_ERROR_INJECTION(xe_vma_ops_alloc, ERRNO);
744 
745 static void xe_vma_ops_fini(struct xe_vma_ops *vops)
746 {
747 	int i;
748 
749 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
750 		kfree(vops->pt_update_ops[i].ops);
751 }
752 
753 static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask)
754 {
755 	int i;
756 
757 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
758 		if (BIT(i) & tile_mask)
759 			++vops->pt_update_ops[i].num_ops;
760 }
761 
762 static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
763 				  u8 tile_mask)
764 {
765 	INIT_LIST_HEAD(&op->link);
766 	op->tile_mask = tile_mask;
767 	op->base.op = DRM_GPUVA_OP_MAP;
768 	op->base.map.va.addr = vma->gpuva.va.addr;
769 	op->base.map.va.range = vma->gpuva.va.range;
770 	op->base.map.gem.obj = vma->gpuva.gem.obj;
771 	op->base.map.gem.offset = vma->gpuva.gem.offset;
772 	op->map.vma = vma;
773 	op->map.immediate = true;
774 	op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
775 	op->map.is_null = xe_vma_is_null(vma);
776 }
777 
778 static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
779 				u8 tile_mask)
780 {
781 	struct xe_vma_op *op;
782 
783 	op = kzalloc(sizeof(*op), GFP_KERNEL);
784 	if (!op)
785 		return -ENOMEM;
786 
787 	xe_vm_populate_rebind(op, vma, tile_mask);
788 	list_add_tail(&op->link, &vops->list);
789 	xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
790 
791 	return 0;
792 }
793 
794 static struct dma_fence *ops_execute(struct xe_vm *vm,
795 				     struct xe_vma_ops *vops);
796 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
797 			    struct xe_exec_queue *q,
798 			    struct xe_sync_entry *syncs, u32 num_syncs);
799 
800 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
801 {
802 	struct dma_fence *fence;
803 	struct xe_vma *vma, *next;
804 	struct xe_vma_ops vops;
805 	struct xe_vma_op *op, *next_op;
806 	int err, i;
807 
808 	lockdep_assert_held(&vm->lock);
809 	if ((xe_vm_in_lr_mode(vm) && !rebind_worker) ||
810 	    list_empty(&vm->rebind_list))
811 		return 0;
812 
813 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
814 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
815 		vops.pt_update_ops[i].wait_vm_bookkeep = true;
816 
817 	xe_vm_assert_held(vm);
818 	list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
819 		xe_assert(vm->xe, vma->tile_present);
820 
821 		if (rebind_worker)
822 			trace_xe_vma_rebind_worker(vma);
823 		else
824 			trace_xe_vma_rebind_exec(vma);
825 
826 		err = xe_vm_ops_add_rebind(&vops, vma,
827 					   vma->tile_present);
828 		if (err)
829 			goto free_ops;
830 	}
831 
832 	err = xe_vma_ops_alloc(&vops, false);
833 	if (err)
834 		goto free_ops;
835 
836 	fence = ops_execute(vm, &vops);
837 	if (IS_ERR(fence)) {
838 		err = PTR_ERR(fence);
839 	} else {
840 		dma_fence_put(fence);
841 		list_for_each_entry_safe(vma, next, &vm->rebind_list,
842 					 combined_links.rebind)
843 			list_del_init(&vma->combined_links.rebind);
844 	}
845 free_ops:
846 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
847 		list_del(&op->link);
848 		kfree(op);
849 	}
850 	xe_vma_ops_fini(&vops);
851 
852 	return err;
853 }
854 
855 struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_mask)
856 {
857 	struct dma_fence *fence = NULL;
858 	struct xe_vma_ops vops;
859 	struct xe_vma_op *op, *next_op;
860 	struct xe_tile *tile;
861 	u8 id;
862 	int err;
863 
864 	lockdep_assert_held(&vm->lock);
865 	xe_vm_assert_held(vm);
866 	xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
867 
868 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
869 	for_each_tile(tile, vm->xe, id) {
870 		vops.pt_update_ops[id].wait_vm_bookkeep = true;
871 		vops.pt_update_ops[tile->id].q =
872 			xe_tile_migrate_exec_queue(tile);
873 	}
874 
875 	err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
876 	if (err)
877 		return ERR_PTR(err);
878 
879 	err = xe_vma_ops_alloc(&vops, false);
880 	if (err) {
881 		fence = ERR_PTR(err);
882 		goto free_ops;
883 	}
884 
885 	fence = ops_execute(vm, &vops);
886 
887 free_ops:
888 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
889 		list_del(&op->link);
890 		kfree(op);
891 	}
892 	xe_vma_ops_fini(&vops);
893 
894 	return fence;
895 }
896 
897 static void xe_vma_free(struct xe_vma *vma)
898 {
899 	if (xe_vma_is_userptr(vma))
900 		kfree(to_userptr_vma(vma));
901 	else
902 		kfree(vma);
903 }
904 
905 #define VMA_CREATE_FLAG_READ_ONLY	BIT(0)
906 #define VMA_CREATE_FLAG_IS_NULL		BIT(1)
907 #define VMA_CREATE_FLAG_DUMPABLE	BIT(2)
908 
909 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
910 				    struct xe_bo *bo,
911 				    u64 bo_offset_or_userptr,
912 				    u64 start, u64 end,
913 				    u16 pat_index, unsigned int flags)
914 {
915 	struct xe_vma *vma;
916 	struct xe_tile *tile;
917 	u8 id;
918 	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
919 	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
920 	bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
921 
922 	xe_assert(vm->xe, start < end);
923 	xe_assert(vm->xe, end < vm->size);
924 
925 	/*
926 	 * Allocate and ensure that the xe_vma_is_userptr() return value
927 	 * matches what was allocated.
928 	 */
929 	if (!bo && !is_null) {
930 		struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
931 
932 		if (!uvma)
933 			return ERR_PTR(-ENOMEM);
934 
935 		vma = &uvma->vma;
936 	} else {
937 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
938 		if (!vma)
939 			return ERR_PTR(-ENOMEM);
940 
941 		if (is_null)
942 			vma->gpuva.flags |= DRM_GPUVA_SPARSE;
943 		if (bo)
944 			vma->gpuva.gem.obj = &bo->ttm.base;
945 	}
946 
947 	INIT_LIST_HEAD(&vma->combined_links.rebind);
948 
949 	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
950 	vma->gpuva.vm = &vm->gpuvm;
951 	vma->gpuva.va.addr = start;
952 	vma->gpuva.va.range = end - start + 1;
953 	if (read_only)
954 		vma->gpuva.flags |= XE_VMA_READ_ONLY;
955 	if (dumpable)
956 		vma->gpuva.flags |= XE_VMA_DUMPABLE;
957 
958 	for_each_tile(tile, vm->xe, id)
959 		vma->tile_mask |= 0x1 << id;
960 
961 	if (vm->xe->info.has_atomic_enable_pte_bit)
962 		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
963 
964 	vma->pat_index = pat_index;
965 
966 	if (bo) {
967 		struct drm_gpuvm_bo *vm_bo;
968 
969 		xe_bo_assert_held(bo);
970 
971 		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
972 		if (IS_ERR(vm_bo)) {
973 			xe_vma_free(vma);
974 			return ERR_CAST(vm_bo);
975 		}
976 
977 		drm_gpuvm_bo_extobj_add(vm_bo);
978 		drm_gem_object_get(&bo->ttm.base);
979 		vma->gpuva.gem.offset = bo_offset_or_userptr;
980 		drm_gpuva_link(&vma->gpuva, vm_bo);
981 		drm_gpuvm_bo_put(vm_bo);
982 	} else /* userptr or null */ {
983 		if (!is_null) {
984 			struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
985 			u64 size = end - start + 1;
986 			int err;
987 
988 			INIT_LIST_HEAD(&userptr->invalidate_link);
989 			INIT_LIST_HEAD(&userptr->repin_link);
990 			vma->gpuva.gem.offset = bo_offset_or_userptr;
991 
992 			err = mmu_interval_notifier_insert(&userptr->notifier,
993 							   current->mm,
994 							   xe_vma_userptr(vma), size,
995 							   &vma_userptr_notifier_ops);
996 			if (err) {
997 				xe_vma_free(vma);
998 				return ERR_PTR(err);
999 			}
1000 
1001 			userptr->notifier_seq = LONG_MAX;
1002 		}
1003 
1004 		xe_vm_get(vm);
1005 	}
1006 
1007 	return vma;
1008 }
1009 
1010 static void xe_vma_destroy_late(struct xe_vma *vma)
1011 {
1012 	struct xe_vm *vm = xe_vma_vm(vma);
1013 
1014 	if (vma->ufence) {
1015 		xe_sync_ufence_put(vma->ufence);
1016 		vma->ufence = NULL;
1017 	}
1018 
1019 	if (xe_vma_is_userptr(vma)) {
1020 		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
1021 		struct xe_userptr *userptr = &uvma->userptr;
1022 
1023 		if (userptr->sg)
1024 			xe_hmm_userptr_free_sg(uvma);
1025 
1026 		/*
1027 		 * Since userptr pages are not pinned, we can't remove
1028 		 * the notifier until we're sure the GPU is not accessing
1029 		 * them anymore
1030 		 */
1031 		mmu_interval_notifier_remove(&userptr->notifier);
1032 		xe_vm_put(vm);
1033 	} else if (xe_vma_is_null(vma)) {
1034 		xe_vm_put(vm);
1035 	} else {
1036 		xe_bo_put(xe_vma_bo(vma));
1037 	}
1038 
1039 	xe_vma_free(vma);
1040 }
1041 
1042 static void vma_destroy_work_func(struct work_struct *w)
1043 {
1044 	struct xe_vma *vma =
1045 		container_of(w, struct xe_vma, destroy_work);
1046 
1047 	xe_vma_destroy_late(vma);
1048 }
1049 
1050 static void vma_destroy_cb(struct dma_fence *fence,
1051 			   struct dma_fence_cb *cb)
1052 {
1053 	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1054 
1055 	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1056 	queue_work(system_unbound_wq, &vma->destroy_work);
1057 }
1058 
1059 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1060 {
1061 	struct xe_vm *vm = xe_vma_vm(vma);
1062 
1063 	lockdep_assert_held_write(&vm->lock);
1064 	xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
1065 
1066 	if (xe_vma_is_userptr(vma)) {
1067 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
1068 
1069 		spin_lock(&vm->userptr.invalidated_lock);
1070 		list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
1071 		spin_unlock(&vm->userptr.invalidated_lock);
1072 	} else if (!xe_vma_is_null(vma)) {
1073 		xe_bo_assert_held(xe_vma_bo(vma));
1074 
1075 		drm_gpuva_unlink(&vma->gpuva);
1076 	}
1077 
1078 	xe_vm_assert_held(vm);
1079 	if (fence) {
1080 		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1081 						 vma_destroy_cb);
1082 
1083 		if (ret) {
1084 			XE_WARN_ON(ret != -ENOENT);
1085 			xe_vma_destroy_late(vma);
1086 		}
1087 	} else {
1088 		xe_vma_destroy_late(vma);
1089 	}
1090 }
1091 
1092 /**
1093  * xe_vm_lock_vma() - drm_exec utility to lock a vma
1094  * @exec: The drm_exec object we're currently locking for.
1095  * @vma: The vma for which we want to lock the vm resv and any attached
1096  * object's resv.
1097  *
1098  * Return: 0 on success, negative error code on error. In particular
1099  * may return -EDEADLK on WW transaction contention and -EINTR if
1100  * an interruptible wait is terminated by a signal.
1101  */
1102 int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
1103 {
1104 	struct xe_vm *vm = xe_vma_vm(vma);
1105 	struct xe_bo *bo = xe_vma_bo(vma);
1106 	int err;
1107 
1108 	XE_WARN_ON(!vm);
1109 
1110 	err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
1111 	if (!err && bo && !bo->vm)
1112 		err = drm_exec_lock_obj(exec, &bo->ttm.base);
1113 
1114 	return err;
1115 }
1116 
1117 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1118 {
1119 	struct drm_exec exec;
1120 	int err;
1121 
1122 	drm_exec_init(&exec, 0, 0);
1123 	drm_exec_until_all_locked(&exec) {
1124 		err = xe_vm_lock_vma(&exec, vma);
1125 		drm_exec_retry_on_contention(&exec);
1126 		if (XE_WARN_ON(err))
1127 			break;
1128 	}
1129 
1130 	xe_vma_destroy(vma, NULL);
1131 
1132 	drm_exec_fini(&exec);
1133 }
1134 
1135 struct xe_vma *
1136 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1137 {
1138 	struct drm_gpuva *gpuva;
1139 
1140 	lockdep_assert_held(&vm->lock);
1141 
1142 	if (xe_vm_is_closed_or_banned(vm))
1143 		return NULL;
1144 
1145 	xe_assert(vm->xe, start + range <= vm->size);
1146 
1147 	gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1148 
1149 	return gpuva ? gpuva_to_vma(gpuva) : NULL;
1150 }
1151 
1152 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1153 {
1154 	int err;
1155 
1156 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1157 	lockdep_assert_held(&vm->lock);
1158 
1159 	mutex_lock(&vm->snap_mutex);
1160 	err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1161 	mutex_unlock(&vm->snap_mutex);
1162 	XE_WARN_ON(err);	/* Shouldn't be possible */
1163 
1164 	return err;
1165 }
1166 
1167 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1168 {
1169 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1170 	lockdep_assert_held(&vm->lock);
1171 
1172 	mutex_lock(&vm->snap_mutex);
1173 	drm_gpuva_remove(&vma->gpuva);
1174 	mutex_unlock(&vm->snap_mutex);
1175 	if (vm->usm.last_fault_vma == vma)
1176 		vm->usm.last_fault_vma = NULL;
1177 }
1178 
1179 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1180 {
1181 	struct xe_vma_op *op;
1182 
1183 	op = kzalloc(sizeof(*op), GFP_KERNEL);
1184 
1185 	if (unlikely(!op))
1186 		return NULL;
1187 
1188 	return &op->base;
1189 }
1190 
1191 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1192 
1193 static const struct drm_gpuvm_ops gpuvm_ops = {
1194 	.op_alloc = xe_vm_op_alloc,
1195 	.vm_bo_validate = xe_gpuvm_validate,
1196 	.vm_free = xe_vm_free,
1197 };
1198 
1199 static u64 pde_encode_pat_index(u16 pat_index)
1200 {
1201 	u64 pte = 0;
1202 
1203 	if (pat_index & BIT(0))
1204 		pte |= XE_PPGTT_PTE_PAT0;
1205 
1206 	if (pat_index & BIT(1))
1207 		pte |= XE_PPGTT_PTE_PAT1;
1208 
1209 	return pte;
1210 }
1211 
1212 static u64 pte_encode_pat_index(u16 pat_index, u32 pt_level)
1213 {
1214 	u64 pte = 0;
1215 
1216 	if (pat_index & BIT(0))
1217 		pte |= XE_PPGTT_PTE_PAT0;
1218 
1219 	if (pat_index & BIT(1))
1220 		pte |= XE_PPGTT_PTE_PAT1;
1221 
1222 	if (pat_index & BIT(2)) {
1223 		if (pt_level)
1224 			pte |= XE_PPGTT_PDE_PDPE_PAT2;
1225 		else
1226 			pte |= XE_PPGTT_PTE_PAT2;
1227 	}
1228 
1229 	if (pat_index & BIT(3))
1230 		pte |= XELPG_PPGTT_PTE_PAT3;
1231 
1232 	if (pat_index & (BIT(4)))
1233 		pte |= XE2_PPGTT_PTE_PAT4;
1234 
1235 	return pte;
1236 }
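
/*
 * Worked example (illustrative): pat_index = 0b01011 has bits 0, 1 and 3 set,
 * so for a leaf PTE (pt_level == 0) the helper above returns
 * XE_PPGTT_PTE_PAT0 | XE_PPGTT_PTE_PAT1 | XELPG_PPGTT_PTE_PAT3.
 */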
1237 
1238 static u64 pte_encode_ps(u32 pt_level)
1239 {
1240 	XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
1241 
1242 	if (pt_level == 1)
1243 		return XE_PDE_PS_2M;
1244 	else if (pt_level == 2)
1245 		return XE_PDPE_PS_1G;
1246 
1247 	return 0;
1248 }
1249 
1250 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
1251 			      const u16 pat_index)
1252 {
1253 	u64 pde;
1254 
1255 	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1256 	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1257 	pde |= pde_encode_pat_index(pat_index);
1258 
1259 	return pde;
1260 }
1261 
1262 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1263 			      u16 pat_index, u32 pt_level)
1264 {
1265 	u64 pte;
1266 
1267 	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1268 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1269 	pte |= pte_encode_pat_index(pat_index, pt_level);
1270 	pte |= pte_encode_ps(pt_level);
1271 
1272 	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1273 		pte |= XE_PPGTT_PTE_DM;
1274 
1275 	return pte;
1276 }
1277 
1278 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1279 			       u16 pat_index, u32 pt_level)
1280 {
1281 	pte |= XE_PAGE_PRESENT;
1282 
1283 	if (likely(!xe_vma_read_only(vma)))
1284 		pte |= XE_PAGE_RW;
1285 
1286 	pte |= pte_encode_pat_index(pat_index, pt_level);
1287 	pte |= pte_encode_ps(pt_level);
1288 
1289 	if (unlikely(xe_vma_is_null(vma)))
1290 		pte |= XE_PTE_NULL;
1291 
1292 	return pte;
1293 }
1294 
1295 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1296 				u16 pat_index,
1297 				u32 pt_level, bool devmem, u64 flags)
1298 {
1299 	u64 pte;
1300 
1301 	/* Avoid passing random bits directly as flags */
1302 	xe_assert(xe, !(flags & ~XE_PTE_PS64));
1303 
1304 	pte = addr;
1305 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1306 	pte |= pte_encode_pat_index(pat_index, pt_level);
1307 	pte |= pte_encode_ps(pt_level);
1308 
1309 	if (devmem)
1310 		pte |= XE_PPGTT_PTE_DM;
1311 
1312 	pte |= flags;
1313 
1314 	return pte;
1315 }
1316 
1317 static const struct xe_pt_ops xelp_pt_ops = {
1318 	.pte_encode_bo = xelp_pte_encode_bo,
1319 	.pte_encode_vma = xelp_pte_encode_vma,
1320 	.pte_encode_addr = xelp_pte_encode_addr,
1321 	.pde_encode_bo = xelp_pde_encode_bo,
1322 };
1323 
1324 static void vm_destroy_work_func(struct work_struct *w);
1325 
1326 /**
1327  * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
1328  * given tile and vm.
1329  * @xe: xe device.
1330  * @tile: tile to set up for.
1331  * @vm: vm to set up for.
1332  *
1333  * Sets up a pagetable tree with one page-table per level and a single
1334  * leaf PTE. All pagetable entries point to the single page-table or,
1335  * for MAX_HUGEPTE_LEVEL, a NULL huge PTE that returns 0 on reads and
1336  * turns writes into NOPs.
1337  *
1338  * Return: 0 on success, negative error code on error.
1339  */
1340 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1341 				struct xe_vm *vm)
1342 {
1343 	u8 id = tile->id;
1344 	int i;
1345 
1346 	for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
1347 		vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
1348 		if (IS_ERR(vm->scratch_pt[id][i]))
1349 			return PTR_ERR(vm->scratch_pt[id][i]);
1350 
1351 		xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
1352 	}
1353 
1354 	return 0;
1355 }
1356 ALLOW_ERROR_INJECTION(xe_vm_create_scratch, ERRNO);
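
/*
 * Illustrative note (assumption based on the loop above): if, for example,
 * the root page table sits at level 3 and MAX_HUGEPTE_LEVEL is 1, then
 * scratch_pt[id][1] and scratch_pt[id][2] are created here, while the root
 * entries are filled later by xe_pt_populate_empty() on pt_root in
 * xe_vm_create().
 */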
1357 
1358 static void xe_vm_free_scratch(struct xe_vm *vm)
1359 {
1360 	struct xe_tile *tile;
1361 	u8 id;
1362 
1363 	if (!xe_vm_has_scratch(vm))
1364 		return;
1365 
1366 	for_each_tile(tile, vm->xe, id) {
1367 		u32 i;
1368 
1369 		if (!vm->pt_root[id])
1370 			continue;
1371 
1372 		for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
1373 			if (vm->scratch_pt[id][i])
1374 				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
1375 	}
1376 }
1377 
1378 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1379 {
1380 	struct drm_gem_object *vm_resv_obj;
1381 	struct xe_vm *vm;
1382 	int err, number_tiles = 0;
1383 	struct xe_tile *tile;
1384 	u8 id;
1385 
1386 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1387 	if (!vm)
1388 		return ERR_PTR(-ENOMEM);
1389 
1390 	vm->xe = xe;
1391 
1392 	vm->size = 1ull << xe->info.va_bits;
1393 
1394 	vm->flags = flags;
1395 
1396 	init_rwsem(&vm->lock);
1397 	mutex_init(&vm->snap_mutex);
1398 
1399 	INIT_LIST_HEAD(&vm->rebind_list);
1400 
1401 	INIT_LIST_HEAD(&vm->userptr.repin_list);
1402 	INIT_LIST_HEAD(&vm->userptr.invalidated);
1403 	init_rwsem(&vm->userptr.notifier_lock);
1404 	spin_lock_init(&vm->userptr.invalidated_lock);
1405 
1406 	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
1407 
1408 	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1409 
1410 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
1411 	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
1412 
1413 	for_each_tile(tile, xe, id)
1414 		xe_range_fence_tree_init(&vm->rftree[id]);
1415 
1416 	vm->pt_ops = &xelp_pt_ops;
1417 
1418 	/*
1419 	 * Long-running workloads are not protected by the scheduler references.
1420 	 * By design, run_job for long-running workloads returns NULL and the
1421 	 * scheduler drops all of its references; hence, protecting the VM
1422 	 * for this case is necessary.
1423 	 */
1424 	if (flags & XE_VM_FLAG_LR_MODE)
1425 		xe_pm_runtime_get_noresume(xe);
1426 
1427 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1428 	if (!vm_resv_obj) {
1429 		err = -ENOMEM;
1430 		goto err_no_resv;
1431 	}
1432 
1433 	drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1434 		       vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
1435 
1436 	drm_gem_object_put(vm_resv_obj);
1437 
1438 	err = xe_vm_lock(vm, true);
1439 	if (err)
1440 		goto err_close;
1441 
1442 	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1443 		vm->flags |= XE_VM_FLAG_64K;
1444 
1445 	for_each_tile(tile, xe, id) {
1446 		if (flags & XE_VM_FLAG_MIGRATION &&
1447 		    tile->id != XE_VM_FLAG_TILE_ID(flags))
1448 			continue;
1449 
1450 		vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1451 		if (IS_ERR(vm->pt_root[id])) {
1452 			err = PTR_ERR(vm->pt_root[id]);
1453 			vm->pt_root[id] = NULL;
1454 			goto err_unlock_close;
1455 		}
1456 	}
1457 
1458 	if (xe_vm_has_scratch(vm)) {
1459 		for_each_tile(tile, xe, id) {
1460 			if (!vm->pt_root[id])
1461 				continue;
1462 
1463 			err = xe_vm_create_scratch(xe, tile, vm);
1464 			if (err)
1465 				goto err_unlock_close;
1466 		}
1467 		vm->batch_invalidate_tlb = true;
1468 	}
1469 
1470 	if (vm->flags & XE_VM_FLAG_LR_MODE) {
1471 		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1472 		vm->batch_invalidate_tlb = false;
1473 	}
1474 
1475 	/* Fill pt_root after allocating scratch tables */
1476 	for_each_tile(tile, xe, id) {
1477 		if (!vm->pt_root[id])
1478 			continue;
1479 
1480 		xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1481 	}
1482 	xe_vm_unlock(vm);
1483 
1484 	/* Kernel migration VM shouldn't have a circular loop.. */
1485 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
1486 		for_each_tile(tile, xe, id) {
1487 			struct xe_exec_queue *q;
1488 			u32 create_flags = EXEC_QUEUE_FLAG_VM;
1489 
1490 			if (!vm->pt_root[id])
1491 				continue;
1492 
1493 			q = xe_exec_queue_create_bind(xe, tile, create_flags, 0);
1494 			if (IS_ERR(q)) {
1495 				err = PTR_ERR(q);
1496 				goto err_close;
1497 			}
1498 			vm->q[id] = q;
1499 			number_tiles++;
1500 		}
1501 	}
1502 
1503 	if (number_tiles > 1)
1504 		vm->composite_fence_ctx = dma_fence_context_alloc(1);
1505 
1506 	trace_xe_vm_create(vm);
1507 
1508 	return vm;
1509 
1510 err_unlock_close:
1511 	xe_vm_unlock(vm);
1512 err_close:
1513 	xe_vm_close_and_put(vm);
1514 	return ERR_PTR(err);
1515 
1516 err_no_resv:
1517 	mutex_destroy(&vm->snap_mutex);
1518 	for_each_tile(tile, xe, id)
1519 		xe_range_fence_tree_fini(&vm->rftree[id]);
1520 	ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
1521 	kfree(vm);
1522 	if (flags & XE_VM_FLAG_LR_MODE)
1523 		xe_pm_runtime_put(xe);
1524 	return ERR_PTR(err);
1525 }
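
/*
 * Illustrative sketch only: a kernel-internal user of xe_vm_create() is
 * expected to pair it with xe_vm_close_and_put() for teardown, e.g.:
 *
 *	vm = xe_vm_create(xe, 0);
 *	if (IS_ERR(vm))
 *		return PTR_ERR(vm);
 *	...
 *	xe_vm_close_and_put(vm);
 */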
1526 
1527 static void xe_vm_close(struct xe_vm *vm)
1528 {
1529 	down_write(&vm->lock);
1530 	vm->size = 0;
1531 	up_write(&vm->lock);
1532 }
1533 
1534 void xe_vm_close_and_put(struct xe_vm *vm)
1535 {
1536 	LIST_HEAD(contested);
1537 	struct xe_device *xe = vm->xe;
1538 	struct xe_tile *tile;
1539 	struct xe_vma *vma, *next_vma;
1540 	struct drm_gpuva *gpuva, *next;
1541 	u8 id;
1542 
1543 	xe_assert(xe, !vm->preempt.num_exec_queues);
1544 
1545 	xe_vm_close(vm);
1546 	if (xe_vm_in_preempt_fence_mode(vm))
1547 		flush_work(&vm->preempt.rebind_work);
1548 
1549 	down_write(&vm->lock);
1550 	for_each_tile(tile, xe, id) {
1551 		if (vm->q[id])
1552 			xe_exec_queue_last_fence_put(vm->q[id], vm);
1553 	}
1554 	up_write(&vm->lock);
1555 
1556 	for_each_tile(tile, xe, id) {
1557 		if (vm->q[id]) {
1558 			xe_exec_queue_kill(vm->q[id]);
1559 			xe_exec_queue_put(vm->q[id]);
1560 			vm->q[id] = NULL;
1561 		}
1562 	}
1563 
1564 	down_write(&vm->lock);
1565 	xe_vm_lock(vm, false);
1566 	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1567 		vma = gpuva_to_vma(gpuva);
1568 
1569 		if (xe_vma_has_no_bo(vma)) {
1570 			down_read(&vm->userptr.notifier_lock);
1571 			vma->gpuva.flags |= XE_VMA_DESTROYED;
1572 			up_read(&vm->userptr.notifier_lock);
1573 		}
1574 
1575 		xe_vm_remove_vma(vm, vma);
1576 
1577 		/* easy case, remove from VMA? */
1578 		if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1579 			list_del_init(&vma->combined_links.rebind);
1580 			xe_vma_destroy(vma, NULL);
1581 			continue;
1582 		}
1583 
1584 		list_move_tail(&vma->combined_links.destroy, &contested);
1585 		vma->gpuva.flags |= XE_VMA_DESTROYED;
1586 	}
1587 
1588 	/*
1589 	 * All vm operations will add shared fences to resv.
1590 	 * The only exception is eviction for a shared object,
1591 	 * but even so, the unbind when evicted would still
1592 	 * install a fence to resv. Hence it's safe to
1593 	 * destroy the pagetables immediately.
1594 	 */
1595 	xe_vm_free_scratch(vm);
1596 
1597 	for_each_tile(tile, xe, id) {
1598 		if (vm->pt_root[id]) {
1599 			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1600 			vm->pt_root[id] = NULL;
1601 		}
1602 	}
1603 	xe_vm_unlock(vm);
1604 
1605 	/*
1606 	 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL.
1607 	 * Since we hold a refcount to the bo, we can remove and free
1608 	 * the members safely without locking.
1609 	 */
1610 	list_for_each_entry_safe(vma, next_vma, &contested,
1611 				 combined_links.destroy) {
1612 		list_del_init(&vma->combined_links.destroy);
1613 		xe_vma_destroy_unlocked(vma);
1614 	}
1615 
1616 	up_write(&vm->lock);
1617 
1618 	down_write(&xe->usm.lock);
1619 	if (vm->usm.asid) {
1620 		void *lookup;
1621 
1622 		xe_assert(xe, xe->info.has_asid);
1623 		xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
1624 
1625 		lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1626 		xe_assert(xe, lookup == vm);
1627 	}
1628 	up_write(&xe->usm.lock);
1629 
1630 	for_each_tile(tile, xe, id)
1631 		xe_range_fence_tree_fini(&vm->rftree[id]);
1632 
1633 	xe_vm_put(vm);
1634 }
1635 
1636 static void vm_destroy_work_func(struct work_struct *w)
1637 {
1638 	struct xe_vm *vm =
1639 		container_of(w, struct xe_vm, destroy_work);
1640 	struct xe_device *xe = vm->xe;
1641 	struct xe_tile *tile;
1642 	u8 id;
1643 
1644 	/* xe_vm_close_and_put was not called? */
1645 	xe_assert(xe, !vm->size);
1646 
1647 	if (xe_vm_in_preempt_fence_mode(vm))
1648 		flush_work(&vm->preempt.rebind_work);
1649 
1650 	mutex_destroy(&vm->snap_mutex);
1651 
1652 	if (vm->flags & XE_VM_FLAG_LR_MODE)
1653 		xe_pm_runtime_put(xe);
1654 
1655 	for_each_tile(tile, xe, id)
1656 		XE_WARN_ON(vm->pt_root[id]);
1657 
1658 	trace_xe_vm_free(vm);
1659 
1660 	ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
1661 
1662 	if (vm->xef)
1663 		xe_file_put(vm->xef);
1664 
1665 	kfree(vm);
1666 }
1667 
1668 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1669 {
1670 	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1671 
1672 	/* To destroy the VM we need to be able to sleep */
1673 	queue_work(system_unbound_wq, &vm->destroy_work);
1674 }
1675 
1676 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1677 {
1678 	struct xe_vm *vm;
1679 
1680 	mutex_lock(&xef->vm.lock);
1681 	vm = xa_load(&xef->vm.xa, id);
1682 	if (vm)
1683 		xe_vm_get(vm);
1684 	mutex_unlock(&xef->vm.lock);
1685 
1686 	return vm;
1687 }
1688 
1689 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1690 {
1691 	return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
1692 					 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
1693 }
1694 
1695 static struct xe_exec_queue *
1696 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
1697 {
1698 	return q ? q : vm->q[0];
1699 }
1700 
1701 static struct xe_user_fence *
1702 find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
1703 {
1704 	unsigned int i;
1705 
1706 	for (i = 0; i < num_syncs; i++) {
1707 		struct xe_sync_entry *e = &syncs[i];
1708 
1709 		if (xe_sync_is_ufence(e))
1710 			return xe_sync_ufence_get(e);
1711 	}
1712 
1713 	return NULL;
1714 }
1715 
1716 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
1717 				    DRM_XE_VM_CREATE_FLAG_LR_MODE | \
1718 				    DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1719 
1720 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1721 		       struct drm_file *file)
1722 {
1723 	struct xe_device *xe = to_xe_device(dev);
1724 	struct xe_file *xef = to_xe_file(file);
1725 	struct drm_xe_vm_create *args = data;
1726 	struct xe_tile *tile;
1727 	struct xe_vm *vm;
1728 	u32 id, asid;
1729 	int err;
1730 	u32 flags = 0;
1731 
1732 	if (XE_IOCTL_DBG(xe, args->extensions))
1733 		return -EINVAL;
1734 
1735 	if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1736 		args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
1737 
1738 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1739 			 !xe->info.has_usm))
1740 		return -EINVAL;
1741 
1742 	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1743 		return -EINVAL;
1744 
1745 	if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1746 		return -EINVAL;
1747 
1748 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
1749 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1750 		return -EINVAL;
1751 
1752 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
1753 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1754 		return -EINVAL;
1755 
1756 	if (XE_IOCTL_DBG(xe, args->extensions))
1757 		return -EINVAL;
1758 
1759 	if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
1760 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
1761 	if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
1762 		flags |= XE_VM_FLAG_LR_MODE;
1763 	if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1764 		flags |= XE_VM_FLAG_FAULT_MODE;
1765 
1766 	vm = xe_vm_create(xe, flags);
1767 	if (IS_ERR(vm))
1768 		return PTR_ERR(vm);
1769 
1770 	if (xe->info.has_asid) {
1771 		down_write(&xe->usm.lock);
1772 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1773 				      XA_LIMIT(1, XE_MAX_ASID - 1),
1774 				      &xe->usm.next_asid, GFP_KERNEL);
1775 		up_write(&xe->usm.lock);
1776 		if (err < 0)
1777 			goto err_close_and_put;
1778 
1779 		vm->usm.asid = asid;
1780 	}
1781 
1782 	vm->xef = xe_file_get(xef);
1783 
1784 	/* Record BO memory for VM pagetable created against client */
1785 	for_each_tile(tile, xe, id)
1786 		if (vm->pt_root[id])
1787 			xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
1788 
1789 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1790 	/* Warning: Security issue - never enable by default */
1791 	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
1792 #endif
1793 
1794 	/* user id alloc must always be last in ioctl to prevent UAF */
1795 	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1796 	if (err)
1797 		goto err_close_and_put;
1798 
1799 	args->vm_id = id;
1800 
1801 	return 0;
1802 
1803 err_close_and_put:
1804 	xe_vm_close_and_put(vm);
1805 
1806 	return err;
1807 }
1808 
1809 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1810 			struct drm_file *file)
1811 {
1812 	struct xe_device *xe = to_xe_device(dev);
1813 	struct xe_file *xef = to_xe_file(file);
1814 	struct drm_xe_vm_destroy *args = data;
1815 	struct xe_vm *vm;
1816 	int err = 0;
1817 
1818 	if (XE_IOCTL_DBG(xe, args->pad) ||
1819 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1820 		return -EINVAL;
1821 
1822 	mutex_lock(&xef->vm.lock);
1823 	vm = xa_load(&xef->vm.xa, args->vm_id);
1824 	if (XE_IOCTL_DBG(xe, !vm))
1825 		err = -ENOENT;
1826 	else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
1827 		err = -EBUSY;
1828 	else
1829 		xa_erase(&xef->vm.xa, args->vm_id);
1830 	mutex_unlock(&xef->vm.lock);
1831 
1832 	if (!err)
1833 		xe_vm_close_and_put(vm);
1834 
1835 	return err;
1836 }
1837 
1838 static const u32 region_to_mem_type[] = {
1839 	XE_PL_TT,
1840 	XE_PL_VRAM0,
1841 	XE_PL_VRAM1,
1842 };
1843 
1844 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
1845 			     bool post_commit)
1846 {
1847 	down_read(&vm->userptr.notifier_lock);
1848 	vma->gpuva.flags |= XE_VMA_DESTROYED;
1849 	up_read(&vm->userptr.notifier_lock);
1850 	if (post_commit)
1851 		xe_vm_remove_vma(vm, vma);
1852 }
1853 
1854 #undef ULL
1855 #define ULL	unsigned long long
1856 
1857 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
1858 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
1859 {
1860 	struct xe_vma *vma;
1861 
1862 	switch (op->op) {
1863 	case DRM_GPUVA_OP_MAP:
1864 		vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
1865 		       (ULL)op->map.va.addr, (ULL)op->map.va.range);
1866 		break;
1867 	case DRM_GPUVA_OP_REMAP:
1868 		vma = gpuva_to_vma(op->remap.unmap->va);
1869 		vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
1870 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
1871 		       op->remap.unmap->keep ? 1 : 0);
1872 		if (op->remap.prev)
1873 			vm_dbg(&xe->drm,
1874 			       "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
1875 			       (ULL)op->remap.prev->va.addr,
1876 			       (ULL)op->remap.prev->va.range);
1877 		if (op->remap.next)
1878 			vm_dbg(&xe->drm,
1879 			       "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
1880 			       (ULL)op->remap.next->va.addr,
1881 			       (ULL)op->remap.next->va.range);
1882 		break;
1883 	case DRM_GPUVA_OP_UNMAP:
1884 		vma = gpuva_to_vma(op->unmap.va);
1885 		vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
1886 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
1887 		       op->unmap.keep ? 1 : 0);
1888 		break;
1889 	case DRM_GPUVA_OP_PREFETCH:
1890 		vma = gpuva_to_vma(op->prefetch.va);
1891 		vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
1892 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
1893 		break;
1894 	default:
1895 		drm_warn(&xe->drm, "NOT POSSIBLE");
1896 	}
1897 }
1898 #else
1899 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
1900 {
1901 }
1902 #endif
1903 
1904 /*
1905  * Create the operations list from IOCTL arguments and set up operation fields so
1906  * that the parse and commit steps are decoupled from IOCTL arguments. This step can fail.
1907  */
1908 static struct drm_gpuva_ops *
1909 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
1910 			 u64 bo_offset_or_userptr, u64 addr, u64 range,
1911 			 u32 operation, u32 flags,
1912 			 u32 prefetch_region, u16 pat_index)
1913 {
1914 	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
1915 	struct drm_gpuva_ops *ops;
1916 	struct drm_gpuva_op *__op;
1917 	struct drm_gpuvm_bo *vm_bo;
1918 	int err;
1919 
1920 	lockdep_assert_held_write(&vm->lock);
1921 
1922 	vm_dbg(&vm->xe->drm,
1923 	       "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
1924 	       operation, (ULL)addr, (ULL)range,
1925 	       (ULL)bo_offset_or_userptr);
1926 
1927 	switch (operation) {
1928 	case DRM_XE_VM_BIND_OP_MAP:
1929 	case DRM_XE_VM_BIND_OP_MAP_USERPTR:
1930 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
1931 						  obj, bo_offset_or_userptr);
1932 		break;
1933 	case DRM_XE_VM_BIND_OP_UNMAP:
1934 		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
1935 		break;
1936 	case DRM_XE_VM_BIND_OP_PREFETCH:
1937 		ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
1938 		break;
1939 	case DRM_XE_VM_BIND_OP_UNMAP_ALL:
1940 		xe_assert(vm->xe, bo);
1941 
1942 		err = xe_bo_lock(bo, true);
1943 		if (err)
1944 			return ERR_PTR(err);
1945 
1946 		vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
1947 		if (IS_ERR(vm_bo)) {
1948 			xe_bo_unlock(bo);
1949 			return ERR_CAST(vm_bo);
1950 		}
1951 
1952 		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
1953 		drm_gpuvm_bo_put(vm_bo);
1954 		xe_bo_unlock(bo);
1955 		break;
1956 	default:
1957 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
1958 		ops = ERR_PTR(-EINVAL);
1959 	}
1960 	if (IS_ERR(ops))
1961 		return ops;
1962 
1963 	drm_gpuva_for_each_op(__op, ops) {
1964 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
1965 
1966 		if (__op->op == DRM_GPUVA_OP_MAP) {
1967 			op->map.immediate =
1968 				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
1969 			op->map.read_only =
1970 				flags & DRM_XE_VM_BIND_FLAG_READONLY;
1971 			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
1972 			op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
1973 			op->map.pat_index = pat_index;
1974 		} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
1975 			op->prefetch.region = prefetch_region;
1976 		}
1977 
1978 		print_op(vm->xe, __op);
1979 	}
1980 
1981 	return ops;
1982 }
1983 ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
1984 
1985 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
1986 			      u16 pat_index, unsigned int flags)
1987 {
1988 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
1989 	struct drm_exec exec;
1990 	struct xe_vma *vma;
1991 	int err = 0;
1992 
1993 	lockdep_assert_held_write(&vm->lock);
1994 
1995 	if (bo) {
1996 		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
1997 		drm_exec_until_all_locked(&exec) {
1998 			err = 0;
1999 			if (!bo->vm) {
2000 				err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
2001 				drm_exec_retry_on_contention(&exec);
2002 			}
2003 			if (!err) {
2004 				err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2005 				drm_exec_retry_on_contention(&exec);
2006 			}
2007 			if (err) {
2008 				drm_exec_fini(&exec);
2009 				return ERR_PTR(err);
2010 			}
2011 		}
2012 	}
2013 	vma = xe_vma_create(vm, bo, op->gem.offset,
2014 			    op->va.addr, op->va.addr +
2015 			    op->va.range - 1, pat_index, flags);
2016 	if (IS_ERR(vma))
2017 		goto err_unlock;
2018 
2019 	if (xe_vma_is_userptr(vma))
2020 		err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2021 	else if (!xe_vma_has_no_bo(vma) && !bo->vm)
2022 		err = add_preempt_fences(vm, bo);
2023 
2024 err_unlock:
2025 	if (bo)
2026 		drm_exec_fini(&exec);
2027 
2028 	if (err) {
2029 		prep_vma_destroy(vm, vma, false);
2030 		xe_vma_destroy_unlocked(vma);
2031 		vma = ERR_PTR(err);
2032 	}
2033 
2034 	return vma;
2035 }
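
/*
 * Illustrative sketch (not driver code) of the drm_exec pattern used in
 * new_vma() above: every lock attempt inside drm_exec_until_all_locked()
 * is followed by drm_exec_retry_on_contention(), which drops all locks
 * already held and restarts the loop if a contended reservation was hit.
 * obj_a and obj_b are placeholders:
 *
 *	struct drm_exec exec;
 *	int err = 0;
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		err = drm_exec_lock_obj(&exec, obj_a);
 *		drm_exec_retry_on_contention(&exec);
 *		if (!err) {
 *			err = drm_exec_lock_obj(&exec, obj_b);
 *			drm_exec_retry_on_contention(&exec);
 *		}
 *		if (err)
 *			break;
 *	}
 *	drm_exec_fini(&exec);
 *
 * Both objects stay locked from a successful loop exit until
 * drm_exec_fini() is called.
 */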
2036 
2037 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2038 {
2039 	if (vma->gpuva.flags & XE_VMA_PTE_1G)
2040 		return SZ_1G;
2041 	else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
2042 		return SZ_2M;
2043 	else if (vma->gpuva.flags & XE_VMA_PTE_64K)
2044 		return SZ_64K;
2045 	else if (vma->gpuva.flags & XE_VMA_PTE_4K)
2046 		return SZ_4K;
2047 
2048 	return SZ_1G;	/* Uninitialized, use max size */
2049 }
2050 
2051 static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2052 {
2053 	switch (size) {
2054 	case SZ_1G:
2055 		vma->gpuva.flags |= XE_VMA_PTE_1G;
2056 		break;
2057 	case SZ_2M:
2058 		vma->gpuva.flags |= XE_VMA_PTE_2M;
2059 		break;
2060 	case SZ_64K:
2061 		vma->gpuva.flags |= XE_VMA_PTE_64K;
2062 		break;
2063 	case SZ_4K:
2064 		vma->gpuva.flags |= XE_VMA_PTE_4K;
2065 		break;
2066 	}
2067 }
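
/*
 * Worked example (illustrative, made-up addresses) for the two helpers
 * above: if an old VMA [0x0, 0x40000000) was bound with 2M PTEs
 * (XE_VMA_PTE_2M), xe_vma_max_pte_size() returns SZ_2M. When a later bind
 * splits that VMA, a remaining piece can skip its rebind only if the new
 * boundary stays 2M aligned: a split at 0x10200000 gives
 * IS_ALIGNED(0x10200000, SZ_2M), so the prev piece keeps its existing
 * page-table entries, while a split at 0x10201000 does not and forces a
 * rebind. See the skip_prev/skip_next handling in
 * vm_bind_ioctl_ops_parse() below.
 */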
2068 
2069 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2070 {
2071 	int err = 0;
2072 
2073 	lockdep_assert_held_write(&vm->lock);
2074 
2075 	switch (op->base.op) {
2076 	case DRM_GPUVA_OP_MAP:
2077 		err |= xe_vm_insert_vma(vm, op->map.vma);
2078 		if (!err)
2079 			op->flags |= XE_VMA_OP_COMMITTED;
2080 		break;
2081 	case DRM_GPUVA_OP_REMAP:
2082 	{
2083 		u8 tile_present =
2084 			gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2085 
2086 		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2087 				 true);
2088 		op->flags |= XE_VMA_OP_COMMITTED;
2089 
2090 		if (op->remap.prev) {
2091 			err |= xe_vm_insert_vma(vm, op->remap.prev);
2092 			if (!err)
2093 				op->flags |= XE_VMA_OP_PREV_COMMITTED;
2094 			if (!err && op->remap.skip_prev) {
2095 				op->remap.prev->tile_present =
2096 					tile_present;
2097 				op->remap.prev = NULL;
2098 			}
2099 		}
2100 		if (op->remap.next) {
2101 			err |= xe_vm_insert_vma(vm, op->remap.next);
2102 			if (!err)
2103 				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2104 			if (!err && op->remap.skip_next) {
2105 				op->remap.next->tile_present =
2106 					tile_present;
2107 				op->remap.next = NULL;
2108 			}
2109 		}
2110 
2111 		/* Adjust for partial unbind after removing VMA from VM */
2112 		if (!err) {
2113 			op->base.remap.unmap->va->va.addr = op->remap.start;
2114 			op->base.remap.unmap->va->va.range = op->remap.range;
2115 		}
2116 		break;
2117 	}
2118 	case DRM_GPUVA_OP_UNMAP:
2119 		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2120 		op->flags |= XE_VMA_OP_COMMITTED;
2121 		break;
2122 	case DRM_GPUVA_OP_PREFETCH:
2123 		op->flags |= XE_VMA_OP_COMMITTED;
2124 		break;
2125 	default:
2126 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2127 	}
2128 
2129 	return err;
2130 }
2131 
2132 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
2133 				   struct xe_vma_ops *vops)
2134 {
2135 	struct xe_device *xe = vm->xe;
2136 	struct drm_gpuva_op *__op;
2137 	struct xe_tile *tile;
2138 	u8 id, tile_mask = 0;
2139 	int err = 0;
2140 
2141 	lockdep_assert_held_write(&vm->lock);
2142 
2143 	for_each_tile(tile, vm->xe, id)
2144 		tile_mask |= 0x1 << id;
2145 
2146 	drm_gpuva_for_each_op(__op, ops) {
2147 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2148 		struct xe_vma *vma;
2149 		unsigned int flags = 0;
2150 
2151 		INIT_LIST_HEAD(&op->link);
2152 		list_add_tail(&op->link, &vops->list);
2153 		op->tile_mask = tile_mask;
2154 
2155 		switch (op->base.op) {
2156 		case DRM_GPUVA_OP_MAP:
2157 		{
2158 			flags |= op->map.read_only ?
2159 				VMA_CREATE_FLAG_READ_ONLY : 0;
2160 			flags |= op->map.is_null ?
2161 				VMA_CREATE_FLAG_IS_NULL : 0;
2162 			flags |= op->map.dumpable ?
2163 				VMA_CREATE_FLAG_DUMPABLE : 0;
2164 
2165 			vma = new_vma(vm, &op->base.map, op->map.pat_index,
2166 				      flags);
2167 			if (IS_ERR(vma))
2168 				return PTR_ERR(vma);
2169 
2170 			op->map.vma = vma;
2171 			if (op->map.immediate || !xe_vm_in_fault_mode(vm))
2172 				xe_vma_ops_incr_pt_update_ops(vops,
2173 							      op->tile_mask);
2174 			break;
2175 		}
2176 		case DRM_GPUVA_OP_REMAP:
2177 		{
2178 			struct xe_vma *old =
2179 				gpuva_to_vma(op->base.remap.unmap->va);
2180 
2181 			op->remap.start = xe_vma_start(old);
2182 			op->remap.range = xe_vma_size(old);
2183 
2184 			if (op->base.remap.prev) {
2185 				flags |= op->base.remap.unmap->va->flags &
2186 					XE_VMA_READ_ONLY ?
2187 					VMA_CREATE_FLAG_READ_ONLY : 0;
2188 				flags |= op->base.remap.unmap->va->flags &
2189 					DRM_GPUVA_SPARSE ?
2190 					VMA_CREATE_FLAG_IS_NULL : 0;
2191 				flags |= op->base.remap.unmap->va->flags &
2192 					XE_VMA_DUMPABLE ?
2193 					VMA_CREATE_FLAG_DUMPABLE : 0;
2194 
2195 				vma = new_vma(vm, op->base.remap.prev,
2196 					      old->pat_index, flags);
2197 				if (IS_ERR(vma))
2198 					return PTR_ERR(vma);
2199 
2200 				op->remap.prev = vma;
2201 
2202 				/*
2203 				 * Userptr creates a new SG mapping so
2204 				 * we must also rebind.
2205 				 */
2206 				op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2207 					IS_ALIGNED(xe_vma_end(vma),
2208 						   xe_vma_max_pte_size(old));
2209 				if (op->remap.skip_prev) {
2210 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2211 					op->remap.range -=
2212 						xe_vma_end(vma) -
2213 						xe_vma_start(old);
2214 					op->remap.start = xe_vma_end(vma);
2215 					vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
2216 					       (ULL)op->remap.start,
2217 					       (ULL)op->remap.range);
2218 				} else {
2219 					xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2220 				}
2221 			}
2222 
2223 			if (op->base.remap.next) {
2224 				flags |= op->base.remap.unmap->va->flags &
2225 					XE_VMA_READ_ONLY ?
2226 					VMA_CREATE_FLAG_READ_ONLY : 0;
2227 				flags |= op->base.remap.unmap->va->flags &
2228 					DRM_GPUVA_SPARSE ?
2229 					VMA_CREATE_FLAG_IS_NULL : 0;
2230 				flags |= op->base.remap.unmap->va->flags &
2231 					XE_VMA_DUMPABLE ?
2232 					VMA_CREATE_FLAG_DUMPABLE : 0;
2233 
2234 				vma = new_vma(vm, op->base.remap.next,
2235 					      old->pat_index, flags);
2236 				if (IS_ERR(vma))
2237 					return PTR_ERR(vma);
2238 
2239 				op->remap.next = vma;
2240 
2241 				/*
2242 				 * Userptr creates a new SG mapping so
2243 				 * we must also rebind.
2244 				 */
2245 				op->remap.skip_next = !xe_vma_is_userptr(old) &&
2246 					IS_ALIGNED(xe_vma_start(vma),
2247 						   xe_vma_max_pte_size(old));
2248 				if (op->remap.skip_next) {
2249 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2250 					op->remap.range -=
2251 						xe_vma_end(old) -
2252 						xe_vma_start(vma);
2253 					vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
2254 					       (ULL)op->remap.start,
2255 					       (ULL)op->remap.range);
2256 				} else {
2257 					xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2258 				}
2259 			}
2260 			xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2261 			break;
2262 		}
2263 		case DRM_GPUVA_OP_UNMAP:
2264 		case DRM_GPUVA_OP_PREFETCH:
2265 			/* FIXME: Need to skip some prefetch ops */
2266 			xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2267 			break;
2268 		default:
2269 			drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2270 		}
2271 
2272 		err = xe_vma_op_commit(vm, op);
2273 		if (err)
2274 			return err;
2275 	}
2276 
2277 	return 0;
2278 }
2279 
2280 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2281 			     bool post_commit, bool prev_post_commit,
2282 			     bool next_post_commit)
2283 {
2284 	lockdep_assert_held_write(&vm->lock);
2285 
2286 	switch (op->base.op) {
2287 	case DRM_GPUVA_OP_MAP:
2288 		if (op->map.vma) {
2289 			prep_vma_destroy(vm, op->map.vma, post_commit);
2290 			xe_vma_destroy_unlocked(op->map.vma);
2291 		}
2292 		break;
2293 	case DRM_GPUVA_OP_UNMAP:
2294 	{
2295 		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2296 
2297 		if (vma) {
2298 			down_read(&vm->userptr.notifier_lock);
2299 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2300 			up_read(&vm->userptr.notifier_lock);
2301 			if (post_commit)
2302 				xe_vm_insert_vma(vm, vma);
2303 		}
2304 		break;
2305 	}
2306 	case DRM_GPUVA_OP_REMAP:
2307 	{
2308 		struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2309 
2310 		if (op->remap.prev) {
2311 			prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2312 			xe_vma_destroy_unlocked(op->remap.prev);
2313 		}
2314 		if (op->remap.next) {
2315 			prep_vma_destroy(vm, op->remap.next, next_post_commit);
2316 			xe_vma_destroy_unlocked(op->remap.next);
2317 		}
2318 		if (vma) {
2319 			down_read(&vm->userptr.notifier_lock);
2320 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2321 			up_read(&vm->userptr.notifier_lock);
2322 			if (post_commit)
2323 				xe_vm_insert_vma(vm, vma);
2324 		}
2325 		break;
2326 	}
2327 	case DRM_GPUVA_OP_PREFETCH:
2328 		/* Nothing to do */
2329 		break;
2330 	default:
2331 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2332 	}
2333 }
2334 
2335 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2336 				     struct drm_gpuva_ops **ops,
2337 				     int num_ops_list)
2338 {
2339 	int i;
2340 
2341 	for (i = num_ops_list - 1; i >= 0; --i) {
2342 		struct drm_gpuva_ops *__ops = ops[i];
2343 		struct drm_gpuva_op *__op;
2344 
2345 		if (!__ops)
2346 			continue;
2347 
2348 		drm_gpuva_for_each_op_reverse(__op, __ops) {
2349 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2350 
2351 			xe_vma_op_unwind(vm, op,
2352 					 op->flags & XE_VMA_OP_COMMITTED,
2353 					 op->flags & XE_VMA_OP_PREV_COMMITTED,
2354 					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2355 		}
2356 	}
2357 }
2358 
2359 static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
2360 				 bool validate)
2361 {
2362 	struct xe_bo *bo = xe_vma_bo(vma);
2363 	struct xe_vm *vm = xe_vma_vm(vma);
2364 	int err = 0;
2365 
2366 	if (bo) {
2367 		if (!bo->vm)
2368 			err = drm_exec_lock_obj(exec, &bo->ttm.base);
2369 		if (!err && validate)
2370 			err = xe_bo_validate(bo, vm,
2371 					     !xe_vm_in_preempt_fence_mode(vm));
2372 	}
2373 
2374 	return err;
2375 }
2376 
2377 static int check_ufence(struct xe_vma *vma)
2378 {
2379 	if (vma->ufence) {
2380 		struct xe_user_fence * const f = vma->ufence;
2381 
2382 		if (!xe_sync_ufence_get_status(f))
2383 			return -EBUSY;
2384 
2385 		vma->ufence = NULL;
2386 		xe_sync_ufence_put(f);
2387 	}
2388 
2389 	return 0;
2390 }
2391 
2392 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
2393 			    struct xe_vma_op *op)
2394 {
2395 	int err = 0;
2396 
2397 	switch (op->base.op) {
2398 	case DRM_GPUVA_OP_MAP:
2399 		err = vma_lock_and_validate(exec, op->map.vma,
2400 					    !xe_vm_in_fault_mode(vm) ||
2401 					    op->map.immediate);
2402 		break;
2403 	case DRM_GPUVA_OP_REMAP:
2404 		err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
2405 		if (err)
2406 			break;
2407 
2408 		err = vma_lock_and_validate(exec,
2409 					    gpuva_to_vma(op->base.remap.unmap->va),
2410 					    false);
2411 		if (!err && op->remap.prev)
2412 			err = vma_lock_and_validate(exec, op->remap.prev, true);
2413 		if (!err && op->remap.next)
2414 			err = vma_lock_and_validate(exec, op->remap.next, true);
2415 		break;
2416 	case DRM_GPUVA_OP_UNMAP:
2417 		err = check_ufence(gpuva_to_vma(op->base.unmap.va));
2418 		if (err)
2419 			break;
2420 
2421 		err = vma_lock_and_validate(exec,
2422 					    gpuva_to_vma(op->base.unmap.va),
2423 					    false);
2424 		break;
2425 	case DRM_GPUVA_OP_PREFETCH:
2426 	{
2427 		struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
2428 		u32 region = op->prefetch.region;
2429 
2430 		xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
2431 
2432 		err = vma_lock_and_validate(exec,
2433 					    gpuva_to_vma(op->base.prefetch.va),
2434 					    false);
2435 		if (!err && !xe_vma_has_no_bo(vma))
2436 			err = xe_bo_migrate(xe_vma_bo(vma),
2437 					    region_to_mem_type[region]);
2438 		break;
2439 	}
2440 	default:
2441 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2442 	}
2443 
2444 	return err;
2445 }
2446 
2447 static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
2448 					   struct xe_vm *vm,
2449 					   struct xe_vma_ops *vops)
2450 {
2451 	struct xe_vma_op *op;
2452 	int err;
2453 
2454 	err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
2455 	if (err)
2456 		return err;
2457 
2458 	list_for_each_entry(op, &vops->list, link) {
2459 		err = op_lock_and_prep(exec, vm, op);
2460 		if (err)
2461 			return err;
2462 	}
2463 
2464 #ifdef TEST_VM_OPS_ERROR
2465 	if (vops->inject_error &&
2466 	    vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK)
2467 		return -ENOSPC;
2468 #endif
2469 
2470 	return 0;
2471 }
2472 
2473 static void op_trace(struct xe_vma_op *op)
2474 {
2475 	switch (op->base.op) {
2476 	case DRM_GPUVA_OP_MAP:
2477 		trace_xe_vma_bind(op->map.vma);
2478 		break;
2479 	case DRM_GPUVA_OP_REMAP:
2480 		trace_xe_vma_unbind(gpuva_to_vma(op->base.remap.unmap->va));
2481 		if (op->remap.prev)
2482 			trace_xe_vma_bind(op->remap.prev);
2483 		if (op->remap.next)
2484 			trace_xe_vma_bind(op->remap.next);
2485 		break;
2486 	case DRM_GPUVA_OP_UNMAP:
2487 		trace_xe_vma_unbind(gpuva_to_vma(op->base.unmap.va));
2488 		break;
2489 	case DRM_GPUVA_OP_PREFETCH:
2490 		trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va));
2491 		break;
2492 	default:
2493 		XE_WARN_ON("NOT POSSIBLE");
2494 	}
2495 }
2496 
2497 static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops)
2498 {
2499 	struct xe_vma_op *op;
2500 
2501 	list_for_each_entry(op, &vops->list, link)
2502 		op_trace(op);
2503 }
2504 
2505 static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops)
2506 {
2507 	struct xe_exec_queue *q = vops->q;
2508 	struct xe_tile *tile;
2509 	int number_tiles = 0;
2510 	u8 id;
2511 
2512 	for_each_tile(tile, vm->xe, id) {
2513 		if (vops->pt_update_ops[id].num_ops)
2514 			++number_tiles;
2515 
2516 		if (vops->pt_update_ops[id].q)
2517 			continue;
2518 
2519 		if (q) {
2520 			vops->pt_update_ops[id].q = q;
2521 			if (vm->pt_root[id] && !list_empty(&q->multi_gt_list))
2522 				q = list_next_entry(q, multi_gt_list);
2523 		} else {
2524 			vops->pt_update_ops[id].q = vm->q[id];
2525 		}
2526 	}
2527 
2528 	return number_tiles;
2529 }
2530 
2531 static struct dma_fence *ops_execute(struct xe_vm *vm,
2532 				     struct xe_vma_ops *vops)
2533 {
2534 	struct xe_tile *tile;
2535 	struct dma_fence *fence = NULL;
2536 	struct dma_fence **fences = NULL;
2537 	struct dma_fence_array *cf = NULL;
2538 	int number_tiles = 0, current_fence = 0, err;
2539 	u8 id;
2540 
2541 	number_tiles = vm_ops_setup_tile_args(vm, vops);
2542 	if (number_tiles == 0)
2543 		return ERR_PTR(-ENODATA);
2544 
2545 	if (number_tiles > 1) {
2546 		fences = kmalloc_array(number_tiles, sizeof(*fences),
2547 				       GFP_KERNEL);
2548 		if (!fences) {
2549 			fence = ERR_PTR(-ENOMEM);
2550 			goto err_trace;
2551 		}
2552 	}
2553 
2554 	for_each_tile(tile, vm->xe, id) {
2555 		if (!vops->pt_update_ops[id].num_ops)
2556 			continue;
2557 
2558 		err = xe_pt_update_ops_prepare(tile, vops);
2559 		if (err) {
2560 			fence = ERR_PTR(err);
2561 			goto err_out;
2562 		}
2563 	}
2564 
2565 	trace_xe_vm_ops_execute(vops);
2566 
2567 	for_each_tile(tile, vm->xe, id) {
2568 		if (!vops->pt_update_ops[id].num_ops)
2569 			continue;
2570 
2571 		fence = xe_pt_update_ops_run(tile, vops);
2572 		if (IS_ERR(fence))
2573 			goto err_out;
2574 
2575 		if (fences)
2576 			fences[current_fence++] = fence;
2577 	}
2578 
2579 	if (fences) {
2580 		cf = dma_fence_array_create(number_tiles, fences,
2581 					    vm->composite_fence_ctx,
2582 					    vm->composite_fence_seqno++,
2583 					    false);
2584 		if (!cf) {
2585 			--vm->composite_fence_seqno;
2586 			fence = ERR_PTR(-ENOMEM);
2587 			goto err_out;
2588 		}
2589 		fence = &cf->base;
2590 	}
2591 
2592 	for_each_tile(tile, vm->xe, id) {
2593 		if (!vops->pt_update_ops[id].num_ops)
2594 			continue;
2595 
2596 		xe_pt_update_ops_fini(tile, vops);
2597 	}
2598 
2599 	return fence;
2600 
2601 err_out:
2602 	for_each_tile(tile, vm->xe, id) {
2603 		if (!vops->pt_update_ops[id].num_ops)
2604 			continue;
2605 
2606 		xe_pt_update_ops_abort(tile, vops);
2607 	}
2608 	while (current_fence)
2609 		dma_fence_put(fences[--current_fence]);
2610 	kfree(fences);
2611 	kfree(cf);
2612 
2613 err_trace:
2614 	trace_xe_vm_ops_fail(vm);
2615 	return fence;
2616 }
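
/*
 * Illustrative sketch (not driver code) of the composite-fence pattern
 * ops_execute() uses for multi-tile updates: the per-tile fences are
 * collected into an array whose ownership, including the fence
 * references, passes to the dma_fence_array on success. fence_a, fence_b,
 * context and seqno are placeholders:
 *
 *	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
 *	if (!fences)
 *		return -ENOMEM;
 *	fences[0] = dma_fence_get(fence_a);
 *	fences[1] = dma_fence_get(fence_b);
 *	cf = dma_fence_array_create(2, fences, context, seqno, false);
 *	if (!cf) {
 *		dma_fence_put(fences[0]);
 *		dma_fence_put(fences[1]);
 *		kfree(fences);
 *		return -ENOMEM;
 *	}
 *
 * &cf->base then signals once both per-tile fences have signalled (the
 * last argument selects "signal on all" rather than "signal on any").
 */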
2617 
2618 static void vma_add_ufence(struct xe_vma *vma, struct xe_user_fence *ufence)
2619 {
2620 	if (vma->ufence)
2621 		xe_sync_ufence_put(vma->ufence);
2622 	vma->ufence = __xe_sync_ufence_get(ufence);
2623 }
2624 
2625 static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op,
2626 			  struct xe_user_fence *ufence)
2627 {
2628 	switch (op->base.op) {
2629 	case DRM_GPUVA_OP_MAP:
2630 		vma_add_ufence(op->map.vma, ufence);
2631 		break;
2632 	case DRM_GPUVA_OP_REMAP:
2633 		if (op->remap.prev)
2634 			vma_add_ufence(op->remap.prev, ufence);
2635 		if (op->remap.next)
2636 			vma_add_ufence(op->remap.next, ufence);
2637 		break;
2638 	case DRM_GPUVA_OP_UNMAP:
2639 		break;
2640 	case DRM_GPUVA_OP_PREFETCH:
2641 		vma_add_ufence(gpuva_to_vma(op->base.prefetch.va), ufence);
2642 		break;
2643 	default:
2644 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2645 	}
2646 }
2647 
2648 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
2649 				   struct dma_fence *fence)
2650 {
2651 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
2652 	struct xe_user_fence *ufence;
2653 	struct xe_vma_op *op;
2654 	int i;
2655 
2656 	ufence = find_ufence_get(vops->syncs, vops->num_syncs);
2657 	list_for_each_entry(op, &vops->list, link) {
2658 		if (ufence)
2659 			op_add_ufence(vm, op, ufence);
2660 
2661 		if (op->base.op == DRM_GPUVA_OP_UNMAP)
2662 			xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
2663 		else if (op->base.op == DRM_GPUVA_OP_REMAP)
2664 			xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
2665 				       fence);
2666 	}
2667 	if (ufence)
2668 		xe_sync_ufence_put(ufence);
2669 	for (i = 0; i < vops->num_syncs; i++)
2670 		xe_sync_entry_signal(vops->syncs + i, fence);
2671 	xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
2672 	dma_fence_put(fence);
2673 }
2674 
2675 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2676 				     struct xe_vma_ops *vops)
2677 {
2678 	struct drm_exec exec;
2679 	struct dma_fence *fence;
2680 	int err;
2681 
2682 	lockdep_assert_held_write(&vm->lock);
2683 
2684 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
2685 		      DRM_EXEC_IGNORE_DUPLICATES, 0);
2686 	drm_exec_until_all_locked(&exec) {
2687 		err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
2688 		drm_exec_retry_on_contention(&exec);
2689 		if (err)
2690 			goto unlock;
2691 
2692 		fence = ops_execute(vm, vops);
2693 		if (IS_ERR(fence)) {
2694 			err = PTR_ERR(fence);
2695 			goto unlock;
2696 		}
2697 
2698 		vm_bind_ioctl_ops_fini(vm, vops, fence);
2699 	}
2700 
2701 unlock:
2702 	drm_exec_fini(&exec);
2703 	return err;
2704 }
2705 ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
2706 
2707 #define SUPPORTED_FLAGS_STUB  \
2708 	(DRM_XE_VM_BIND_FLAG_READONLY | \
2709 	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
2710 	 DRM_XE_VM_BIND_FLAG_NULL | \
2711 	 DRM_XE_VM_BIND_FLAG_DUMPABLE)
2712 
2713 #ifdef TEST_VM_OPS_ERROR
2714 #define SUPPORTED_FLAGS	(SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
2715 #else
2716 #define SUPPORTED_FLAGS	SUPPORTED_FLAGS_STUB
2717 #endif
2718 
2719 #define XE_64K_PAGE_MASK 0xffffull
2720 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
2721 
2722 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2723 				    struct drm_xe_vm_bind *args,
2724 				    struct drm_xe_vm_bind_op **bind_ops)
2725 {
2726 	int err;
2727 	int i;
2728 
2729 	if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
2730 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2731 		return -EINVAL;
2732 
2733 	if (XE_IOCTL_DBG(xe, args->extensions))
2734 		return -EINVAL;
2735 
2736 	if (args->num_binds > 1) {
2737 		u64 __user *bind_user =
2738 			u64_to_user_ptr(args->vector_of_binds);
2739 
2740 		*bind_ops = kvmalloc_array(args->num_binds,
2741 					   sizeof(struct drm_xe_vm_bind_op),
2742 					   GFP_KERNEL | __GFP_ACCOUNT |
2743 					   __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
2744 		if (!*bind_ops)
2745 			return -ENOBUFS;
2746 
2747 		err = __copy_from_user(*bind_ops, bind_user,
2748 				       sizeof(struct drm_xe_vm_bind_op) *
2749 				       args->num_binds);
2750 		if (XE_IOCTL_DBG(xe, err)) {
2751 			err = -EFAULT;
2752 			goto free_bind_ops;
2753 		}
2754 	} else {
2755 		*bind_ops = &args->bind;
2756 	}
2757 
2758 	for (i = 0; i < args->num_binds; ++i) {
2759 		u64 range = (*bind_ops)[i].range;
2760 		u64 addr = (*bind_ops)[i].addr;
2761 		u32 op = (*bind_ops)[i].op;
2762 		u32 flags = (*bind_ops)[i].flags;
2763 		u32 obj = (*bind_ops)[i].obj;
2764 		u64 obj_offset = (*bind_ops)[i].obj_offset;
2765 		u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
2766 		bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2767 		u16 pat_index = (*bind_ops)[i].pat_index;
2768 		u16 coh_mode;
2769 
2770 		if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
2771 			err = -EINVAL;
2772 			goto free_bind_ops;
2773 		}
2774 
2775 		pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
2776 		(*bind_ops)[i].pat_index = pat_index;
2777 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2778 		if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
2779 			err = -EINVAL;
2780 			goto free_bind_ops;
2781 		}
2782 
2783 		if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
2784 			err = -EINVAL;
2785 			goto free_bind_ops;
2786 		}
2787 
2788 		if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
2789 		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
2790 		    XE_IOCTL_DBG(xe, obj && is_null) ||
2791 		    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
2792 		    XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
2793 				 is_null) ||
2794 		    XE_IOCTL_DBG(xe, !obj &&
2795 				 op == DRM_XE_VM_BIND_OP_MAP &&
2796 				 !is_null) ||
2797 		    XE_IOCTL_DBG(xe, !obj &&
2798 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2799 		    XE_IOCTL_DBG(xe, addr &&
2800 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2801 		    XE_IOCTL_DBG(xe, range &&
2802 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2803 		    XE_IOCTL_DBG(xe, obj &&
2804 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2805 		    XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2806 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2807 		    XE_IOCTL_DBG(xe, obj &&
2808 				 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
2809 		    XE_IOCTL_DBG(xe, prefetch_region &&
2810 				 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
2811 		    XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
2812 				       xe->info.mem_region_mask)) ||
2813 		    XE_IOCTL_DBG(xe, obj &&
2814 				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
2815 			err = -EINVAL;
2816 			goto free_bind_ops;
2817 		}
2818 
2819 		if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
2820 		    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
2821 		    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
2822 		    XE_IOCTL_DBG(xe, !range &&
2823 				 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
2824 			err = -EINVAL;
2825 			goto free_bind_ops;
2826 		}
2827 	}
2828 
2829 	return 0;
2830 
2831 free_bind_ops:
2832 	if (args->num_binds > 1)
2833 		kvfree(*bind_ops);
2834 	return err;
2835 }
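
/*
 * Illustrative sketch (userspace view, not driver code): a bind op that
 * passes the checks above for a NULL (sparse) mapping. It must use
 * DRM_XE_VM_BIND_OP_MAP with no GEM handle and no offset, a page-aligned
 * addr and a non-zero, page-aligned range; pat_index is a placeholder for
 * a valid, non-reserved PAT entry on the target device:
 *
 *	struct drm_xe_vm_bind_op op = {
 *		.op = DRM_XE_VM_BIND_OP_MAP,
 *		.flags = DRM_XE_VM_BIND_FLAG_NULL,
 *		.addr = 0x1a0000,
 *		.range = 0x10000,
 *		.pat_index = pat_index,
 *	};
 */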
2836 
2837 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
2838 				       struct xe_exec_queue *q,
2839 				       struct xe_sync_entry *syncs,
2840 				       int num_syncs)
2841 {
2842 	struct dma_fence *fence;
2843 	int i, err = 0;
2844 
2845 	fence = xe_sync_in_fence_get(syncs, num_syncs,
2846 				     to_wait_exec_queue(vm, q), vm);
2847 	if (IS_ERR(fence))
2848 		return PTR_ERR(fence);
2849 
2850 	for (i = 0; i < num_syncs; i++)
2851 		xe_sync_entry_signal(&syncs[i], fence);
2852 
2853 	xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
2854 				     fence);
2855 	dma_fence_put(fence);
2856 
2857 	return err;
2858 }
2859 
2860 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
2861 			    struct xe_exec_queue *q,
2862 			    struct xe_sync_entry *syncs, u32 num_syncs)
2863 {
2864 	memset(vops, 0, sizeof(*vops));
2865 	INIT_LIST_HEAD(&vops->list);
2866 	vops->vm = vm;
2867 	vops->q = q;
2868 	vops->syncs = syncs;
2869 	vops->num_syncs = num_syncs;
2870 }
2871 
2872 static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
2873 					u64 addr, u64 range, u64 obj_offset,
2874 					u16 pat_index)
2875 {
2876 	u16 coh_mode;
2877 
2878 	if (XE_IOCTL_DBG(xe, range > bo->size) ||
2879 	    XE_IOCTL_DBG(xe, obj_offset >
2880 			 bo->size - range)) {
2881 		return -EINVAL;
2882 	}
2883 
2884 	/*
2885 	 * Some platforms require 64k VM_BIND alignment,
2886 	 * specifically those with XE_VRAM_FLAGS_NEED64K.
2887 	 *
2888 	 * Other platforms may have BOs set to 64k physical placement,
2889 	 * but those can still be mapped at 4k offsets. This check is only
2890 	 * there for the former case.
2891 	 */
2892 	if ((bo->flags & XE_BO_FLAG_INTERNAL_64K) &&
2893 	    (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)) {
2894 		if (XE_IOCTL_DBG(xe, obj_offset &
2895 				 XE_64K_PAGE_MASK) ||
2896 		    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
2897 		    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
2898 			return  -EINVAL;
2899 		}
2900 	}
2901 
2902 	coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2903 	if (bo->cpu_caching) {
2904 		if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2905 				 bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
2906 			return  -EINVAL;
2907 		}
2908 	} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
2909 		/*
2910 		 * Imported dma-buf from a different device should
2911 		 * require 1way or 2way coherency since we don't know
2912 		 * how it was mapped on the CPU. Just assume it is
2913 		 * potentially cached on the CPU side.
2914 		 */
2915 		return  -EINVAL;
2916 	}
2917 
2918 	return 0;
2919 }
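
/*
 * Worked example (illustrative): with XE_64K_PAGE_MASK == 0xffff, binding
 * a 64k-placed BO on a platform with XE_VRAM_FLAGS_NEED64K requires addr,
 * range and obj_offset to all be 64KiB multiples. addr = 0x210000 passes
 * (0x210000 & 0xffff == 0) while addr = 0x201000 is rejected with -EINVAL
 * (0x201000 & 0xffff == 0x1000).
 */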
2920 
2921 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2922 {
2923 	struct xe_device *xe = to_xe_device(dev);
2924 	struct xe_file *xef = to_xe_file(file);
2925 	struct drm_xe_vm_bind *args = data;
2926 	struct drm_xe_sync __user *syncs_user;
2927 	struct xe_bo **bos = NULL;
2928 	struct drm_gpuva_ops **ops = NULL;
2929 	struct xe_vm *vm;
2930 	struct xe_exec_queue *q = NULL;
2931 	u32 num_syncs, num_ufence = 0;
2932 	struct xe_sync_entry *syncs = NULL;
2933 	struct drm_xe_vm_bind_op *bind_ops;
2934 	struct xe_vma_ops vops;
2935 	int err;
2936 	int i;
2937 
2938 	err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
2939 	if (err)
2940 		return err;
2941 
2942 	if (args->exec_queue_id) {
2943 		q = xe_exec_queue_lookup(xef, args->exec_queue_id);
2944 		if (XE_IOCTL_DBG(xe, !q)) {
2945 			err = -ENOENT;
2946 			goto free_objs;
2947 		}
2948 
2949 		if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
2950 			err = -EINVAL;
2951 			goto put_exec_queue;
2952 		}
2953 	}
2954 
2955 	vm = xe_vm_lookup(xef, args->vm_id);
2956 	if (XE_IOCTL_DBG(xe, !vm)) {
2957 		err = -EINVAL;
2958 		goto put_exec_queue;
2959 	}
2960 
2961 	err = down_write_killable(&vm->lock);
2962 	if (err)
2963 		goto put_vm;
2964 
2965 	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
2966 		err = -ENOENT;
2967 		goto release_vm_lock;
2968 	}
2969 
2970 	for (i = 0; i < args->num_binds; ++i) {
2971 		u64 range = bind_ops[i].range;
2972 		u64 addr = bind_ops[i].addr;
2973 
2974 		if (XE_IOCTL_DBG(xe, range > vm->size) ||
2975 		    XE_IOCTL_DBG(xe, addr > vm->size - range)) {
2976 			err = -EINVAL;
2977 			goto release_vm_lock;
2978 		}
2979 	}
2980 
2981 	if (args->num_binds) {
2982 		bos = kvcalloc(args->num_binds, sizeof(*bos),
2983 			       GFP_KERNEL | __GFP_ACCOUNT |
2984 			       __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
2985 		if (!bos) {
2986 			err = -ENOMEM;
2987 			goto release_vm_lock;
2988 		}
2989 
2990 		ops = kvcalloc(args->num_binds, sizeof(*ops),
2991 			       GFP_KERNEL | __GFP_ACCOUNT |
2992 			       __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
2993 		if (!ops) {
2994 			err = -ENOMEM;
2995 			goto release_vm_lock;
2996 		}
2997 	}
2998 
2999 	for (i = 0; i < args->num_binds; ++i) {
3000 		struct drm_gem_object *gem_obj;
3001 		u64 range = bind_ops[i].range;
3002 		u64 addr = bind_ops[i].addr;
3003 		u32 obj = bind_ops[i].obj;
3004 		u64 obj_offset = bind_ops[i].obj_offset;
3005 		u16 pat_index = bind_ops[i].pat_index;
3006 
3007 		if (!obj)
3008 			continue;
3009 
3010 		gem_obj = drm_gem_object_lookup(file, obj);
3011 		if (XE_IOCTL_DBG(xe, !gem_obj)) {
3012 			err = -ENOENT;
3013 			goto put_obj;
3014 		}
3015 		bos[i] = gem_to_xe_bo(gem_obj);
3016 
3017 		err = xe_vm_bind_ioctl_validate_bo(xe, bos[i], addr, range,
3018 						   obj_offset, pat_index);
3019 		if (err)
3020 			goto put_obj;
3021 	}
3022 
3023 	if (args->num_syncs) {
3024 		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3025 		if (!syncs) {
3026 			err = -ENOMEM;
3027 			goto put_obj;
3028 		}
3029 	}
3030 
3031 	syncs_user = u64_to_user_ptr(args->syncs);
3032 	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3033 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3034 					  &syncs_user[num_syncs],
3035 					  (xe_vm_in_lr_mode(vm) ?
3036 					   SYNC_PARSE_FLAG_LR_MODE : 0) |
3037 					  (!args->num_binds ?
3038 					   SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
3039 		if (err)
3040 			goto free_syncs;
3041 
3042 		if (xe_sync_is_ufence(&syncs[num_syncs]))
3043 			num_ufence++;
3044 	}
3045 
3046 	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
3047 		err = -EINVAL;
3048 		goto free_syncs;
3049 	}
3050 
3051 	if (!args->num_binds) {
3052 		err = -ENODATA;
3053 		goto free_syncs;
3054 	}
3055 
3056 	xe_vma_ops_init(&vops, vm, q, syncs, num_syncs);
3057 	for (i = 0; i < args->num_binds; ++i) {
3058 		u64 range = bind_ops[i].range;
3059 		u64 addr = bind_ops[i].addr;
3060 		u32 op = bind_ops[i].op;
3061 		u32 flags = bind_ops[i].flags;
3062 		u64 obj_offset = bind_ops[i].obj_offset;
3063 		u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
3064 		u16 pat_index = bind_ops[i].pat_index;
3065 
3066 		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3067 						  addr, range, op, flags,
3068 						  prefetch_region, pat_index);
3069 		if (IS_ERR(ops[i])) {
3070 			err = PTR_ERR(ops[i]);
3071 			ops[i] = NULL;
3072 			goto unwind_ops;
3073 		}
3074 
3075 		err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops);
3076 		if (err)
3077 			goto unwind_ops;
3078 
3079 #ifdef TEST_VM_OPS_ERROR
3080 		if (flags & FORCE_OP_ERROR) {
3081 			vops.inject_error = true;
3082 			vm->xe->vm_inject_error_position =
3083 				(vm->xe->vm_inject_error_position + 1) %
3084 				FORCE_OP_ERROR_COUNT;
3085 		}
3086 #endif
3087 	}
3088 
3089 	/* Nothing to do */
3090 	if (list_empty(&vops.list)) {
3091 		err = -ENODATA;
3092 		goto unwind_ops;
3093 	}
3094 
3095 	err = xe_vma_ops_alloc(&vops, args->num_binds > 1);
3096 	if (err)
3097 		goto unwind_ops;
3098 
3099 	err = vm_bind_ioctl_ops_execute(vm, &vops);
3100 
3101 unwind_ops:
3102 	if (err && err != -ENODATA)
3103 		vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3104 	xe_vma_ops_fini(&vops);
3105 	for (i = args->num_binds - 1; i >= 0; --i)
3106 		if (ops[i])
3107 			drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
3108 free_syncs:
3109 	if (err == -ENODATA)
3110 		err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3111 	while (num_syncs--)
3112 		xe_sync_entry_cleanup(&syncs[num_syncs]);
3113 
3114 	kfree(syncs);
3115 put_obj:
3116 	for (i = 0; i < args->num_binds; ++i)
3117 		xe_bo_put(bos[i]);
3118 release_vm_lock:
3119 	up_write(&vm->lock);
3120 put_vm:
3121 	xe_vm_put(vm);
3122 put_exec_queue:
3123 	if (q)
3124 		xe_exec_queue_put(q);
3125 free_objs:
3126 	kvfree(bos);
3127 	kvfree(ops);
3128 	if (args->num_binds > 1)
3129 		kvfree(bind_ops);
3130 	return err;
3131 }
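
/*
 * Illustrative sketch (userspace view, not driver code): a minimal single
 * MAP bind through DRM_IOCTL_XE_VM_BIND, assuming fd, vm_id, bo_handle and
 * a valid pat_index were obtained earlier. With num_binds == 1 the op is
 * passed inline in .bind rather than via .vector_of_binds:
 *
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm_id,
 *		.num_binds = 1,
 *		.bind = {
 *			.op = DRM_XE_VM_BIND_OP_MAP,
 *			.obj = bo_handle,
 *			.obj_offset = 0,
 *			.addr = 0x200000,
 *			.range = 0x10000,
 *			.pat_index = pat_index,
 *		},
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind))
 *		return -errno;
 *
 * The bind itself runs asynchronously (here on the VM's default bind
 * queue, since no exec_queue_id is given); completion is typically
 * observed via sync objects passed through .num_syncs/.syncs.
 */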
3132 
3133 /**
3134  * xe_vm_lock() - Lock the vm's dma_resv object
3135  * @vm: The struct xe_vm whose lock is to be locked
3136  * @intr: Whether to perform any wait interruptible
3137  *
3138  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3139  * contended lock was interrupted. If @intr is false, the function
3140  * always returns 0.
3141  */
3142 int xe_vm_lock(struct xe_vm *vm, bool intr)
3143 {
3144 	if (intr)
3145 		return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3146 
3147 	return dma_resv_lock(xe_vm_resv(vm), NULL);
3148 }
3149 
3150 /**
3151  * xe_vm_unlock() - Unlock the vm's dma_resv object
3152  * @vm: The struct xe_vm whose lock is to be released.
3153  *
3154  * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
3155  */
3156 void xe_vm_unlock(struct xe_vm *vm)
3157 {
3158 	dma_resv_unlock(xe_vm_resv(vm));
3159 }
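
/*
 * Illustrative usage sketch for the pair above (not driver code); work()
 * stands in for anything that needs the VM's reservation held:
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *	work(vm);
 *	xe_vm_unlock(vm);
 */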
3160 
3161 /**
3162  * xe_vm_invalidate_vma() - invalidate GPU mappings for VMA without a lock
3163  * @vma: VMA to invalidate
3164  *
3165  * Walks the page-table leaves, zeroing the entries owned by this VMA,
3166  * invalidates the TLBs and blocks until the TLB invalidation has
3167  * completed.
3168  *
3169  * Return: 0 on success, negative error code otherwise.
3170  */
3171 int xe_vm_invalidate_vma(struct xe_vma *vma)
3172 {
3173 	struct xe_device *xe = xe_vma_vm(vma)->xe;
3174 	struct xe_tile *tile;
3175 	struct xe_gt_tlb_invalidation_fence
3176 		fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
3177 	u8 id;
3178 	u32 fence_id = 0;
3179 	int ret = 0;
3180 
3181 	xe_assert(xe, !xe_vma_is_null(vma));
3182 	trace_xe_vma_invalidate(vma);
3183 
3184 	vm_dbg(&xe_vma_vm(vma)->xe->drm,
3185 	       "INVALIDATE: addr=0x%016llx, range=0x%016llx",
3186 		xe_vma_start(vma), xe_vma_size(vma));
3187 
3188 	/* Check that we don't race with page-table updates */
3189 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3190 		if (xe_vma_is_userptr(vma)) {
3191 			WARN_ON_ONCE(!mmu_interval_check_retry
3192 				     (&to_userptr_vma(vma)->userptr.notifier,
3193 				      to_userptr_vma(vma)->userptr.notifier_seq));
3194 			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3195 							     DMA_RESV_USAGE_BOOKKEEP));
3196 
3197 		} else {
3198 			xe_bo_assert_held(xe_vma_bo(vma));
3199 		}
3200 	}
3201 
3202 	for_each_tile(tile, xe, id) {
3203 		if (xe_pt_zap_ptes(tile, vma)) {
3204 			xe_device_wmb(xe);
3205 			xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
3206 							  &fence[fence_id],
3207 							  true);
3208 
3209 			ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
3210 							 &fence[fence_id], vma);
3211 			if (ret)
3212 				goto wait;
3213 			++fence_id;
3214 
3215 			if (!tile->media_gt)
3216 				continue;
3217 
3218 			xe_gt_tlb_invalidation_fence_init(tile->media_gt,
3219 							  &fence[fence_id],
3220 							  true);
3221 
3222 			ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
3223 							 &fence[fence_id], vma);
3224 			if (ret)
3225 				goto wait;
3226 			++fence_id;
3227 		}
3228 	}
3229 
3230 wait:
3231 	for (id = 0; id < fence_id; ++id)
3232 		xe_gt_tlb_invalidation_fence_wait(&fence[id]);
3233 
3234 	vma->tile_invalidated = vma->tile_mask;
3235 
3236 	return ret;
3237 }
3238 
3239 struct xe_vm_snapshot {
3240 	unsigned long num_snaps;
3241 	struct {
3242 		u64 ofs, bo_ofs;
3243 		unsigned long len;
3244 		struct xe_bo *bo;
3245 		void *data;
3246 		struct mm_struct *mm;
3247 	} snap[];
3248 };
3249 
3250 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
3251 {
3252 	unsigned long num_snaps = 0, i;
3253 	struct xe_vm_snapshot *snap = NULL;
3254 	struct drm_gpuva *gpuva;
3255 
3256 	if (!vm)
3257 		return NULL;
3258 
3259 	mutex_lock(&vm->snap_mutex);
3260 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3261 		if (gpuva->flags & XE_VMA_DUMPABLE)
3262 			num_snaps++;
3263 	}
3264 
3265 	if (num_snaps)
3266 		snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
3267 	if (!snap) {
3268 		snap = num_snaps ? ERR_PTR(-ENOMEM) : ERR_PTR(-ENODEV);
3269 		goto out_unlock;
3270 	}
3271 
3272 	snap->num_snaps = num_snaps;
3273 	i = 0;
3274 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3275 		struct xe_vma *vma = gpuva_to_vma(gpuva);
3276 		struct xe_bo *bo = vma->gpuva.gem.obj ?
3277 			gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
3278 
3279 		if (!(gpuva->flags & XE_VMA_DUMPABLE))
3280 			continue;
3281 
3282 		snap->snap[i].ofs = xe_vma_start(vma);
3283 		snap->snap[i].len = xe_vma_size(vma);
3284 		if (bo) {
3285 			snap->snap[i].bo = xe_bo_get(bo);
3286 			snap->snap[i].bo_ofs = xe_vma_bo_offset(vma);
3287 		} else if (xe_vma_is_userptr(vma)) {
3288 			struct mm_struct *mm =
3289 				to_userptr_vma(vma)->userptr.notifier.mm;
3290 
3291 			if (mmget_not_zero(mm))
3292 				snap->snap[i].mm = mm;
3293 			else
3294 				snap->snap[i].data = ERR_PTR(-EFAULT);
3295 
3296 			snap->snap[i].bo_ofs = xe_vma_userptr(vma);
3297 		} else {
3298 			snap->snap[i].data = ERR_PTR(-ENOENT);
3299 		}
3300 		i++;
3301 	}
3302 
3303 out_unlock:
3304 	mutex_unlock(&vm->snap_mutex);
3305 	return snap;
3306 }
3307 
3308 void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
3309 {
3310 	if (IS_ERR_OR_NULL(snap))
3311 		return;
3312 
3313 	for (int i = 0; i < snap->num_snaps; i++) {
3314 		struct xe_bo *bo = snap->snap[i].bo;
3315 		int err;
3316 
3317 		if (IS_ERR(snap->snap[i].data))
3318 			continue;
3319 
3320 		snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER);
3321 		if (!snap->snap[i].data) {
3322 			snap->snap[i].data = ERR_PTR(-ENOMEM);
3323 			goto cleanup_bo;
3324 		}
3325 
3326 		if (bo) {
3327 			err = xe_bo_read(bo, snap->snap[i].bo_ofs,
3328 					 snap->snap[i].data, snap->snap[i].len);
3329 		} else {
3330 			void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
3331 
3332 			kthread_use_mm(snap->snap[i].mm);
3333 			if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len))
3334 				err = 0;
3335 			else
3336 				err = -EFAULT;
3337 			kthread_unuse_mm(snap->snap[i].mm);
3338 
3339 			mmput(snap->snap[i].mm);
3340 			snap->snap[i].mm = NULL;
3341 		}
3342 
3343 		if (err) {
3344 			kvfree(snap->snap[i].data);
3345 			snap->snap[i].data = ERR_PTR(err);
3346 		}
3347 
3348 cleanup_bo:
3349 		xe_bo_put(bo);
3350 		snap->snap[i].bo = NULL;
3351 	}
3352 }
3353 
3354 void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
3355 {
3356 	unsigned long i, j;
3357 
3358 	if (IS_ERR_OR_NULL(snap)) {
3359 		drm_printf(p, "[0].error: %li\n", PTR_ERR(snap));
3360 		return;
3361 	}
3362 
3363 	for (i = 0; i < snap->num_snaps; i++) {
3364 		drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
3365 
3366 		if (IS_ERR(snap->snap[i].data)) {
3367 			drm_printf(p, "[%llx].error: %li\n", snap->snap[i].ofs,
3368 				   PTR_ERR(snap->snap[i].data));
3369 			continue;
3370 		}
3371 
3372 		drm_printf(p, "[%llx].data: ", snap->snap[i].ofs);
3373 
3374 		for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
3375 			u32 *val = snap->snap[i].data + j;
3376 			char dumped[ASCII85_BUFSZ];
3377 
3378 			drm_puts(p, ascii85_encode(*val, dumped));
3379 		}
3380 
3381 		drm_puts(p, "\n");
3382 	}
3383 }
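
/*
 * Illustrative output sketch (format only, values made up): each dumpable
 * VMA shows up in the devcoredump as
 *
 *	[100000].length: 0x10000
 *	[100000].data: <ascii85, 5 characters (or "z") per u32 of contents>
 *
 * or, when capturing that VMA failed,
 *
 *	[100000].error: -12
 */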
3384 
3385 void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
3386 {
3387 	unsigned long i;
3388 
3389 	if (IS_ERR_OR_NULL(snap))
3390 		return;
3391 
3392 	for (i = 0; i < snap->num_snaps; i++) {
3393 		if (!IS_ERR(snap->snap[i].data))
3394 			kvfree(snap->snap[i].data);
3395 		xe_bo_put(snap->snap[i].bo);
3396 		if (snap->snap[i].mm)
3397 			mmput(snap->snap[i].mm);
3398 	}
3399 	kvfree(snap);
3400 }
3401