xref: /linux/drivers/gpu/drm/xe/xe_vm.c (revision 429508c84d95811dd1300181dfe84743caff9a38)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_vm.h"
7 
8 #include <linux/dma-fence-array.h>
9 #include <linux/nospec.h>
10 
11 #include <drm/drm_exec.h>
12 #include <drm/drm_print.h>
13 #include <drm/ttm/ttm_execbuf_util.h>
14 #include <drm/ttm/ttm_tt.h>
15 #include <drm/xe_drm.h>
16 #include <linux/ascii85.h>
17 #include <linux/delay.h>
18 #include <linux/kthread.h>
19 #include <linux/mm.h>
20 #include <linux/swap.h>
21 
22 #include <generated/xe_wa_oob.h>
23 
24 #include "regs/xe_gtt_defs.h"
25 #include "xe_assert.h"
26 #include "xe_bo.h"
27 #include "xe_device.h"
28 #include "xe_drm_client.h"
29 #include "xe_exec_queue.h"
30 #include "xe_gt_pagefault.h"
31 #include "xe_gt_tlb_invalidation.h"
32 #include "xe_migrate.h"
33 #include "xe_pat.h"
34 #include "xe_pm.h"
35 #include "xe_preempt_fence.h"
36 #include "xe_pt.h"
37 #include "xe_res_cursor.h"
38 #include "xe_sync.h"
39 #include "xe_trace.h"
40 #include "xe_wa.h"
41 #include "xe_hmm.h"
42 
43 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
44 {
45 	return vm->gpuvm.r_obj;
46 }
47 
48 /**
49  * xe_vma_userptr_check_repin() - Advisory check for repin needed
50  * @uvma: The userptr vma
51  *
52  * Check if the userptr vma has been invalidated since last successful
53  * repin. The check is advisory only and the function can be called
54  * without the vm->userptr.notifier_lock held. There is no guarantee that the
55  * vma userptr will remain valid after a lockless check, so typically
56  * the call needs to be followed by a proper check under the notifier_lock.
57  *
58  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
59  */
60 int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
61 {
62 	return mmu_interval_check_retry(&uvma->userptr.notifier,
63 					uvma->userptr.notifier_seq) ?
64 		-EAGAIN : 0;
65 }
66 
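/*
 * A minimal usage sketch of the advisory check above (assuming the
 * caller already holds vm->lock). The lockless check is only a hint
 * and must be confirmed under vm->userptr.notifier_lock:
 *
 *	if (xe_vma_userptr_check_repin(uvma))
 *		err = xe_vma_userptr_pin_pages(uvma);
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	if (__xe_vm_userptr_needs_repin(vm))
 *		err = -EAGAIN;
 *	up_read(&vm->userptr.notifier_lock);
 *
 * (-EAGAIN here means the repin / rebind sequence should be retried.)
 */
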
67 int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
68 {
69 	struct xe_vma *vma = &uvma->vma;
70 	struct xe_vm *vm = xe_vma_vm(vma);
71 	struct xe_device *xe = vm->xe;
72 
73 	lockdep_assert_held(&vm->lock);
74 	xe_assert(xe, xe_vma_is_userptr(vma));
75 
76 	return xe_hmm_userptr_populate_range(uvma, false);
77 }
78 
79 static bool preempt_fences_waiting(struct xe_vm *vm)
80 {
81 	struct xe_exec_queue *q;
82 
83 	lockdep_assert_held(&vm->lock);
84 	xe_vm_assert_held(vm);
85 
86 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
87 		if (!q->compute.pfence ||
88 		    test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
89 			     &q->compute.pfence->flags)) {
90 			return true;
91 		}
92 	}
93 
94 	return false;
95 }
96 
97 static void free_preempt_fences(struct list_head *list)
98 {
99 	struct list_head *link, *next;
100 
101 	list_for_each_safe(link, next, list)
102 		xe_preempt_fence_free(to_preempt_fence_from_link(link));
103 }
104 
105 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
106 				unsigned int *count)
107 {
108 	lockdep_assert_held(&vm->lock);
109 	xe_vm_assert_held(vm);
110 
111 	if (*count >= vm->preempt.num_exec_queues)
112 		return 0;
113 
114 	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
115 		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
116 
117 		if (IS_ERR(pfence))
118 			return PTR_ERR(pfence);
119 
120 		list_move_tail(xe_preempt_fence_link(pfence), list);
121 	}
122 
123 	return 0;
124 }
125 
126 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
127 {
128 	struct xe_exec_queue *q;
129 
130 	xe_vm_assert_held(vm);
131 
132 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
133 		if (q->compute.pfence) {
134 			long timeout = dma_fence_wait(q->compute.pfence, false);
135 
136 			if (timeout < 0)
137 				return -ETIME;
138 			dma_fence_put(q->compute.pfence);
139 			q->compute.pfence = NULL;
140 		}
141 	}
142 
143 	return 0;
144 }
145 
146 static bool xe_vm_is_idle(struct xe_vm *vm)
147 {
148 	struct xe_exec_queue *q;
149 
150 	xe_vm_assert_held(vm);
151 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
152 		if (!xe_exec_queue_is_idle(q))
153 			return false;
154 	}
155 
156 	return true;
157 }
158 
159 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
160 {
161 	struct list_head *link;
162 	struct xe_exec_queue *q;
163 
164 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
165 		struct dma_fence *fence;
166 
167 		link = list->next;
168 		xe_assert(vm->xe, link != list);
169 
170 		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
171 					     q, q->compute.context,
172 					     ++q->compute.seqno);
173 		dma_fence_put(q->compute.pfence);
174 		q->compute.pfence = fence;
175 	}
176 }
177 
178 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
179 {
180 	struct xe_exec_queue *q;
181 	int err;
182 
183 	if (!vm->preempt.num_exec_queues)
184 		return 0;
185 
186 	err = xe_bo_lock(bo, true);
187 	if (err)
188 		return err;
189 
190 	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
191 	if (err)
192 		goto out_unlock;
193 
194 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
195 		if (q->compute.pfence) {
196 			dma_resv_add_fence(bo->ttm.base.resv,
197 					   q->compute.pfence,
198 					   DMA_RESV_USAGE_BOOKKEEP);
199 		}
200 
201 out_unlock:
202 	xe_bo_unlock(bo);
203 	return err;
204 }
205 
206 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
207 						struct drm_exec *exec)
208 {
209 	struct xe_exec_queue *q;
210 
211 	lockdep_assert_held(&vm->lock);
212 	xe_vm_assert_held(vm);
213 
214 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
215 		q->ops->resume(q);
216 
217 		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->compute.pfence,
218 					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
219 	}
220 }
221 
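/**
 * xe_vm_add_compute_exec_queue() - Add compute exec queue to VM
 * @vm: The VM.
 * @q: The exec_queue
 *
 * Creates a preempt fence for @q, installs it in the VM's reservation
 * object and adds the queue to the VM's list of preempt-fence exec
 * queues. Only valid for VMs in preempt-fence mode.
 *
 * Return: 0 on success, negative error code on error.
 */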
222 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
223 {
224 	struct drm_gpuvm_exec vm_exec = {
225 		.vm = &vm->gpuvm,
226 		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
227 		.num_fences = 1,
228 	};
229 	struct drm_exec *exec = &vm_exec.exec;
230 	struct dma_fence *pfence;
231 	int err;
232 	bool wait;
233 
234 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
235 
236 	down_write(&vm->lock);
237 	err = drm_gpuvm_exec_lock(&vm_exec);
238 	if (err)
239 		goto out_up_write;
240 
241 	pfence = xe_preempt_fence_create(q, q->compute.context,
242 					 ++q->compute.seqno);
243 	if (!pfence) {
244 		err = -ENOMEM;
245 		goto out_fini;
246 	}
247 
248 	list_add(&q->compute.link, &vm->preempt.exec_queues);
249 	++vm->preempt.num_exec_queues;
250 	q->compute.pfence = pfence;
251 
252 	down_read(&vm->userptr.notifier_lock);
253 
254 	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
255 				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
256 
257 	/*
258 	 * Check to see if a preemption on the VM or a userptr invalidation
259 	 * is in flight; if so, trigger this preempt fence to sync state with
260 	 * the other preempt fences on the VM.
261 	 */
262 	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
263 	if (wait)
264 		dma_fence_enable_sw_signaling(pfence);
265 
266 	up_read(&vm->userptr.notifier_lock);
267 
268 out_fini:
269 	drm_exec_fini(exec);
270 out_up_write:
271 	up_write(&vm->lock);
272 
273 	return err;
274 }
275 
276 /**
277  * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
278  * @vm: The VM.
279  * @q: The exec_queue
280  */
281 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
282 {
283 	if (!xe_vm_in_preempt_fence_mode(vm))
284 		return;
285 
286 	down_write(&vm->lock);
287 	list_del(&q->compute.link);
288 	--vm->preempt.num_exec_queues;
289 	if (q->compute.pfence) {
290 		dma_fence_enable_sw_signaling(q->compute.pfence);
291 		dma_fence_put(q->compute.pfence);
292 		q->compute.pfence = NULL;
293 	}
294 	up_write(&vm->lock);
295 }
296 
297 /**
298  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
299  * that need repinning.
300  * @vm: The VM.
301  *
302  * This function checks for whether the VM has userptrs that need repinning,
303  * and provides a release-type barrier on the userptr.notifier_lock after
304  * checking.
305  *
306  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
307  */
308 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
309 {
310 	lockdep_assert_held_read(&vm->userptr.notifier_lock);
311 
312 	return (list_empty(&vm->userptr.repin_list) &&
313 		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
314 }
315 
316 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
317 
318 static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
319 {
320 	struct xe_exec_queue *q;
321 
322 	lockdep_assert_held(&vm->lock);
323 
324 	if (unlocked)
325 		xe_vm_lock(vm, false);
326 
327 	vm->flags |= XE_VM_FLAG_BANNED;
328 	trace_xe_vm_kill(vm);
329 
330 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
331 		q->ops->kill(q);
332 
333 	if (unlocked)
334 		xe_vm_unlock(vm);
335 
336 	/* TODO: Inform user the VM is banned */
337 }
338 
339 /**
340  * xe_vm_validate_should_retry() - Whether to retry after a validate error.
341  * @exec: The drm_exec object used for locking before validation.
342  * @err: The error returned from ttm_bo_validate().
343  * @end: A ktime_t cookie that should be set to 0 before first use and
344  * that should be reused on subsequent calls.
345  *
346  * With multiple active VMs, under memory pressure, it is possible that
347  * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
348  * Until ttm properly handles locking in such scenarios, the best thing the
349  * driver can do is retry with a timeout. Check if that is necessary, and
350  * if so unlock the drm_exec's objects while keeping the ticket to prepare
351  * for a rerun.
352  *
353  * Return: true if a retry after drm_exec_init() is recommended;
354  * false otherwise.
355  */
356 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
357 {
358 	ktime_t cur;
359 
360 	if (err != -ENOMEM)
361 		return false;
362 
363 	cur = ktime_get();
364 	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
365 	if (!ktime_before(cur, *end))
366 		return false;
367 
368 	msleep(20);
369 	return true;
370 }
371 
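/*
 * A minimal sketch of the retry pattern described above, modelled on
 * the rebind worker below; validate_work() is a hypothetical helper
 * standing in for the locking/validation step:
 *
 *	struct drm_exec exec;
 *	ktime_t end = 0;
 *	int err;
 *
 * retry:
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		err = validate_work(&exec, vm);
 *		drm_exec_retry_on_contention(&exec);
 *	}
 *	drm_exec_fini(&exec);
 *	if (err && xe_vm_validate_should_retry(&exec, err, &end))
 *		goto retry;
 */
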
372 static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
373 {
374 	struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
375 	struct drm_gpuva *gpuva;
376 	int ret;
377 
378 	lockdep_assert_held(&vm->lock);
379 	drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
380 		list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
381 			       &vm->rebind_list);
382 
383 	ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
384 	if (ret)
385 		return ret;
386 
387 	vm_bo->evicted = false;
388 	return 0;
389 }
390 
391 /**
392  * xe_vm_validate_rebind() - Validate buffer objects and rebind vmas
393  * @vm: The vm for which we are rebinding.
394  * @exec: The struct drm_exec with the locked GEM objects.
395  * @num_fences: The number of fences to reserve for the operation, not
396  * including rebinds and validations.
397  *
398  * Validates all evicted gem objects and rebinds their vmas. Note that
399  * rebindings may cause evictions and hence the validation-rebind
400  * sequence is rerun until there are no more objects to validate.
401  *
402  * Return: 0 on success, negative error code on error. In particular,
403  * may return -EINTR or -ERESTARTSYS if interrupted, and -EDEADLK if
404  * the drm_exec transaction needs to be restarted.
405  */
406 int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
407 			  unsigned int num_fences)
408 {
409 	struct drm_gem_object *obj;
410 	unsigned long index;
411 	int ret;
412 
413 	do {
414 		ret = drm_gpuvm_validate(&vm->gpuvm, exec);
415 		if (ret)
416 			return ret;
417 
418 		ret = xe_vm_rebind(vm, false);
419 		if (ret)
420 			return ret;
421 	} while (!list_empty(&vm->gpuvm.evict.list));
422 
423 	drm_exec_for_each_locked_object(exec, index, obj) {
424 		ret = dma_resv_reserve_fences(obj->resv, num_fences);
425 		if (ret)
426 			return ret;
427 	}
428 
429 	return 0;
430 }
431 
432 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
433 				 bool *done)
434 {
435 	int err;
436 
437 	err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0);
438 	if (err)
439 		return err;
440 
441 	if (xe_vm_is_idle(vm)) {
442 		vm->preempt.rebind_deactivated = true;
443 		*done = true;
444 		return 0;
445 	}
446 
447 	if (!preempt_fences_waiting(vm)) {
448 		*done = true;
449 		return 0;
450 	}
451 
452 	err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0);
453 	if (err)
454 		return err;
455 
456 	err = wait_for_existing_preempt_fences(vm);
457 	if (err)
458 		return err;
459 
460 	/*
461 	 * Add validation and rebinding to the locking loop since both can
462 	 * cause evictions which may require blocking dma_resv locks.
463 	 * The fence reservation here is intended for the new preempt fences
464 	 * we attach at the end of the rebind work.
465 	 */
466 	return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
467 }
468 
469 static void preempt_rebind_work_func(struct work_struct *w)
470 {
471 	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
472 	struct drm_exec exec;
473 	unsigned int fence_count = 0;
474 	LIST_HEAD(preempt_fences);
475 	ktime_t end = 0;
476 	int err = 0;
477 	long wait;
478 	int __maybe_unused tries = 0;
479 
480 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
481 	trace_xe_vm_rebind_worker_enter(vm);
482 
483 	down_write(&vm->lock);
484 
485 	if (xe_vm_is_closed_or_banned(vm)) {
486 		up_write(&vm->lock);
487 		trace_xe_vm_rebind_worker_exit(vm);
488 		return;
489 	}
490 
491 retry:
492 	if (xe_vm_userptr_check_repin(vm)) {
493 		err = xe_vm_userptr_pin(vm);
494 		if (err)
495 			goto out_unlock_outer;
496 	}
497 
498 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
499 
500 	drm_exec_until_all_locked(&exec) {
501 		bool done = false;
502 
503 		err = xe_preempt_work_begin(&exec, vm, &done);
504 		drm_exec_retry_on_contention(&exec);
505 		if (err || done) {
506 			drm_exec_fini(&exec);
507 			if (err && xe_vm_validate_should_retry(&exec, err, &end))
508 				err = -EAGAIN;
509 
510 			goto out_unlock_outer;
511 		}
512 	}
513 
514 	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
515 	if (err)
516 		goto out_unlock;
517 
518 	err = xe_vm_rebind(vm, true);
519 	if (err)
520 		goto out_unlock;
521 
522 	/* Wait on rebinds and munmap style VM unbinds */
523 	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
524 				     DMA_RESV_USAGE_KERNEL,
525 				     false, MAX_SCHEDULE_TIMEOUT);
526 	if (wait <= 0) {
527 		err = -ETIME;
528 		goto out_unlock;
529 	}
530 
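/*
 * With userptr invalidation injection enabled, force at least one
 * retry of the repin / rebind sequence to exercise the retry path;
 * otherwise only retry if a userptr was invalidated while rebinding.
 */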
531 #define retry_required(__tries, __vm) \
532 	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
533 	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
534 	__xe_vm_userptr_needs_repin(__vm))
535 
536 	down_read(&vm->userptr.notifier_lock);
537 	if (retry_required(tries, vm)) {
538 		up_read(&vm->userptr.notifier_lock);
539 		err = -EAGAIN;
540 		goto out_unlock;
541 	}
542 
543 #undef retry_required
544 
545 	spin_lock(&vm->xe->ttm.lru_lock);
546 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
547 	spin_unlock(&vm->xe->ttm.lru_lock);
548 
549 	/* Point of no return. */
550 	arm_preempt_fences(vm, &preempt_fences);
551 	resume_and_reinstall_preempt_fences(vm, &exec);
552 	up_read(&vm->userptr.notifier_lock);
553 
554 out_unlock:
555 	drm_exec_fini(&exec);
556 out_unlock_outer:
557 	if (err == -EAGAIN) {
558 		trace_xe_vm_rebind_worker_retry(vm);
559 		goto retry;
560 	}
561 
562 	if (err) {
563 		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
564 		xe_vm_kill(vm, true);
565 	}
566 	up_write(&vm->lock);
567 
568 	free_preempt_fences(&preempt_fences);
569 
570 	trace_xe_vm_rebind_worker_exit(vm);
571 }
572 
573 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
574 				   const struct mmu_notifier_range *range,
575 				   unsigned long cur_seq)
576 {
577 	struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
578 	struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
579 	struct xe_vma *vma = &uvma->vma;
580 	struct xe_vm *vm = xe_vma_vm(vma);
581 	struct dma_resv_iter cursor;
582 	struct dma_fence *fence;
583 	long err;
584 
585 	xe_assert(vm->xe, xe_vma_is_userptr(vma));
586 	trace_xe_vma_userptr_invalidate(vma);
587 
588 	if (!mmu_notifier_range_blockable(range))
589 		return false;
590 
591 	vm_dbg(&xe_vma_vm(vma)->xe->drm,
592 	       "NOTIFIER: addr=0x%016llx, range=0x%016llx",
593 		xe_vma_start(vma), xe_vma_size(vma));
594 
595 	down_write(&vm->userptr.notifier_lock);
596 	mmu_interval_set_seq(mni, cur_seq);
597 
598 	/* No need to stop gpu access if the userptr is not yet bound. */
599 	if (!userptr->initial_bind) {
600 		up_write(&vm->userptr.notifier_lock);
601 		return true;
602 	}
603 
604 	/*
605 	 * Tell exec and rebind worker they need to repin and rebind this
606 	 * userptr.
607 	 */
608 	if (!xe_vm_in_fault_mode(vm) &&
609 	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
610 		spin_lock(&vm->userptr.invalidated_lock);
611 		list_move_tail(&userptr->invalidate_link,
612 			       &vm->userptr.invalidated);
613 		spin_unlock(&vm->userptr.invalidated_lock);
614 	}
615 
616 	up_write(&vm->userptr.notifier_lock);
617 
618 	/*
619 	 * Preempt fences turn into schedule disables, pipeline these.
620 	 * Note that even in fault mode, we need to wait for binds and
621 	 * unbinds to complete, and those are attached as BOOKKEEP fences
622 	 * to the vm.
623 	 */
624 	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
625 			    DMA_RESV_USAGE_BOOKKEEP);
626 	dma_resv_for_each_fence_unlocked(&cursor, fence)
627 		dma_fence_enable_sw_signaling(fence);
628 	dma_resv_iter_end(&cursor);
629 
630 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
631 				    DMA_RESV_USAGE_BOOKKEEP,
632 				    false, MAX_SCHEDULE_TIMEOUT);
633 	XE_WARN_ON(err <= 0);
634 
635 	if (xe_vm_in_fault_mode(vm)) {
636 		err = xe_vm_invalidate_vma(vma);
637 		XE_WARN_ON(err);
638 	}
639 
640 	trace_xe_vma_userptr_invalidate_complete(vma);
641 
642 	return true;
643 }
644 
645 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
646 	.invalidate = vma_userptr_invalidate,
647 };
648 
649 int xe_vm_userptr_pin(struct xe_vm *vm)
650 {
651 	struct xe_userptr_vma *uvma, *next;
652 	int err = 0;
653 	LIST_HEAD(tmp_evict);
654 
655 	xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
656 	lockdep_assert_held_write(&vm->lock);
657 
658 	/* Collect invalidated userptrs */
659 	spin_lock(&vm->userptr.invalidated_lock);
660 	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
661 				 userptr.invalidate_link) {
662 		list_del_init(&uvma->userptr.invalidate_link);
663 		list_move_tail(&uvma->userptr.repin_link,
664 			       &vm->userptr.repin_list);
665 	}
666 	spin_unlock(&vm->userptr.invalidated_lock);
667 
668 	/* Pin and move to temporary list */
669 	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
670 				 userptr.repin_link) {
671 		err = xe_vma_userptr_pin_pages(uvma);
672 		if (err == -EFAULT) {
673 			list_del_init(&uvma->userptr.repin_link);
674 
675 			/* Wait for pending binds */
676 			xe_vm_lock(vm, false);
677 			dma_resv_wait_timeout(xe_vm_resv(vm),
678 					      DMA_RESV_USAGE_BOOKKEEP,
679 					      false, MAX_SCHEDULE_TIMEOUT);
680 
681 			err = xe_vm_invalidate_vma(&uvma->vma);
682 			xe_vm_unlock(vm);
683 			if (err)
684 				return err;
685 		} else {
686 			if (err < 0)
687 				return err;
688 
689 			list_del_init(&uvma->userptr.repin_link);
690 			list_move_tail(&uvma->vma.combined_links.rebind,
691 				       &vm->rebind_list);
692 		}
693 	}
694 
695 	return 0;
696 }
697 
698 /**
699  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
700  * that need repinning.
701  * @vm: The VM.
702  *
703  * This function does an advisory check for whether the VM has userptrs that
704  * need repinning.
705  *
706  * Return: 0 if there are no indications of userptrs needing repinning,
707  * -EAGAIN if there are.
708  */
709 int xe_vm_userptr_check_repin(struct xe_vm *vm)
710 {
711 	return (list_empty_careful(&vm->userptr.repin_list) &&
712 		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
713 }
714 
715 static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
716 				  u8 tile_mask)
717 {
718 	INIT_LIST_HEAD(&op->link);
719 	op->tile_mask = tile_mask;
720 	op->base.op = DRM_GPUVA_OP_MAP;
721 	op->base.map.va.addr = vma->gpuva.va.addr;
722 	op->base.map.va.range = vma->gpuva.va.range;
723 	op->base.map.gem.obj = vma->gpuva.gem.obj;
724 	op->base.map.gem.offset = vma->gpuva.gem.offset;
725 	op->map.vma = vma;
726 	op->map.immediate = true;
727 	op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
728 	op->map.is_null = xe_vma_is_null(vma);
729 }
730 
731 static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
732 				u8 tile_mask)
733 {
734 	struct xe_vma_op *op;
735 
736 	op = kzalloc(sizeof(*op), GFP_KERNEL);
737 	if (!op)
738 		return -ENOMEM;
739 
740 	xe_vm_populate_rebind(op, vma, tile_mask);
741 	list_add_tail(&op->link, &vops->list);
742 
743 	return 0;
744 }
745 
746 static struct dma_fence *ops_execute(struct xe_vm *vm,
747 				     struct xe_vma_ops *vops);
748 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
749 			    struct xe_exec_queue *q,
750 			    struct xe_sync_entry *syncs, u32 num_syncs);
751 
752 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
753 {
754 	struct dma_fence *fence;
755 	struct xe_vma *vma, *next;
756 	struct xe_vma_ops vops;
757 	struct xe_vma_op *op, *next_op;
758 	int err;
759 
760 	lockdep_assert_held(&vm->lock);
761 	if ((xe_vm_in_lr_mode(vm) && !rebind_worker) ||
762 	    list_empty(&vm->rebind_list))
763 		return 0;
764 
765 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
766 
767 	xe_vm_assert_held(vm);
768 	list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
769 		xe_assert(vm->xe, vma->tile_present);
770 
771 		if (rebind_worker)
772 			trace_xe_vma_rebind_worker(vma);
773 		else
774 			trace_xe_vma_rebind_exec(vma);
775 
776 		err = xe_vm_ops_add_rebind(&vops, vma,
777 					   vma->tile_present);
778 		if (err)
779 			goto free_ops;
780 	}
781 
782 	fence = ops_execute(vm, &vops);
783 	if (IS_ERR(fence)) {
784 		err = PTR_ERR(fence);
785 	} else {
786 		dma_fence_put(fence);
787 		list_for_each_entry_safe(vma, next, &vm->rebind_list,
788 					 combined_links.rebind)
789 			list_del_init(&vma->combined_links.rebind);
790 	}
791 free_ops:
792 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
793 		list_del(&op->link);
794 		kfree(op);
795 	}
796 
797 	return err;
798 }
799 
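/**
 * xe_vma_rebind() - Rebind a single vma
 * @vm: The VM.
 * @vma: The vma to rebind.
 * @tile_mask: Mask of tiles on which to rebind.
 *
 * Builds a single rebind op for @vma and executes it. Requires the VM
 * to be in fault mode and both vm->lock and the VM's resv to be held.
 *
 * Return: The rebind fence on success, ERR_PTR(-errno) on failure.
 */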
800 struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_mask)
801 {
802 	struct dma_fence *fence = NULL;
803 	struct xe_vma_ops vops;
804 	struct xe_vma_op *op, *next_op;
805 	int err;
806 
807 	lockdep_assert_held(&vm->lock);
808 	xe_vm_assert_held(vm);
809 	xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
810 
811 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
812 
813 	err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
814 	if (err)
815 		return ERR_PTR(err);
816 
817 	fence = ops_execute(vm, &vops);
818 
819 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
820 		list_del(&op->link);
821 		kfree(op);
822 	}
823 
824 	return fence;
825 }
826 
827 static void xe_vma_free(struct xe_vma *vma)
828 {
829 	if (xe_vma_is_userptr(vma))
830 		kfree(to_userptr_vma(vma));
831 	else
832 		kfree(vma);
833 }
834 
835 #define VMA_CREATE_FLAG_READ_ONLY	BIT(0)
836 #define VMA_CREATE_FLAG_IS_NULL		BIT(1)
837 #define VMA_CREATE_FLAG_DUMPABLE	BIT(2)
838 
839 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
840 				    struct xe_bo *bo,
841 				    u64 bo_offset_or_userptr,
842 				    u64 start, u64 end,
843 				    u16 pat_index, unsigned int flags)
844 {
845 	struct xe_vma *vma;
846 	struct xe_tile *tile;
847 	u8 id;
848 	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
849 	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
850 	bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
851 
852 	xe_assert(vm->xe, start < end);
853 	xe_assert(vm->xe, end < vm->size);
854 
855 	/*
856 	 * Allocate and ensure that the xe_vma_is_userptr() return value
857 	 * matches what was allocated.
858 	 */
859 	if (!bo && !is_null) {
860 		struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
861 
862 		if (!uvma)
863 			return ERR_PTR(-ENOMEM);
864 
865 		vma = &uvma->vma;
866 	} else {
867 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
868 		if (!vma)
869 			return ERR_PTR(-ENOMEM);
870 
871 		if (is_null)
872 			vma->gpuva.flags |= DRM_GPUVA_SPARSE;
873 		if (bo)
874 			vma->gpuva.gem.obj = &bo->ttm.base;
875 	}
876 
877 	INIT_LIST_HEAD(&vma->combined_links.rebind);
878 
879 	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
880 	vma->gpuva.vm = &vm->gpuvm;
881 	vma->gpuva.va.addr = start;
882 	vma->gpuva.va.range = end - start + 1;
883 	if (read_only)
884 		vma->gpuva.flags |= XE_VMA_READ_ONLY;
885 	if (dumpable)
886 		vma->gpuva.flags |= XE_VMA_DUMPABLE;
887 
888 	for_each_tile(tile, vm->xe, id)
889 		vma->tile_mask |= 0x1 << id;
890 
891 	if (vm->xe->info.has_atomic_enable_pte_bit)
892 		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
893 
894 	vma->pat_index = pat_index;
895 
896 	if (bo) {
897 		struct drm_gpuvm_bo *vm_bo;
898 
899 		xe_bo_assert_held(bo);
900 
901 		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
902 		if (IS_ERR(vm_bo)) {
903 			xe_vma_free(vma);
904 			return ERR_CAST(vm_bo);
905 		}
906 
907 		drm_gpuvm_bo_extobj_add(vm_bo);
908 		drm_gem_object_get(&bo->ttm.base);
909 		vma->gpuva.gem.offset = bo_offset_or_userptr;
910 		drm_gpuva_link(&vma->gpuva, vm_bo);
911 		drm_gpuvm_bo_put(vm_bo);
912 	} else /* userptr or null */ {
913 		if (!is_null) {
914 			struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
915 			u64 size = end - start + 1;
916 			int err;
917 
918 			INIT_LIST_HEAD(&userptr->invalidate_link);
919 			INIT_LIST_HEAD(&userptr->repin_link);
920 			vma->gpuva.gem.offset = bo_offset_or_userptr;
921 
922 			err = mmu_interval_notifier_insert(&userptr->notifier,
923 							   current->mm,
924 							   xe_vma_userptr(vma), size,
925 							   &vma_userptr_notifier_ops);
926 			if (err) {
927 				xe_vma_free(vma);
928 				return ERR_PTR(err);
929 			}
930 
931 			userptr->notifier_seq = LONG_MAX;
932 		}
933 
934 		xe_vm_get(vm);
935 	}
936 
937 	return vma;
938 }
939 
940 static void xe_vma_destroy_late(struct xe_vma *vma)
941 {
942 	struct xe_vm *vm = xe_vma_vm(vma);
943 
944 	if (vma->ufence) {
945 		xe_sync_ufence_put(vma->ufence);
946 		vma->ufence = NULL;
947 	}
948 
949 	if (xe_vma_is_userptr(vma)) {
950 		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
951 		struct xe_userptr *userptr = &uvma->userptr;
952 
953 		if (userptr->sg)
954 			xe_hmm_userptr_free_sg(uvma);
955 
956 		/*
957 		 * Since userptr pages are not pinned, we can't remove
958 		 * the notifier until we're sure the GPU is not accessing
959 		 * them anymore.
960 		 */
961 		mmu_interval_notifier_remove(&userptr->notifier);
962 		xe_vm_put(vm);
963 	} else if (xe_vma_is_null(vma)) {
964 		xe_vm_put(vm);
965 	} else {
966 		xe_bo_put(xe_vma_bo(vma));
967 	}
968 
969 	xe_vma_free(vma);
970 }
971 
972 static void vma_destroy_work_func(struct work_struct *w)
973 {
974 	struct xe_vma *vma =
975 		container_of(w, struct xe_vma, destroy_work);
976 
977 	xe_vma_destroy_late(vma);
978 }
979 
980 static void vma_destroy_cb(struct dma_fence *fence,
981 			   struct dma_fence_cb *cb)
982 {
983 	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
984 
985 	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
986 	queue_work(system_unbound_wq, &vma->destroy_work);
987 }
988 
989 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
990 {
991 	struct xe_vm *vm = xe_vma_vm(vma);
992 
993 	lockdep_assert_held_write(&vm->lock);
994 	xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
995 
996 	if (xe_vma_is_userptr(vma)) {
997 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
998 
999 		spin_lock(&vm->userptr.invalidated_lock);
1000 		list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
1001 		spin_unlock(&vm->userptr.invalidated_lock);
1002 	} else if (!xe_vma_is_null(vma)) {
1003 		xe_bo_assert_held(xe_vma_bo(vma));
1004 
1005 		drm_gpuva_unlink(&vma->gpuva);
1006 	}
1007 
1008 	xe_vm_assert_held(vm);
1009 	if (fence) {
1010 		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1011 						 vma_destroy_cb);
1012 
1013 		if (ret) {
1014 			XE_WARN_ON(ret != -ENOENT);
1015 			xe_vma_destroy_late(vma);
1016 		}
1017 	} else {
1018 		xe_vma_destroy_late(vma);
1019 	}
1020 }
1021 
1022 /**
1023  * xe_vm_lock_vma() - drm_exec utility to lock a vma
1024  * @exec: The drm_exec object we're currently locking for.
1025  * @vma: The vma for which we want to lock the vm resv and any attached
1026  * object's resv.
1027  *
1028  * Return: 0 on success, negative error code on error. In particular
1029  * may return -EDEADLK on WW transaction contention and -EINTR if
1030  * an interruptible wait is terminated by a signal.
1031  */
1032 int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
1033 {
1034 	struct xe_vm *vm = xe_vma_vm(vma);
1035 	struct xe_bo *bo = xe_vma_bo(vma);
1036 	int err;
1037 
1038 	XE_WARN_ON(!vm);
1039 
1040 	err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
1041 	if (!err && bo && !bo->vm)
1042 		err = drm_exec_lock_obj(exec, &bo->ttm.base);
1043 
1044 	return err;
1045 }
1046 
1047 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1048 {
1049 	struct drm_exec exec;
1050 	int err;
1051 
1052 	drm_exec_init(&exec, 0, 0);
1053 	drm_exec_until_all_locked(&exec) {
1054 		err = xe_vm_lock_vma(&exec, vma);
1055 		drm_exec_retry_on_contention(&exec);
1056 		if (XE_WARN_ON(err))
1057 			break;
1058 	}
1059 
1060 	xe_vma_destroy(vma, NULL);
1061 
1062 	drm_exec_fini(&exec);
1063 }
1064 
1065 struct xe_vma *
1066 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1067 {
1068 	struct drm_gpuva *gpuva;
1069 
1070 	lockdep_assert_held(&vm->lock);
1071 
1072 	if (xe_vm_is_closed_or_banned(vm))
1073 		return NULL;
1074 
1075 	xe_assert(vm->xe, start + range <= vm->size);
1076 
1077 	gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1078 
1079 	return gpuva ? gpuva_to_vma(gpuva) : NULL;
1080 }
1081 
1082 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1083 {
1084 	int err;
1085 
1086 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1087 	lockdep_assert_held(&vm->lock);
1088 
1089 	mutex_lock(&vm->snap_mutex);
1090 	err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1091 	mutex_unlock(&vm->snap_mutex);
1092 	XE_WARN_ON(err);	/* Shouldn't be possible */
1093 
1094 	return err;
1095 }
1096 
1097 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1098 {
1099 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1100 	lockdep_assert_held(&vm->lock);
1101 
1102 	mutex_lock(&vm->snap_mutex);
1103 	drm_gpuva_remove(&vma->gpuva);
1104 	mutex_unlock(&vm->snap_mutex);
1105 	if (vm->usm.last_fault_vma == vma)
1106 		vm->usm.last_fault_vma = NULL;
1107 }
1108 
1109 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1110 {
1111 	struct xe_vma_op *op;
1112 
1113 	op = kzalloc(sizeof(*op), GFP_KERNEL);
1114 
1115 	if (unlikely(!op))
1116 		return NULL;
1117 
1118 	return &op->base;
1119 }
1120 
1121 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1122 
1123 static const struct drm_gpuvm_ops gpuvm_ops = {
1124 	.op_alloc = xe_vm_op_alloc,
1125 	.vm_bo_validate = xe_gpuvm_validate,
1126 	.vm_free = xe_vm_free,
1127 };
1128 
1129 static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
1130 {
1131 	u64 pte = 0;
1132 
1133 	if (pat_index & BIT(0))
1134 		pte |= XE_PPGTT_PTE_PAT0;
1135 
1136 	if (pat_index & BIT(1))
1137 		pte |= XE_PPGTT_PTE_PAT1;
1138 
1139 	return pte;
1140 }
1141 
1142 static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
1143 				u32 pt_level)
1144 {
1145 	u64 pte = 0;
1146 
1147 	if (pat_index & BIT(0))
1148 		pte |= XE_PPGTT_PTE_PAT0;
1149 
1150 	if (pat_index & BIT(1))
1151 		pte |= XE_PPGTT_PTE_PAT1;
1152 
1153 	if (pat_index & BIT(2)) {
1154 		if (pt_level)
1155 			pte |= XE_PPGTT_PDE_PDPE_PAT2;
1156 		else
1157 			pte |= XE_PPGTT_PTE_PAT2;
1158 	}
1159 
1160 	if (pat_index & BIT(3))
1161 		pte |= XELPG_PPGTT_PTE_PAT3;
1162 
1163 	if (pat_index & (BIT(4)))
1164 		pte |= XE2_PPGTT_PTE_PAT4;
1165 
1166 	return pte;
1167 }
1168 
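/*
 * Worked example for the encoding above: pat_index 0b01010 (decimal 10)
 * at pt_level 0 sets XE_PPGTT_PTE_PAT1 | XELPG_PPGTT_PTE_PAT3 in the
 * returned bits, while at a higher level a set BIT(2) would select
 * XE_PPGTT_PDE_PDPE_PAT2 instead of XE_PPGTT_PTE_PAT2.
 */
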
1169 static u64 pte_encode_ps(u32 pt_level)
1170 {
1171 	XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
1172 
1173 	if (pt_level == 1)
1174 		return XE_PDE_PS_2M;
1175 	else if (pt_level == 2)
1176 		return XE_PDPE_PS_1G;
1177 
1178 	return 0;
1179 }
1180 
1181 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
1182 			      const u16 pat_index)
1183 {
1184 	struct xe_device *xe = xe_bo_device(bo);
1185 	u64 pde;
1186 
1187 	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1188 	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1189 	pde |= pde_encode_pat_index(xe, pat_index);
1190 
1191 	return pde;
1192 }
1193 
1194 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1195 			      u16 pat_index, u32 pt_level)
1196 {
1197 	struct xe_device *xe = xe_bo_device(bo);
1198 	u64 pte;
1199 
1200 	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1201 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1202 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1203 	pte |= pte_encode_ps(pt_level);
1204 
1205 	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1206 		pte |= XE_PPGTT_PTE_DM;
1207 
1208 	return pte;
1209 }
1210 
1211 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1212 			       u16 pat_index, u32 pt_level)
1213 {
1214 	struct xe_device *xe = xe_vma_vm(vma)->xe;
1215 
1216 	pte |= XE_PAGE_PRESENT;
1217 
1218 	if (likely(!xe_vma_read_only(vma)))
1219 		pte |= XE_PAGE_RW;
1220 
1221 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1222 	pte |= pte_encode_ps(pt_level);
1223 
1224 	if (unlikely(xe_vma_is_null(vma)))
1225 		pte |= XE_PTE_NULL;
1226 
1227 	return pte;
1228 }
1229 
1230 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1231 				u16 pat_index,
1232 				u32 pt_level, bool devmem, u64 flags)
1233 {
1234 	u64 pte;
1235 
1236 	/* Avoid passing random bits directly as flags */
1237 	xe_assert(xe, !(flags & ~XE_PTE_PS64));
1238 
1239 	pte = addr;
1240 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1241 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1242 	pte |= pte_encode_ps(pt_level);
1243 
1244 	if (devmem)
1245 		pte |= XE_PPGTT_PTE_DM;
1246 
1247 	pte |= flags;
1248 
1249 	return pte;
1250 }
1251 
1252 static const struct xe_pt_ops xelp_pt_ops = {
1253 	.pte_encode_bo = xelp_pte_encode_bo,
1254 	.pte_encode_vma = xelp_pte_encode_vma,
1255 	.pte_encode_addr = xelp_pte_encode_addr,
1256 	.pde_encode_bo = xelp_pde_encode_bo,
1257 };
1258 
1259 static void vm_destroy_work_func(struct work_struct *w);
1260 
1261 /**
1262  * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
1263  * given tile and vm.
1264  * @xe: xe device.
1265  * @tile: tile to set up for.
1266  * @vm: vm to set up for.
1267  *
1268  * Sets up a pagetable tree with one page-table per level and a single
1269  * leaf PTE. All pagetable entries point to the single page-table or,
1270  * for MAX_HUGEPTE_LEVEL, a NULL huge PTE returning 0 on read and
1271  * writes become NOPs.
1272  *
1273  * Return: 0 on success, negative error code on error.
1274  */
1275 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1276 				struct xe_vm *vm)
1277 {
1278 	u8 id = tile->id;
1279 	int i;
1280 
1281 	for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
1282 		vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
1283 		if (IS_ERR(vm->scratch_pt[id][i]))
1284 			return PTR_ERR(vm->scratch_pt[id][i]);
1285 
1286 		xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
1287 	}
1288 
1289 	return 0;
1290 }
1291 
1292 static void xe_vm_free_scratch(struct xe_vm *vm)
1293 {
1294 	struct xe_tile *tile;
1295 	u8 id;
1296 
1297 	if (!xe_vm_has_scratch(vm))
1298 		return;
1299 
1300 	for_each_tile(tile, vm->xe, id) {
1301 		u32 i;
1302 
1303 		if (!vm->pt_root[id])
1304 			continue;
1305 
1306 		for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
1307 			if (vm->scratch_pt[id][i])
1308 				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
1309 	}
1310 }
1311 
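/**
 * xe_vm_create() - Create a VM
 * @xe: xe device.
 * @flags: XE_VM_FLAG_* flags selecting the VM mode.
 *
 * Allocates the VM, its per-tile page-table roots, optional scratch
 * page tables and, for non-migration VMs, a per-tile exec queue used
 * for binds.
 *
 * Return: Pointer to the new VM on success, ERR_PTR(-errno) on failure.
 */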
1312 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1313 {
1314 	struct drm_gem_object *vm_resv_obj;
1315 	struct xe_vm *vm;
1316 	int err, number_tiles = 0;
1317 	struct xe_tile *tile;
1318 	u8 id;
1319 
1320 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1321 	if (!vm)
1322 		return ERR_PTR(-ENOMEM);
1323 
1324 	vm->xe = xe;
1325 
1326 	vm->size = 1ull << xe->info.va_bits;
1327 
1328 	vm->flags = flags;
1329 
1330 	init_rwsem(&vm->lock);
1331 	mutex_init(&vm->snap_mutex);
1332 
1333 	INIT_LIST_HEAD(&vm->rebind_list);
1334 
1335 	INIT_LIST_HEAD(&vm->userptr.repin_list);
1336 	INIT_LIST_HEAD(&vm->userptr.invalidated);
1337 	init_rwsem(&vm->userptr.notifier_lock);
1338 	spin_lock_init(&vm->userptr.invalidated_lock);
1339 
1340 	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1341 
1342 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
1343 	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
1344 
1345 	for_each_tile(tile, xe, id)
1346 		xe_range_fence_tree_init(&vm->rftree[id]);
1347 
1348 	vm->pt_ops = &xelp_pt_ops;
1349 
1350 	/*
1351 	 * Long-running workloads are not protected by the scheduler references.
1352 	 * By design, run_job for long-running workloads returns NULL and the
1353 	 * scheduler drops all of its references, hence protecting the VM
1354 	 * in this case is necessary.
1355 	 */
1356 	if (flags & XE_VM_FLAG_LR_MODE)
1357 		xe_pm_runtime_get_noresume(xe);
1358 
1359 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1360 	if (!vm_resv_obj) {
1361 		err = -ENOMEM;
1362 		goto err_no_resv;
1363 	}
1364 
1365 	drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1366 		       vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
1367 
1368 	drm_gem_object_put(vm_resv_obj);
1369 
1370 	err = xe_vm_lock(vm, true);
1371 	if (err)
1372 		goto err_close;
1373 
1374 	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1375 		vm->flags |= XE_VM_FLAG_64K;
1376 
1377 	for_each_tile(tile, xe, id) {
1378 		if (flags & XE_VM_FLAG_MIGRATION &&
1379 		    tile->id != XE_VM_FLAG_TILE_ID(flags))
1380 			continue;
1381 
1382 		vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1383 		if (IS_ERR(vm->pt_root[id])) {
1384 			err = PTR_ERR(vm->pt_root[id]);
1385 			vm->pt_root[id] = NULL;
1386 			goto err_unlock_close;
1387 		}
1388 	}
1389 
1390 	if (xe_vm_has_scratch(vm)) {
1391 		for_each_tile(tile, xe, id) {
1392 			if (!vm->pt_root[id])
1393 				continue;
1394 
1395 			err = xe_vm_create_scratch(xe, tile, vm);
1396 			if (err)
1397 				goto err_unlock_close;
1398 		}
1399 		vm->batch_invalidate_tlb = true;
1400 	}
1401 
1402 	if (vm->flags & XE_VM_FLAG_LR_MODE) {
1403 		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1404 		vm->batch_invalidate_tlb = false;
1405 	}
1406 
1407 	/* Fill pt_root after allocating scratch tables */
1408 	for_each_tile(tile, xe, id) {
1409 		if (!vm->pt_root[id])
1410 			continue;
1411 
1412 		xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1413 	}
1414 	xe_vm_unlock(vm);
1415 
1416 	/* Kernel migration VM shouldn't have a circular loop. */
1417 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
1418 		for_each_tile(tile, xe, id) {
1419 			struct xe_gt *gt = tile->primary_gt;
1420 			struct xe_vm *migrate_vm;
1421 			struct xe_exec_queue *q;
1422 			u32 create_flags = EXEC_QUEUE_FLAG_VM;
1423 
1424 			if (!vm->pt_root[id])
1425 				continue;
1426 
1427 			migrate_vm = xe_migrate_get_vm(tile->migrate);
1428 			q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1429 						       XE_ENGINE_CLASS_COPY,
1430 						       create_flags);
1431 			xe_vm_put(migrate_vm);
1432 			if (IS_ERR(q)) {
1433 				err = PTR_ERR(q);
1434 				goto err_close;
1435 			}
1436 			vm->q[id] = q;
1437 			number_tiles++;
1438 		}
1439 	}
1440 
1441 	if (number_tiles > 1)
1442 		vm->composite_fence_ctx = dma_fence_context_alloc(1);
1443 
1444 	mutex_lock(&xe->usm.lock);
1445 	if (flags & XE_VM_FLAG_FAULT_MODE)
1446 		xe->usm.num_vm_in_fault_mode++;
1447 	else if (!(flags & XE_VM_FLAG_MIGRATION))
1448 		xe->usm.num_vm_in_non_fault_mode++;
1449 	mutex_unlock(&xe->usm.lock);
1450 
1451 	trace_xe_vm_create(vm);
1452 
1453 	return vm;
1454 
1455 err_unlock_close:
1456 	xe_vm_unlock(vm);
1457 err_close:
1458 	xe_vm_close_and_put(vm);
1459 	return ERR_PTR(err);
1460 
1461 err_no_resv:
1462 	mutex_destroy(&vm->snap_mutex);
1463 	for_each_tile(tile, xe, id)
1464 		xe_range_fence_tree_fini(&vm->rftree[id]);
1465 	kfree(vm);
1466 	if (flags & XE_VM_FLAG_LR_MODE)
1467 		xe_pm_runtime_put(xe);
1468 	return ERR_PTR(err);
1469 }
1470 
1471 static void xe_vm_close(struct xe_vm *vm)
1472 {
1473 	down_write(&vm->lock);
1474 	vm->size = 0;
1475 	up_write(&vm->lock);
1476 }
1477 
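/**
 * xe_vm_close_and_put() - Close a VM and drop the creation reference
 * @vm: The VM.
 *
 * Marks the VM as closed, kills and releases the VM's exec queues,
 * destroys all VMAs, scratch tables and page-table roots, and finally
 * drops the reference taken at creation time.
 */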
1478 void xe_vm_close_and_put(struct xe_vm *vm)
1479 {
1480 	LIST_HEAD(contested);
1481 	struct xe_device *xe = vm->xe;
1482 	struct xe_tile *tile;
1483 	struct xe_vma *vma, *next_vma;
1484 	struct drm_gpuva *gpuva, *next;
1485 	u8 id;
1486 
1487 	xe_assert(xe, !vm->preempt.num_exec_queues);
1488 
1489 	xe_vm_close(vm);
1490 	if (xe_vm_in_preempt_fence_mode(vm))
1491 		flush_work(&vm->preempt.rebind_work);
1492 
1493 	down_write(&vm->lock);
1494 	for_each_tile(tile, xe, id) {
1495 		if (vm->q[id])
1496 			xe_exec_queue_last_fence_put(vm->q[id], vm);
1497 	}
1498 	up_write(&vm->lock);
1499 
1500 	for_each_tile(tile, xe, id) {
1501 		if (vm->q[id]) {
1502 			xe_exec_queue_kill(vm->q[id]);
1503 			xe_exec_queue_put(vm->q[id]);
1504 			vm->q[id] = NULL;
1505 		}
1506 	}
1507 
1508 	down_write(&vm->lock);
1509 	xe_vm_lock(vm, false);
1510 	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1511 		vma = gpuva_to_vma(gpuva);
1512 
1513 		if (xe_vma_has_no_bo(vma)) {
1514 			down_read(&vm->userptr.notifier_lock);
1515 			vma->gpuva.flags |= XE_VMA_DESTROYED;
1516 			up_read(&vm->userptr.notifier_lock);
1517 		}
1518 
1519 		xe_vm_remove_vma(vm, vma);
1520 
1521 		/* Easy case: remove and destroy the VMA immediately */
1522 		if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1523 			list_del_init(&vma->combined_links.rebind);
1524 			xe_vma_destroy(vma, NULL);
1525 			continue;
1526 		}
1527 
1528 		list_move_tail(&vma->combined_links.destroy, &contested);
1529 		vma->gpuva.flags |= XE_VMA_DESTROYED;
1530 	}
1531 
1532 	/*
1533 	 * All vm operations will add shared fences to resv.
1534 	 * The only exception is eviction for a shared object,
1535 	 * but even so, the unbind when evicted would still
1536 	 * install a fence to resv. Hence it's safe to
1537 	 * destroy the pagetables immediately.
1538 	 */
1539 	xe_vm_free_scratch(vm);
1540 
1541 	for_each_tile(tile, xe, id) {
1542 		if (vm->pt_root[id]) {
1543 			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1544 			vm->pt_root[id] = NULL;
1545 		}
1546 	}
1547 	xe_vm_unlock(vm);
1548 
1549 	/*
1550 	 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1551 	 * Since we hold a refcount to the bo, we can remove and free
1552 	 * the members safely without locking.
1553 	 */
1554 	list_for_each_entry_safe(vma, next_vma, &contested,
1555 				 combined_links.destroy) {
1556 		list_del_init(&vma->combined_links.destroy);
1557 		xe_vma_destroy_unlocked(vma);
1558 	}
1559 
1560 	up_write(&vm->lock);
1561 
1562 	mutex_lock(&xe->usm.lock);
1563 	if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1564 		xe->usm.num_vm_in_fault_mode--;
1565 	else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1566 		xe->usm.num_vm_in_non_fault_mode--;
1567 
1568 	if (vm->usm.asid) {
1569 		void *lookup;
1570 
1571 		xe_assert(xe, xe->info.has_asid);
1572 		xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
1573 
1574 		lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1575 		xe_assert(xe, lookup == vm);
1576 	}
1577 	mutex_unlock(&xe->usm.lock);
1578 
1579 	for_each_tile(tile, xe, id)
1580 		xe_range_fence_tree_fini(&vm->rftree[id]);
1581 
1582 	xe_vm_put(vm);
1583 }
1584 
1585 static void vm_destroy_work_func(struct work_struct *w)
1586 {
1587 	struct xe_vm *vm =
1588 		container_of(w, struct xe_vm, destroy_work);
1589 	struct xe_device *xe = vm->xe;
1590 	struct xe_tile *tile;
1591 	u8 id;
1592 
1593 	/* xe_vm_close_and_put was not called? */
1594 	xe_assert(xe, !vm->size);
1595 
1596 	if (xe_vm_in_preempt_fence_mode(vm))
1597 		flush_work(&vm->preempt.rebind_work);
1598 
1599 	mutex_destroy(&vm->snap_mutex);
1600 
1601 	if (vm->flags & XE_VM_FLAG_LR_MODE)
1602 		xe_pm_runtime_put(xe);
1603 
1604 	for_each_tile(tile, xe, id)
1605 		XE_WARN_ON(vm->pt_root[id]);
1606 
1607 	trace_xe_vm_free(vm);
1608 	kfree(vm);
1609 }
1610 
1611 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1612 {
1613 	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1614 
1615 	/* To destroy the VM we need to be able to sleep */
1616 	queue_work(system_unbound_wq, &vm->destroy_work);
1617 }
1618 
1619 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1620 {
1621 	struct xe_vm *vm;
1622 
1623 	mutex_lock(&xef->vm.lock);
1624 	vm = xa_load(&xef->vm.xa, id);
1625 	if (vm)
1626 		xe_vm_get(vm);
1627 	mutex_unlock(&xef->vm.lock);
1628 
1629 	return vm;
1630 }
1631 
1632 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1633 {
1634 	return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
1635 					 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
1636 }
1637 
1638 static struct xe_exec_queue *
1639 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
1640 {
1641 	return q ? q : vm->q[0];
1642 }
1643 
1644 static struct dma_fence *
1645 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1646 		 struct xe_sync_entry *syncs, u32 num_syncs,
1647 		 bool first_op, bool last_op)
1648 {
1649 	struct xe_vm *vm = xe_vma_vm(vma);
1650 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1651 	struct xe_tile *tile;
1652 	struct dma_fence *fence = NULL;
1653 	struct dma_fence **fences = NULL;
1654 	struct dma_fence_array *cf = NULL;
1655 	int cur_fence = 0;
1656 	int number_tiles = hweight8(vma->tile_present);
1657 	int err;
1658 	u8 id;
1659 
1660 	trace_xe_vma_unbind(vma);
1661 
1662 	if (number_tiles > 1) {
1663 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1664 				       GFP_KERNEL);
1665 		if (!fences)
1666 			return ERR_PTR(-ENOMEM);
1667 	}
1668 
1669 	for_each_tile(tile, vm->xe, id) {
1670 		if (!(vma->tile_present & BIT(id)))
1671 			goto next;
1672 
1673 		fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
1674 					   first_op ? syncs : NULL,
1675 					   first_op ? num_syncs : 0);
1676 		if (IS_ERR(fence)) {
1677 			err = PTR_ERR(fence);
1678 			goto err_fences;
1679 		}
1680 
1681 		if (fences)
1682 			fences[cur_fence++] = fence;
1683 
1684 next:
1685 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1686 			q = list_next_entry(q, multi_gt_list);
1687 	}
1688 
1689 	if (fences) {
1690 		cf = dma_fence_array_create(number_tiles, fences,
1691 					    vm->composite_fence_ctx,
1692 					    vm->composite_fence_seqno++,
1693 					    false);
1694 		if (!cf) {
1695 			--vm->composite_fence_seqno;
1696 			err = -ENOMEM;
1697 			goto err_fences;
1698 		}
1699 	}
1700 
1701 	fence = cf ? &cf->base : !fence ?
1702 		xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
1703 
1704 	return fence;
1705 
1706 err_fences:
1707 	if (fences) {
1708 		while (cur_fence)
1709 			dma_fence_put(fences[--cur_fence]);
1710 		kfree(fences);
1711 	}
1712 
1713 	return ERR_PTR(err);
1714 }
1715 
1716 static struct dma_fence *
1717 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1718 	       struct xe_sync_entry *syncs, u32 num_syncs,
1719 	       u8 tile_mask, bool first_op, bool last_op)
1720 {
1721 	struct xe_tile *tile;
1722 	struct dma_fence *fence;
1723 	struct dma_fence **fences = NULL;
1724 	struct dma_fence_array *cf = NULL;
1725 	struct xe_vm *vm = xe_vma_vm(vma);
1726 	int cur_fence = 0;
1727 	int number_tiles = hweight8(tile_mask);
1728 	int err;
1729 	u8 id;
1730 
1731 	trace_xe_vma_bind(vma);
1732 
1733 	if (number_tiles > 1) {
1734 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1735 				       GFP_KERNEL);
1736 		if (!fences)
1737 			return ERR_PTR(-ENOMEM);
1738 	}
1739 
1740 	for_each_tile(tile, vm->xe, id) {
1741 		if (!(tile_mask & BIT(id)))
1742 			goto next;
1743 
1744 		fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
1745 					 first_op ? syncs : NULL,
1746 					 first_op ? num_syncs : 0,
1747 					 vma->tile_present & BIT(id));
1748 		if (IS_ERR(fence)) {
1749 			err = PTR_ERR(fence);
1750 			goto err_fences;
1751 		}
1752 
1753 		if (fences)
1754 			fences[cur_fence++] = fence;
1755 
1756 next:
1757 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1758 			q = list_next_entry(q, multi_gt_list);
1759 	}
1760 
1761 	if (fences) {
1762 		cf = dma_fence_array_create(number_tiles, fences,
1763 					    vm->composite_fence_ctx,
1764 					    vm->composite_fence_seqno++,
1765 					    false);
1766 		if (!cf) {
1767 			--vm->composite_fence_seqno;
1768 			err = -ENOMEM;
1769 			goto err_fences;
1770 		}
1771 	}
1772 
1773 	return cf ? &cf->base : fence;
1774 
1775 err_fences:
1776 	if (fences) {
1777 		while (cur_fence)
1778 			dma_fence_put(fences[--cur_fence]);
1779 		kfree(fences);
1780 	}
1781 
1782 	return ERR_PTR(err);
1783 }
1784 
1785 static struct xe_user_fence *
1786 find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
1787 {
1788 	unsigned int i;
1789 
1790 	for (i = 0; i < num_syncs; i++) {
1791 		struct xe_sync_entry *e = &syncs[i];
1792 
1793 		if (xe_sync_is_ufence(e))
1794 			return xe_sync_ufence_get(e);
1795 	}
1796 
1797 	return NULL;
1798 }
1799 
1800 static struct dma_fence *
1801 xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
1802 	   struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs,
1803 	   u8 tile_mask, bool immediate, bool first_op, bool last_op)
1804 {
1805 	struct dma_fence *fence;
1806 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1807 
1808 	xe_vm_assert_held(vm);
1809 	xe_bo_assert_held(bo);
1810 
1811 	if (immediate) {
1812 		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, tile_mask,
1813 				       first_op, last_op);
1814 		if (IS_ERR(fence))
1815 			return fence;
1816 	} else {
1817 		xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1818 
1819 		fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1820 	}
1821 
1822 	return fence;
1823 }
1824 
1825 static struct dma_fence *
1826 xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1827 	     struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1828 	     u32 num_syncs, bool first_op, bool last_op)
1829 {
1830 	struct dma_fence *fence;
1831 
1832 	xe_vm_assert_held(vm);
1833 	xe_bo_assert_held(xe_vma_bo(vma));
1834 
1835 	fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
1836 	if (IS_ERR(fence))
1837 		return fence;
1838 
1839 	return fence;
1840 }
1841 
1842 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
1843 				    DRM_XE_VM_CREATE_FLAG_LR_MODE | \
1844 				    DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1845 
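/*
 * A minimal userspace sketch of the create ioctl handled below,
 * assuming an open Xe DRM fd and the drmIoctl() helper from libdrm:
 *
 *	struct drm_xe_vm_create create = {
 *		.flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
 *	};
 *
 *	if (!drmIoctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
 *		vm_id = create.vm_id;
 */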
1846 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1847 		       struct drm_file *file)
1848 {
1849 	struct xe_device *xe = to_xe_device(dev);
1850 	struct xe_file *xef = to_xe_file(file);
1851 	struct drm_xe_vm_create *args = data;
1852 	struct xe_tile *tile;
1853 	struct xe_vm *vm;
1854 	u32 id, asid;
1855 	int err;
1856 	u32 flags = 0;
1857 
1858 	if (XE_IOCTL_DBG(xe, args->extensions))
1859 		return -EINVAL;
1860 
1861 	if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1862 		args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
1863 
1864 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1865 			 !xe->info.has_usm))
1866 		return -EINVAL;
1867 
1868 	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1869 		return -EINVAL;
1870 
1871 	if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1872 		return -EINVAL;
1873 
1874 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
1875 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1876 		return -EINVAL;
1877 
1878 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
1879 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1880 		return -EINVAL;
1881 
1882 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1883 			 xe_device_in_non_fault_mode(xe)))
1884 		return -EINVAL;
1885 
1886 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
1887 			 xe_device_in_fault_mode(xe)))
1888 		return -EINVAL;
1889 
1890 	if (XE_IOCTL_DBG(xe, args->extensions))
1891 		return -EINVAL;
1892 
1893 	if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
1894 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
1895 	if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
1896 		flags |= XE_VM_FLAG_LR_MODE;
1897 	if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1898 		flags |= XE_VM_FLAG_FAULT_MODE;
1899 
1900 	vm = xe_vm_create(xe, flags);
1901 	if (IS_ERR(vm))
1902 		return PTR_ERR(vm);
1903 
1904 	mutex_lock(&xef->vm.lock);
1905 	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1906 	mutex_unlock(&xef->vm.lock);
1907 	if (err)
1908 		goto err_close_and_put;
1909 
1910 	if (xe->info.has_asid) {
1911 		mutex_lock(&xe->usm.lock);
1912 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1913 				      XA_LIMIT(1, XE_MAX_ASID - 1),
1914 				      &xe->usm.next_asid, GFP_KERNEL);
1915 		mutex_unlock(&xe->usm.lock);
1916 		if (err < 0)
1917 			goto err_free_id;
1918 
1919 		vm->usm.asid = asid;
1920 	}
1921 
1922 	args->vm_id = id;
1923 	vm->xef = xef;
1924 
1925 	/* Record BO memory for VM pagetable created against client */
1926 	for_each_tile(tile, xe, id)
1927 		if (vm->pt_root[id])
1928 			xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
1929 
1930 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1931 	/* Warning: Security issue - never enable by default */
1932 	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
1933 #endif
1934 
1935 	return 0;
1936 
1937 err_free_id:
1938 	mutex_lock(&xef->vm.lock);
1939 	xa_erase(&xef->vm.xa, id);
1940 	mutex_unlock(&xef->vm.lock);
1941 err_close_and_put:
1942 	xe_vm_close_and_put(vm);
1943 
1944 	return err;
1945 }
1946 
1947 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1948 			struct drm_file *file)
1949 {
1950 	struct xe_device *xe = to_xe_device(dev);
1951 	struct xe_file *xef = to_xe_file(file);
1952 	struct drm_xe_vm_destroy *args = data;
1953 	struct xe_vm *vm;
1954 	int err = 0;
1955 
1956 	if (XE_IOCTL_DBG(xe, args->pad) ||
1957 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1958 		return -EINVAL;
1959 
1960 	mutex_lock(&xef->vm.lock);
1961 	vm = xa_load(&xef->vm.xa, args->vm_id);
1962 	if (XE_IOCTL_DBG(xe, !vm))
1963 		err = -ENOENT;
1964 	else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
1965 		err = -EBUSY;
1966 	else
1967 		xa_erase(&xef->vm.xa, args->vm_id);
1968 	mutex_unlock(&xef->vm.lock);
1969 
1970 	if (!err)
1971 		xe_vm_close_and_put(vm);
1972 
1973 	return err;
1974 }
1975 
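/*
 * Maps the uAPI prefetch region instance to a TTM memory type:
 * 0 selects system memory (TT), 1 and 2 the corresponding VRAM instance.
 */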
1976 static const u32 region_to_mem_type[] = {
1977 	XE_PL_TT,
1978 	XE_PL_VRAM0,
1979 	XE_PL_VRAM1,
1980 };
1981 
1982 static struct dma_fence *
1983 xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
1984 	       struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1985 	       u32 num_syncs, bool first_op, bool last_op)
1986 {
1987 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1988 
1989 	if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
1990 		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
1991 				  vma->tile_mask, true, first_op, last_op);
1992 	} else {
1993 		return xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1994 	}
1995 }
1996 
1997 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
1998 			     bool post_commit)
1999 {
2000 	down_read(&vm->userptr.notifier_lock);
2001 	vma->gpuva.flags |= XE_VMA_DESTROYED;
2002 	up_read(&vm->userptr.notifier_lock);
2003 	if (post_commit)
2004 		xe_vm_remove_vma(vm, vma);
2005 }
2006 
2007 #undef ULL
2008 #define ULL	unsigned long long
2009 
2010 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2011 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2012 {
2013 	struct xe_vma *vma;
2014 
2015 	switch (op->op) {
2016 	case DRM_GPUVA_OP_MAP:
2017 		vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2018 		       (ULL)op->map.va.addr, (ULL)op->map.va.range);
2019 		break;
2020 	case DRM_GPUVA_OP_REMAP:
2021 		vma = gpuva_to_vma(op->remap.unmap->va);
2022 		vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2023 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2024 		       op->remap.unmap->keep ? 1 : 0);
2025 		if (op->remap.prev)
2026 			vm_dbg(&xe->drm,
2027 			       "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2028 			       (ULL)op->remap.prev->va.addr,
2029 			       (ULL)op->remap.prev->va.range);
2030 		if (op->remap.next)
2031 			vm_dbg(&xe->drm,
2032 			       "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2033 			       (ULL)op->remap.next->va.addr,
2034 			       (ULL)op->remap.next->va.range);
2035 		break;
2036 	case DRM_GPUVA_OP_UNMAP:
2037 		vma = gpuva_to_vma(op->unmap.va);
2038 		vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2039 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2040 		       op->unmap.keep ? 1 : 0);
2041 		break;
2042 	case DRM_GPUVA_OP_PREFETCH:
2043 		vma = gpuva_to_vma(op->prefetch.va);
2044 		vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2045 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2046 		break;
2047 	default:
2048 		drm_warn(&xe->drm, "NOT POSSIBLE");
2049 	}
2050 }
2051 #else
2052 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2053 {
2054 }
2055 #endif
2056 
2057 /*
2058  * Create the operations list from the IOCTL arguments and set up the operation
2059  * fields so the parse and commit steps are decoupled from them. This step can fail.
2060  */
2061 static struct drm_gpuva_ops *
2062 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2063 			 u64 bo_offset_or_userptr, u64 addr, u64 range,
2064 			 u32 operation, u32 flags,
2065 			 u32 prefetch_region, u16 pat_index)
2066 {
2067 	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2068 	struct drm_gpuva_ops *ops;
2069 	struct drm_gpuva_op *__op;
2070 	struct drm_gpuvm_bo *vm_bo;
2071 	int err;
2072 
2073 	lockdep_assert_held_write(&vm->lock);
2074 
2075 	vm_dbg(&vm->xe->drm,
2076 	       "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2077 	       operation, (ULL)addr, (ULL)range,
2078 	       (ULL)bo_offset_or_userptr);
2079 
2080 	switch (operation) {
2081 	case DRM_XE_VM_BIND_OP_MAP:
2082 	case DRM_XE_VM_BIND_OP_MAP_USERPTR:
2083 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2084 						  obj, bo_offset_or_userptr);
2085 		break;
2086 	case DRM_XE_VM_BIND_OP_UNMAP:
2087 		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2088 		break;
2089 	case DRM_XE_VM_BIND_OP_PREFETCH:
2090 		ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2091 		break;
2092 	case DRM_XE_VM_BIND_OP_UNMAP_ALL:
2093 		xe_assert(vm->xe, bo);
2094 
2095 		err = xe_bo_lock(bo, true);
2096 		if (err)
2097 			return ERR_PTR(err);
2098 
2099 		vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
2100 		if (IS_ERR(vm_bo)) {
2101 			xe_bo_unlock(bo);
2102 			return ERR_CAST(vm_bo);
2103 		}
2104 
2105 		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2106 		drm_gpuvm_bo_put(vm_bo);
2107 		xe_bo_unlock(bo);
2108 		break;
2109 	default:
2110 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2111 		ops = ERR_PTR(-EINVAL);
2112 	}
2113 	if (IS_ERR(ops))
2114 		return ops;
2115 
2116 	drm_gpuva_for_each_op(__op, ops) {
2117 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2118 
2119 		if (__op->op == DRM_GPUVA_OP_MAP) {
2120 			op->map.immediate =
2121 				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
2122 			op->map.read_only =
2123 				flags & DRM_XE_VM_BIND_FLAG_READONLY;
2124 			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2125 			op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
2126 			op->map.pat_index = pat_index;
2127 		} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
2128 			op->prefetch.region = prefetch_region;
2129 		}
2130 
2131 		print_op(vm->xe, __op);
2132 	}
2133 
2134 	return ops;
2135 }
2136 
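/*
 * Create an xe_vma for a GPUVA MAP op. For BO-backed mappings the BO (and,
 * for BOs not private to this VM, the VM's dma-resv) is locked via drm_exec
 * around xe_vma_create(). Userptr VMAs have their pages pinned immediately;
 * BOs not private to this VM also get the VM's preempt fences added.
 */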
2137 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2138 			      u16 pat_index, unsigned int flags)
2139 {
2140 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2141 	struct drm_exec exec;
2142 	struct xe_vma *vma;
2143 	int err;
2144 
2145 	lockdep_assert_held_write(&vm->lock);
2146 
2147 	if (bo) {
2148 		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2149 		drm_exec_until_all_locked(&exec) {
2150 			err = 0;
2151 			if (!bo->vm) {
2152 				err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
2153 				drm_exec_retry_on_contention(&exec);
2154 			}
2155 			if (!err) {
2156 				err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2157 				drm_exec_retry_on_contention(&exec);
2158 			}
2159 			if (err) {
2160 				drm_exec_fini(&exec);
2161 				return ERR_PTR(err);
2162 			}
2163 		}
2164 	}
2165 	vma = xe_vma_create(vm, bo, op->gem.offset,
2166 			    op->va.addr, op->va.addr +
2167 			    op->va.range - 1, pat_index, flags);
2168 	if (bo)
2169 		drm_exec_fini(&exec);
	if (IS_ERR(vma))
		return vma;

2171 	if (xe_vma_is_userptr(vma)) {
2172 		err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2173 		if (err) {
2174 			prep_vma_destroy(vm, vma, false);
2175 			xe_vma_destroy_unlocked(vma);
2176 			return ERR_PTR(err);
2177 		}
2178 	} else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
2179 		err = add_preempt_fences(vm, bo);
2180 		if (err) {
2181 			prep_vma_destroy(vm, vma, false);
2182 			xe_vma_destroy_unlocked(vma);
2183 			return ERR_PTR(err);
2184 		}
2185 	}
2186 
2187 	return vma;
2188 }
2189 
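/*
 * Largest page size used to map this VMA, derived from the XE_VMA_PTE_* flags;
 * defaults to 1G (the maximum) when none of the flags has been set yet.
 */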
2190 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2191 {
2192 	if (vma->gpuva.flags & XE_VMA_PTE_1G)
2193 		return SZ_1G;
2194 	else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
2195 		return SZ_2M;
2196 	else if (vma->gpuva.flags & XE_VMA_PTE_64K)
2197 		return SZ_64K;
2198 	else if (vma->gpuva.flags & XE_VMA_PTE_4K)
2199 		return SZ_4K;
2200 
2201 	return SZ_1G;	/* Uninitialized, use max size */
2202 }
2203 
2204 static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2205 {
2206 	switch (size) {
2207 	case SZ_1G:
2208 		vma->gpuva.flags |= XE_VMA_PTE_1G;
2209 		break;
2210 	case SZ_2M:
2211 		vma->gpuva.flags |= XE_VMA_PTE_2M;
2212 		break;
2213 	case SZ_64K:
2214 		vma->gpuva.flags |= XE_VMA_PTE_64K;
2215 		break;
2216 	case SZ_4K:
2217 		vma->gpuva.flags |= XE_VMA_PTE_4K;
2218 		break;
2219 	}
2220 }
2221 
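/*
 * Commit a parsed operation to the VM's VMA tree: insert newly created VMAs
 * and mark unmapped VMAs as destroyed. For REMAPs, prev/next pieces that can
 * keep their existing binding (skip_prev/skip_next) inherit the old VMA's
 * tile_present and are not rebound. The *_COMMITTED flags set here are
 * consumed by xe_vma_op_unwind() on failure.
 */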
2222 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2223 {
2224 	int err = 0;
2225 
2226 	lockdep_assert_held_write(&vm->lock);
2227 
2228 	switch (op->base.op) {
2229 	case DRM_GPUVA_OP_MAP:
2230 		err |= xe_vm_insert_vma(vm, op->map.vma);
2231 		if (!err)
2232 			op->flags |= XE_VMA_OP_COMMITTED;
2233 		break;
2234 	case DRM_GPUVA_OP_REMAP:
2235 	{
2236 		u8 tile_present =
2237 			gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2238 
2239 		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2240 				 true);
2241 		op->flags |= XE_VMA_OP_COMMITTED;
2242 
2243 		if (op->remap.prev) {
2244 			err |= xe_vm_insert_vma(vm, op->remap.prev);
2245 			if (!err)
2246 				op->flags |= XE_VMA_OP_PREV_COMMITTED;
2247 			if (!err && op->remap.skip_prev) {
2248 				op->remap.prev->tile_present =
2249 					tile_present;
2250 				op->remap.prev = NULL;
2251 			}
2252 		}
2253 		if (op->remap.next) {
2254 			err |= xe_vm_insert_vma(vm, op->remap.next);
2255 			if (!err)
2256 				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2257 			if (!err && op->remap.skip_next) {
2258 				op->remap.next->tile_present =
2259 					tile_present;
2260 				op->remap.next = NULL;
2261 			}
2262 		}
2263 
2264 		/* Adjust for partial unbind after removing VMA from VM */
2265 		if (!err) {
2266 			op->base.remap.unmap->va->va.addr = op->remap.start;
2267 			op->base.remap.unmap->va->va.range = op->remap.range;
2268 		}
2269 		break;
2270 	}
2271 	case DRM_GPUVA_OP_UNMAP:
2272 		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2273 		op->flags |= XE_VMA_OP_COMMITTED;
2274 		break;
2275 	case DRM_GPUVA_OP_PREFETCH:
2276 		op->flags |= XE_VMA_OP_COMMITTED;
2277 		break;
2278 	default:
2279 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2280 	}
2281 
2282 	return err;
2283 }
2284 
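/*
 * Turn a drm_gpuva_ops list into xe_vma_ops: allocate VMAs for MAP ops and for
 * the prev/next pieces of REMAPs (inheriting the old VMA's flags), decide
 * which REMAP pieces can skip a rebind, and commit each op to the VMA tree as
 * we go. Syncs are attached to the first op and, when @last is set, also to
 * the final op.
 */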
2286 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2287 				   struct drm_gpuva_ops *ops,
2288 				   struct xe_sync_entry *syncs, u32 num_syncs,
2289 				   struct xe_vma_ops *vops, bool last)
2290 {
2291 	struct xe_device *xe = vm->xe;
2292 	struct xe_vma_op *last_op = NULL;
2293 	struct drm_gpuva_op *__op;
2294 	struct xe_tile *tile;
2295 	u8 id, tile_mask = 0;
2296 	int err = 0;
2297 
2298 	lockdep_assert_held_write(&vm->lock);
2299 
2300 	for_each_tile(tile, vm->xe, id)
2301 		tile_mask |= 0x1 << id;
2302 
2303 	drm_gpuva_for_each_op(__op, ops) {
2304 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2305 		struct xe_vma *vma;
2306 		bool first = list_empty(&vops->list);
2307 		unsigned int flags = 0;
2308 
2309 		INIT_LIST_HEAD(&op->link);
2310 		list_add_tail(&op->link, &vops->list);
2311 
2312 		if (first) {
2313 			op->flags |= XE_VMA_OP_FIRST;
2314 			op->num_syncs = num_syncs;
2315 			op->syncs = syncs;
2316 		}
2317 
2318 		op->q = q;
2319 		op->tile_mask = tile_mask;
2320 
2321 		switch (op->base.op) {
2322 		case DRM_GPUVA_OP_MAP:
2323 		{
2324 			flags |= op->map.read_only ?
2325 				VMA_CREATE_FLAG_READ_ONLY : 0;
2326 			flags |= op->map.is_null ?
2327 				VMA_CREATE_FLAG_IS_NULL : 0;
2328 			flags |= op->map.dumpable ?
2329 				VMA_CREATE_FLAG_DUMPABLE : 0;
2330 
2331 			vma = new_vma(vm, &op->base.map, op->map.pat_index,
2332 				      flags);
2333 			if (IS_ERR(vma))
2334 				return PTR_ERR(vma);
2335 
2336 			op->map.vma = vma;
2337 			break;
2338 		}
2339 		case DRM_GPUVA_OP_REMAP:
2340 		{
2341 			struct xe_vma *old =
2342 				gpuva_to_vma(op->base.remap.unmap->va);
2343 
2344 			op->remap.start = xe_vma_start(old);
2345 			op->remap.range = xe_vma_size(old);
2346 
2347 			if (op->base.remap.prev) {
2348 				flags |= op->base.remap.unmap->va->flags &
2349 					XE_VMA_READ_ONLY ?
2350 					VMA_CREATE_FLAG_READ_ONLY : 0;
2351 				flags |= op->base.remap.unmap->va->flags &
2352 					DRM_GPUVA_SPARSE ?
2353 					VMA_CREATE_FLAG_IS_NULL : 0;
2354 				flags |= op->base.remap.unmap->va->flags &
2355 					XE_VMA_DUMPABLE ?
2356 					VMA_CREATE_FLAG_DUMPABLE : 0;
2357 
2358 				vma = new_vma(vm, op->base.remap.prev,
2359 					      old->pat_index, flags);
2360 				if (IS_ERR(vma))
2361 					return PTR_ERR(vma);
2362 
2363 				op->remap.prev = vma;
2364 
2365 				/*
2366 				 * Userptr creates a new SG mapping so
2367 				 * we must also rebind.
2368 				 */
2369 				op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2370 					IS_ALIGNED(xe_vma_end(vma),
2371 						   xe_vma_max_pte_size(old));
2372 				if (op->remap.skip_prev) {
2373 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2374 					op->remap.range -=
2375 						xe_vma_end(vma) -
2376 						xe_vma_start(old);
2377 					op->remap.start = xe_vma_end(vma);
2378 					vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
2379 					       (ULL)op->remap.start,
2380 					       (ULL)op->remap.range);
2381 				}
2382 			}
2383 
2384 			if (op->base.remap.next) {
2385 				flags |= op->base.remap.unmap->va->flags &
2386 					XE_VMA_READ_ONLY ?
2387 					VMA_CREATE_FLAG_READ_ONLY : 0;
2388 				flags |= op->base.remap.unmap->va->flags &
2389 					DRM_GPUVA_SPARSE ?
2390 					VMA_CREATE_FLAG_IS_NULL : 0;
2391 				flags |= op->base.remap.unmap->va->flags &
2392 					XE_VMA_DUMPABLE ?
2393 					VMA_CREATE_FLAG_DUMPABLE : 0;
2394 
2395 				vma = new_vma(vm, op->base.remap.next,
2396 					      old->pat_index, flags);
2397 				if (IS_ERR(vma))
2398 					return PTR_ERR(vma);
2399 
2400 				op->remap.next = vma;
2401 
2402 				/*
2403 				 * Userptr creates a new SG mapping so
2404 				 * we must also rebind.
2405 				 */
2406 				op->remap.skip_next = !xe_vma_is_userptr(old) &&
2407 					IS_ALIGNED(xe_vma_start(vma),
2408 						   xe_vma_max_pte_size(old));
2409 				if (op->remap.skip_next) {
2410 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2411 					op->remap.range -=
2412 						xe_vma_end(old) -
2413 						xe_vma_start(vma);
2414 					vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
2415 					       (ULL)op->remap.start,
2416 					       (ULL)op->remap.range);
2417 				}
2418 			}
2419 			break;
2420 		}
2421 		case DRM_GPUVA_OP_UNMAP:
2422 		case DRM_GPUVA_OP_PREFETCH:
2423 			/* Nothing to do */
2424 			break;
2425 		default:
2426 			drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2427 		}
2428 
2429 		last_op = op;
2430 
2431 		err = xe_vma_op_commit(vm, op);
2432 		if (err)
2433 			return err;
2434 	}
2435 
2436 	/* FIXME: Unhandled corner case */
2437 	XE_WARN_ON(!last_op && last && !list_empty(&vops->list));
2438 
2439 	if (!last_op)
2440 		return 0;
2441 
2442 	if (last) {
2443 		last_op->flags |= XE_VMA_OP_LAST;
2444 		last_op->num_syncs = num_syncs;
2445 		last_op->syncs = syncs;
2446 	}
2447 
2448 	return 0;
2449 }
2450 
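/*
 * Issue the GPU page-table updates for a single committed op and return the
 * fence of the last job. A REMAP is executed as an unbind of the old VMA
 * followed by rebinds of the prev/next pieces that remain mapped.
 */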
2451 static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
2452 				    struct xe_vma_op *op)
2453 {
2454 	struct dma_fence *fence = NULL;
2455 
2456 	lockdep_assert_held(&vm->lock);
2457 
2458 	xe_vm_assert_held(vm);
2459 	xe_bo_assert_held(xe_vma_bo(vma));
2460 
2461 	switch (op->base.op) {
2462 	case DRM_GPUVA_OP_MAP:
2463 		fence = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2464 				   op->syncs, op->num_syncs,
2465 				   op->tile_mask,
2466 				   op->map.immediate || !xe_vm_in_fault_mode(vm),
2467 				   op->flags & XE_VMA_OP_FIRST,
2468 				   op->flags & XE_VMA_OP_LAST);
2469 		break;
2470 	case DRM_GPUVA_OP_REMAP:
2471 	{
2472 		bool prev = !!op->remap.prev;
2473 		bool next = !!op->remap.next;
2474 
2475 		if (!op->remap.unmap_done) {
2476 			if (prev || next)
2477 				vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2478 			fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
2479 					     op->num_syncs,
2480 					     op->flags & XE_VMA_OP_FIRST,
2481 					     op->flags & XE_VMA_OP_LAST &&
2482 					     !prev && !next);
2483 			if (IS_ERR(fence))
2484 				break;
2485 			op->remap.unmap_done = true;
2486 		}
2487 
2488 		if (prev) {
2489 			op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2490 			dma_fence_put(fence);
2491 			fence = xe_vm_bind(vm, op->remap.prev, op->q,
2492 					   xe_vma_bo(op->remap.prev), op->syncs,
2493 					   op->num_syncs,
2494 					   op->remap.prev->tile_mask, true,
2495 					   false,
2496 					   op->flags & XE_VMA_OP_LAST && !next);
2497 			op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2498 			if (IS_ERR(fence))
2499 				break;
2500 			op->remap.prev = NULL;
2501 		}
2502 
2503 		if (next) {
2504 			op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2505 			dma_fence_put(fence);
2506 			fence = xe_vm_bind(vm, op->remap.next, op->q,
2507 					   xe_vma_bo(op->remap.next),
2508 					   op->syncs, op->num_syncs,
2509 					   op->remap.next->tile_mask, true,
2510 					   false, op->flags & XE_VMA_OP_LAST);
2511 			op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2512 			if (IS_ERR(fence))
2513 				break;
2514 			op->remap.next = NULL;
2515 		}
2516 
2517 		break;
2518 	}
2519 	case DRM_GPUVA_OP_UNMAP:
2520 		fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
2521 				     op->num_syncs, op->flags & XE_VMA_OP_FIRST,
2522 				     op->flags & XE_VMA_OP_LAST);
2523 		break;
2524 	case DRM_GPUVA_OP_PREFETCH:
2525 		fence = xe_vm_prefetch(vm, vma, op->q, op->syncs, op->num_syncs,
2526 				       op->flags & XE_VMA_OP_FIRST,
2527 				       op->flags & XE_VMA_OP_LAST);
2528 		break;
2529 	default:
2530 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2531 	}
2532 
2533 	if (IS_ERR(fence))
2534 		trace_xe_vma_fail(vma);
2535 
2536 	return fence;
2537 }
2538 
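/*
 * Wrapper around op_execute() that, when it fails with -EAGAIN on a userptr
 * VMA, repins the userptr pages and retries.
 */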
2539 static struct dma_fence *
2540 __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2541 		    struct xe_vma_op *op)
2542 {
2543 	struct dma_fence *fence;
2544 	int err;
2545 
2546 retry_userptr:
2547 	fence = op_execute(vm, vma, op);
2548 	if (IS_ERR(fence) && PTR_ERR(fence) == -EAGAIN) {
2549 		lockdep_assert_held_write(&vm->lock);
2550 
2551 		if (op->base.op == DRM_GPUVA_OP_REMAP) {
2552 			if (!op->remap.unmap_done)
2553 				vma = gpuva_to_vma(op->base.remap.unmap->va);
2554 			else if (op->remap.prev)
2555 				vma = op->remap.prev;
2556 			else
2557 				vma = op->remap.next;
2558 		}
2559 
2560 		if (xe_vma_is_userptr(vma)) {
2561 			err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2562 			if (!err)
2563 				goto retry_userptr;
2564 
2565 			fence = ERR_PTR(err);
2566 			trace_xe_vma_fail(vma);
2567 		}
2568 	}
2569 
2570 	return fence;
2571 }
2572 
2573 static struct dma_fence *
2574 xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2575 {
2576 	struct dma_fence *fence = ERR_PTR(-ENOMEM);
2577 
2578 	lockdep_assert_held(&vm->lock);
2579 
2580 	switch (op->base.op) {
2581 	case DRM_GPUVA_OP_MAP:
2582 		fence = __xe_vma_op_execute(vm, op->map.vma, op);
2583 		break;
2584 	case DRM_GPUVA_OP_REMAP:
2585 	{
2586 		struct xe_vma *vma;
2587 
2588 		if (!op->remap.unmap_done)
2589 			vma = gpuva_to_vma(op->base.remap.unmap->va);
2590 		else if (op->remap.prev)
2591 			vma = op->remap.prev;
2592 		else
2593 			vma = op->remap.next;
2594 
2595 		fence = __xe_vma_op_execute(vm, vma, op);
2596 		break;
2597 	}
2598 	case DRM_GPUVA_OP_UNMAP:
2599 		fence = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2600 					    op);
2601 		break;
2602 	case DRM_GPUVA_OP_PREFETCH:
2603 		fence = __xe_vma_op_execute(vm,
2604 					    gpuva_to_vma(op->base.prefetch.va),
2605 					    op);
2606 		break;
2607 	default:
2608 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2609 	}
2610 
2611 	return fence;
2612 }
2613 
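/*
 * Undo the VMA-tree changes made by xe_vma_op_commit() when a later step of
 * the bind IOCTL fails: destroy VMAs that were created for the op and restore
 * (re-insert) the ones that had been marked destroyed or removed.
 */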
2614 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2615 			     bool post_commit, bool prev_post_commit,
2616 			     bool next_post_commit)
2617 {
2618 	lockdep_assert_held_write(&vm->lock);
2619 
2620 	switch (op->base.op) {
2621 	case DRM_GPUVA_OP_MAP:
2622 		if (op->map.vma) {
2623 			prep_vma_destroy(vm, op->map.vma, post_commit);
2624 			xe_vma_destroy_unlocked(op->map.vma);
2625 		}
2626 		break;
2627 	case DRM_GPUVA_OP_UNMAP:
2628 	{
2629 		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2630 
2631 		if (vma) {
2632 			down_read(&vm->userptr.notifier_lock);
2633 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2634 			up_read(&vm->userptr.notifier_lock);
2635 			if (post_commit)
2636 				xe_vm_insert_vma(vm, vma);
2637 		}
2638 		break;
2639 	}
2640 	case DRM_GPUVA_OP_REMAP:
2641 	{
2642 		struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2643 
2644 		if (op->remap.prev) {
2645 			prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2646 			xe_vma_destroy_unlocked(op->remap.prev);
2647 		}
2648 		if (op->remap.next) {
2649 			prep_vma_destroy(vm, op->remap.next, next_post_commit);
2650 			xe_vma_destroy_unlocked(op->remap.next);
2651 		}
2652 		if (vma) {
2653 			down_read(&vm->userptr.notifier_lock);
2654 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2655 			up_read(&vm->userptr.notifier_lock);
2656 			if (post_commit)
2657 				xe_vm_insert_vma(vm, vma);
2658 		}
2659 		break;
2660 	}
2661 	case DRM_GPUVA_OP_PREFETCH:
2662 		/* Nothing to do */
2663 		break;
2664 	default:
2665 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2666 	}
2667 }
2668 
2669 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2670 				     struct drm_gpuva_ops **ops,
2671 				     int num_ops_list)
2672 {
2673 	int i;
2674 
2675 	for (i = num_ops_list - 1; i >= 0; --i) {
2676 		struct drm_gpuva_ops *__ops = ops[i];
2677 		struct drm_gpuva_op *__op;
2678 
2679 		if (!__ops)
2680 			continue;
2681 
2682 		drm_gpuva_for_each_op_reverse(__op, __ops) {
2683 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2684 
2685 			xe_vma_op_unwind(vm, op,
2686 					 op->flags & XE_VMA_OP_COMMITTED,
2687 					 op->flags & XE_VMA_OP_PREV_COMMITTED,
2688 					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2689 		}
2690 	}
2691 }
2692 
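/*
 * Lock the VMA's backing BO if it is not private to the VM (private BOs share
 * the VM's dma-resv, which the caller has already locked) and, if requested,
 * validate it so it is resident in a GPU-accessible placement.
 */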
2693 static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
2694 				 bool validate)
2695 {
2696 	struct xe_bo *bo = xe_vma_bo(vma);
2697 	int err = 0;
2698 
2699 	if (bo) {
2700 		if (!bo->vm)
2701 			err = drm_exec_lock_obj(exec, &bo->ttm.base);
2702 		if (!err && validate)
2703 			err = xe_bo_validate(bo, xe_vma_vm(vma), true);
2704 	}
2705 
2706 	return err;
2707 }
2708 
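/*
 * A VMA still carrying a user fence means an earlier bind touching it has not
 * signalled completion to userspace yet; return -EBUSY rather than modify it,
 * and drop the fence reference once it has signalled.
 */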
2709 static int check_ufence(struct xe_vma *vma)
2710 {
2711 	if (vma->ufence) {
2712 		struct xe_user_fence * const f = vma->ufence;
2713 
2714 		if (!xe_sync_ufence_get_status(f))
2715 			return -EBUSY;
2716 
2717 		vma->ufence = NULL;
2718 		xe_sync_ufence_put(f);
2719 	}
2720 
2721 	return 0;
2722 }
2723 
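/*
 * Per-op locking and preparation under the drm_exec transaction: lock (and
 * where needed validate) the BOs involved, reject ops on VMAs with an
 * unsignalled user fence, and migrate prefetch targets to the requested
 * memory region.
 */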
2724 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
2725 			    struct xe_vma_op *op)
2726 {
2727 	int err = 0;
2728 
2729 	switch (op->base.op) {
2730 	case DRM_GPUVA_OP_MAP:
2731 		err = vma_lock_and_validate(exec, op->map.vma,
2732 					    !xe_vm_in_fault_mode(vm) ||
2733 					    op->map.immediate);
2734 		break;
2735 	case DRM_GPUVA_OP_REMAP:
2736 		err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
2737 		if (err)
2738 			break;
2739 
2740 		err = vma_lock_and_validate(exec,
2741 					    gpuva_to_vma(op->base.remap.unmap->va),
2742 					    false);
2743 		if (!err && op->remap.prev)
2744 			err = vma_lock_and_validate(exec, op->remap.prev, true);
2745 		if (!err && op->remap.next)
2746 			err = vma_lock_and_validate(exec, op->remap.next, true);
2747 		break;
2748 	case DRM_GPUVA_OP_UNMAP:
2749 		err = check_ufence(gpuva_to_vma(op->base.unmap.va));
2750 		if (err)
2751 			break;
2752 
2753 		err = vma_lock_and_validate(exec,
2754 					    gpuva_to_vma(op->base.unmap.va),
2755 					    false);
2756 		break;
2757 	case DRM_GPUVA_OP_PREFETCH:
2758 	{
2759 		struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
2760 		u32 region = op->prefetch.region;
2761 
2762 		xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
2763 
2764 		err = vma_lock_and_validate(exec,
2765 					    gpuva_to_vma(op->base.prefetch.va),
2766 					    false);
2767 		if (!err && !xe_vma_has_no_bo(vma))
2768 			err = xe_bo_migrate(xe_vma_bo(vma),
2769 					    region_to_mem_type[region]);
2770 		break;
2771 	}
2772 	default:
2773 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2774 	}
2775 
2776 	return err;
2777 }
2778 
2779 static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
2780 					   struct xe_vm *vm,
2781 					   struct xe_vma_ops *vops)
2782 {
2783 	struct xe_vma_op *op;
2784 	int err;
2785 
2786 	err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
2787 	if (err)
2788 		return err;
2789 
2790 	list_for_each_entry(op, &vops->list, link) {
2791 		err = op_lock_and_prep(exec, vm, op);
2792 		if (err)
2793 			return err;
2794 	}
2795 
2796 	return 0;
2797 }
2798 
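/*
 * Execute the ops in submission order, keeping only the fence of the last one.
 * Any failure is currently collapsed into -ENOSPC, upon which the caller kills
 * the VM (see the FIXME in vm_bind_ioctl_ops_execute()).
 */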
2799 static struct dma_fence *ops_execute(struct xe_vm *vm,
2800 				     struct xe_vma_ops *vops)
2801 {
2802 	struct xe_vma_op *op, *next;
2803 	struct dma_fence *fence = NULL;
2804 
2805 	list_for_each_entry_safe(op, next, &vops->list, link) {
2806 		dma_fence_put(fence);
2807 		fence = xe_vma_op_execute(vm, op);
2808 		if (IS_ERR(fence)) {
2809 			drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld",
2810 				 op->base.op, PTR_ERR(fence));
2811 			fence = ERR_PTR(-ENOSPC);
2812 			break;
2813 		}
2814 	}
2815 
2816 	return fence;
2817 }
2818 
2819 static void vma_add_ufence(struct xe_vma *vma, struct xe_user_fence *ufence)
2820 {
2821 	if (vma->ufence)
2822 		xe_sync_ufence_put(vma->ufence);
2823 	vma->ufence = __xe_sync_ufence_get(ufence);
2824 }
2825 
2826 static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op,
2827 			  struct xe_user_fence *ufence)
2828 {
2829 	switch (op->base.op) {
2830 	case DRM_GPUVA_OP_MAP:
2831 		vma_add_ufence(op->map.vma, ufence);
2832 		break;
2833 	case DRM_GPUVA_OP_REMAP:
2834 		if (op->remap.prev)
2835 			vma_add_ufence(op->remap.prev, ufence);
2836 		if (op->remap.next)
2837 			vma_add_ufence(op->remap.next, ufence);
2838 		break;
2839 	case DRM_GPUVA_OP_UNMAP:
2840 		break;
2841 	case DRM_GPUVA_OP_PREFETCH:
2842 		vma_add_ufence(gpuva_to_vma(op->base.prefetch.va), ufence);
2843 		break;
2844 	default:
2845 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2846 	}
2847 }
2848 
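/*
 * Finalise a successful bind: attach any user fence to the VMAs it covers,
 * schedule destruction of the VMAs replaced by UNMAP/REMAP once @fence
 * signals, signal the out-syncs and record @fence as the queue's last fence.
 */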
2849 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
2850 				   struct dma_fence *fence)
2851 {
2852 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
2853 	struct xe_user_fence *ufence;
2854 	struct xe_vma_op *op;
2855 	int i;
2856 
2857 	ufence = find_ufence_get(vops->syncs, vops->num_syncs);
2858 	list_for_each_entry(op, &vops->list, link) {
2859 		if (ufence)
2860 			op_add_ufence(vm, op, ufence);
2861 
2862 		if (op->base.op == DRM_GPUVA_OP_UNMAP)
2863 			xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
2864 		else if (op->base.op == DRM_GPUVA_OP_REMAP)
2865 			xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
2866 				       fence);
2867 	}
2868 	if (ufence)
2869 		xe_sync_ufence_put(ufence);
2870 	for (i = 0; i < vops->num_syncs; i++)
2871 		xe_sync_entry_signal(vops->syncs + i, fence);
2872 	xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
2873 	dma_fence_put(fence);
2874 }
2875 
2876 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2877 				     struct xe_vma_ops *vops)
2878 {
2879 	struct drm_exec exec;
2880 	struct dma_fence *fence;
2881 	int err;
2882 
2883 	lockdep_assert_held_write(&vm->lock);
2884 
2885 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
2886 		      DRM_EXEC_IGNORE_DUPLICATES, 0);
2887 	drm_exec_until_all_locked(&exec) {
2888 		err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
2889 		drm_exec_retry_on_contention(&exec);
2890 		if (err)
2891 			goto unlock;
2892 
2893 		fence = ops_execute(vm, vops);
2894 		if (IS_ERR(fence)) {
2895 			err = PTR_ERR(fence);
2896 			/* FIXME: Killing VM rather than proper error handling */
2897 			xe_vm_kill(vm, false);
2898 			goto unlock;
2899 		} else {
2900 			vm_bind_ioctl_ops_fini(vm, vops, fence);
2901 		}
2902 	}
2903 
2904 unlock:
2905 	drm_exec_fini(&exec);
2906 	return err;
2907 }
2908 
2909 #define SUPPORTED_FLAGS	\
2910 	(DRM_XE_VM_BIND_FLAG_READONLY | \
2911 	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
2912 	 DRM_XE_VM_BIND_FLAG_NULL | \
2913 	 DRM_XE_VM_BIND_FLAG_DUMPABLE)
2914 #define XE_64K_PAGE_MASK 0xffffull
2915 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
2916 
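/*
 * Copy in and sanity-check the array of bind ops from userspace. A single
 * bind uses the op embedded in the args struct directly; larger arrays are
 * copied from args->vector_of_binds into a kvmalloc'ed buffer that the caller
 * must free.
 */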
2917 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2918 				    struct drm_xe_vm_bind *args,
2919 				    struct drm_xe_vm_bind_op **bind_ops)
2920 {
2921 	int err;
2922 	int i;
2923 
2924 	if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
2925 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2926 		return -EINVAL;
2927 
2928 	if (XE_IOCTL_DBG(xe, args->extensions))
2929 		return -EINVAL;
2930 
2931 	if (args->num_binds > 1) {
2932 		u64 __user *bind_user =
2933 			u64_to_user_ptr(args->vector_of_binds);
2934 
2935 		*bind_ops = kvmalloc_array(args->num_binds,
2936 					   sizeof(struct drm_xe_vm_bind_op),
2937 					   GFP_KERNEL | __GFP_ACCOUNT);
2938 		if (!*bind_ops)
2939 			return -ENOMEM;
2940 
2941 		err = __copy_from_user(*bind_ops, bind_user,
2942 				       sizeof(struct drm_xe_vm_bind_op) *
2943 				       args->num_binds);
2944 		if (XE_IOCTL_DBG(xe, err)) {
2945 			err = -EFAULT;
2946 			goto free_bind_ops;
2947 		}
2948 	} else {
2949 		*bind_ops = &args->bind;
2950 	}
2951 
2952 	for (i = 0; i < args->num_binds; ++i) {
2953 		u64 range = (*bind_ops)[i].range;
2954 		u64 addr = (*bind_ops)[i].addr;
2955 		u32 op = (*bind_ops)[i].op;
2956 		u32 flags = (*bind_ops)[i].flags;
2957 		u32 obj = (*bind_ops)[i].obj;
2958 		u64 obj_offset = (*bind_ops)[i].obj_offset;
2959 		u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
2960 		bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2961 		u16 pat_index = (*bind_ops)[i].pat_index;
2962 		u16 coh_mode;
2963 
2964 		if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
2965 			err = -EINVAL;
2966 			goto free_bind_ops;
2967 		}
2968 
2969 		pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
2970 		(*bind_ops)[i].pat_index = pat_index;
2971 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2972 		if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
2973 			err = -EINVAL;
2974 			goto free_bind_ops;
2975 		}
2976 
2977 		if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
2978 			err = -EINVAL;
2979 			goto free_bind_ops;
2980 		}
2981 
2982 		if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
2983 		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
2984 		    XE_IOCTL_DBG(xe, obj && is_null) ||
2985 		    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
2986 		    XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
2987 				 is_null) ||
2988 		    XE_IOCTL_DBG(xe, !obj &&
2989 				 op == DRM_XE_VM_BIND_OP_MAP &&
2990 				 !is_null) ||
2991 		    XE_IOCTL_DBG(xe, !obj &&
2992 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2993 		    XE_IOCTL_DBG(xe, addr &&
2994 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2995 		    XE_IOCTL_DBG(xe, range &&
2996 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2997 		    XE_IOCTL_DBG(xe, obj &&
2998 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2999 		    XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
3000 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
3001 		    XE_IOCTL_DBG(xe, obj &&
3002 				 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
3003 		    XE_IOCTL_DBG(xe, prefetch_region &&
3004 				 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
3005 		    XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
3006 				       xe->info.mem_region_mask)) ||
3007 		    XE_IOCTL_DBG(xe, obj &&
3008 				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
3009 			err = -EINVAL;
3010 			goto free_bind_ops;
3011 		}
3012 
3013 		if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
3014 		    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
3015 		    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
3016 		    XE_IOCTL_DBG(xe, !range &&
3017 				 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
3018 			err = -EINVAL;
3019 			goto free_bind_ops;
3020 		}
3021 	}
3022 
3023 	return 0;
3024 
3025 free_bind_ops:
3026 	if (args->num_binds > 1)
3027 		kvfree(*bind_ops);
3028 	return err;
3029 }
3030 
3031 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
3032 				       struct xe_exec_queue *q,
3033 				       struct xe_sync_entry *syncs,
3034 				       int num_syncs)
3035 {
3036 	struct dma_fence *fence;
3037 	int i, err = 0;
3038 
3039 	fence = xe_sync_in_fence_get(syncs, num_syncs,
3040 				     to_wait_exec_queue(vm, q), vm);
3041 	if (IS_ERR(fence))
3042 		return PTR_ERR(fence);
3043 
3044 	for (i = 0; i < num_syncs; i++)
3045 		xe_sync_entry_signal(&syncs[i], fence);
3046 
3047 	xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
3048 				     fence);
3049 	dma_fence_put(fence);
3050 
3051 	return err;
3052 }
3053 
3054 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
3055 			    struct xe_exec_queue *q,
3056 			    struct xe_sync_entry *syncs, u32 num_syncs)
3057 {
3058 	memset(vops, 0, sizeof(*vops));
3059 	INIT_LIST_HEAD(&vops->list);
3060 	vops->vm = vm;
3061 	vops->q = q;
3062 	vops->syncs = syncs;
3063 	vops->num_syncs = num_syncs;
3064 }
3065 
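/*
 * Validate a bind op against its BO: the range and offset must lie within the
 * BO, BOs internally mapped with 64K pages need 64K-aligned address, offset
 * and range, and the requested PAT coherency mode must be compatible with the
 * BO's CPU caching (at least 1-way coherent for BOs without a known CPU
 * caching mode, e.g. imported dma-bufs).
 */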
3066 static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
3067 					u64 addr, u64 range, u64 obj_offset,
3068 					u16 pat_index)
3069 {
3070 	u16 coh_mode;
3071 
3072 	if (XE_IOCTL_DBG(xe, range > bo->size) ||
3073 	    XE_IOCTL_DBG(xe, obj_offset >
3074 			 bo->size - range)) {
3075 		return -EINVAL;
3076 	}
3077 
3078 	if (bo->flags & XE_BO_FLAG_INTERNAL_64K) {
3079 		if (XE_IOCTL_DBG(xe, obj_offset &
3080 				 XE_64K_PAGE_MASK) ||
3081 		    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3082 		    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3083 			return -EINVAL;
3084 		}
3085 	}
3086 
3087 	coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
3088 	if (bo->cpu_caching) {
3089 		if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
3090 				 bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
3091 			return -EINVAL;
3092 		}
3093 	} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
3094 		/*
3095 		 * An imported dma-buf from a different device should require
3096 		 * 1-way or 2-way coherency since we don't know how it was
3097 		 * mapped on the CPU. Just assume it is potentially cached on
3098 		 * the CPU side.
3099 		 */
3100 		return -EINVAL;
3101 	}
3102 
3103 	return 0;
3104 }
3105 
3106 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3107 {
3108 	struct xe_device *xe = to_xe_device(dev);
3109 	struct xe_file *xef = to_xe_file(file);
3110 	struct drm_xe_vm_bind *args = data;
3111 	struct drm_xe_sync __user *syncs_user;
3112 	struct xe_bo **bos = NULL;
3113 	struct drm_gpuva_ops **ops = NULL;
3114 	struct xe_vm *vm;
3115 	struct xe_exec_queue *q = NULL;
3116 	u32 num_syncs, num_ufence = 0;
3117 	struct xe_sync_entry *syncs = NULL;
3118 	struct drm_xe_vm_bind_op *bind_ops;
3119 	struct xe_vma_ops vops;
3120 	int err;
3121 	int i;
3122 
3123 	err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
3124 	if (err)
3125 		return err;
3126 
3127 	if (args->exec_queue_id) {
3128 		q = xe_exec_queue_lookup(xef, args->exec_queue_id);
3129 		if (XE_IOCTL_DBG(xe, !q)) {
3130 			err = -ENOENT;
3131 			goto free_objs;
3132 		}
3133 
3134 		if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
3135 			err = -EINVAL;
3136 			goto put_exec_queue;
3137 		}
3138 	}
3139 
3140 	vm = xe_vm_lookup(xef, args->vm_id);
3141 	if (XE_IOCTL_DBG(xe, !vm)) {
3142 		err = -EINVAL;
3143 		goto put_exec_queue;
3144 	}
3145 
3146 	err = down_write_killable(&vm->lock);
3147 	if (err)
3148 		goto put_vm;
3149 
3150 	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
3151 		err = -ENOENT;
3152 		goto release_vm_lock;
3153 	}
3154 
3155 	for (i = 0; i < args->num_binds; ++i) {
3156 		u64 range = bind_ops[i].range;
3157 		u64 addr = bind_ops[i].addr;
3158 
3159 		if (XE_IOCTL_DBG(xe, range > vm->size) ||
3160 		    XE_IOCTL_DBG(xe, addr > vm->size - range)) {
3161 			err = -EINVAL;
3162 			goto release_vm_lock;
3163 		}
3164 	}
3165 
3166 	if (args->num_binds) {
3167 		bos = kvcalloc(args->num_binds, sizeof(*bos),
3168 			       GFP_KERNEL | __GFP_ACCOUNT);
3169 		if (!bos) {
3170 			err = -ENOMEM;
3171 			goto release_vm_lock;
3172 		}
3173 
3174 		ops = kvcalloc(args->num_binds, sizeof(*ops),
3175 			       GFP_KERNEL | __GFP_ACCOUNT);
3176 		if (!ops) {
3177 			err = -ENOMEM;
3178 			goto release_vm_lock;
3179 		}
3180 	}
3181 
3182 	for (i = 0; i < args->num_binds; ++i) {
3183 		struct drm_gem_object *gem_obj;
3184 		u64 range = bind_ops[i].range;
3185 		u64 addr = bind_ops[i].addr;
3186 		u32 obj = bind_ops[i].obj;
3187 		u64 obj_offset = bind_ops[i].obj_offset;
3188 		u16 pat_index = bind_ops[i].pat_index;
3189 
3190 		if (!obj)
3191 			continue;
3192 
3193 		gem_obj = drm_gem_object_lookup(file, obj);
3194 		if (XE_IOCTL_DBG(xe, !gem_obj)) {
3195 			err = -ENOENT;
3196 			goto put_obj;
3197 		}
3198 		bos[i] = gem_to_xe_bo(gem_obj);
3199 
3200 		err = xe_vm_bind_ioctl_validate_bo(xe, bos[i], addr, range,
3201 						   obj_offset, pat_index);
3202 		if (err)
3203 			goto put_obj;
3204 	}
3205 
3206 	if (args->num_syncs) {
3207 		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3208 		if (!syncs) {
3209 			err = -ENOMEM;
3210 			goto put_obj;
3211 		}
3212 	}
3213 
3214 	syncs_user = u64_to_user_ptr(args->syncs);
3215 	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3216 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3217 					  &syncs_user[num_syncs],
3218 					  (xe_vm_in_lr_mode(vm) ?
3219 					   SYNC_PARSE_FLAG_LR_MODE : 0) |
3220 					  (!args->num_binds ?
3221 					   SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
3222 		if (err)
3223 			goto free_syncs;
3224 
3225 		if (xe_sync_is_ufence(&syncs[num_syncs]))
3226 			num_ufence++;
3227 	}
3228 
3229 	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
3230 		err = -EINVAL;
3231 		goto free_syncs;
3232 	}
3233 
3234 	if (!args->num_binds) {
3235 		err = -ENODATA;
3236 		goto free_syncs;
3237 	}
3238 
3239 	xe_vma_ops_init(&vops, vm, q, syncs, num_syncs);
3240 	for (i = 0; i < args->num_binds; ++i) {
3241 		u64 range = bind_ops[i].range;
3242 		u64 addr = bind_ops[i].addr;
3243 		u32 op = bind_ops[i].op;
3244 		u32 flags = bind_ops[i].flags;
3245 		u64 obj_offset = bind_ops[i].obj_offset;
3246 		u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
3247 		u16 pat_index = bind_ops[i].pat_index;
3248 
3249 		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3250 						  addr, range, op, flags,
3251 						  prefetch_region, pat_index);
3252 		if (IS_ERR(ops[i])) {
3253 			err = PTR_ERR(ops[i]);
3254 			ops[i] = NULL;
3255 			goto unwind_ops;
3256 		}
3257 
3258 		err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3259 					      &vops, i == args->num_binds - 1);
3260 		if (err)
3261 			goto unwind_ops;
3262 	}
3263 
3264 	/* Nothing to do */
3265 	if (list_empty(&vops.list)) {
3266 		err = -ENODATA;
3267 		goto unwind_ops;
3268 	}
3269 
3270 	err = vm_bind_ioctl_ops_execute(vm, &vops);
3271 
3272 unwind_ops:
3273 	if (err && err != -ENODATA)
3274 		vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3275 	for (i = args->num_binds - 1; i >= 0; --i)
3276 		if (ops[i])
3277 			drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
3278 free_syncs:
3279 	if (err == -ENODATA)
3280 		err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3281 	while (num_syncs--)
3282 		xe_sync_entry_cleanup(&syncs[num_syncs]);
3283 
3284 	kfree(syncs);
3285 put_obj:
3286 	for (i = 0; i < args->num_binds; ++i)
3287 		xe_bo_put(bos[i]);
3288 release_vm_lock:
3289 	up_write(&vm->lock);
3290 put_vm:
3291 	xe_vm_put(vm);
3292 put_exec_queue:
3293 	if (q)
3294 		xe_exec_queue_put(q);
3295 free_objs:
3296 	kvfree(bos);
3297 	kvfree(ops);
3298 	if (args->num_binds > 1)
3299 		kvfree(bind_ops);
3300 	return err;
3301 }
3302 
3303 /**
3304  * xe_vm_lock() - Lock the vm's dma_resv object
3305  * @vm: The struct xe_vm whose dma_resv object is to be locked
3306  * @intr: Whether to perform any wait interruptible
3307  *
3308  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3309  * contended lock was interrupted. If @intr is false, the function
3310  * always returns 0.
3311  */
3312 int xe_vm_lock(struct xe_vm *vm, bool intr)
3313 {
3314 	if (intr)
3315 		return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3316 
3317 	return dma_resv_lock(xe_vm_resv(vm), NULL);
3318 }
3319 
3320 /**
3321  * xe_vm_unlock() - Unlock the vm's dma_resv object
3322  * @vm: The struct xe_vm whose lock is to be released.
3323  *
3324  * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
3325  */
3326 void xe_vm_unlock(struct xe_vm *vm)
3327 {
3328 	dma_resv_unlock(xe_vm_resv(vm));
3329 }
3330 
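/*
 * Example pairing of the two helpers above (a minimal sketch):
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *	... touch state protected by the VM's dma-resv ...
 *	xe_vm_unlock(vm);
 */
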
3331 /**
3332  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3333  * @vma: VMA to invalidate
3334  *
3335  * Walks the list of page-table leaves, zeroing the entries owned by this VMA,
3336  * invalidates the TLBs, and blocks until the TLB invalidation is complete.
3337  *
3338  * Return: 0 on success, negative error code otherwise.
3340  */
3341 int xe_vm_invalidate_vma(struct xe_vma *vma)
3342 {
3343 	struct xe_device *xe = xe_vma_vm(vma)->xe;
3344 	struct xe_tile *tile;
3345 	u32 tile_needs_invalidate = 0;
3346 	int seqno[XE_MAX_TILES_PER_DEVICE];
3347 	u8 id;
3348 	int ret;
3349 
3350 	xe_assert(xe, !xe_vma_is_null(vma));
3351 	trace_xe_vma_invalidate(vma);
3352 
3353 	vm_dbg(&xe_vma_vm(vma)->xe->drm,
3354 	       "INVALIDATE: addr=0x%016llx, range=0x%016llx",
3355 		xe_vma_start(vma), xe_vma_size(vma));
3356 
3357 	/* Check that we don't race with page-table updates */
3358 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3359 		if (xe_vma_is_userptr(vma)) {
3360 			WARN_ON_ONCE(!mmu_interval_check_retry
3361 				     (&to_userptr_vma(vma)->userptr.notifier,
3362 				      to_userptr_vma(vma)->userptr.notifier_seq));
3363 			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3364 							     DMA_RESV_USAGE_BOOKKEEP));
3365 
3366 		} else {
3367 			xe_bo_assert_held(xe_vma_bo(vma));
3368 		}
3369 	}
3370 
3371 	for_each_tile(tile, xe, id) {
3372 		if (xe_pt_zap_ptes(tile, vma)) {
3373 			tile_needs_invalidate |= BIT(id);
3374 			xe_device_wmb(xe);
3375 			/*
3376 			 * FIXME: We potentially need to invalidate multiple
3377 			 * GTs within the tile
3378 			 */
3379 			seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3380 			if (seqno[id] < 0)
3381 				return seqno[id];
3382 		}
3383 	}
3384 
3385 	for_each_tile(tile, xe, id) {
3386 		if (tile_needs_invalidate & BIT(id)) {
3387 			ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3388 			if (ret < 0)
3389 				return ret;
3390 		}
3391 	}
3392 
3393 	vma->tile_invalidated = vma->tile_mask;
3394 
3395 	return 0;
3396 }
3397 
3398 struct xe_vm_snapshot {
3399 	unsigned long num_snaps;
3400 	struct {
3401 		u64 ofs, bo_ofs;
3402 		unsigned long len;
3403 		struct xe_bo *bo;
3404 		void *data;
3405 		struct mm_struct *mm;
3406 	} snap[];
3407 };
3408 
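/*
 * Capture, under snap_mutex and without sleeping (GFP_NOWAIT), the offset,
 * size and backing object (BO reference or userptr mm) of every dumpable VMA.
 * The actual contents are copied later by xe_vm_snapshot_capture_delayed(),
 * which is allowed to sleep.
 */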
3409 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
3410 {
3411 	unsigned long num_snaps = 0, i;
3412 	struct xe_vm_snapshot *snap = NULL;
3413 	struct drm_gpuva *gpuva;
3414 
3415 	if (!vm)
3416 		return NULL;
3417 
3418 	mutex_lock(&vm->snap_mutex);
3419 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3420 		if (gpuva->flags & XE_VMA_DUMPABLE)
3421 			num_snaps++;
3422 	}
3423 
3424 	if (num_snaps)
3425 		snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
3426 	if (!snap) {
3427 		snap = num_snaps ? ERR_PTR(-ENOMEM) : ERR_PTR(-ENODEV);
3428 		goto out_unlock;
3429 	}
3430 
3431 	snap->num_snaps = num_snaps;
3432 	i = 0;
3433 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3434 		struct xe_vma *vma = gpuva_to_vma(gpuva);
3435 		struct xe_bo *bo = vma->gpuva.gem.obj ?
3436 			gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
3437 
3438 		if (!(gpuva->flags & XE_VMA_DUMPABLE))
3439 			continue;
3440 
3441 		snap->snap[i].ofs = xe_vma_start(vma);
3442 		snap->snap[i].len = xe_vma_size(vma);
3443 		if (bo) {
3444 			snap->snap[i].bo = xe_bo_get(bo);
3445 			snap->snap[i].bo_ofs = xe_vma_bo_offset(vma);
3446 		} else if (xe_vma_is_userptr(vma)) {
3447 			struct mm_struct *mm =
3448 				to_userptr_vma(vma)->userptr.notifier.mm;
3449 
3450 			if (mmget_not_zero(mm))
3451 				snap->snap[i].mm = mm;
3452 			else
3453 				snap->snap[i].data = ERR_PTR(-EFAULT);
3454 
3455 			snap->snap[i].bo_ofs = xe_vma_userptr(vma);
3456 		} else {
3457 			snap->snap[i].data = ERR_PTR(-ENOENT);
3458 		}
3459 		i++;
3460 	}
3461 
3462 out_unlock:
3463 	mutex_unlock(&vm->snap_mutex);
3464 	return snap;
3465 }
3466 
3467 void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
3468 {
3469 	if (IS_ERR_OR_NULL(snap))
3470 		return;
3471 
3472 	for (int i = 0; i < snap->num_snaps; i++) {
3473 		struct xe_bo *bo = snap->snap[i].bo;
3474 		struct iosys_map src;
3475 		int err;
3476 
3477 		if (IS_ERR(snap->snap[i].data))
3478 			continue;
3479 
3480 		snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER);
3481 		if (!snap->snap[i].data) {
3482 			snap->snap[i].data = ERR_PTR(-ENOMEM);
3483 			goto cleanup_bo;
3484 		}
3485 
3486 		if (bo) {
3487 			xe_bo_lock(bo, false);
3488 			err = ttm_bo_vmap(&bo->ttm, &src);
3489 			if (!err) {
3490 				xe_map_memcpy_from(xe_bo_device(bo),
3491 						   snap->snap[i].data,
3492 						   &src, snap->snap[i].bo_ofs,
3493 						   snap->snap[i].len);
3494 				ttm_bo_vunmap(&bo->ttm, &src);
3495 			}
3496 			xe_bo_unlock(bo);
3497 		} else {
3498 			void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
3499 
3500 			kthread_use_mm(snap->snap[i].mm);
3501 			if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len))
3502 				err = 0;
3503 			else
3504 				err = -EFAULT;
3505 			kthread_unuse_mm(snap->snap[i].mm);
3506 
3507 			mmput(snap->snap[i].mm);
3508 			snap->snap[i].mm = NULL;
3509 		}
3510 
3511 		if (err) {
3512 			kvfree(snap->snap[i].data);
3513 			snap->snap[i].data = ERR_PTR(err);
3514 		}
3515 
3516 cleanup_bo:
3517 		xe_bo_put(bo);
3518 		snap->snap[i].bo = NULL;
3519 	}
3520 }
3521 
3522 void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
3523 {
3524 	unsigned long i, j;
3525 
3526 	if (IS_ERR_OR_NULL(snap)) {
3527 		drm_printf(p, "[0].error: %li\n", PTR_ERR(snap));
3528 		return;
3529 	}
3530 
3531 	for (i = 0; i < snap->num_snaps; i++) {
3532 		drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
3533 
3534 		if (IS_ERR(snap->snap[i].data)) {
3535 			drm_printf(p, "[%llx].error: %li\n", snap->snap[i].ofs,
3536 				   PTR_ERR(snap->snap[i].data));
3537 			continue;
3538 		}
3539 
3540 		drm_printf(p, "[%llx].data: ", snap->snap[i].ofs);
3541 
3542 		for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
3543 			u32 *val = snap->snap[i].data + j;
3544 			char dumped[ASCII85_BUFSZ];
3545 
3546 			drm_puts(p, ascii85_encode(*val, dumped));
3547 		}
3548 
3549 		drm_puts(p, "\n");
3550 	}
3551 }
3552 
3553 void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
3554 {
3555 	unsigned long i;
3556 
3557 	if (IS_ERR_OR_NULL(snap))
3558 		return;
3559 
3560 	for (i = 0; i < snap->num_snaps; i++) {
3561 		if (!IS_ERR(snap->snap[i].data))
3562 			kvfree(snap->snap[i].data);
3563 		xe_bo_put(snap->snap[i].bo);
3564 		if (snap->snap[i].mm)
3565 			mmput(snap->snap[i].mm);
3566 	}
3567 	kvfree(snap);
3568 }
3569