xref: /linux/drivers/gpu/drm/xe/xe_vm.c (revision 649e980dadee36f961738d054627225542d547a2)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_vm.h"
7 
8 #include <linux/dma-fence-array.h>
9 #include <linux/nospec.h>
10 
11 #include <drm/drm_exec.h>
12 #include <drm/drm_print.h>
13 #include <drm/ttm/ttm_execbuf_util.h>
14 #include <drm/ttm/ttm_tt.h>
15 #include <drm/xe_drm.h>
16 #include <linux/ascii85.h>
17 #include <linux/delay.h>
18 #include <linux/kthread.h>
19 #include <linux/mm.h>
20 #include <linux/swap.h>
21 
22 #include <generated/xe_wa_oob.h>
23 
24 #include "regs/xe_gtt_defs.h"
25 #include "xe_assert.h"
26 #include "xe_bo.h"
27 #include "xe_device.h"
28 #include "xe_drm_client.h"
29 #include "xe_exec_queue.h"
30 #include "xe_gt_pagefault.h"
31 #include "xe_gt_tlb_invalidation.h"
32 #include "xe_migrate.h"
33 #include "xe_pat.h"
34 #include "xe_pm.h"
35 #include "xe_preempt_fence.h"
36 #include "xe_pt.h"
37 #include "xe_res_cursor.h"
38 #include "xe_sync.h"
39 #include "xe_trace_bo.h"
40 #include "xe_wa.h"
41 #include "xe_hmm.h"
42 
43 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
44 {
45 	return vm->gpuvm.r_obj;
46 }
47 
48 /**
49  * xe_vma_userptr_check_repin() - Advisory check for repin needed
50  * @uvma: The userptr vma
51  *
52  * Check if the userptr vma has been invalidated since last successful
53  * repin. The check is advisory only and the function can be called
54  * without the vm->userptr.notifier_lock held. There is no guarantee that the
55  * vma userptr will remain valid after a lockless check, so typically
56  * the call needs to be followed by a proper check under the notifier_lock.
57  *
58  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
59  */
60 int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
61 {
62 	return mmu_interval_check_retry(&uvma->userptr.notifier,
63 					uvma->userptr.notifier_seq) ?
64 		-EAGAIN : 0;
65 }
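
/*
 * Illustrative sketch (not part of the driver): the advisory check above is
 * typically paired with a recheck under the notifier lock, roughly along
 * these lines. The surrounding caller and its error handling are
 * hypothetical.
 *
 *	if (xe_vma_userptr_check_repin(uvma))
 *		return -EAGAIN;		// lockless hint only
 *
 *	// ... work that does not rely on the pages staying valid ...
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	if (xe_vma_userptr_check_repin(uvma)) {
 *		up_read(&vm->userptr.notifier_lock);
 *		return -EAGAIN;		// authoritative check under the lock
 *	}
 *	// pages remain valid until the notifier lock is dropped
 *	up_read(&vm->userptr.notifier_lock);
 */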
66 
67 int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
68 {
69 	struct xe_vma *vma = &uvma->vma;
70 	struct xe_vm *vm = xe_vma_vm(vma);
71 	struct xe_device *xe = vm->xe;
72 
73 	lockdep_assert_held(&vm->lock);
74 	xe_assert(xe, xe_vma_is_userptr(vma));
75 
76 	return xe_hmm_userptr_populate_range(uvma, false);
77 }
78 
79 static bool preempt_fences_waiting(struct xe_vm *vm)
80 {
81 	struct xe_exec_queue *q;
82 
83 	lockdep_assert_held(&vm->lock);
84 	xe_vm_assert_held(vm);
85 
86 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
87 		if (!q->lr.pfence ||
88 		    test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
89 			     &q->lr.pfence->flags)) {
90 			return true;
91 		}
92 	}
93 
94 	return false;
95 }
96 
97 static void free_preempt_fences(struct list_head *list)
98 {
99 	struct list_head *link, *next;
100 
101 	list_for_each_safe(link, next, list)
102 		xe_preempt_fence_free(to_preempt_fence_from_link(link));
103 }
104 
105 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
106 				unsigned int *count)
107 {
108 	lockdep_assert_held(&vm->lock);
109 	xe_vm_assert_held(vm);
110 
111 	if (*count >= vm->preempt.num_exec_queues)
112 		return 0;
113 
114 	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
115 		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
116 
117 		if (IS_ERR(pfence))
118 			return PTR_ERR(pfence);
119 
120 		list_move_tail(xe_preempt_fence_link(pfence), list);
121 	}
122 
123 	return 0;
124 }
125 
126 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
127 {
128 	struct xe_exec_queue *q;
129 
130 	xe_vm_assert_held(vm);
131 
132 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
133 		if (q->lr.pfence) {
134 			long timeout = dma_fence_wait(q->lr.pfence, false);
135 
136 			if (timeout < 0)
137 				return -ETIME;
138 			dma_fence_put(q->lr.pfence);
139 			q->lr.pfence = NULL;
140 		}
141 	}
142 
143 	return 0;
144 }
145 
146 static bool xe_vm_is_idle(struct xe_vm *vm)
147 {
148 	struct xe_exec_queue *q;
149 
150 	xe_vm_assert_held(vm);
151 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
152 		if (!xe_exec_queue_is_idle(q))
153 			return false;
154 	}
155 
156 	return true;
157 }
158 
159 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
160 {
161 	struct list_head *link;
162 	struct xe_exec_queue *q;
163 
164 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
165 		struct dma_fence *fence;
166 
167 		link = list->next;
168 		xe_assert(vm->xe, link != list);
169 
170 		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
171 					     q, q->lr.context,
172 					     ++q->lr.seqno);
173 		dma_fence_put(q->lr.pfence);
174 		q->lr.pfence = fence;
175 	}
176 }
177 
178 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
179 {
180 	struct xe_exec_queue *q;
181 	int err;
182 
183 	xe_bo_assert_held(bo);
184 
185 	if (!vm->preempt.num_exec_queues)
186 		return 0;
187 
188 	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
189 	if (err)
190 		return err;
191 
192 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
193 		if (q->lr.pfence) {
194 			dma_resv_add_fence(bo->ttm.base.resv,
195 					   q->lr.pfence,
196 					   DMA_RESV_USAGE_BOOKKEEP);
197 		}
198 
199 	return 0;
200 }
201 
202 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
203 						struct drm_exec *exec)
204 {
205 	struct xe_exec_queue *q;
206 
207 	lockdep_assert_held(&vm->lock);
208 	xe_vm_assert_held(vm);
209 
210 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
211 		q->ops->resume(q);
212 
213 		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence,
214 					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
215 	}
216 }
217 
218 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
219 {
220 	struct drm_gpuvm_exec vm_exec = {
221 		.vm = &vm->gpuvm,
222 		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
223 		.num_fences = 1,
224 	};
225 	struct drm_exec *exec = &vm_exec.exec;
226 	struct dma_fence *pfence;
227 	int err;
228 	bool wait;
229 
230 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
231 
232 	down_write(&vm->lock);
233 	err = drm_gpuvm_exec_lock(&vm_exec);
234 	if (err)
235 		goto out_up_write;
236 
237 	pfence = xe_preempt_fence_create(q, q->lr.context,
238 					 ++q->lr.seqno);
239 	if (!pfence) {
240 		err = -ENOMEM;
241 		goto out_fini;
242 	}
243 
244 	list_add(&q->lr.link, &vm->preempt.exec_queues);
245 	++vm->preempt.num_exec_queues;
246 	q->lr.pfence = pfence;
247 
248 	down_read(&vm->userptr.notifier_lock);
249 
250 	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
251 				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
252 
253 	/*
254 	 * Check to see if a VM preemption or userptr invalidation is in
255 	 * flight; if so, trigger this preempt fence to sync state with
256 	 * other preempt fences on the VM.
257 	 */
258 	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
259 	if (wait)
260 		dma_fence_enable_sw_signaling(pfence);
261 
262 	up_read(&vm->userptr.notifier_lock);
263 
264 out_fini:
265 	drm_exec_fini(exec);
266 out_up_write:
267 	up_write(&vm->lock);
268 
269 	return err;
270 }
271 
272 /**
273  * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
274  * @vm: The VM.
275  * @q: The exec_queue
276  */
277 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
278 {
279 	if (!xe_vm_in_preempt_fence_mode(vm))
280 		return;
281 
282 	down_write(&vm->lock);
283 	list_del(&q->lr.link);
284 	--vm->preempt.num_exec_queues;
285 	if (q->lr.pfence) {
286 		dma_fence_enable_sw_signaling(q->lr.pfence);
287 		dma_fence_put(q->lr.pfence);
288 		q->lr.pfence = NULL;
289 	}
290 	up_write(&vm->lock);
291 }
292 
293 /**
294  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
295  * that need repinning.
296  * @vm: The VM.
297  *
298  * This function checks whether the VM has userptrs that need repinning,
299  * and provides a release-type barrier on the userptr.notifier_lock after
300  * checking.
301  *
302  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
303  */
304 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
305 {
306 	lockdep_assert_held_read(&vm->userptr.notifier_lock);
307 
308 	return (list_empty(&vm->userptr.repin_list) &&
309 		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
310 }
311 
312 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
313 
314 static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
315 {
316 	struct xe_exec_queue *q;
317 
318 	lockdep_assert_held(&vm->lock);
319 
320 	if (unlocked)
321 		xe_vm_lock(vm, false);
322 
323 	vm->flags |= XE_VM_FLAG_BANNED;
324 	trace_xe_vm_kill(vm);
325 
326 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
327 		q->ops->kill(q);
328 
329 	if (unlocked)
330 		xe_vm_unlock(vm);
331 
332 	/* TODO: Inform user the VM is banned */
333 }
334 
335 /**
336  * xe_vm_validate_should_retry() - Whether to retry after a validate error.
337  * @exec: The drm_exec object used for locking before validation.
338  * @err: The error returned from ttm_bo_validate().
339  * @end: A ktime_t cookie that should be set to 0 before first use and
340  * that should be reused on subsequent calls.
341  *
342  * With multiple active VMs, under memory pressure, it is possible that
343  * ttm_bo_validate() runs into -EDEADLK, in which case it returns -ENOMEM.
344  * Until ttm properly handles locking in such scenarios, the best the
345  * driver can do is retry with a timeout. Check if that is necessary, and
346  * if so unlock the drm_exec's objects while keeping the ticket to prepare
347  * for a rerun.
348  *
349  * Return: true if a retry after drm_exec_init() is recommended;
350  * false otherwise.
351  */
352 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
353 {
354 	ktime_t cur;
355 
356 	if (err != -ENOMEM)
357 		return false;
358 
359 	cur = ktime_get();
360 	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
361 	if (!ktime_before(cur, *end))
362 		return false;
363 
364 	msleep(20);
365 	return true;
366 }
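
/*
 * Illustrative sketch (not part of the driver): the retry pattern described
 * in the kernel-doc above, reusing the ktime_t cookie across attempts. The
 * surrounding caller is hypothetical; the preempt rebind worker below uses
 * the same idea.
 *
 *	struct drm_exec exec;
 *	ktime_t end = 0;
 *	int err;
 *
 *	retry:
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		err = lock_and_validate(&exec);	// hypothetical helper
 *		drm_exec_retry_on_contention(&exec);
 *	}
 *	drm_exec_fini(&exec);
 *	if (err && xe_vm_validate_should_retry(&exec, err, &end))
 *		goto retry;
 */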
367 
368 static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
369 {
370 	struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
371 	struct drm_gpuva *gpuva;
372 	int ret;
373 
374 	lockdep_assert_held(&vm->lock);
375 	drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
376 		list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
377 			       &vm->rebind_list);
378 
379 	ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
380 	if (ret)
381 		return ret;
382 
383 	vm_bo->evicted = false;
384 	return 0;
385 }
386 
387 /**
388  * xe_vm_validate_rebind() - Validate buffer objects and rebind vmas
389  * @vm: The vm for which we are rebinding.
390  * @exec: The struct drm_exec with the locked GEM objects.
391  * @num_fences: The number of fences to reserve for the operation, not
392  * including rebinds and validations.
393  *
394  * Validates all evicted gem objects and rebinds their vmas. Note that
395  * rebindings may cause evictions and hence the validation-rebind
396  * sequence is rerun until there are no more objects to validate.
397  *
398  * Return: 0 on success, negative error code on error. In particular,
399  * may return -EINTR or -ERESTARTSYS if interrupted, and -EDEADLK if
400  * the drm_exec transaction needs to be restarted.
401  */
402 int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
403 			  unsigned int num_fences)
404 {
405 	struct drm_gem_object *obj;
406 	unsigned long index;
407 	int ret;
408 
409 	do {
410 		ret = drm_gpuvm_validate(&vm->gpuvm, exec);
411 		if (ret)
412 			return ret;
413 
414 		ret = xe_vm_rebind(vm, false);
415 		if (ret)
416 			return ret;
417 	} while (!list_empty(&vm->gpuvm.evict.list));
418 
419 	drm_exec_for_each_locked_object(exec, index, obj) {
420 		ret = dma_resv_reserve_fences(obj->resv, num_fences);
421 		if (ret)
422 			return ret;
423 	}
424 
425 	return 0;
426 }
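
/*
 * Illustrative sketch (not part of the driver): xe_vm_validate_rebind() is
 * intended to run inside a drm_exec locking transaction, as
 * xe_preempt_work_begin() below does. The loop and the num_fences value
 * here are hypothetical.
 *
 *	drm_exec_until_all_locked(&exec) {
 *		err = drm_gpuvm_prepare_vm(&vm->gpuvm, &exec, 0);
 *		if (!err)
 *			err = drm_gpuvm_prepare_objects(&vm->gpuvm, &exec, 0);
 *		if (!err)
 *			err = xe_vm_validate_rebind(vm, &exec, num_fences);
 *		drm_exec_retry_on_contention(&exec);
 *	}
 */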
427 
428 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
429 				 bool *done)
430 {
431 	int err;
432 
433 	err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0);
434 	if (err)
435 		return err;
436 
437 	if (xe_vm_is_idle(vm)) {
438 		vm->preempt.rebind_deactivated = true;
439 		*done = true;
440 		return 0;
441 	}
442 
443 	if (!preempt_fences_waiting(vm)) {
444 		*done = true;
445 		return 0;
446 	}
447 
448 	err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0);
449 	if (err)
450 		return err;
451 
452 	err = wait_for_existing_preempt_fences(vm);
453 	if (err)
454 		return err;
455 
456 	/*
457 	 * Add validation and rebinding to the locking loop since both can
458 	 * cause evictions which may require blocking dma_resv locks.
459 	 * The fence reservation here is intended for the new preempt fences
460 	 * we attach at the end of the rebind work.
461 	 */
462 	return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
463 }
464 
465 static void preempt_rebind_work_func(struct work_struct *w)
466 {
467 	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
468 	struct drm_exec exec;
469 	unsigned int fence_count = 0;
470 	LIST_HEAD(preempt_fences);
471 	ktime_t end = 0;
472 	int err = 0;
473 	long wait;
474 	int __maybe_unused tries = 0;
475 
476 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
477 	trace_xe_vm_rebind_worker_enter(vm);
478 
479 	down_write(&vm->lock);
480 
481 	if (xe_vm_is_closed_or_banned(vm)) {
482 		up_write(&vm->lock);
483 		trace_xe_vm_rebind_worker_exit(vm);
484 		return;
485 	}
486 
487 retry:
488 	if (xe_vm_userptr_check_repin(vm)) {
489 		err = xe_vm_userptr_pin(vm);
490 		if (err)
491 			goto out_unlock_outer;
492 	}
493 
494 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
495 
496 	drm_exec_until_all_locked(&exec) {
497 		bool done = false;
498 
499 		err = xe_preempt_work_begin(&exec, vm, &done);
500 		drm_exec_retry_on_contention(&exec);
501 		if (err || done) {
502 			drm_exec_fini(&exec);
503 			if (err && xe_vm_validate_should_retry(&exec, err, &end))
504 				err = -EAGAIN;
505 
506 			goto out_unlock_outer;
507 		}
508 	}
509 
510 	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
511 	if (err)
512 		goto out_unlock;
513 
514 	err = xe_vm_rebind(vm, true);
515 	if (err)
516 		goto out_unlock;
517 
518 	/* Wait on rebinds and munmap style VM unbinds */
519 	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
520 				     DMA_RESV_USAGE_KERNEL,
521 				     false, MAX_SCHEDULE_TIMEOUT);
522 	if (wait <= 0) {
523 		err = -ETIME;
524 		goto out_unlock;
525 	}
526 
527 #define retry_required(__tries, __vm) \
528 	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
529 	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
530 	__xe_vm_userptr_needs_repin(__vm))
531 
532 	down_read(&vm->userptr.notifier_lock);
533 	if (retry_required(tries, vm)) {
534 		up_read(&vm->userptr.notifier_lock);
535 		err = -EAGAIN;
536 		goto out_unlock;
537 	}
538 
539 #undef retry_required
540 
541 	spin_lock(&vm->xe->ttm.lru_lock);
542 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
543 	spin_unlock(&vm->xe->ttm.lru_lock);
544 
545 	/* Point of no return. */
546 	arm_preempt_fences(vm, &preempt_fences);
547 	resume_and_reinstall_preempt_fences(vm, &exec);
548 	up_read(&vm->userptr.notifier_lock);
549 
550 out_unlock:
551 	drm_exec_fini(&exec);
552 out_unlock_outer:
553 	if (err == -EAGAIN) {
554 		trace_xe_vm_rebind_worker_retry(vm);
555 		goto retry;
556 	}
557 
558 	if (err) {
559 		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
560 		xe_vm_kill(vm, true);
561 	}
562 	up_write(&vm->lock);
563 
564 	free_preempt_fences(&preempt_fences);
565 
566 	trace_xe_vm_rebind_worker_exit(vm);
567 }
568 
569 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
570 				   const struct mmu_notifier_range *range,
571 				   unsigned long cur_seq)
572 {
573 	struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
574 	struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
575 	struct xe_vma *vma = &uvma->vma;
576 	struct xe_vm *vm = xe_vma_vm(vma);
577 	struct dma_resv_iter cursor;
578 	struct dma_fence *fence;
579 	long err;
580 
581 	xe_assert(vm->xe, xe_vma_is_userptr(vma));
582 	trace_xe_vma_userptr_invalidate(vma);
583 
584 	if (!mmu_notifier_range_blockable(range))
585 		return false;
586 
587 	vm_dbg(&xe_vma_vm(vma)->xe->drm,
588 	       "NOTIFIER: addr=0x%016llx, range=0x%016llx",
589 		xe_vma_start(vma), xe_vma_size(vma));
590 
591 	down_write(&vm->userptr.notifier_lock);
592 	mmu_interval_set_seq(mni, cur_seq);
593 
594 	/* No need to stop gpu access if the userptr is not yet bound. */
595 	if (!userptr->initial_bind) {
596 		up_write(&vm->userptr.notifier_lock);
597 		return true;
598 	}
599 
600 	/*
601 	 * Tell exec and rebind worker they need to repin and rebind this
602 	 * userptr.
603 	 */
604 	if (!xe_vm_in_fault_mode(vm) &&
605 	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
606 		spin_lock(&vm->userptr.invalidated_lock);
607 		list_move_tail(&userptr->invalidate_link,
608 			       &vm->userptr.invalidated);
609 		spin_unlock(&vm->userptr.invalidated_lock);
610 	}
611 
612 	up_write(&vm->userptr.notifier_lock);
613 
614 	/*
615 	 * Preempt fences turn into schedule disables, pipeline these.
616 	 * Note that even in fault mode, we need to wait for binds and
617 	 * unbinds to complete, and those are attached as BOOKKEEP fences
618 	 * to the vm.
619 	 */
620 	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
621 			    DMA_RESV_USAGE_BOOKKEEP);
622 	dma_resv_for_each_fence_unlocked(&cursor, fence)
623 		dma_fence_enable_sw_signaling(fence);
624 	dma_resv_iter_end(&cursor);
625 
626 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
627 				    DMA_RESV_USAGE_BOOKKEEP,
628 				    false, MAX_SCHEDULE_TIMEOUT);
629 	XE_WARN_ON(err <= 0);
630 
631 	if (xe_vm_in_fault_mode(vm)) {
632 		err = xe_vm_invalidate_vma(vma);
633 		XE_WARN_ON(err);
634 	}
635 
636 	trace_xe_vma_userptr_invalidate_complete(vma);
637 
638 	return true;
639 }
640 
641 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
642 	.invalidate = vma_userptr_invalidate,
643 };
644 
645 int xe_vm_userptr_pin(struct xe_vm *vm)
646 {
647 	struct xe_userptr_vma *uvma, *next;
648 	int err = 0;
649 	LIST_HEAD(tmp_evict);
650 
651 	xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
652 	lockdep_assert_held_write(&vm->lock);
653 
654 	/* Collect invalidated userptrs */
655 	spin_lock(&vm->userptr.invalidated_lock);
656 	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
657 				 userptr.invalidate_link) {
658 		list_del_init(&uvma->userptr.invalidate_link);
659 		list_move_tail(&uvma->userptr.repin_link,
660 			       &vm->userptr.repin_list);
661 	}
662 	spin_unlock(&vm->userptr.invalidated_lock);
663 
664 	/* Pin and move to temporary list */
665 	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
666 				 userptr.repin_link) {
667 		err = xe_vma_userptr_pin_pages(uvma);
668 		if (err == -EFAULT) {
669 			list_del_init(&uvma->userptr.repin_link);
670 
671 			/* Wait for pending binds */
672 			xe_vm_lock(vm, false);
673 			dma_resv_wait_timeout(xe_vm_resv(vm),
674 					      DMA_RESV_USAGE_BOOKKEEP,
675 					      false, MAX_SCHEDULE_TIMEOUT);
676 
677 			err = xe_vm_invalidate_vma(&uvma->vma);
678 			xe_vm_unlock(vm);
679 			if (err)
680 				return err;
681 		} else {
682 			if (err < 0)
683 				return err;
684 
685 			list_del_init(&uvma->userptr.repin_link);
686 			list_move_tail(&uvma->vma.combined_links.rebind,
687 				       &vm->rebind_list);
688 		}
689 	}
690 
691 	return 0;
692 }
693 
694 /**
695  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
696  * that need repinning.
697  * @vm: The VM.
698  *
699  * This function does an advisory check for whether the VM has userptrs that
700  * need repinning.
701  *
702  * Return: 0 if there are no indications of userptrs needing repinning,
703  * -EAGAIN if there are.
704  */
705 int xe_vm_userptr_check_repin(struct xe_vm *vm)
706 {
707 	return (list_empty_careful(&vm->userptr.repin_list) &&
708 		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
709 }
710 
711 static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
712 				  u8 tile_mask)
713 {
714 	INIT_LIST_HEAD(&op->link);
715 	op->tile_mask = tile_mask;
716 	op->base.op = DRM_GPUVA_OP_MAP;
717 	op->base.map.va.addr = vma->gpuva.va.addr;
718 	op->base.map.va.range = vma->gpuva.va.range;
719 	op->base.map.gem.obj = vma->gpuva.gem.obj;
720 	op->base.map.gem.offset = vma->gpuva.gem.offset;
721 	op->map.vma = vma;
722 	op->map.immediate = true;
723 	op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
724 	op->map.is_null = xe_vma_is_null(vma);
725 }
726 
727 static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
728 				u8 tile_mask)
729 {
730 	struct xe_vma_op *op;
731 
732 	op = kzalloc(sizeof(*op), GFP_KERNEL);
733 	if (!op)
734 		return -ENOMEM;
735 
736 	xe_vm_populate_rebind(op, vma, tile_mask);
737 	list_add_tail(&op->link, &vops->list);
738 
739 	return 0;
740 }
741 
742 static struct dma_fence *ops_execute(struct xe_vm *vm,
743 				     struct xe_vma_ops *vops);
744 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
745 			    struct xe_exec_queue *q,
746 			    struct xe_sync_entry *syncs, u32 num_syncs);
747 
748 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
749 {
750 	struct dma_fence *fence;
751 	struct xe_vma *vma, *next;
752 	struct xe_vma_ops vops;
753 	struct xe_vma_op *op, *next_op;
754 	int err;
755 
756 	lockdep_assert_held(&vm->lock);
757 	if ((xe_vm_in_lr_mode(vm) && !rebind_worker) ||
758 	    list_empty(&vm->rebind_list))
759 		return 0;
760 
761 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
762 
763 	xe_vm_assert_held(vm);
764 	list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
765 		xe_assert(vm->xe, vma->tile_present);
766 
767 		if (rebind_worker)
768 			trace_xe_vma_rebind_worker(vma);
769 		else
770 			trace_xe_vma_rebind_exec(vma);
771 
772 		err = xe_vm_ops_add_rebind(&vops, vma,
773 					   vma->tile_present);
774 		if (err)
775 			goto free_ops;
776 	}
777 
778 	fence = ops_execute(vm, &vops);
779 	if (IS_ERR(fence)) {
780 		err = PTR_ERR(fence);
781 	} else {
782 		dma_fence_put(fence);
783 		list_for_each_entry_safe(vma, next, &vm->rebind_list,
784 					 combined_links.rebind)
785 			list_del_init(&vma->combined_links.rebind);
786 	}
787 free_ops:
788 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
789 		list_del(&op->link);
790 		kfree(op);
791 	}
792 
793 	return err;
794 }
795 
796 struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_mask)
797 {
798 	struct dma_fence *fence = NULL;
799 	struct xe_vma_ops vops;
800 	struct xe_vma_op *op, *next_op;
801 	int err;
802 
803 	lockdep_assert_held(&vm->lock);
804 	xe_vm_assert_held(vm);
805 	xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
806 
807 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
808 
809 	err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
810 	if (err)
811 		return ERR_PTR(err);
812 
813 	fence = ops_execute(vm, &vops);
814 
815 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
816 		list_del(&op->link);
817 		kfree(op);
818 	}
819 
820 	return fence;
821 }
822 
823 static void xe_vma_free(struct xe_vma *vma)
824 {
825 	if (xe_vma_is_userptr(vma))
826 		kfree(to_userptr_vma(vma));
827 	else
828 		kfree(vma);
829 }
830 
831 #define VMA_CREATE_FLAG_READ_ONLY	BIT(0)
832 #define VMA_CREATE_FLAG_IS_NULL		BIT(1)
833 #define VMA_CREATE_FLAG_DUMPABLE	BIT(2)
834 
835 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
836 				    struct xe_bo *bo,
837 				    u64 bo_offset_or_userptr,
838 				    u64 start, u64 end,
839 				    u16 pat_index, unsigned int flags)
840 {
841 	struct xe_vma *vma;
842 	struct xe_tile *tile;
843 	u8 id;
844 	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
845 	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
846 	bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
847 
848 	xe_assert(vm->xe, start < end);
849 	xe_assert(vm->xe, end < vm->size);
850 
851 	/*
852 	 * Allocate and ensure that the xe_vma_is_userptr() return
853 	 * matches what was allocated.
854 	 */
855 	if (!bo && !is_null) {
856 		struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
857 
858 		if (!uvma)
859 			return ERR_PTR(-ENOMEM);
860 
861 		vma = &uvma->vma;
862 	} else {
863 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
864 		if (!vma)
865 			return ERR_PTR(-ENOMEM);
866 
867 		if (is_null)
868 			vma->gpuva.flags |= DRM_GPUVA_SPARSE;
869 		if (bo)
870 			vma->gpuva.gem.obj = &bo->ttm.base;
871 	}
872 
873 	INIT_LIST_HEAD(&vma->combined_links.rebind);
874 
875 	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
876 	vma->gpuva.vm = &vm->gpuvm;
877 	vma->gpuva.va.addr = start;
878 	vma->gpuva.va.range = end - start + 1;
879 	if (read_only)
880 		vma->gpuva.flags |= XE_VMA_READ_ONLY;
881 	if (dumpable)
882 		vma->gpuva.flags |= XE_VMA_DUMPABLE;
883 
884 	for_each_tile(tile, vm->xe, id)
885 		vma->tile_mask |= 0x1 << id;
886 
887 	if (vm->xe->info.has_atomic_enable_pte_bit)
888 		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
889 
890 	vma->pat_index = pat_index;
891 
892 	if (bo) {
893 		struct drm_gpuvm_bo *vm_bo;
894 
895 		xe_bo_assert_held(bo);
896 
897 		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
898 		if (IS_ERR(vm_bo)) {
899 			xe_vma_free(vma);
900 			return ERR_CAST(vm_bo);
901 		}
902 
903 		drm_gpuvm_bo_extobj_add(vm_bo);
904 		drm_gem_object_get(&bo->ttm.base);
905 		vma->gpuva.gem.offset = bo_offset_or_userptr;
906 		drm_gpuva_link(&vma->gpuva, vm_bo);
907 		drm_gpuvm_bo_put(vm_bo);
908 	} else /* userptr or null */ {
909 		if (!is_null) {
910 			struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
911 			u64 size = end - start + 1;
912 			int err;
913 
914 			INIT_LIST_HEAD(&userptr->invalidate_link);
915 			INIT_LIST_HEAD(&userptr->repin_link);
916 			vma->gpuva.gem.offset = bo_offset_or_userptr;
917 
918 			err = mmu_interval_notifier_insert(&userptr->notifier,
919 							   current->mm,
920 							   xe_vma_userptr(vma), size,
921 							   &vma_userptr_notifier_ops);
922 			if (err) {
923 				xe_vma_free(vma);
924 				return ERR_PTR(err);
925 			}
926 
927 			userptr->notifier_seq = LONG_MAX;
928 		}
929 
930 		xe_vm_get(vm);
931 	}
932 
933 	return vma;
934 }
935 
936 static void xe_vma_destroy_late(struct xe_vma *vma)
937 {
938 	struct xe_vm *vm = xe_vma_vm(vma);
939 
940 	if (vma->ufence) {
941 		xe_sync_ufence_put(vma->ufence);
942 		vma->ufence = NULL;
943 	}
944 
945 	if (xe_vma_is_userptr(vma)) {
946 		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
947 		struct xe_userptr *userptr = &uvma->userptr;
948 
949 		if (userptr->sg)
950 			xe_hmm_userptr_free_sg(uvma);
951 
952 		/*
953 		 * Since userptr pages are not pinned, we can't remove
954 		 * the notifier until we're sure the GPU is not accessing
955 		 * them anymore.
956 		 */
957 		mmu_interval_notifier_remove(&userptr->notifier);
958 		xe_vm_put(vm);
959 	} else if (xe_vma_is_null(vma)) {
960 		xe_vm_put(vm);
961 	} else {
962 		xe_bo_put(xe_vma_bo(vma));
963 	}
964 
965 	xe_vma_free(vma);
966 }
967 
968 static void vma_destroy_work_func(struct work_struct *w)
969 {
970 	struct xe_vma *vma =
971 		container_of(w, struct xe_vma, destroy_work);
972 
973 	xe_vma_destroy_late(vma);
974 }
975 
976 static void vma_destroy_cb(struct dma_fence *fence,
977 			   struct dma_fence_cb *cb)
978 {
979 	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
980 
981 	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
982 	queue_work(system_unbound_wq, &vma->destroy_work);
983 }
984 
985 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
986 {
987 	struct xe_vm *vm = xe_vma_vm(vma);
988 
989 	lockdep_assert_held_write(&vm->lock);
990 	xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
991 
992 	if (xe_vma_is_userptr(vma)) {
993 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
994 
995 		spin_lock(&vm->userptr.invalidated_lock);
996 		list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
997 		spin_unlock(&vm->userptr.invalidated_lock);
998 	} else if (!xe_vma_is_null(vma)) {
999 		xe_bo_assert_held(xe_vma_bo(vma));
1000 
1001 		drm_gpuva_unlink(&vma->gpuva);
1002 	}
1003 
1004 	xe_vm_assert_held(vm);
1005 	if (fence) {
1006 		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1007 						 vma_destroy_cb);
1008 
1009 		if (ret) {
1010 			XE_WARN_ON(ret != -ENOENT);
1011 			xe_vma_destroy_late(vma);
1012 		}
1013 	} else {
1014 		xe_vma_destroy_late(vma);
1015 	}
1016 }
1017 
1018 /**
1019  * xe_vm_lock_vma() - drm_exec utility to lock a vma
1020  * @exec: The drm_exec object we're currently locking for.
1021  * @vma: The vma for which we want to lock the vm resv and any attached
1022  * object's resv.
1023  *
1024  * Return: 0 on success, negative error code on error. In particular
1025  * may return -EDEADLK on WW transaction contention and -EINTR if
1026  * an interruptible wait is terminated by a signal.
1027  */
1028 int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
1029 {
1030 	struct xe_vm *vm = xe_vma_vm(vma);
1031 	struct xe_bo *bo = xe_vma_bo(vma);
1032 	int err;
1033 
1034 	XE_WARN_ON(!vm);
1035 
1036 	err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
1037 	if (!err && bo && !bo->vm)
1038 		err = drm_exec_lock_obj(exec, &bo->ttm.base);
1039 
1040 	return err;
1041 }
1042 
1043 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1044 {
1045 	struct drm_exec exec;
1046 	int err;
1047 
1048 	drm_exec_init(&exec, 0, 0);
1049 	drm_exec_until_all_locked(&exec) {
1050 		err = xe_vm_lock_vma(&exec, vma);
1051 		drm_exec_retry_on_contention(&exec);
1052 		if (XE_WARN_ON(err))
1053 			break;
1054 	}
1055 
1056 	xe_vma_destroy(vma, NULL);
1057 
1058 	drm_exec_fini(&exec);
1059 }
1060 
1061 struct xe_vma *
1062 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1063 {
1064 	struct drm_gpuva *gpuva;
1065 
1066 	lockdep_assert_held(&vm->lock);
1067 
1068 	if (xe_vm_is_closed_or_banned(vm))
1069 		return NULL;
1070 
1071 	xe_assert(vm->xe, start + range <= vm->size);
1072 
1073 	gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1074 
1075 	return gpuva ? gpuva_to_vma(gpuva) : NULL;
1076 }
1077 
1078 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1079 {
1080 	int err;
1081 
1082 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1083 	lockdep_assert_held(&vm->lock);
1084 
1085 	mutex_lock(&vm->snap_mutex);
1086 	err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1087 	mutex_unlock(&vm->snap_mutex);
1088 	XE_WARN_ON(err);	/* Shouldn't be possible */
1089 
1090 	return err;
1091 }
1092 
1093 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1094 {
1095 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1096 	lockdep_assert_held(&vm->lock);
1097 
1098 	mutex_lock(&vm->snap_mutex);
1099 	drm_gpuva_remove(&vma->gpuva);
1100 	mutex_unlock(&vm->snap_mutex);
1101 	if (vm->usm.last_fault_vma == vma)
1102 		vm->usm.last_fault_vma = NULL;
1103 }
1104 
1105 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1106 {
1107 	struct xe_vma_op *op;
1108 
1109 	op = kzalloc(sizeof(*op), GFP_KERNEL);
1110 
1111 	if (unlikely(!op))
1112 		return NULL;
1113 
1114 	return &op->base;
1115 }
1116 
1117 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1118 
1119 static const struct drm_gpuvm_ops gpuvm_ops = {
1120 	.op_alloc = xe_vm_op_alloc,
1121 	.vm_bo_validate = xe_gpuvm_validate,
1122 	.vm_free = xe_vm_free,
1123 };
1124 
1125 static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
1126 {
1127 	u64 pte = 0;
1128 
1129 	if (pat_index & BIT(0))
1130 		pte |= XE_PPGTT_PTE_PAT0;
1131 
1132 	if (pat_index & BIT(1))
1133 		pte |= XE_PPGTT_PTE_PAT1;
1134 
1135 	return pte;
1136 }
1137 
1138 static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
1139 				u32 pt_level)
1140 {
1141 	u64 pte = 0;
1142 
1143 	if (pat_index & BIT(0))
1144 		pte |= XE_PPGTT_PTE_PAT0;
1145 
1146 	if (pat_index & BIT(1))
1147 		pte |= XE_PPGTT_PTE_PAT1;
1148 
1149 	if (pat_index & BIT(2)) {
1150 		if (pt_level)
1151 			pte |= XE_PPGTT_PDE_PDPE_PAT2;
1152 		else
1153 			pte |= XE_PPGTT_PTE_PAT2;
1154 	}
1155 
1156 	if (pat_index & BIT(3))
1157 		pte |= XELPG_PPGTT_PTE_PAT3;
1158 
1159 	if (pat_index & (BIT(4)))
1160 		pte |= XE2_PPGTT_PTE_PAT4;
1161 
1162 	return pte;
1163 }
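
/*
 * Informative example (not from the original source): a pat_index of 0b01011
 * (bits 0, 1 and 3 set) yields XE_PPGTT_PTE_PAT0 | XE_PPGTT_PTE_PAT1 |
 * XELPG_PPGTT_PTE_PAT3. Bit 2 selects XE_PPGTT_PDE_PDPE_PAT2 for directory
 * levels and XE_PPGTT_PTE_PAT2 for leaf PTEs, and bit 4 maps to
 * XE2_PPGTT_PTE_PAT4.
 */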
1164 
1165 static u64 pte_encode_ps(u32 pt_level)
1166 {
1167 	XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
1168 
1169 	if (pt_level == 1)
1170 		return XE_PDE_PS_2M;
1171 	else if (pt_level == 2)
1172 		return XE_PDPE_PS_1G;
1173 
1174 	return 0;
1175 }
1176 
1177 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
1178 			      const u16 pat_index)
1179 {
1180 	struct xe_device *xe = xe_bo_device(bo);
1181 	u64 pde;
1182 
1183 	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1184 	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1185 	pde |= pde_encode_pat_index(xe, pat_index);
1186 
1187 	return pde;
1188 }
1189 
1190 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1191 			      u16 pat_index, u32 pt_level)
1192 {
1193 	struct xe_device *xe = xe_bo_device(bo);
1194 	u64 pte;
1195 
1196 	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1197 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1198 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1199 	pte |= pte_encode_ps(pt_level);
1200 
1201 	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1202 		pte |= XE_PPGTT_PTE_DM;
1203 
1204 	return pte;
1205 }
1206 
1207 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1208 			       u16 pat_index, u32 pt_level)
1209 {
1210 	struct xe_device *xe = xe_vma_vm(vma)->xe;
1211 
1212 	pte |= XE_PAGE_PRESENT;
1213 
1214 	if (likely(!xe_vma_read_only(vma)))
1215 		pte |= XE_PAGE_RW;
1216 
1217 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1218 	pte |= pte_encode_ps(pt_level);
1219 
1220 	if (unlikely(xe_vma_is_null(vma)))
1221 		pte |= XE_PTE_NULL;
1222 
1223 	return pte;
1224 }
1225 
1226 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1227 				u16 pat_index,
1228 				u32 pt_level, bool devmem, u64 flags)
1229 {
1230 	u64 pte;
1231 
1232 	/* Avoid passing random bits directly as flags */
1233 	xe_assert(xe, !(flags & ~XE_PTE_PS64));
1234 
1235 	pte = addr;
1236 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1237 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1238 	pte |= pte_encode_ps(pt_level);
1239 
1240 	if (devmem)
1241 		pte |= XE_PPGTT_PTE_DM;
1242 
1243 	pte |= flags;
1244 
1245 	return pte;
1246 }
1247 
1248 static const struct xe_pt_ops xelp_pt_ops = {
1249 	.pte_encode_bo = xelp_pte_encode_bo,
1250 	.pte_encode_vma = xelp_pte_encode_vma,
1251 	.pte_encode_addr = xelp_pte_encode_addr,
1252 	.pde_encode_bo = xelp_pde_encode_bo,
1253 };
1254 
1255 static void vm_destroy_work_func(struct work_struct *w);
1256 
1257 /**
1258  * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
1259  * given tile and vm.
1260  * @xe: xe device.
1261  * @tile: tile to set up for.
1262  * @vm: vm to set up for.
1263  *
1264  * Sets up a pagetable tree with one page-table per level and a single
1265  * leaf PTE. All pagetable entries point to the single page-table or,
1266  * for MAX_HUGEPTE_LEVEL, a NULL huge PTE for which reads return 0 and
1267  * writes become NOPs.
1268  *
1269  * Return: 0 on success, negative error code on error.
1270  */
1271 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1272 				struct xe_vm *vm)
1273 {
1274 	u8 id = tile->id;
1275 	int i;
1276 
1277 	for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
1278 		vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
1279 		if (IS_ERR(vm->scratch_pt[id][i]))
1280 			return PTR_ERR(vm->scratch_pt[id][i]);
1281 
1282 		xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
1283 	}
1284 
1285 	return 0;
1286 }
1287 
1288 static void xe_vm_free_scratch(struct xe_vm *vm)
1289 {
1290 	struct xe_tile *tile;
1291 	u8 id;
1292 
1293 	if (!xe_vm_has_scratch(vm))
1294 		return;
1295 
1296 	for_each_tile(tile, vm->xe, id) {
1297 		u32 i;
1298 
1299 		if (!vm->pt_root[id])
1300 			continue;
1301 
1302 		for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
1303 			if (vm->scratch_pt[id][i])
1304 				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
1305 	}
1306 }
1307 
1308 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1309 {
1310 	struct drm_gem_object *vm_resv_obj;
1311 	struct xe_vm *vm;
1312 	int err, number_tiles = 0;
1313 	struct xe_tile *tile;
1314 	u8 id;
1315 
1316 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1317 	if (!vm)
1318 		return ERR_PTR(-ENOMEM);
1319 
1320 	vm->xe = xe;
1321 
1322 	vm->size = 1ull << xe->info.va_bits;
1323 
1324 	vm->flags = flags;
1325 
1326 	init_rwsem(&vm->lock);
1327 	mutex_init(&vm->snap_mutex);
1328 
1329 	INIT_LIST_HEAD(&vm->rebind_list);
1330 
1331 	INIT_LIST_HEAD(&vm->userptr.repin_list);
1332 	INIT_LIST_HEAD(&vm->userptr.invalidated);
1333 	init_rwsem(&vm->userptr.notifier_lock);
1334 	spin_lock_init(&vm->userptr.invalidated_lock);
1335 
1336 	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1337 
1338 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
1339 	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
1340 
1341 	for_each_tile(tile, xe, id)
1342 		xe_range_fence_tree_init(&vm->rftree[id]);
1343 
1344 	vm->pt_ops = &xelp_pt_ops;
1345 
1346 	/*
1347 	 * Long-running workloads are not protected by the scheduler references.
1348 	 * By design, run_job for long-running workloads returns NULL and the
1349 	 * scheduler drops all of its references, hence protecting the VM
1350 	 * for this case is necessary.
1351 	 */
1352 	if (flags & XE_VM_FLAG_LR_MODE)
1353 		xe_pm_runtime_get_noresume(xe);
1354 
1355 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1356 	if (!vm_resv_obj) {
1357 		err = -ENOMEM;
1358 		goto err_no_resv;
1359 	}
1360 
1361 	drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1362 		       vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
1363 
1364 	drm_gem_object_put(vm_resv_obj);
1365 
1366 	err = xe_vm_lock(vm, true);
1367 	if (err)
1368 		goto err_close;
1369 
1370 	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1371 		vm->flags |= XE_VM_FLAG_64K;
1372 
1373 	for_each_tile(tile, xe, id) {
1374 		if (flags & XE_VM_FLAG_MIGRATION &&
1375 		    tile->id != XE_VM_FLAG_TILE_ID(flags))
1376 			continue;
1377 
1378 		vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1379 		if (IS_ERR(vm->pt_root[id])) {
1380 			err = PTR_ERR(vm->pt_root[id]);
1381 			vm->pt_root[id] = NULL;
1382 			goto err_unlock_close;
1383 		}
1384 	}
1385 
1386 	if (xe_vm_has_scratch(vm)) {
1387 		for_each_tile(tile, xe, id) {
1388 			if (!vm->pt_root[id])
1389 				continue;
1390 
1391 			err = xe_vm_create_scratch(xe, tile, vm);
1392 			if (err)
1393 				goto err_unlock_close;
1394 		}
1395 		vm->batch_invalidate_tlb = true;
1396 	}
1397 
1398 	if (vm->flags & XE_VM_FLAG_LR_MODE) {
1399 		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1400 		vm->batch_invalidate_tlb = false;
1401 	}
1402 
1403 	/* Fill pt_root after allocating scratch tables */
1404 	for_each_tile(tile, xe, id) {
1405 		if (!vm->pt_root[id])
1406 			continue;
1407 
1408 		xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1409 	}
1410 	xe_vm_unlock(vm);
1411 
1412 	/* Kernel migration VM shouldn't have a circular loop... */
1413 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
1414 		for_each_tile(tile, xe, id) {
1415 			struct xe_gt *gt = tile->primary_gt;
1416 			struct xe_vm *migrate_vm;
1417 			struct xe_exec_queue *q;
1418 			u32 create_flags = EXEC_QUEUE_FLAG_VM;
1419 
1420 			if (!vm->pt_root[id])
1421 				continue;
1422 
1423 			migrate_vm = xe_migrate_get_vm(tile->migrate);
1424 			q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1425 						       XE_ENGINE_CLASS_COPY,
1426 						       create_flags);
1427 			xe_vm_put(migrate_vm);
1428 			if (IS_ERR(q)) {
1429 				err = PTR_ERR(q);
1430 				goto err_close;
1431 			}
1432 			vm->q[id] = q;
1433 			number_tiles++;
1434 		}
1435 	}
1436 
1437 	if (number_tiles > 1)
1438 		vm->composite_fence_ctx = dma_fence_context_alloc(1);
1439 
1440 	mutex_lock(&xe->usm.lock);
1441 	if (flags & XE_VM_FLAG_FAULT_MODE)
1442 		xe->usm.num_vm_in_fault_mode++;
1443 	else if (!(flags & XE_VM_FLAG_MIGRATION))
1444 		xe->usm.num_vm_in_non_fault_mode++;
1445 	mutex_unlock(&xe->usm.lock);
1446 
1447 	trace_xe_vm_create(vm);
1448 
1449 	return vm;
1450 
1451 err_unlock_close:
1452 	xe_vm_unlock(vm);
1453 err_close:
1454 	xe_vm_close_and_put(vm);
1455 	return ERR_PTR(err);
1456 
1457 err_no_resv:
1458 	mutex_destroy(&vm->snap_mutex);
1459 	for_each_tile(tile, xe, id)
1460 		xe_range_fence_tree_fini(&vm->rftree[id]);
1461 	kfree(vm);
1462 	if (flags & XE_VM_FLAG_LR_MODE)
1463 		xe_pm_runtime_put(xe);
1464 	return ERR_PTR(err);
1465 }
1466 
1467 static void xe_vm_close(struct xe_vm *vm)
1468 {
1469 	down_write(&vm->lock);
1470 	vm->size = 0;
1471 	up_write(&vm->lock);
1472 }
1473 
1474 void xe_vm_close_and_put(struct xe_vm *vm)
1475 {
1476 	LIST_HEAD(contested);
1477 	struct xe_device *xe = vm->xe;
1478 	struct xe_tile *tile;
1479 	struct xe_vma *vma, *next_vma;
1480 	struct drm_gpuva *gpuva, *next;
1481 	u8 id;
1482 
1483 	xe_assert(xe, !vm->preempt.num_exec_queues);
1484 
1485 	xe_vm_close(vm);
1486 	if (xe_vm_in_preempt_fence_mode(vm))
1487 		flush_work(&vm->preempt.rebind_work);
1488 
1489 	down_write(&vm->lock);
1490 	for_each_tile(tile, xe, id) {
1491 		if (vm->q[id])
1492 			xe_exec_queue_last_fence_put(vm->q[id], vm);
1493 	}
1494 	up_write(&vm->lock);
1495 
1496 	for_each_tile(tile, xe, id) {
1497 		if (vm->q[id]) {
1498 			xe_exec_queue_kill(vm->q[id]);
1499 			xe_exec_queue_put(vm->q[id]);
1500 			vm->q[id] = NULL;
1501 		}
1502 	}
1503 
1504 	down_write(&vm->lock);
1505 	xe_vm_lock(vm, false);
1506 	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1507 		vma = gpuva_to_vma(gpuva);
1508 
1509 		if (xe_vma_has_no_bo(vma)) {
1510 			down_read(&vm->userptr.notifier_lock);
1511 			vma->gpuva.flags |= XE_VMA_DESTROYED;
1512 			up_read(&vm->userptr.notifier_lock);
1513 		}
1514 
1515 		xe_vm_remove_vma(vm, vma);
1516 
1517 		/* easy case, remove from VMA? */
1518 		if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1519 			list_del_init(&vma->combined_links.rebind);
1520 			xe_vma_destroy(vma, NULL);
1521 			continue;
1522 		}
1523 
1524 		list_move_tail(&vma->combined_links.destroy, &contested);
1525 		vma->gpuva.flags |= XE_VMA_DESTROYED;
1526 	}
1527 
1528 	/*
1529 	 * All vm operations will add shared fences to resv.
1530 	 * The only exception is eviction for a shared object,
1531 	 * but even so, the unbind when evicted would still
1532 	 * install a fence to resv. Hence it's safe to
1533 	 * destroy the pagetables immediately.
1534 	 */
1535 	xe_vm_free_scratch(vm);
1536 
1537 	for_each_tile(tile, xe, id) {
1538 		if (vm->pt_root[id]) {
1539 			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1540 			vm->pt_root[id] = NULL;
1541 		}
1542 	}
1543 	xe_vm_unlock(vm);
1544 
1545 	/*
1546 	 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1547 	 * Since we hold a refcount to the bo, we can remove and free
1548 	 * the members safely without locking.
1549 	 */
1550 	list_for_each_entry_safe(vma, next_vma, &contested,
1551 				 combined_links.destroy) {
1552 		list_del_init(&vma->combined_links.destroy);
1553 		xe_vma_destroy_unlocked(vma);
1554 	}
1555 
1556 	up_write(&vm->lock);
1557 
1558 	mutex_lock(&xe->usm.lock);
1559 	if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1560 		xe->usm.num_vm_in_fault_mode--;
1561 	else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1562 		xe->usm.num_vm_in_non_fault_mode--;
1563 
1564 	if (vm->usm.asid) {
1565 		void *lookup;
1566 
1567 		xe_assert(xe, xe->info.has_asid);
1568 		xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
1569 
1570 		lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1571 		xe_assert(xe, lookup == vm);
1572 	}
1573 	mutex_unlock(&xe->usm.lock);
1574 
1575 	for_each_tile(tile, xe, id)
1576 		xe_range_fence_tree_fini(&vm->rftree[id]);
1577 
1578 	xe_vm_put(vm);
1579 }
1580 
1581 static void vm_destroy_work_func(struct work_struct *w)
1582 {
1583 	struct xe_vm *vm =
1584 		container_of(w, struct xe_vm, destroy_work);
1585 	struct xe_device *xe = vm->xe;
1586 	struct xe_tile *tile;
1587 	u8 id;
1588 
1589 	/* xe_vm_close_and_put was not called? */
1590 	xe_assert(xe, !vm->size);
1591 
1592 	if (xe_vm_in_preempt_fence_mode(vm))
1593 		flush_work(&vm->preempt.rebind_work);
1594 
1595 	mutex_destroy(&vm->snap_mutex);
1596 
1597 	if (vm->flags & XE_VM_FLAG_LR_MODE)
1598 		xe_pm_runtime_put(xe);
1599 
1600 	for_each_tile(tile, xe, id)
1601 		XE_WARN_ON(vm->pt_root[id]);
1602 
1603 	trace_xe_vm_free(vm);
1604 
1605 	if (vm->xef)
1606 		xe_file_put(vm->xef);
1607 
1608 	kfree(vm);
1609 }
1610 
1611 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1612 {
1613 	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1614 
1615 	/* To destroy the VM we need to be able to sleep */
1616 	queue_work(system_unbound_wq, &vm->destroy_work);
1617 }
1618 
1619 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1620 {
1621 	struct xe_vm *vm;
1622 
1623 	mutex_lock(&xef->vm.lock);
1624 	vm = xa_load(&xef->vm.xa, id);
1625 	if (vm)
1626 		xe_vm_get(vm);
1627 	mutex_unlock(&xef->vm.lock);
1628 
1629 	return vm;
1630 }
1631 
1632 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1633 {
1634 	return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
1635 					 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
1636 }
1637 
1638 static struct xe_exec_queue *
1639 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
1640 {
1641 	return q ? q : vm->q[0];
1642 }
1643 
1644 static struct dma_fence *
1645 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1646 		 struct xe_sync_entry *syncs, u32 num_syncs,
1647 		 bool first_op, bool last_op)
1648 {
1649 	struct xe_vm *vm = xe_vma_vm(vma);
1650 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1651 	struct xe_tile *tile;
1652 	struct dma_fence *fence = NULL;
1653 	struct dma_fence **fences = NULL;
1654 	struct dma_fence_array *cf = NULL;
1655 	int cur_fence = 0;
1656 	int number_tiles = hweight8(vma->tile_present);
1657 	int err;
1658 	u8 id;
1659 
1660 	trace_xe_vma_unbind(vma);
1661 
1662 	if (number_tiles > 1) {
1663 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1664 				       GFP_KERNEL);
1665 		if (!fences)
1666 			return ERR_PTR(-ENOMEM);
1667 	}
1668 
1669 	for_each_tile(tile, vm->xe, id) {
1670 		if (!(vma->tile_present & BIT(id)))
1671 			goto next;
1672 
1673 		fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
1674 					   first_op ? syncs : NULL,
1675 					   first_op ? num_syncs : 0);
1676 		if (IS_ERR(fence)) {
1677 			err = PTR_ERR(fence);
1678 			goto err_fences;
1679 		}
1680 
1681 		if (fences)
1682 			fences[cur_fence++] = fence;
1683 
1684 next:
1685 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1686 			q = list_next_entry(q, multi_gt_list);
1687 	}
1688 
1689 	if (fences) {
1690 		cf = dma_fence_array_create(number_tiles, fences,
1691 					    vm->composite_fence_ctx,
1692 					    vm->composite_fence_seqno++,
1693 					    false);
1694 		if (!cf) {
1695 			--vm->composite_fence_seqno;
1696 			err = -ENOMEM;
1697 			goto err_fences;
1698 		}
1699 	}
1700 
1701 	fence = cf ? &cf->base : !fence ?
1702 		xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
1703 
1704 	return fence;
1705 
1706 err_fences:
1707 	if (fences) {
1708 		while (cur_fence)
1709 			dma_fence_put(fences[--cur_fence]);
1710 		kfree(fences);
1711 	}
1712 
1713 	return ERR_PTR(err);
1714 }
1715 
1716 static struct dma_fence *
1717 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1718 	       struct xe_sync_entry *syncs, u32 num_syncs,
1719 	       u8 tile_mask, bool first_op, bool last_op)
1720 {
1721 	struct xe_tile *tile;
1722 	struct dma_fence *fence;
1723 	struct dma_fence **fences = NULL;
1724 	struct dma_fence_array *cf = NULL;
1725 	struct xe_vm *vm = xe_vma_vm(vma);
1726 	int cur_fence = 0;
1727 	int number_tiles = hweight8(tile_mask);
1728 	int err;
1729 	u8 id;
1730 
1731 	trace_xe_vma_bind(vma);
1732 
1733 	if (number_tiles > 1) {
1734 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1735 				       GFP_KERNEL);
1736 		if (!fences)
1737 			return ERR_PTR(-ENOMEM);
1738 	}
1739 
1740 	for_each_tile(tile, vm->xe, id) {
1741 		if (!(tile_mask & BIT(id)))
1742 			goto next;
1743 
1744 		fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
1745 					 first_op ? syncs : NULL,
1746 					 first_op ? num_syncs : 0,
1747 					 vma->tile_present & BIT(id));
1748 		if (IS_ERR(fence)) {
1749 			err = PTR_ERR(fence);
1750 			goto err_fences;
1751 		}
1752 
1753 		if (fences)
1754 			fences[cur_fence++] = fence;
1755 
1756 next:
1757 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1758 			q = list_next_entry(q, multi_gt_list);
1759 	}
1760 
1761 	if (fences) {
1762 		cf = dma_fence_array_create(number_tiles, fences,
1763 					    vm->composite_fence_ctx,
1764 					    vm->composite_fence_seqno++,
1765 					    false);
1766 		if (!cf) {
1767 			--vm->composite_fence_seqno;
1768 			err = -ENOMEM;
1769 			goto err_fences;
1770 		}
1771 	}
1772 
1773 	return cf ? &cf->base : fence;
1774 
1775 err_fences:
1776 	if (fences) {
1777 		while (cur_fence)
1778 			dma_fence_put(fences[--cur_fence]);
1779 		kfree(fences);
1780 	}
1781 
1782 	return ERR_PTR(err);
1783 }
1784 
1785 static struct xe_user_fence *
1786 find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
1787 {
1788 	unsigned int i;
1789 
1790 	for (i = 0; i < num_syncs; i++) {
1791 		struct xe_sync_entry *e = &syncs[i];
1792 
1793 		if (xe_sync_is_ufence(e))
1794 			return xe_sync_ufence_get(e);
1795 	}
1796 
1797 	return NULL;
1798 }
1799 
1800 static struct dma_fence *
1801 xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
1802 	   struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs,
1803 	   u8 tile_mask, bool immediate, bool first_op, bool last_op)
1804 {
1805 	struct dma_fence *fence;
1806 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1807 
1808 	xe_vm_assert_held(vm);
1809 	xe_bo_assert_held(bo);
1810 
1811 	if (immediate) {
1812 		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, tile_mask,
1813 				       first_op, last_op);
1814 		if (IS_ERR(fence))
1815 			return fence;
1816 	} else {
1817 		xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1818 
1819 		fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1820 	}
1821 
1822 	return fence;
1823 }
1824 
1825 static struct dma_fence *
1826 xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1827 	     struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1828 	     u32 num_syncs, bool first_op, bool last_op)
1829 {
1830 	struct dma_fence *fence;
1831 
1832 	xe_vm_assert_held(vm);
1833 	xe_bo_assert_held(xe_vma_bo(vma));
1834 
1835 	fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
1836 	if (IS_ERR(fence))
1837 		return fence;
1838 
1839 	return fence;
1840 }
1841 
1842 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
1843 				    DRM_XE_VM_CREATE_FLAG_LR_MODE | \
1844 				    DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1845 
1846 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1847 		       struct drm_file *file)
1848 {
1849 	struct xe_device *xe = to_xe_device(dev);
1850 	struct xe_file *xef = to_xe_file(file);
1851 	struct drm_xe_vm_create *args = data;
1852 	struct xe_tile *tile;
1853 	struct xe_vm *vm;
1854 	u32 id, asid;
1855 	int err;
1856 	u32 flags = 0;
1857 
1858 	if (XE_IOCTL_DBG(xe, args->extensions))
1859 		return -EINVAL;
1860 
1861 	if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1862 		args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
1863 
1864 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1865 			 !xe->info.has_usm))
1866 		return -EINVAL;
1867 
1868 	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1869 		return -EINVAL;
1870 
1871 	if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1872 		return -EINVAL;
1873 
1874 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
1875 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1876 		return -EINVAL;
1877 
1878 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
1879 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1880 		return -EINVAL;
1881 
1882 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1883 			 xe_device_in_non_fault_mode(xe)))
1884 		return -EINVAL;
1885 
1886 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
1887 			 xe_device_in_fault_mode(xe)))
1888 		return -EINVAL;
1889 
1890 	if (XE_IOCTL_DBG(xe, args->extensions))
1891 		return -EINVAL;
1892 
1893 	if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
1894 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
1895 	if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
1896 		flags |= XE_VM_FLAG_LR_MODE;
1897 	if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1898 		flags |= XE_VM_FLAG_FAULT_MODE;
1899 
1900 	vm = xe_vm_create(xe, flags);
1901 	if (IS_ERR(vm))
1902 		return PTR_ERR(vm);
1903 
1904 	mutex_lock(&xef->vm.lock);
1905 	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1906 	mutex_unlock(&xef->vm.lock);
1907 	if (err)
1908 		goto err_close_and_put;
1909 
1910 	if (xe->info.has_asid) {
1911 		mutex_lock(&xe->usm.lock);
1912 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1913 				      XA_LIMIT(1, XE_MAX_ASID - 1),
1914 				      &xe->usm.next_asid, GFP_KERNEL);
1915 		mutex_unlock(&xe->usm.lock);
1916 		if (err < 0)
1917 			goto err_free_id;
1918 
1919 		vm->usm.asid = asid;
1920 	}
1921 
1922 	args->vm_id = id;
1923 	vm->xef = xe_file_get(xef);
1924 
1925 	/* Record BO memory for VM pagetable created against client */
1926 	for_each_tile(tile, xe, id)
1927 		if (vm->pt_root[id])
1928 			xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
1929 
1930 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1931 	/* Warning: Security issue - never enable by default */
1932 	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
1933 #endif
1934 
1935 	return 0;
1936 
1937 err_free_id:
1938 	mutex_lock(&xef->vm.lock);
1939 	xa_erase(&xef->vm.xa, id);
1940 	mutex_unlock(&xef->vm.lock);
1941 err_close_and_put:
1942 	xe_vm_close_and_put(vm);
1943 
1944 	return err;
1945 }
1946 
1947 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1948 			struct drm_file *file)
1949 {
1950 	struct xe_device *xe = to_xe_device(dev);
1951 	struct xe_file *xef = to_xe_file(file);
1952 	struct drm_xe_vm_destroy *args = data;
1953 	struct xe_vm *vm;
1954 	int err = 0;
1955 
1956 	if (XE_IOCTL_DBG(xe, args->pad) ||
1957 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1958 		return -EINVAL;
1959 
1960 	mutex_lock(&xef->vm.lock);
1961 	vm = xa_load(&xef->vm.xa, args->vm_id);
1962 	if (XE_IOCTL_DBG(xe, !vm))
1963 		err = -ENOENT;
1964 	else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
1965 		err = -EBUSY;
1966 	else
1967 		xa_erase(&xef->vm.xa, args->vm_id);
1968 	mutex_unlock(&xef->vm.lock);
1969 
1970 	if (!err)
1971 		xe_vm_close_and_put(vm);
1972 
1973 	return err;
1974 }
1975 
1976 static const u32 region_to_mem_type[] = {
1977 	XE_PL_TT,
1978 	XE_PL_VRAM0,
1979 	XE_PL_VRAM1,
1980 };
1981 
1982 static struct dma_fence *
1983 xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
1984 	       struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1985 	       u32 num_syncs, bool first_op, bool last_op)
1986 {
1987 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1988 
1989 	if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
1990 		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
1991 				  vma->tile_mask, true, first_op, last_op);
1992 	} else {
1993 		return xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1994 	}
1995 }
1996 
1997 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
1998 			     bool post_commit)
1999 {
2000 	down_read(&vm->userptr.notifier_lock);
2001 	vma->gpuva.flags |= XE_VMA_DESTROYED;
2002 	up_read(&vm->userptr.notifier_lock);
2003 	if (post_commit)
2004 		xe_vm_remove_vma(vm, vma);
2005 }
2006 
2007 #undef ULL
2008 #define ULL	unsigned long long
2009 
2010 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2011 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2012 {
2013 	struct xe_vma *vma;
2014 
2015 	switch (op->op) {
2016 	case DRM_GPUVA_OP_MAP:
2017 		vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2018 		       (ULL)op->map.va.addr, (ULL)op->map.va.range);
2019 		break;
2020 	case DRM_GPUVA_OP_REMAP:
2021 		vma = gpuva_to_vma(op->remap.unmap->va);
2022 		vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2023 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2024 		       op->remap.unmap->keep ? 1 : 0);
2025 		if (op->remap.prev)
2026 			vm_dbg(&xe->drm,
2027 			       "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2028 			       (ULL)op->remap.prev->va.addr,
2029 			       (ULL)op->remap.prev->va.range);
2030 		if (op->remap.next)
2031 			vm_dbg(&xe->drm,
2032 			       "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2033 			       (ULL)op->remap.next->va.addr,
2034 			       (ULL)op->remap.next->va.range);
2035 		break;
2036 	case DRM_GPUVA_OP_UNMAP:
2037 		vma = gpuva_to_vma(op->unmap.va);
2038 		vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2039 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2040 		       op->unmap.keep ? 1 : 0);
2041 		break;
2042 	case DRM_GPUVA_OP_PREFETCH:
2043 		vma = gpuva_to_vma(op->prefetch.va);
2044 		vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2045 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2046 		break;
2047 	default:
2048 		drm_warn(&xe->drm, "NOT POSSIBLE");
2049 	}
2050 }
2051 #else
2052 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2053 {
2054 }
2055 #endif
2056 
2057 /*
2058  * Create the operations list from the IOCTL arguments and set up operation
2059  * fields so the parse and commit steps are decoupled from them. This step can fail.
2060  */
2061 static struct drm_gpuva_ops *
2062 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2063 			 u64 bo_offset_or_userptr, u64 addr, u64 range,
2064 			 u32 operation, u32 flags,
2065 			 u32 prefetch_region, u16 pat_index)
2066 {
2067 	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2068 	struct drm_gpuva_ops *ops;
2069 	struct drm_gpuva_op *__op;
2070 	struct drm_gpuvm_bo *vm_bo;
2071 	int err;
2072 
2073 	lockdep_assert_held_write(&vm->lock);
2074 
2075 	vm_dbg(&vm->xe->drm,
2076 	       "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2077 	       operation, (ULL)addr, (ULL)range,
2078 	       (ULL)bo_offset_or_userptr);
2079 
2080 	switch (operation) {
2081 	case DRM_XE_VM_BIND_OP_MAP:
2082 	case DRM_XE_VM_BIND_OP_MAP_USERPTR:
2083 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2084 						  obj, bo_offset_or_userptr);
2085 		break;
2086 	case DRM_XE_VM_BIND_OP_UNMAP:
2087 		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2088 		break;
2089 	case DRM_XE_VM_BIND_OP_PREFETCH:
2090 		ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2091 		break;
2092 	case DRM_XE_VM_BIND_OP_UNMAP_ALL:
2093 		xe_assert(vm->xe, bo);
2094 
2095 		err = xe_bo_lock(bo, true);
2096 		if (err)
2097 			return ERR_PTR(err);
2098 
2099 		vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
2100 		if (IS_ERR(vm_bo)) {
2101 			xe_bo_unlock(bo);
2102 			return ERR_CAST(vm_bo);
2103 		}
2104 
2105 		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2106 		drm_gpuvm_bo_put(vm_bo);
2107 		xe_bo_unlock(bo);
2108 		break;
2109 	default:
2110 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2111 		ops = ERR_PTR(-EINVAL);
2112 	}
2113 	if (IS_ERR(ops))
2114 		return ops;
2115 
2116 	drm_gpuva_for_each_op(__op, ops) {
2117 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2118 
2119 		if (__op->op == DRM_GPUVA_OP_MAP) {
2120 			op->map.immediate =
2121 				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
2122 			op->map.read_only =
2123 				flags & DRM_XE_VM_BIND_FLAG_READONLY;
2124 			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2125 			op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
2126 			op->map.pat_index = pat_index;
2127 		} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
2128 			op->prefetch.region = prefetch_region;
2129 		}
2130 
2131 		print_op(vm->xe, __op);
2132 	}
2133 
2134 	return ops;
2135 }
2136 
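/*
 * Create an xe_vma from a GPUVA map operation. For BO-backed mappings the
 * BO's GEM object is locked via drm_exec around the creation, along with the
 * VM object for BOs not private to this VM. Userptr VMAs get their pages
 * pinned, and BOs not private to the VM have the VM's preempt fences added to
 * their reservation. On failure the partially constructed VMA is destroyed
 * and an ERR_PTR is returned.
 */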
2137 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2138 			      u16 pat_index, unsigned int flags)
2139 {
2140 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2141 	struct drm_exec exec;
2142 	struct xe_vma *vma;
2143 	int err = 0;
2144 
2145 	lockdep_assert_held_write(&vm->lock);
2146 
2147 	if (bo) {
2148 		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2149 		drm_exec_until_all_locked(&exec) {
2150 			err = 0;
2151 			if (!bo->vm) {
2152 				err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
2153 				drm_exec_retry_on_contention(&exec);
2154 			}
2155 			if (!err) {
2156 				err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2157 				drm_exec_retry_on_contention(&exec);
2158 			}
2159 			if (err) {
2160 				drm_exec_fini(&exec);
2161 				return ERR_PTR(err);
2162 			}
2163 		}
2164 	}
2165 	vma = xe_vma_create(vm, bo, op->gem.offset,
2166 			    op->va.addr, op->va.addr +
2167 			    op->va.range - 1, pat_index, flags);
2168 	if (IS_ERR(vma))
2169 		goto err_unlock;
2170 
2171 	if (xe_vma_is_userptr(vma))
2172 		err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2173 	else if (!xe_vma_has_no_bo(vma) && !bo->vm)
2174 		err = add_preempt_fences(vm, bo);
2175 
2176 err_unlock:
2177 	if (bo)
2178 		drm_exec_fini(&exec);
2179 
2180 	if (err) {
2181 		prep_vma_destroy(vm, vma, false);
2182 		xe_vma_destroy_unlocked(vma);
2183 		vma = ERR_PTR(err);
2184 	}
2185 
2186 	return vma;
2187 }
2188 
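/*
 * Largest page-table entry size previously recorded for this VMA via
 * xe_vma_set_pte_size(). The REMAP path uses it to decide whether a split
 * VMA is aligned well enough that the unchanged part can skip the rebind.
 * Defaults to 1G when nothing has been recorded yet.
 */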
2189 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2190 {
2191 	if (vma->gpuva.flags & XE_VMA_PTE_1G)
2192 		return SZ_1G;
2193 	else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
2194 		return SZ_2M;
2195 	else if (vma->gpuva.flags & XE_VMA_PTE_64K)
2196 		return SZ_64K;
2197 	else if (vma->gpuva.flags & XE_VMA_PTE_4K)
2198 		return SZ_4K;
2199 
2200 	return SZ_1G;	/* Uninitialized, use max size */
2201 }
2202 
2203 static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2204 {
2205 	switch (size) {
2206 	case SZ_1G:
2207 		vma->gpuva.flags |= XE_VMA_PTE_1G;
2208 		break;
2209 	case SZ_2M:
2210 		vma->gpuva.flags |= XE_VMA_PTE_2M;
2211 		break;
2212 	case SZ_64K:
2213 		vma->gpuva.flags |= XE_VMA_PTE_64K;
2214 		break;
2215 	case SZ_4K:
2216 		vma->gpuva.flags |= XE_VMA_PTE_4K;
2217 		break;
2218 	}
2219 }
2220 
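/*
 * Commit a parsed operation to the VM's GPUVA tree: MAP inserts the new VMA,
 * UNMAP and REMAP mark the old VMA destroyed and remove it, and REMAP
 * additionally inserts the prev/next split VMAs and updates the unmapped VA
 * to reflect a partial unbind. The *_COMMITTED flags record how far the
 * commit got so xe_vma_op_unwind() can roll it back on error.
 */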
2221 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2222 {
2223 	int err = 0;
2224 
2225 	lockdep_assert_held_write(&vm->lock);
2226 
2227 	switch (op->base.op) {
2228 	case DRM_GPUVA_OP_MAP:
2229 		err |= xe_vm_insert_vma(vm, op->map.vma);
2230 		if (!err)
2231 			op->flags |= XE_VMA_OP_COMMITTED;
2232 		break;
2233 	case DRM_GPUVA_OP_REMAP:
2234 	{
2235 		u8 tile_present =
2236 			gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2237 
2238 		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2239 				 true);
2240 		op->flags |= XE_VMA_OP_COMMITTED;
2241 
2242 		if (op->remap.prev) {
2243 			err |= xe_vm_insert_vma(vm, op->remap.prev);
2244 			if (!err)
2245 				op->flags |= XE_VMA_OP_PREV_COMMITTED;
2246 			if (!err && op->remap.skip_prev) {
2247 				op->remap.prev->tile_present =
2248 					tile_present;
2249 				op->remap.prev = NULL;
2250 			}
2251 		}
2252 		if (op->remap.next) {
2253 			err |= xe_vm_insert_vma(vm, op->remap.next);
2254 			if (!err)
2255 				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2256 			if (!err && op->remap.skip_next) {
2257 				op->remap.next->tile_present =
2258 					tile_present;
2259 				op->remap.next = NULL;
2260 			}
2261 		}
2262 
2263 		/* Adjust for partial unbind after removing VMA from VM */
2264 		if (!err) {
2265 			op->base.remap.unmap->va->va.addr = op->remap.start;
2266 			op->base.remap.unmap->va->va.range = op->remap.range;
2267 		}
2268 		break;
2269 	}
2270 	case DRM_GPUVA_OP_UNMAP:
2271 		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2272 		op->flags |= XE_VMA_OP_COMMITTED;
2273 		break;
2274 	case DRM_GPUVA_OP_PREFETCH:
2275 		op->flags |= XE_VMA_OP_COMMITTED;
2276 		break;
2277 	default:
2278 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2279 	}
2280 
2281 	return err;
2282 }
2283 
2284 
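/*
 * Turn a drm_gpuva_ops list into xe_vma_ops: allocate VMAs for MAP and REMAP
 * operations, work out which REMAP splits can skip the rebind, attach the
 * syncs to the first and last operation, and commit each operation to the VM
 * as it is parsed. On error, operations already committed are expected to be
 * unwound by the caller.
 */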
2285 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2286 				   struct drm_gpuva_ops *ops,
2287 				   struct xe_sync_entry *syncs, u32 num_syncs,
2288 				   struct xe_vma_ops *vops, bool last)
2289 {
2290 	struct xe_device *xe = vm->xe;
2291 	struct xe_vma_op *last_op = NULL;
2292 	struct drm_gpuva_op *__op;
2293 	struct xe_tile *tile;
2294 	u8 id, tile_mask = 0;
2295 	int err = 0;
2296 
2297 	lockdep_assert_held_write(&vm->lock);
2298 
2299 	for_each_tile(tile, vm->xe, id)
2300 		tile_mask |= 0x1 << id;
2301 
2302 	drm_gpuva_for_each_op(__op, ops) {
2303 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2304 		struct xe_vma *vma;
2305 		bool first = list_empty(&vops->list);
2306 		unsigned int flags = 0;
2307 
2308 		INIT_LIST_HEAD(&op->link);
2309 		list_add_tail(&op->link, &vops->list);
2310 
2311 		if (first) {
2312 			op->flags |= XE_VMA_OP_FIRST;
2313 			op->num_syncs = num_syncs;
2314 			op->syncs = syncs;
2315 		}
2316 
2317 		op->q = q;
2318 		op->tile_mask = tile_mask;
2319 
2320 		switch (op->base.op) {
2321 		case DRM_GPUVA_OP_MAP:
2322 		{
2323 			flags |= op->map.read_only ?
2324 				VMA_CREATE_FLAG_READ_ONLY : 0;
2325 			flags |= op->map.is_null ?
2326 				VMA_CREATE_FLAG_IS_NULL : 0;
2327 			flags |= op->map.dumpable ?
2328 				VMA_CREATE_FLAG_DUMPABLE : 0;
2329 
2330 			vma = new_vma(vm, &op->base.map, op->map.pat_index,
2331 				      flags);
2332 			if (IS_ERR(vma))
2333 				return PTR_ERR(vma);
2334 
2335 			op->map.vma = vma;
2336 			break;
2337 		}
2338 		case DRM_GPUVA_OP_REMAP:
2339 		{
2340 			struct xe_vma *old =
2341 				gpuva_to_vma(op->base.remap.unmap->va);
2342 
2343 			op->remap.start = xe_vma_start(old);
2344 			op->remap.range = xe_vma_size(old);
2345 
2346 			if (op->base.remap.prev) {
2347 				flags |= op->base.remap.unmap->va->flags &
2348 					XE_VMA_READ_ONLY ?
2349 					VMA_CREATE_FLAG_READ_ONLY : 0;
2350 				flags |= op->base.remap.unmap->va->flags &
2351 					DRM_GPUVA_SPARSE ?
2352 					VMA_CREATE_FLAG_IS_NULL : 0;
2353 				flags |= op->base.remap.unmap->va->flags &
2354 					XE_VMA_DUMPABLE ?
2355 					VMA_CREATE_FLAG_DUMPABLE : 0;
2356 
2357 				vma = new_vma(vm, op->base.remap.prev,
2358 					      old->pat_index, flags);
2359 				if (IS_ERR(vma))
2360 					return PTR_ERR(vma);
2361 
2362 				op->remap.prev = vma;
2363 
2364 				/*
2365 				 * Userptr creates a new SG mapping so
2366 				 * we must also rebind.
2367 				 */
2368 				op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2369 					IS_ALIGNED(xe_vma_end(vma),
2370 						   xe_vma_max_pte_size(old));
2371 				if (op->remap.skip_prev) {
2372 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2373 					op->remap.range -=
2374 						xe_vma_end(vma) -
2375 						xe_vma_start(old);
2376 					op->remap.start = xe_vma_end(vma);
2377 					vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
2378 					       (ULL)op->remap.start,
2379 					       (ULL)op->remap.range);
2380 				}
2381 			}
2382 
2383 			if (op->base.remap.next) {
2384 				flags |= op->base.remap.unmap->va->flags &
2385 					XE_VMA_READ_ONLY ?
2386 					VMA_CREATE_FLAG_READ_ONLY : 0;
2387 				flags |= op->base.remap.unmap->va->flags &
2388 					DRM_GPUVA_SPARSE ?
2389 					VMA_CREATE_FLAG_IS_NULL : 0;
2390 				flags |= op->base.remap.unmap->va->flags &
2391 					XE_VMA_DUMPABLE ?
2392 					VMA_CREATE_FLAG_DUMPABLE : 0;
2393 
2394 				vma = new_vma(vm, op->base.remap.next,
2395 					      old->pat_index, flags);
2396 				if (IS_ERR(vma))
2397 					return PTR_ERR(vma);
2398 
2399 				op->remap.next = vma;
2400 
2401 				/*
2402 				 * Userptr creates a new SG mapping so
2403 				 * we must also rebind.
2404 				 */
2405 				op->remap.skip_next = !xe_vma_is_userptr(old) &&
2406 					IS_ALIGNED(xe_vma_start(vma),
2407 						   xe_vma_max_pte_size(old));
2408 				if (op->remap.skip_next) {
2409 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2410 					op->remap.range -=
2411 						xe_vma_end(old) -
2412 						xe_vma_start(vma);
2413 					vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
2414 					       (ULL)op->remap.start,
2415 					       (ULL)op->remap.range);
2416 				}
2417 			}
2418 			break;
2419 		}
2420 		case DRM_GPUVA_OP_UNMAP:
2421 		case DRM_GPUVA_OP_PREFETCH:
2422 			/* Nothing to do */
2423 			break;
2424 		default:
2425 			drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2426 		}
2427 
2428 		last_op = op;
2429 
2430 		err = xe_vma_op_commit(vm, op);
2431 		if (err)
2432 			return err;
2433 	}
2434 
2435 	/* FIXME: Unhandled corner case */
2436 	XE_WARN_ON(!last_op && last && !list_empty(&vops->list));
2437 
2438 	if (!last_op)
2439 		return 0;
2440 
2441 	if (last) {
2442 		last_op->flags |= XE_VMA_OP_LAST;
2443 		last_op->num_syncs = num_syncs;
2444 		last_op->syncs = syncs;
2445 	}
2446 
2447 	return 0;
2448 }
2449 
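/*
 * Issue the GPU page-table updates for a single committed operation and
 * return the fence of the last update. A REMAP is executed as an unbind of
 * the old VMA followed by rebinds of the remaining prev/next pieces, with
 * op->remap.unmap_done tracking progress so the operation can be resumed
 * after a userptr repin.
 */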
2450 static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
2451 				    struct xe_vma_op *op)
2452 {
2453 	struct dma_fence *fence = NULL;
2454 
2455 	lockdep_assert_held(&vm->lock);
2456 
2457 	xe_vm_assert_held(vm);
2458 	xe_bo_assert_held(xe_vma_bo(vma));
2459 
2460 	switch (op->base.op) {
2461 	case DRM_GPUVA_OP_MAP:
2462 		fence = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2463 				   op->syncs, op->num_syncs,
2464 				   op->tile_mask,
2465 				   op->map.immediate || !xe_vm_in_fault_mode(vm),
2466 				   op->flags & XE_VMA_OP_FIRST,
2467 				   op->flags & XE_VMA_OP_LAST);
2468 		break;
2469 	case DRM_GPUVA_OP_REMAP:
2470 	{
2471 		bool prev = !!op->remap.prev;
2472 		bool next = !!op->remap.next;
2473 
2474 		if (!op->remap.unmap_done) {
2475 			if (prev || next)
2476 				vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2477 			fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
2478 					     op->num_syncs,
2479 					     op->flags & XE_VMA_OP_FIRST,
2480 					     op->flags & XE_VMA_OP_LAST &&
2481 					     !prev && !next);
2482 			if (IS_ERR(fence))
2483 				break;
2484 			op->remap.unmap_done = true;
2485 		}
2486 
2487 		if (prev) {
2488 			op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2489 			dma_fence_put(fence);
2490 			fence = xe_vm_bind(vm, op->remap.prev, op->q,
2491 					   xe_vma_bo(op->remap.prev), op->syncs,
2492 					   op->num_syncs,
2493 					   op->remap.prev->tile_mask, true,
2494 					   false,
2495 					   op->flags & XE_VMA_OP_LAST && !next);
2496 			op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2497 			if (IS_ERR(fence))
2498 				break;
2499 			op->remap.prev = NULL;
2500 		}
2501 
2502 		if (next) {
2503 			op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2504 			dma_fence_put(fence);
2505 			fence = xe_vm_bind(vm, op->remap.next, op->q,
2506 					   xe_vma_bo(op->remap.next),
2507 					   op->syncs, op->num_syncs,
2508 					   op->remap.next->tile_mask, true,
2509 					   false, op->flags & XE_VMA_OP_LAST);
2510 			op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2511 			if (IS_ERR(fence))
2512 				break;
2513 			op->remap.next = NULL;
2514 		}
2515 
2516 		break;
2517 	}
2518 	case DRM_GPUVA_OP_UNMAP:
2519 		fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
2520 				     op->num_syncs, op->flags & XE_VMA_OP_FIRST,
2521 				     op->flags & XE_VMA_OP_LAST);
2522 		break;
2523 	case DRM_GPUVA_OP_PREFETCH:
2524 		fence = xe_vm_prefetch(vm, vma, op->q, op->syncs, op->num_syncs,
2525 				       op->flags & XE_VMA_OP_FIRST,
2526 				       op->flags & XE_VMA_OP_LAST);
2527 		break;
2528 	default:
2529 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2530 	}
2531 
2532 	if (IS_ERR(fence))
2533 		trace_xe_vma_fail(vma);
2534 
2535 	return fence;
2536 }
2537 
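/*
 * Execute an operation, repinning the userptr pages and retrying if the bind
 * fails with -EAGAIN (which indicates the userptr was invalidated while the
 * operation was being executed).
 */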
2538 static struct dma_fence *
2539 __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2540 		    struct xe_vma_op *op)
2541 {
2542 	struct dma_fence *fence;
2543 	int err;
2544 
2545 retry_userptr:
2546 	fence = op_execute(vm, vma, op);
2547 	if (IS_ERR(fence) && PTR_ERR(fence) == -EAGAIN) {
2548 		lockdep_assert_held_write(&vm->lock);
2549 
2550 		if (op->base.op == DRM_GPUVA_OP_REMAP) {
2551 			if (!op->remap.unmap_done)
2552 				vma = gpuva_to_vma(op->base.remap.unmap->va);
2553 			else if (op->remap.prev)
2554 				vma = op->remap.prev;
2555 			else
2556 				vma = op->remap.next;
2557 		}
2558 
2559 		if (xe_vma_is_userptr(vma)) {
2560 			err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2561 			if (!err)
2562 				goto retry_userptr;
2563 
2564 			fence = ERR_PTR(err);
2565 			trace_xe_vma_fail(vma);
2566 		}
2567 	}
2568 
2569 	return fence;
2570 }
2571 
2572 static struct dma_fence *
2573 xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2574 {
2575 	struct dma_fence *fence = ERR_PTR(-ENOMEM);
2576 
2577 	lockdep_assert_held(&vm->lock);
2578 
2579 	switch (op->base.op) {
2580 	case DRM_GPUVA_OP_MAP:
2581 		fence = __xe_vma_op_execute(vm, op->map.vma, op);
2582 		break;
2583 	case DRM_GPUVA_OP_REMAP:
2584 	{
2585 		struct xe_vma *vma;
2586 
2587 		if (!op->remap.unmap_done)
2588 			vma = gpuva_to_vma(op->base.remap.unmap->va);
2589 		else if (op->remap.prev)
2590 			vma = op->remap.prev;
2591 		else
2592 			vma = op->remap.next;
2593 
2594 		fence = __xe_vma_op_execute(vm, vma, op);
2595 		break;
2596 	}
2597 	case DRM_GPUVA_OP_UNMAP:
2598 		fence = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2599 					    op);
2600 		break;
2601 	case DRM_GPUVA_OP_PREFETCH:
2602 		fence = __xe_vma_op_execute(vm,
2603 					    gpuva_to_vma(op->base.prefetch.va),
2604 					    op);
2605 		break;
2606 	default:
2607 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2608 	}
2609 
2610 	return fence;
2611 }
2612 
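/*
 * Undo the effects of xe_vma_op_commit() for an operation that could not be
 * fully executed: destroy the VMAs created for MAP/REMAP and re-insert the
 * VMAs that an UNMAP/REMAP removed, clearing XE_VMA_DESTROYED under the
 * userptr notifier lock.
 */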
2613 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2614 			     bool post_commit, bool prev_post_commit,
2615 			     bool next_post_commit)
2616 {
2617 	lockdep_assert_held_write(&vm->lock);
2618 
2619 	switch (op->base.op) {
2620 	case DRM_GPUVA_OP_MAP:
2621 		if (op->map.vma) {
2622 			prep_vma_destroy(vm, op->map.vma, post_commit);
2623 			xe_vma_destroy_unlocked(op->map.vma);
2624 		}
2625 		break;
2626 	case DRM_GPUVA_OP_UNMAP:
2627 	{
2628 		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2629 
2630 		if (vma) {
2631 			down_read(&vm->userptr.notifier_lock);
2632 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2633 			up_read(&vm->userptr.notifier_lock);
2634 			if (post_commit)
2635 				xe_vm_insert_vma(vm, vma);
2636 		}
2637 		break;
2638 	}
2639 	case DRM_GPUVA_OP_REMAP:
2640 	{
2641 		struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2642 
2643 		if (op->remap.prev) {
2644 			prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2645 			xe_vma_destroy_unlocked(op->remap.prev);
2646 		}
2647 		if (op->remap.next) {
2648 			prep_vma_destroy(vm, op->remap.next, next_post_commit);
2649 			xe_vma_destroy_unlocked(op->remap.next);
2650 		}
2651 		if (vma) {
2652 			down_read(&vm->userptr.notifier_lock);
2653 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2654 			up_read(&vm->userptr.notifier_lock);
2655 			if (post_commit)
2656 				xe_vm_insert_vma(vm, vma);
2657 		}
2658 		break;
2659 	}
2660 	case DRM_GPUVA_OP_PREFETCH:
2661 		/* Nothing to do */
2662 		break;
2663 	default:
2664 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2665 	}
2666 }
2667 
2668 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2669 				     struct drm_gpuva_ops **ops,
2670 				     int num_ops_list)
2671 {
2672 	int i;
2673 
2674 	for (i = num_ops_list - 1; i >= 0; --i) {
2675 		struct drm_gpuva_ops *__ops = ops[i];
2676 		struct drm_gpuva_op *__op;
2677 
2678 		if (!__ops)
2679 			continue;
2680 
2681 		drm_gpuva_for_each_op_reverse(__op, __ops) {
2682 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2683 
2684 			xe_vma_op_unwind(vm, op,
2685 					 op->flags & XE_VMA_OP_COMMITTED,
2686 					 op->flags & XE_VMA_OP_PREV_COMMITTED,
2687 					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2688 		}
2689 	}
2690 }
2691 
2692 static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
2693 				 bool validate)
2694 {
2695 	struct xe_bo *bo = xe_vma_bo(vma);
2696 	int err = 0;
2697 
2698 	if (bo) {
2699 		if (!bo->vm)
2700 			err = drm_exec_lock_obj(exec, &bo->ttm.base);
2701 		if (!err && validate)
2702 			err = xe_bo_validate(bo, xe_vma_vm(vma), true);
2703 	}
2704 
2705 	return err;
2706 }
2707 
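/*
 * A VMA still carrying an unsignalled user fence from a previous bind cannot
 * be unmapped or remapped yet; return -EBUSY in that case, otherwise drop the
 * reference to the already signalled fence.
 */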
2708 static int check_ufence(struct xe_vma *vma)
2709 {
2710 	if (vma->ufence) {
2711 		struct xe_user_fence * const f = vma->ufence;
2712 
2713 		if (!xe_sync_ufence_get_status(f))
2714 			return -EBUSY;
2715 
2716 		vma->ufence = NULL;
2717 		xe_sync_ufence_put(f);
2718 	}
2719 
2720 	return 0;
2721 }
2722 
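/*
 * Lock and prepare the BOs an operation touches: new mappings and the
 * remaining pieces of a REMAP are validated (unless the VM is in fault mode
 * and the bind is not immediate), VMAs being unmapped are only locked and
 * checked for pending user fences, and PREFETCH additionally migrates the BO
 * to the requested memory region.
 */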
2723 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
2724 			    struct xe_vma_op *op)
2725 {
2726 	int err = 0;
2727 
2728 	switch (op->base.op) {
2729 	case DRM_GPUVA_OP_MAP:
2730 		err = vma_lock_and_validate(exec, op->map.vma,
2731 					    !xe_vm_in_fault_mode(vm) ||
2732 					    op->map.immediate);
2733 		break;
2734 	case DRM_GPUVA_OP_REMAP:
2735 		err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
2736 		if (err)
2737 			break;
2738 
2739 		err = vma_lock_and_validate(exec,
2740 					    gpuva_to_vma(op->base.remap.unmap->va),
2741 					    false);
2742 		if (!err && op->remap.prev)
2743 			err = vma_lock_and_validate(exec, op->remap.prev, true);
2744 		if (!err && op->remap.next)
2745 			err = vma_lock_and_validate(exec, op->remap.next, true);
2746 		break;
2747 	case DRM_GPUVA_OP_UNMAP:
2748 		err = check_ufence(gpuva_to_vma(op->base.unmap.va));
2749 		if (err)
2750 			break;
2751 
2752 		err = vma_lock_and_validate(exec,
2753 					    gpuva_to_vma(op->base.unmap.va),
2754 					    false);
2755 		break;
2756 	case DRM_GPUVA_OP_PREFETCH:
2757 	{
2758 		struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
2759 		u32 region = op->prefetch.region;
2760 
2761 		xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
2762 
2763 		err = vma_lock_and_validate(exec,
2764 					    gpuva_to_vma(op->base.prefetch.va),
2765 					    false);
2766 		if (!err && !xe_vma_has_no_bo(vma))
2767 			err = xe_bo_migrate(xe_vma_bo(vma),
2768 					    region_to_mem_type[region]);
2769 		break;
2770 	}
2771 	default:
2772 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2773 	}
2774 
2775 	return err;
2776 }
2777 
2778 static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
2779 					   struct xe_vm *vm,
2780 					   struct xe_vma_ops *vops)
2781 {
2782 	struct xe_vma_op *op;
2783 	int err;
2784 
2785 	err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
2786 	if (err)
2787 		return err;
2788 
2789 	list_for_each_entry(op, &vops->list, link) {
2790 		err = op_lock_and_prep(exec, vm, op);
2791 		if (err)
2792 			return err;
2793 	}
2794 
2795 	return 0;
2796 }
2797 
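/*
 * Execute all operations in the list, carrying only the fence of the most
 * recent operation forward. If any operation fails, the VM is left partially
 * updated and an error pointer is returned for the caller to handle
 * (currently by killing the VM).
 */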
2798 static struct dma_fence *ops_execute(struct xe_vm *vm,
2799 				     struct xe_vma_ops *vops)
2800 {
2801 	struct xe_vma_op *op, *next;
2802 	struct dma_fence *fence = NULL;
2803 
2804 	list_for_each_entry_safe(op, next, &vops->list, link) {
2805 		dma_fence_put(fence);
2806 		fence = xe_vma_op_execute(vm, op);
2807 		if (IS_ERR(fence)) {
2808 			drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld",
2809 				 op->base.op, PTR_ERR(fence));
2810 			fence = ERR_PTR(-ENOSPC);
2811 			break;
2812 		}
2813 	}
2814 
2815 	return fence;
2816 }
2817 
2818 static void vma_add_ufence(struct xe_vma *vma, struct xe_user_fence *ufence)
2819 {
2820 	if (vma->ufence)
2821 		xe_sync_ufence_put(vma->ufence);
2822 	vma->ufence = __xe_sync_ufence_get(ufence);
2823 }
2824 
2825 static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op,
2826 			  struct xe_user_fence *ufence)
2827 {
2828 	switch (op->base.op) {
2829 	case DRM_GPUVA_OP_MAP:
2830 		vma_add_ufence(op->map.vma, ufence);
2831 		break;
2832 	case DRM_GPUVA_OP_REMAP:
2833 		if (op->remap.prev)
2834 			vma_add_ufence(op->remap.prev, ufence);
2835 		if (op->remap.next)
2836 			vma_add_ufence(op->remap.next, ufence);
2837 		break;
2838 	case DRM_GPUVA_OP_UNMAP:
2839 		break;
2840 	case DRM_GPUVA_OP_PREFETCH:
2841 		vma_add_ufence(gpuva_to_vma(op->base.prefetch.va), ufence);
2842 		break;
2843 	default:
2844 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2845 	}
2846 }
2847 
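/*
 * Finish a successfully executed set of operations: attach any user fence to
 * the VMAs it covers, schedule destruction of unmapped/remapped VMAs once
 * @fence signals, install @fence in the ioctl's sync entries, record it as
 * the wait exec queue's last fence and drop the local reference.
 */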
2848 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
2849 				   struct dma_fence *fence)
2850 {
2851 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
2852 	struct xe_user_fence *ufence;
2853 	struct xe_vma_op *op;
2854 	int i;
2855 
2856 	ufence = find_ufence_get(vops->syncs, vops->num_syncs);
2857 	list_for_each_entry(op, &vops->list, link) {
2858 		if (ufence)
2859 			op_add_ufence(vm, op, ufence);
2860 
2861 		if (op->base.op == DRM_GPUVA_OP_UNMAP)
2862 			xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
2863 		else if (op->base.op == DRM_GPUVA_OP_REMAP)
2864 			xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
2865 				       fence);
2866 	}
2867 	if (ufence)
2868 		xe_sync_ufence_put(ufence);
2869 	for (i = 0; i < vops->num_syncs; i++)
2870 		xe_sync_entry_signal(vops->syncs + i, fence);
2871 	xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
2872 	dma_fence_put(fence);
2873 }
2874 
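/*
 * Lock and prepare everything the operations need under a single drm_exec
 * transaction, then execute them. Called with the VM lock held for write; on
 * execution failure the VM is killed as a stop-gap until proper error
 * handling is in place (see the FIXME below).
 */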
2875 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2876 				     struct xe_vma_ops *vops)
2877 {
2878 	struct drm_exec exec;
2879 	struct dma_fence *fence;
2880 	int err;
2881 
2882 	lockdep_assert_held_write(&vm->lock);
2883 
2884 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
2885 		      DRM_EXEC_IGNORE_DUPLICATES, 0);
2886 	drm_exec_until_all_locked(&exec) {
2887 		err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
2888 		drm_exec_retry_on_contention(&exec);
2889 		if (err)
2890 			goto unlock;
2891 
2892 		fence = ops_execute(vm, vops);
2893 		if (IS_ERR(fence)) {
2894 			err = PTR_ERR(fence);
2895 			/* FIXME: Killing VM rather than proper error handling */
2896 			xe_vm_kill(vm, false);
2897 			goto unlock;
2898 		} else {
2899 			vm_bind_ioctl_ops_fini(vm, vops, fence);
2900 		}
2901 	}
2902 
2903 unlock:
2904 	drm_exec_fini(&exec);
2905 	return err;
2906 }
2907 
2908 #define SUPPORTED_FLAGS	\
2909 	(DRM_XE_VM_BIND_FLAG_READONLY | \
2910 	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
2911 	 DRM_XE_VM_BIND_FLAG_NULL | \
2912 	 DRM_XE_VM_BIND_FLAG_DUMPABLE)
2913 #define XE_64K_PAGE_MASK 0xffffull
2914 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
2915 
2916 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2917 				    struct drm_xe_vm_bind *args,
2918 				    struct drm_xe_vm_bind_op **bind_ops)
2919 {
2920 	int err;
2921 	int i;
2922 
2923 	if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
2924 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2925 		return -EINVAL;
2926 
2927 	if (XE_IOCTL_DBG(xe, args->extensions))
2928 		return -EINVAL;
2929 
2930 	if (args->num_binds > 1) {
2931 		u64 __user *bind_user =
2932 			u64_to_user_ptr(args->vector_of_binds);
2933 
2934 		*bind_ops = kvmalloc_array(args->num_binds,
2935 					   sizeof(struct drm_xe_vm_bind_op),
2936 					   GFP_KERNEL | __GFP_ACCOUNT);
2937 		if (!*bind_ops)
2938 			return -ENOMEM;
2939 
2940 		err = copy_from_user(*bind_ops, bind_user,
2941 				     sizeof(struct drm_xe_vm_bind_op) *
2942 				     args->num_binds);
2943 		if (XE_IOCTL_DBG(xe, err)) {
2944 			err = -EFAULT;
2945 			goto free_bind_ops;
2946 		}
2947 	} else {
2948 		*bind_ops = &args->bind;
2949 	}
2950 
2951 	for (i = 0; i < args->num_binds; ++i) {
2952 		u64 range = (*bind_ops)[i].range;
2953 		u64 addr = (*bind_ops)[i].addr;
2954 		u32 op = (*bind_ops)[i].op;
2955 		u32 flags = (*bind_ops)[i].flags;
2956 		u32 obj = (*bind_ops)[i].obj;
2957 		u64 obj_offset = (*bind_ops)[i].obj_offset;
2958 		u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
2959 		bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2960 		u16 pat_index = (*bind_ops)[i].pat_index;
2961 		u16 coh_mode;
2962 
2963 		if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
2964 			err = -EINVAL;
2965 			goto free_bind_ops;
2966 		}
2967 
2968 		pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
2969 		(*bind_ops)[i].pat_index = pat_index;
2970 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2971 		if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
2972 			err = -EINVAL;
2973 			goto free_bind_ops;
2974 		}
2975 
2976 		if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
2977 			err = -EINVAL;
2978 			goto free_bind_ops;
2979 		}
2980 
2981 		if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
2982 		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
2983 		    XE_IOCTL_DBG(xe, obj && is_null) ||
2984 		    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
2985 		    XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
2986 				 is_null) ||
2987 		    XE_IOCTL_DBG(xe, !obj &&
2988 				 op == DRM_XE_VM_BIND_OP_MAP &&
2989 				 !is_null) ||
2990 		    XE_IOCTL_DBG(xe, !obj &&
2991 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2992 		    XE_IOCTL_DBG(xe, addr &&
2993 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2994 		    XE_IOCTL_DBG(xe, range &&
2995 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2996 		    XE_IOCTL_DBG(xe, obj &&
2997 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2998 		    XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2999 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
3000 		    XE_IOCTL_DBG(xe, obj &&
3001 				 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
3002 		    XE_IOCTL_DBG(xe, prefetch_region &&
3003 				 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
3004 		    XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
3005 				       xe->info.mem_region_mask)) ||
3006 		    XE_IOCTL_DBG(xe, obj &&
3007 				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
3008 			err = -EINVAL;
3009 			goto free_bind_ops;
3010 		}
3011 
3012 		if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
3013 		    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
3014 		    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
3015 		    XE_IOCTL_DBG(xe, !range &&
3016 				 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
3017 			err = -EINVAL;
3018 			goto free_bind_ops;
3019 		}
3020 	}
3021 
3022 	return 0;
3023 
3024 free_bind_ops:
3025 	if (args->num_binds > 1)
3026 		kvfree(*bind_ops);
3027 	return err;
3028 }
3029 
3030 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
3031 				       struct xe_exec_queue *q,
3032 				       struct xe_sync_entry *syncs,
3033 				       int num_syncs)
3034 {
3035 	struct dma_fence *fence;
3036 	int i, err = 0;
3037 
3038 	fence = xe_sync_in_fence_get(syncs, num_syncs,
3039 				     to_wait_exec_queue(vm, q), vm);
3040 	if (IS_ERR(fence))
3041 		return PTR_ERR(fence);
3042 
3043 	for (i = 0; i < num_syncs; i++)
3044 		xe_sync_entry_signal(&syncs[i], fence);
3045 
3046 	xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
3047 				     fence);
3048 	dma_fence_put(fence);
3049 
3050 	return err;
3051 }
3052 
3053 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
3054 			    struct xe_exec_queue *q,
3055 			    struct xe_sync_entry *syncs, u32 num_syncs)
3056 {
3057 	memset(vops, 0, sizeof(*vops));
3058 	INIT_LIST_HEAD(&vops->list);
3059 	vops->vm = vm;
3060 	vops->q = q;
3061 	vops->syncs = syncs;
3062 	vops->num_syncs = num_syncs;
3063 }
3064 
3065 static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
3066 					u64 addr, u64 range, u64 obj_offset,
3067 					u16 pat_index)
3068 {
3069 	u16 coh_mode;
3070 
3071 	if (XE_IOCTL_DBG(xe, range > bo->size) ||
3072 	    XE_IOCTL_DBG(xe, obj_offset >
3073 			 bo->size - range)) {
3074 		return -EINVAL;
3075 	}
3076 
3077 	if (bo->flags & XE_BO_FLAG_INTERNAL_64K) {
3078 		if (XE_IOCTL_DBG(xe, obj_offset &
3079 				 XE_64K_PAGE_MASK) ||
3080 		    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3081 		    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3082 			return -EINVAL;
3083 		}
3084 	}
3085 
3086 	coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
3087 	if (bo->cpu_caching) {
3088 		if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
3089 				 bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
3090 			return -EINVAL;
3091 		}
3092 	} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
3093 		/*
3094 		 * Imported dma-buf from a different device should
3095 		 * require 1way or 2way coherency since we don't know
3096 		 * how it was mapped on the CPU. Just assume it is
3097 		 * potentially cached on the CPU side.
3098 		 */
3099 		return -EINVAL;
3100 	}
3101 
3102 	return 0;
3103 }
3104 
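/*
 * Entry point for the DRM_XE_VM_BIND ioctl: validate the arguments, look up
 * the VM, exec queue, BOs and syncs, build and parse the per-bind GPUVA
 * operations and finally execute them, unwinding on failure.
 *
 * A rough userspace sketch for a single MAP, assuming the uapi field names
 * used below match drm/xe_drm.h:
 *
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm_id,
 *		.num_binds = 1,
 *		.bind = {
 *			.obj = bo_handle,
 *			.obj_offset = 0,
 *			.addr = gpu_addr,
 *			.range = size,
 *			.op = DRM_XE_VM_BIND_OP_MAP,
 *			.pat_index = pat_index,
 *		},
 *	};
 *	drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 *
 * addr, range and obj_offset must be page aligned. Multiple binds are passed
 * as an array via vector_of_binds, and syncs plus exec_queue_id control how
 * completion is signalled.
 */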
3105 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3106 {
3107 	struct xe_device *xe = to_xe_device(dev);
3108 	struct xe_file *xef = to_xe_file(file);
3109 	struct drm_xe_vm_bind *args = data;
3110 	struct drm_xe_sync __user *syncs_user;
3111 	struct xe_bo **bos = NULL;
3112 	struct drm_gpuva_ops **ops = NULL;
3113 	struct xe_vm *vm;
3114 	struct xe_exec_queue *q = NULL;
3115 	u32 num_syncs, num_ufence = 0;
3116 	struct xe_sync_entry *syncs = NULL;
3117 	struct drm_xe_vm_bind_op *bind_ops;
3118 	struct xe_vma_ops vops;
3119 	int err;
3120 	int i;
3121 
3122 	err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
3123 	if (err)
3124 		return err;
3125 
3126 	if (args->exec_queue_id) {
3127 		q = xe_exec_queue_lookup(xef, args->exec_queue_id);
3128 		if (XE_IOCTL_DBG(xe, !q)) {
3129 			err = -ENOENT;
3130 			goto free_objs;
3131 		}
3132 
3133 		if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
3134 			err = -EINVAL;
3135 			goto put_exec_queue;
3136 		}
3137 	}
3138 
3139 	vm = xe_vm_lookup(xef, args->vm_id);
3140 	if (XE_IOCTL_DBG(xe, !vm)) {
3141 		err = -EINVAL;
3142 		goto put_exec_queue;
3143 	}
3144 
3145 	err = down_write_killable(&vm->lock);
3146 	if (err)
3147 		goto put_vm;
3148 
3149 	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
3150 		err = -ENOENT;
3151 		goto release_vm_lock;
3152 	}
3153 
3154 	for (i = 0; i < args->num_binds; ++i) {
3155 		u64 range = bind_ops[i].range;
3156 		u64 addr = bind_ops[i].addr;
3157 
3158 		if (XE_IOCTL_DBG(xe, range > vm->size) ||
3159 		    XE_IOCTL_DBG(xe, addr > vm->size - range)) {
3160 			err = -EINVAL;
3161 			goto release_vm_lock;
3162 		}
3163 	}
3164 
3165 	if (args->num_binds) {
3166 		bos = kvcalloc(args->num_binds, sizeof(*bos),
3167 			       GFP_KERNEL | __GFP_ACCOUNT);
3168 		if (!bos) {
3169 			err = -ENOMEM;
3170 			goto release_vm_lock;
3171 		}
3172 
3173 		ops = kvcalloc(args->num_binds, sizeof(*ops),
3174 			       GFP_KERNEL | __GFP_ACCOUNT);
3175 		if (!ops) {
3176 			err = -ENOMEM;
3177 			goto release_vm_lock;
3178 		}
3179 	}
3180 
3181 	for (i = 0; i < args->num_binds; ++i) {
3182 		struct drm_gem_object *gem_obj;
3183 		u64 range = bind_ops[i].range;
3184 		u64 addr = bind_ops[i].addr;
3185 		u32 obj = bind_ops[i].obj;
3186 		u64 obj_offset = bind_ops[i].obj_offset;
3187 		u16 pat_index = bind_ops[i].pat_index;
3188 
3189 		if (!obj)
3190 			continue;
3191 
3192 		gem_obj = drm_gem_object_lookup(file, obj);
3193 		if (XE_IOCTL_DBG(xe, !gem_obj)) {
3194 			err = -ENOENT;
3195 			goto put_obj;
3196 		}
3197 		bos[i] = gem_to_xe_bo(gem_obj);
3198 
3199 		err = xe_vm_bind_ioctl_validate_bo(xe, bos[i], addr, range,
3200 						   obj_offset, pat_index);
3201 		if (err)
3202 			goto put_obj;
3203 	}
3204 
3205 	if (args->num_syncs) {
3206 		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3207 		if (!syncs) {
3208 			err = -ENOMEM;
3209 			goto put_obj;
3210 		}
3211 	}
3212 
3213 	syncs_user = u64_to_user_ptr(args->syncs);
3214 	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3215 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3216 					  &syncs_user[num_syncs],
3217 					  (xe_vm_in_lr_mode(vm) ?
3218 					   SYNC_PARSE_FLAG_LR_MODE : 0) |
3219 					  (!args->num_binds ?
3220 					   SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
3221 		if (err)
3222 			goto free_syncs;
3223 
3224 		if (xe_sync_is_ufence(&syncs[num_syncs]))
3225 			num_ufence++;
3226 	}
3227 
3228 	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
3229 		err = -EINVAL;
3230 		goto free_syncs;
3231 	}
3232 
3233 	if (!args->num_binds) {
3234 		err = -ENODATA;
3235 		goto free_syncs;
3236 	}
3237 
3238 	xe_vma_ops_init(&vops, vm, q, syncs, num_syncs);
3239 	for (i = 0; i < args->num_binds; ++i) {
3240 		u64 range = bind_ops[i].range;
3241 		u64 addr = bind_ops[i].addr;
3242 		u32 op = bind_ops[i].op;
3243 		u32 flags = bind_ops[i].flags;
3244 		u64 obj_offset = bind_ops[i].obj_offset;
3245 		u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
3246 		u16 pat_index = bind_ops[i].pat_index;
3247 
3248 		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3249 						  addr, range, op, flags,
3250 						  prefetch_region, pat_index);
3251 		if (IS_ERR(ops[i])) {
3252 			err = PTR_ERR(ops[i]);
3253 			ops[i] = NULL;
3254 			goto unwind_ops;
3255 		}
3256 
3257 		err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3258 					      &vops, i == args->num_binds - 1);
3259 		if (err)
3260 			goto unwind_ops;
3261 	}
3262 
3263 	/* Nothing to do */
3264 	if (list_empty(&vops.list)) {
3265 		err = -ENODATA;
3266 		goto unwind_ops;
3267 	}
3268 
3269 	err = vm_bind_ioctl_ops_execute(vm, &vops);
3270 
3271 unwind_ops:
3272 	if (err && err != -ENODATA)
3273 		vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3274 	for (i = args->num_binds - 1; i >= 0; --i)
3275 		if (ops[i])
3276 			drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
3277 free_syncs:
3278 	if (err == -ENODATA)
3279 		err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3280 	while (num_syncs--)
3281 		xe_sync_entry_cleanup(&syncs[num_syncs]);
3282 
3283 	kfree(syncs);
3284 put_obj:
3285 	for (i = 0; i < args->num_binds; ++i)
3286 		xe_bo_put(bos[i]);
3287 release_vm_lock:
3288 	up_write(&vm->lock);
3289 put_vm:
3290 	xe_vm_put(vm);
3291 put_exec_queue:
3292 	if (q)
3293 		xe_exec_queue_put(q);
3294 free_objs:
3295 	kvfree(bos);
3296 	kvfree(ops);
3297 	if (args->num_binds > 1)
3298 		kvfree(bind_ops);
3299 	return err;
3300 }
3301 
3302 /**
3303  * xe_vm_lock() - Lock the vm's dma_resv object
3304  * @vm: The struct xe_vm whose lock is to be locked
3305  * @intr: Whether any wait for a contended lock should be interruptible
3306  *
3307  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3308  * contended lock was interrupted. If @intr is false, the function
3309  * always returns 0.
3310  */
3311 int xe_vm_lock(struct xe_vm *vm, bool intr)
3312 {
3313 	if (intr)
3314 		return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3315 
3316 	return dma_resv_lock(xe_vm_resv(vm), NULL);
3317 }
3318 
3319 /**
3320  * xe_vm_unlock() - Unlock the vm's dma_resv object
3321  * @vm: The struct xe_vm whose lock is to be released.
3322  *
3323  * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
3324  */
3325 void xe_vm_unlock(struct xe_vm *vm)
3326 {
3327 	dma_resv_unlock(xe_vm_resv(vm));
3328 }
3329 
3330 /**
3331  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3332  * @vma: VMA to invalidate
3333  *
3334  * Walks the list of page-table leaves and zeroes the entries owned by this
3335  * VMA, invalidates the TLBs and blocks until the TLB invalidation has
3336  * completed.
3337  *
3338  * Return: 0 on success, negative error code otherwise.
3339  */
3340 int xe_vm_invalidate_vma(struct xe_vma *vma)
3341 {
3342 	struct xe_device *xe = xe_vma_vm(vma)->xe;
3343 	struct xe_tile *tile;
3344 	struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE];
3345 	u32 tile_needs_invalidate = 0;
3346 	u8 id;
3347 	int ret = 0;
3348 
3349 	xe_assert(xe, !xe_vma_is_null(vma));
3350 	trace_xe_vma_invalidate(vma);
3351 
3352 	vm_dbg(&xe_vma_vm(vma)->xe->drm,
3353 	       "INVALIDATE: addr=0x%016llx, range=0x%016llx",
3354 		xe_vma_start(vma), xe_vma_size(vma));
3355 
3356 	/* Check that we don't race with page-table updates */
3357 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3358 		if (xe_vma_is_userptr(vma)) {
3359 			WARN_ON_ONCE(!mmu_interval_check_retry
3360 				     (&to_userptr_vma(vma)->userptr.notifier,
3361 				      to_userptr_vma(vma)->userptr.notifier_seq));
3362 			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3363 							     DMA_RESV_USAGE_BOOKKEEP));
3364 
3365 		} else {
3366 			xe_bo_assert_held(xe_vma_bo(vma));
3367 		}
3368 	}
3369 
3370 	for_each_tile(tile, xe, id) {
3371 		if (xe_pt_zap_ptes(tile, vma)) {
3372 			xe_device_wmb(xe);
3373 			xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
3374 							  &fence[id], true);
3375 
3376 			/*
3377 			 * FIXME: We potentially need to invalidate multiple
3378 			 * GTs within the tile
3379 			 */
3380 			ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
3381 							 &fence[id], vma);
3382 			if (ret < 0) {
3383 				xe_gt_tlb_invalidation_fence_fini(&fence[id]);
3384 				goto wait;
3385 			}
3386 
3387 			tile_needs_invalidate |= BIT(id);
3388 		}
3389 	}
3390 
3391 wait:
3392 	for_each_tile(tile, xe, id)
3393 		if (tile_needs_invalidate & BIT(id))
3394 			xe_gt_tlb_invalidation_fence_wait(&fence[id]);
3395 
3396 	vma->tile_invalidated = vma->tile_mask;
3397 
3398 	return ret;
3399 }
3400 
3401 struct xe_vm_snapshot {
3402 	unsigned long num_snaps;
3403 	struct {
3404 		u64 ofs, bo_ofs;
3405 		unsigned long len;
3406 		struct xe_bo *bo;
3407 		void *data;
3408 		struct mm_struct *mm;
3409 	} snap[];
3410 };
3411 
3412 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
3413 {
3414 	unsigned long num_snaps = 0, i;
3415 	struct xe_vm_snapshot *snap = NULL;
3416 	struct drm_gpuva *gpuva;
3417 
3418 	if (!vm)
3419 		return NULL;
3420 
3421 	mutex_lock(&vm->snap_mutex);
3422 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3423 		if (gpuva->flags & XE_VMA_DUMPABLE)
3424 			num_snaps++;
3425 	}
3426 
3427 	if (num_snaps)
3428 		snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
3429 	if (!snap) {
3430 		snap = num_snaps ? ERR_PTR(-ENOMEM) : ERR_PTR(-ENODEV);
3431 		goto out_unlock;
3432 	}
3433 
3434 	snap->num_snaps = num_snaps;
3435 	i = 0;
3436 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3437 		struct xe_vma *vma = gpuva_to_vma(gpuva);
3438 		struct xe_bo *bo = vma->gpuva.gem.obj ?
3439 			gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
3440 
3441 		if (!(gpuva->flags & XE_VMA_DUMPABLE))
3442 			continue;
3443 
3444 		snap->snap[i].ofs = xe_vma_start(vma);
3445 		snap->snap[i].len = xe_vma_size(vma);
3446 		if (bo) {
3447 			snap->snap[i].bo = xe_bo_get(bo);
3448 			snap->snap[i].bo_ofs = xe_vma_bo_offset(vma);
3449 		} else if (xe_vma_is_userptr(vma)) {
3450 			struct mm_struct *mm =
3451 				to_userptr_vma(vma)->userptr.notifier.mm;
3452 
3453 			if (mmget_not_zero(mm))
3454 				snap->snap[i].mm = mm;
3455 			else
3456 				snap->snap[i].data = ERR_PTR(-EFAULT);
3457 
3458 			snap->snap[i].bo_ofs = xe_vma_userptr(vma);
3459 		} else {
3460 			snap->snap[i].data = ERR_PTR(-ENOENT);
3461 		}
3462 		i++;
3463 	}
3464 
3465 out_unlock:
3466 	mutex_unlock(&vm->snap_mutex);
3467 	return snap;
3468 }
3469 
3470 void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
3471 {
3472 	if (IS_ERR_OR_NULL(snap))
3473 		return;
3474 
3475 	for (int i = 0; i < snap->num_snaps; i++) {
3476 		struct xe_bo *bo = snap->snap[i].bo;
3477 		struct iosys_map src;
3478 		int err;
3479 
3480 		if (IS_ERR(snap->snap[i].data))
3481 			continue;
3482 
3483 		snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER);
3484 		if (!snap->snap[i].data) {
3485 			snap->snap[i].data = ERR_PTR(-ENOMEM);
3486 			goto cleanup_bo;
3487 		}
3488 
3489 		if (bo) {
3490 			xe_bo_lock(bo, false);
3491 			err = ttm_bo_vmap(&bo->ttm, &src);
3492 			if (!err) {
3493 				xe_map_memcpy_from(xe_bo_device(bo),
3494 						   snap->snap[i].data,
3495 						   &src, snap->snap[i].bo_ofs,
3496 						   snap->snap[i].len);
3497 				ttm_bo_vunmap(&bo->ttm, &src);
3498 			}
3499 			xe_bo_unlock(bo);
3500 		} else {
3501 			void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
3502 
3503 			kthread_use_mm(snap->snap[i].mm);
3504 			if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len))
3505 				err = 0;
3506 			else
3507 				err = -EFAULT;
3508 			kthread_unuse_mm(snap->snap[i].mm);
3509 
3510 			mmput(snap->snap[i].mm);
3511 			snap->snap[i].mm = NULL;
3512 		}
3513 
3514 		if (err) {
3515 			kvfree(snap->snap[i].data);
3516 			snap->snap[i].data = ERR_PTR(err);
3517 		}
3518 
3519 cleanup_bo:
3520 		xe_bo_put(bo);
3521 		snap->snap[i].bo = NULL;
3522 	}
3523 }
3524 
3525 void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
3526 {
3527 	unsigned long i, j;
3528 
3529 	if (IS_ERR_OR_NULL(snap)) {
3530 		drm_printf(p, "[0].error: %li\n", PTR_ERR(snap));
3531 		return;
3532 	}
3533 
3534 	for (i = 0; i < snap->num_snaps; i++) {
3535 		drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
3536 
3537 		if (IS_ERR(snap->snap[i].data)) {
3538 			drm_printf(p, "[%llx].error: %li\n", snap->snap[i].ofs,
3539 				   PTR_ERR(snap->snap[i].data));
3540 			continue;
3541 		}
3542 
3543 		drm_printf(p, "[%llx].data: ", snap->snap[i].ofs);
3544 
3545 		for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
3546 			u32 *val = snap->snap[i].data + j;
3547 			char dumped[ASCII85_BUFSZ];
3548 
3549 			drm_puts(p, ascii85_encode(*val, dumped));
3550 		}
3551 
3552 		drm_puts(p, "\n");
3553 	}
3554 }
3555 
3556 void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
3557 {
3558 	unsigned long i;
3559 
3560 	if (IS_ERR_OR_NULL(snap))
3561 		return;
3562 
3563 	for (i = 0; i < snap->num_snaps; i++) {
3564 		if (!IS_ERR(snap->snap[i].data))
3565 			kvfree(snap->snap[i].data);
3566 		xe_bo_put(snap->snap[i].bo);
3567 		if (snap->snap[i].mm)
3568 			mmput(snap->snap[i].mm);
3569 	}
3570 	kvfree(snap);
3571 }
3572