xref: /linux/drivers/gpu/drm/xe/xe_vm.c (revision f6e0a4984c2e7244689ea87b62b433bed9d07e94)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_vm.h"
7 
8 #include <linux/dma-fence-array.h>
9 #include <linux/nospec.h>
10 
11 #include <drm/drm_exec.h>
12 #include <drm/drm_print.h>
13 #include <drm/ttm/ttm_execbuf_util.h>
14 #include <drm/ttm/ttm_tt.h>
15 #include <drm/xe_drm.h>
16 #include <linux/delay.h>
17 #include <linux/kthread.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 
21 #include "xe_assert.h"
22 #include "xe_bo.h"
23 #include "xe_device.h"
24 #include "xe_drm_client.h"
25 #include "xe_exec_queue.h"
26 #include "xe_gt.h"
27 #include "xe_gt_pagefault.h"
28 #include "xe_gt_tlb_invalidation.h"
29 #include "xe_migrate.h"
30 #include "xe_pat.h"
31 #include "xe_pm.h"
32 #include "xe_preempt_fence.h"
33 #include "xe_pt.h"
34 #include "xe_res_cursor.h"
35 #include "xe_sync.h"
36 #include "xe_trace.h"
37 #include "generated/xe_wa_oob.h"
38 #include "xe_wa.h"
39 
40 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
41 {
42 	return vm->gpuvm.r_obj;
43 }
44 
45 /**
46  * xe_vma_userptr_check_repin() - Advisory check for repin needed
47  * @uvma: The userptr vma
48  *
49  * Check if the userptr vma has been invalidated since last successful
50  * repin. The check is advisory only and the function can be called
51  * without the vm->userptr.notifier_lock held. There is no guarantee that the
52  * vma userptr will remain valid after a lockless check, so typically
53  * the call needs to be followed by a proper check under the notifier_lock.
54  *
55  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
56  */
57 int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
58 {
59 	return mmu_interval_check_retry(&uvma->userptr.notifier,
60 					uvma->userptr.notifier_seq) ?
61 		-EAGAIN : 0;
62 }
63 
64 int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
65 {
66 	struct xe_userptr *userptr = &uvma->userptr;
67 	struct xe_vma *vma = &uvma->vma;
68 	struct xe_vm *vm = xe_vma_vm(vma);
69 	struct xe_device *xe = vm->xe;
70 	const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
71 	struct page **pages;
72 	bool in_kthread = !current->mm;
73 	unsigned long notifier_seq;
74 	int pinned, ret, i;
75 	bool read_only = xe_vma_read_only(vma);
76 
77 	lockdep_assert_held(&vm->lock);
78 	xe_assert(xe, xe_vma_is_userptr(vma));
79 retry:
80 	if (vma->gpuva.flags & XE_VMA_DESTROYED)
81 		return 0;
82 
83 	notifier_seq = mmu_interval_read_begin(&userptr->notifier);
84 	if (notifier_seq == userptr->notifier_seq)
85 		return 0;
86 
87 	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
88 	if (!pages)
89 		return -ENOMEM;
90 
91 	if (userptr->sg) {
92 		dma_unmap_sgtable(xe->drm.dev,
93 				  userptr->sg,
94 				  read_only ? DMA_TO_DEVICE :
95 				  DMA_BIDIRECTIONAL, 0);
96 		sg_free_table(userptr->sg);
97 		userptr->sg = NULL;
98 	}
99 
100 	pinned = ret = 0;
101 	if (in_kthread) {
102 		if (!mmget_not_zero(userptr->notifier.mm)) {
103 			ret = -EFAULT;
104 			goto mm_closed;
105 		}
106 		kthread_use_mm(userptr->notifier.mm);
107 	}
108 
109 	while (pinned < num_pages) {
110 		ret = get_user_pages_fast(xe_vma_userptr(vma) +
111 					  pinned * PAGE_SIZE,
112 					  num_pages - pinned,
113 					  read_only ? 0 : FOLL_WRITE,
114 					  &pages[pinned]);
115 		if (ret < 0)
116 			break;
117 
118 		pinned += ret;
119 		ret = 0;
120 	}
121 
122 	if (in_kthread) {
123 		kthread_unuse_mm(userptr->notifier.mm);
124 		mmput(userptr->notifier.mm);
125 	}
126 mm_closed:
127 	if (ret)
128 		goto out;
129 
130 	ret = sg_alloc_table_from_pages_segment(&userptr->sgt, pages,
131 						pinned, 0,
132 						(u64)pinned << PAGE_SHIFT,
133 						xe_sg_segment_size(xe->drm.dev),
134 						GFP_KERNEL);
135 	if (ret) {
136 		userptr->sg = NULL;
137 		goto out;
138 	}
139 	userptr->sg = &userptr->sgt;
140 
141 	ret = dma_map_sgtable(xe->drm.dev, userptr->sg,
142 			      read_only ? DMA_TO_DEVICE :
143 			      DMA_BIDIRECTIONAL,
144 			      DMA_ATTR_SKIP_CPU_SYNC |
145 			      DMA_ATTR_NO_KERNEL_MAPPING);
146 	if (ret) {
147 		sg_free_table(userptr->sg);
148 		userptr->sg = NULL;
149 		goto out;
150 	}
151 
152 	for (i = 0; i < pinned; ++i) {
153 		if (!read_only) {
154 			lock_page(pages[i]);
155 			set_page_dirty(pages[i]);
156 			unlock_page(pages[i]);
157 		}
158 
159 		mark_page_accessed(pages[i]);
160 	}
161 
162 out:
163 	release_pages(pages, pinned);
164 	kvfree(pages);
165 
166 	if (!(ret < 0)) {
167 		userptr->notifier_seq = notifier_seq;
168 		if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
169 			goto retry;
170 	}
171 
172 	return ret < 0 ? ret : 0;
173 }
174 
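/*
 * Check whether any exec queue on the VM either has no preempt fence or has
 * one whose software signaling has been enabled, i.e. a preemption is in
 * flight.
 */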
175 static bool preempt_fences_waiting(struct xe_vm *vm)
176 {
177 	struct xe_exec_queue *q;
178 
179 	lockdep_assert_held(&vm->lock);
180 	xe_vm_assert_held(vm);
181 
182 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
183 		if (!q->compute.pfence ||
184 		    (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
185 						   &q->compute.pfence->flags))) {
186 			return true;
187 		}
188 	}
189 
190 	return false;
191 }
192 
193 static void free_preempt_fences(struct list_head *list)
194 {
195 	struct list_head *link, *next;
196 
197 	list_for_each_safe(link, next, list)
198 		xe_preempt_fence_free(to_preempt_fence_from_link(link));
199 }
200 
201 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
202 				unsigned int *count)
203 {
204 	lockdep_assert_held(&vm->lock);
205 	xe_vm_assert_held(vm);
206 
207 	if (*count >= vm->preempt.num_exec_queues)
208 		return 0;
209 
210 	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
211 		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
212 
213 		if (IS_ERR(pfence))
214 			return PTR_ERR(pfence);
215 
216 		list_move_tail(xe_preempt_fence_link(pfence), list);
217 	}
218 
219 	return 0;
220 }
221 
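/*
 * Wait for all currently installed preempt fences on the VM to signal and
 * drop the references to them. Returns -ETIME if a wait fails.
 */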
222 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
223 {
224 	struct xe_exec_queue *q;
225 
226 	xe_vm_assert_held(vm);
227 
228 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
229 		if (q->compute.pfence) {
230 			long timeout = dma_fence_wait(q->compute.pfence, false);
231 
232 			if (timeout < 0)
233 				return -ETIME;
234 			dma_fence_put(q->compute.pfence);
235 			q->compute.pfence = NULL;
236 		}
237 	}
238 
239 	return 0;
240 }
241 
242 static bool xe_vm_is_idle(struct xe_vm *vm)
243 {
244 	struct xe_exec_queue *q;
245 
246 	xe_vm_assert_held(vm);
247 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
248 		if (!xe_exec_queue_is_idle(q))
249 			return false;
250 	}
251 
252 	return true;
253 }
254 
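/*
 * Arm one pre-allocated preempt fence per exec queue, consuming fences from
 * the head of @list and replacing each queue's previous preempt fence.
 */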
255 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
256 {
257 	struct list_head *link;
258 	struct xe_exec_queue *q;
259 
260 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
261 		struct dma_fence *fence;
262 
263 		link = list->next;
264 		xe_assert(vm->xe, link != list);
265 
266 		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
267 					     q, q->compute.context,
268 					     ++q->compute.seqno);
269 		dma_fence_put(q->compute.pfence);
270 		q->compute.pfence = fence;
271 	}
272 }
273 
274 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
275 {
276 	struct xe_exec_queue *q;
277 	int err;
278 
279 	if (!vm->preempt.num_exec_queues)
280 		return 0;
281 
282 	err = xe_bo_lock(bo, true);
283 	if (err)
284 		return err;
285 
286 	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
287 	if (err)
288 		goto out_unlock;
289 
290 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
291 		if (q->compute.pfence) {
292 			dma_resv_add_fence(bo->ttm.base.resv,
293 					   q->compute.pfence,
294 					   DMA_RESV_USAGE_BOOKKEEP);
295 		}
296 
297 out_unlock:
298 	xe_bo_unlock(bo);
299 	return err;
300 }
301 
302 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
303 						struct drm_exec *exec)
304 {
305 	struct xe_exec_queue *q;
306 
307 	lockdep_assert_held(&vm->lock);
308 	xe_vm_assert_held(vm);
309 
310 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
311 		q->ops->resume(q);
312 
313 		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->compute.pfence,
314 					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
315 	}
316 }
317 
318 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
319 {
320 	struct drm_gpuvm_exec vm_exec = {
321 		.vm = &vm->gpuvm,
322 		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
323 		.num_fences = 1,
324 	};
325 	struct drm_exec *exec = &vm_exec.exec;
326 	struct dma_fence *pfence;
327 	int err;
328 	bool wait;
329 
330 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
331 
332 	down_write(&vm->lock);
333 	err = drm_gpuvm_exec_lock(&vm_exec);
334 	if (err)
335 		goto out_up_write;
336 
337 	pfence = xe_preempt_fence_create(q, q->compute.context,
338 					 ++q->compute.seqno);
339 	if (!pfence) {
340 		err = -ENOMEM;
341 		goto out_fini;
342 	}
343 
344 	list_add(&q->compute.link, &vm->preempt.exec_queues);
345 	++vm->preempt.num_exec_queues;
346 	q->compute.pfence = pfence;
347 
348 	down_read(&vm->userptr.notifier_lock);
349 
350 	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
351 				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
352 
353 	/*
354 	 * Check to see if a preemption on the VM or a userptr invalidation is
355 	 * in flight; if so, trigger this preempt fence to sync state with the
356 	 * other preempt fences on the VM.
357 	 */
358 	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
359 	if (wait)
360 		dma_fence_enable_sw_signaling(pfence);
361 
362 	up_read(&vm->userptr.notifier_lock);
363 
364 out_fini:
365 	drm_exec_fini(exec);
366 out_up_write:
367 	up_write(&vm->lock);
368 
369 	return err;
370 }
371 
372 /**
373  * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
374  * @vm: The VM.
375  * @q: The exec_queue
376  */
377 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
378 {
379 	if (!xe_vm_in_preempt_fence_mode(vm))
380 		return;
381 
382 	down_write(&vm->lock);
383 	list_del(&q->compute.link);
384 	--vm->preempt.num_exec_queues;
385 	if (q->compute.pfence) {
386 		dma_fence_enable_sw_signaling(q->compute.pfence);
387 		dma_fence_put(q->compute.pfence);
388 		q->compute.pfence = NULL;
389 	}
390 	up_write(&vm->lock);
391 }
392 
393 /**
394  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
395  * that need repinning.
396  * @vm: The VM.
397  *
398  * This function checks whether the VM has userptrs that need repinning,
399  * and provides a release-type barrier on the userptr.notifier_lock after
400  * checking.
401  *
402  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
403  */
404 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
405 {
406 	lockdep_assert_held_read(&vm->userptr.notifier_lock);
407 
408 	return (list_empty(&vm->userptr.repin_list) &&
409 		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
410 }
411 
412 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
413 
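/*
 * Ban the VM and kill all exec queues attached to it in preempt-fence mode.
 * Called with vm->lock held; takes and drops the vm dma-resv lock internally.
 */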
414 static void xe_vm_kill(struct xe_vm *vm)
415 {
416 	struct xe_exec_queue *q;
417 
418 	lockdep_assert_held(&vm->lock);
419 
420 	xe_vm_lock(vm, false);
421 	vm->flags |= XE_VM_FLAG_BANNED;
422 	trace_xe_vm_kill(vm);
423 
424 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
425 		q->ops->kill(q);
426 	xe_vm_unlock(vm);
427 
428 	/* TODO: Inform user the VM is banned */
429 }
430 
431 /**
432  * xe_vm_validate_should_retry() - Whether to retry after a validate error.
433  * @exec: The drm_exec object used for locking before validation.
434  * @err: The error returned from ttm_bo_validate().
435  * @end: A ktime_t cookie that should be set to 0 before first use and
436  * that should be reused on subsequent calls.
437  *
438  * With multiple active VMs, under memory pressure, it is possible that
439  * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
440  * Until ttm properly handles locking in such scenarios, the best thing the
441  * driver can do is retry with a timeout. Check if that is necessary, and
442  * if so, unlock the drm_exec's objects while keeping the ticket to prepare
443  * for a rerun.
444  *
445  * Return: true if a retry after drm_exec_init() is recommended;
446  * false otherwise.
447  */
448 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
449 {
450 	ktime_t cur;
451 
452 	if (err != -ENOMEM)
453 		return false;
454 
455 	cur = ktime_get();
456 	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
457 	if (!ktime_before(cur, *end))
458 		return false;
459 
460 	msleep(20);
461 	return true;
462 }
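
/*
 * A sketch of the typical caller pattern (the rebind worker below and the
 * exec path follow this shape): finish the drm_exec transaction first, then
 * decide whether to restart it.
 *
 *	ktime_t end = 0;
 * retry:
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	... lock objects and validate ...
 *	drm_exec_fini(&exec);
 *	if (err && xe_vm_validate_should_retry(&exec, err, &end))
 *		goto retry;
 */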
463 
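/*
 * GPUVM validation callback: move all VMAs of an evicted BO to the VM's
 * rebind list and revalidate the BO, clearing its evicted state on success.
 */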
464 static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
465 {
466 	struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
467 	struct drm_gpuva *gpuva;
468 	int ret;
469 
470 	lockdep_assert_held(&vm->lock);
471 	drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
472 		list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
473 			       &vm->rebind_list);
474 
475 	ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
476 	if (ret)
477 		return ret;
478 
479 	vm_bo->evicted = false;
480 	return 0;
481 }
482 
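/*
 * Lock and prepare the VM's reservation objects for the rebind worker.
 * Sets *done and returns early if the VM is idle or if no preempt fences are
 * waiting; otherwise waits for the existing preempt fences to signal and
 * validates all evicted BOs.
 */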
483 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
484 				 bool *done)
485 {
486 	int err;
487 
488 	/*
489 	 * 1 fence for each preempt fence plus a fence for each tile from a
490 	 * possible rebind
491 	 */
492 	err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, vm->preempt.num_exec_queues +
493 				   vm->xe->info.tile_count);
494 	if (err)
495 		return err;
496 
497 	if (xe_vm_is_idle(vm)) {
498 		vm->preempt.rebind_deactivated = true;
499 		*done = true;
500 		return 0;
501 	}
502 
503 	if (!preempt_fences_waiting(vm)) {
504 		*done = true;
505 		return 0;
506 	}
507 
508 	err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, vm->preempt.num_exec_queues);
509 	if (err)
510 		return err;
511 
512 	err = wait_for_existing_preempt_fences(vm);
513 	if (err)
514 		return err;
515 
516 	return drm_gpuvm_validate(&vm->gpuvm, exec);
517 }
518 
519 static void preempt_rebind_work_func(struct work_struct *w)
520 {
521 	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
522 	struct drm_exec exec;
523 	struct dma_fence *rebind_fence;
524 	unsigned int fence_count = 0;
525 	LIST_HEAD(preempt_fences);
526 	ktime_t end = 0;
527 	int err = 0;
528 	long wait;
529 	int __maybe_unused tries = 0;
530 
531 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
532 	trace_xe_vm_rebind_worker_enter(vm);
533 
534 	down_write(&vm->lock);
535 
536 	if (xe_vm_is_closed_or_banned(vm)) {
537 		up_write(&vm->lock);
538 		trace_xe_vm_rebind_worker_exit(vm);
539 		return;
540 	}
541 
542 retry:
543 	if (xe_vm_userptr_check_repin(vm)) {
544 		err = xe_vm_userptr_pin(vm);
545 		if (err)
546 			goto out_unlock_outer;
547 	}
548 
549 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
550 
551 	drm_exec_until_all_locked(&exec) {
552 		bool done = false;
553 
554 		err = xe_preempt_work_begin(&exec, vm, &done);
555 		drm_exec_retry_on_contention(&exec);
556 		if (err || done) {
557 			drm_exec_fini(&exec);
558 			if (err && xe_vm_validate_should_retry(&exec, err, &end))
559 				err = -EAGAIN;
560 
561 			goto out_unlock_outer;
562 		}
563 	}
564 
565 	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
566 	if (err)
567 		goto out_unlock;
568 
569 	rebind_fence = xe_vm_rebind(vm, true);
570 	if (IS_ERR(rebind_fence)) {
571 		err = PTR_ERR(rebind_fence);
572 		goto out_unlock;
573 	}
574 
575 	if (rebind_fence) {
576 		dma_fence_wait(rebind_fence, false);
577 		dma_fence_put(rebind_fence);
578 	}
579 
580 	/* Wait on munmap-style VM unbinds */
581 	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
582 				     DMA_RESV_USAGE_KERNEL,
583 				     false, MAX_SCHEDULE_TIMEOUT);
584 	if (wait <= 0) {
585 		err = -ETIME;
586 		goto out_unlock;
587 	}
588 
589 #define retry_required(__tries, __vm) \
590 	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
591 	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
592 	__xe_vm_userptr_needs_repin(__vm))
593 
594 	down_read(&vm->userptr.notifier_lock);
595 	if (retry_required(tries, vm)) {
596 		up_read(&vm->userptr.notifier_lock);
597 		err = -EAGAIN;
598 		goto out_unlock;
599 	}
600 
601 #undef retry_required
602 
603 	spin_lock(&vm->xe->ttm.lru_lock);
604 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
605 	spin_unlock(&vm->xe->ttm.lru_lock);
606 
607 	/* Point of no return. */
608 	arm_preempt_fences(vm, &preempt_fences);
609 	resume_and_reinstall_preempt_fences(vm, &exec);
610 	up_read(&vm->userptr.notifier_lock);
611 
612 out_unlock:
613 	drm_exec_fini(&exec);
614 out_unlock_outer:
615 	if (err == -EAGAIN) {
616 		trace_xe_vm_rebind_worker_retry(vm);
617 		goto retry;
618 	}
619 
620 	if (err) {
621 		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
622 		xe_vm_kill(vm);
623 	}
624 	up_write(&vm->lock);
625 
626 	free_preempt_fences(&preempt_fences);
627 
628 	trace_xe_vm_rebind_worker_exit(vm);
629 }
630 
631 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
632 				   const struct mmu_notifier_range *range,
633 				   unsigned long cur_seq)
634 {
635 	struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
636 	struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
637 	struct xe_vma *vma = &uvma->vma;
638 	struct xe_vm *vm = xe_vma_vm(vma);
639 	struct dma_resv_iter cursor;
640 	struct dma_fence *fence;
641 	long err;
642 
643 	xe_assert(vm->xe, xe_vma_is_userptr(vma));
644 	trace_xe_vma_userptr_invalidate(vma);
645 
646 	if (!mmu_notifier_range_blockable(range))
647 		return false;
648 
649 	down_write(&vm->userptr.notifier_lock);
650 	mmu_interval_set_seq(mni, cur_seq);
651 
652 	/* No need to stop gpu access if the userptr is not yet bound. */
653 	if (!userptr->initial_bind) {
654 		up_write(&vm->userptr.notifier_lock);
655 		return true;
656 	}
657 
658 	/*
659 	 * Tell exec and rebind worker they need to repin and rebind this
660 	 * userptr.
661 	 */
662 	if (!xe_vm_in_fault_mode(vm) &&
663 	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
664 		spin_lock(&vm->userptr.invalidated_lock);
665 		list_move_tail(&userptr->invalidate_link,
666 			       &vm->userptr.invalidated);
667 		spin_unlock(&vm->userptr.invalidated_lock);
668 	}
669 
670 	up_write(&vm->userptr.notifier_lock);
671 
672 	/*
673 	 * Preempt fences turn into schedule disables, pipeline these.
674 	 * Note that even in fault mode, we need to wait for binds and
675 	 * unbinds to complete, and those are attached as BOOKKEEP fences
676 	 * to the vm.
677 	 */
678 	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
679 			    DMA_RESV_USAGE_BOOKKEEP);
680 	dma_resv_for_each_fence_unlocked(&cursor, fence)
681 		dma_fence_enable_sw_signaling(fence);
682 	dma_resv_iter_end(&cursor);
683 
684 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
685 				    DMA_RESV_USAGE_BOOKKEEP,
686 				    false, MAX_SCHEDULE_TIMEOUT);
687 	XE_WARN_ON(err <= 0);
688 
689 	if (xe_vm_in_fault_mode(vm)) {
690 		err = xe_vm_invalidate_vma(vma);
691 		XE_WARN_ON(err);
692 	}
693 
694 	trace_xe_vma_userptr_invalidate_complete(vma);
695 
696 	return true;
697 }
698 
699 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
700 	.invalidate = vma_userptr_invalidate,
701 };
702 
703 int xe_vm_userptr_pin(struct xe_vm *vm)
704 {
705 	struct xe_userptr_vma *uvma, *next;
706 	int err = 0;
707 	LIST_HEAD(tmp_evict);
708 
709 	lockdep_assert_held_write(&vm->lock);
710 
711 	/* Collect invalidated userptrs */
712 	spin_lock(&vm->userptr.invalidated_lock);
713 	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
714 				 userptr.invalidate_link) {
715 		list_del_init(&uvma->userptr.invalidate_link);
716 		list_move_tail(&uvma->userptr.repin_link,
717 			       &vm->userptr.repin_list);
718 	}
719 	spin_unlock(&vm->userptr.invalidated_lock);
720 
721 	/* Pin and move to temporary list */
722 	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
723 				 userptr.repin_link) {
724 		err = xe_vma_userptr_pin_pages(uvma);
725 		if (err < 0)
726 			return err;
727 
728 		list_del_init(&uvma->userptr.repin_link);
729 		list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
730 	}
731 
732 	return 0;
733 }
734 
735 /**
736  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
737  * that need repinning.
738  * @vm: The VM.
739  *
740  * This function does an advisory check for whether the VM has userptrs that
741  * need repinning.
742  *
743  * Return: 0 if there are no indications of userptrs needing repinning,
744  * -EAGAIN if there are.
745  */
746 int xe_vm_userptr_check_repin(struct xe_vm *vm)
747 {
748 	return (list_empty_careful(&vm->userptr.repin_list) &&
749 		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
750 }
751 
752 static struct dma_fence *
753 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
754 	       struct xe_sync_entry *syncs, u32 num_syncs,
755 	       bool first_op, bool last_op);
756 
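/*
 * Rebind all VMAs on the VM's rebind list. Returns the fence of the last
 * rebind, NULL if there was nothing to rebind (or the VM is in LR mode and
 * this isn't the rebind worker), or an ERR_PTR on failure.
 */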
757 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
758 {
759 	struct dma_fence *fence = NULL;
760 	struct xe_vma *vma, *next;
761 
762 	lockdep_assert_held(&vm->lock);
763 	if (xe_vm_in_lr_mode(vm) && !rebind_worker)
764 		return NULL;
765 
766 	xe_vm_assert_held(vm);
767 	list_for_each_entry_safe(vma, next, &vm->rebind_list,
768 				 combined_links.rebind) {
769 		xe_assert(vm->xe, vma->tile_present);
770 
771 		list_del_init(&vma->combined_links.rebind);
772 		dma_fence_put(fence);
773 		if (rebind_worker)
774 			trace_xe_vma_rebind_worker(vma);
775 		else
776 			trace_xe_vma_rebind_exec(vma);
777 		fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
778 		if (IS_ERR(fence))
779 			return fence;
780 	}
781 
782 	return fence;
783 }
784 
785 static void xe_vma_free(struct xe_vma *vma)
786 {
787 	if (xe_vma_is_userptr(vma))
788 		kfree(to_userptr_vma(vma));
789 	else
790 		kfree(vma);
791 }
792 
793 #define VMA_CREATE_FLAG_READ_ONLY	BIT(0)
794 #define VMA_CREATE_FLAG_IS_NULL		BIT(1)
795 
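/*
 * Allocate and initialize a VMA covering [start, end]. A userptr VMA (no bo
 * and not null) is allocated as a struct xe_userptr_vma and registers an MMU
 * interval notifier; a bo-backed VMA takes a reference on the bo and links
 * into its drm_gpuvm_bo.
 */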
796 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
797 				    struct xe_bo *bo,
798 				    u64 bo_offset_or_userptr,
799 				    u64 start, u64 end,
800 				    u16 pat_index, unsigned int flags)
801 {
802 	struct xe_vma *vma;
803 	struct xe_tile *tile;
804 	u8 id;
805 	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
806 	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
807 
808 	xe_assert(vm->xe, start < end);
809 	xe_assert(vm->xe, end < vm->size);
810 
811 	/*
812 	 * Allocate the vma and ensure that the xe_vma_is_userptr() return
813 	 * value matches what was allocated.
814 	 */
815 	if (!bo && !is_null) {
816 		struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
817 
818 		if (!uvma)
819 			return ERR_PTR(-ENOMEM);
820 
821 		vma = &uvma->vma;
822 	} else {
823 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
824 		if (!vma)
825 			return ERR_PTR(-ENOMEM);
826 
827 		if (is_null)
828 			vma->gpuva.flags |= DRM_GPUVA_SPARSE;
829 		if (bo)
830 			vma->gpuva.gem.obj = &bo->ttm.base;
831 	}
832 
833 	INIT_LIST_HEAD(&vma->combined_links.rebind);
834 
835 	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
836 	vma->gpuva.vm = &vm->gpuvm;
837 	vma->gpuva.va.addr = start;
838 	vma->gpuva.va.range = end - start + 1;
839 	if (read_only)
840 		vma->gpuva.flags |= XE_VMA_READ_ONLY;
841 
842 	for_each_tile(tile, vm->xe, id)
843 		vma->tile_mask |= 0x1 << id;
844 
845 	if (GRAPHICS_VER(vm->xe) >= 20 || vm->xe->info.platform == XE_PVC)
846 		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
847 
848 	vma->pat_index = pat_index;
849 
850 	if (bo) {
851 		struct drm_gpuvm_bo *vm_bo;
852 
853 		xe_bo_assert_held(bo);
854 
855 		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
856 		if (IS_ERR(vm_bo)) {
857 			xe_vma_free(vma);
858 			return ERR_CAST(vm_bo);
859 		}
860 
861 		drm_gpuvm_bo_extobj_add(vm_bo);
862 		drm_gem_object_get(&bo->ttm.base);
863 		vma->gpuva.gem.offset = bo_offset_or_userptr;
864 		drm_gpuva_link(&vma->gpuva, vm_bo);
865 		drm_gpuvm_bo_put(vm_bo);
866 	} else /* userptr or null */ {
867 		if (!is_null) {
868 			struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
869 			u64 size = end - start + 1;
870 			int err;
871 
872 			INIT_LIST_HEAD(&userptr->invalidate_link);
873 			INIT_LIST_HEAD(&userptr->repin_link);
874 			vma->gpuva.gem.offset = bo_offset_or_userptr;
875 
876 			err = mmu_interval_notifier_insert(&userptr->notifier,
877 							   current->mm,
878 							   xe_vma_userptr(vma), size,
879 							   &vma_userptr_notifier_ops);
880 			if (err) {
881 				xe_vma_free(vma);
882 				return ERR_PTR(err);
883 			}
884 
885 			userptr->notifier_seq = LONG_MAX;
886 		}
887 
888 		xe_vm_get(vm);
889 	}
890 
891 	return vma;
892 }
893 
894 static void xe_vma_destroy_late(struct xe_vma *vma)
895 {
896 	struct xe_vm *vm = xe_vma_vm(vma);
897 	struct xe_device *xe = vm->xe;
898 	bool read_only = xe_vma_read_only(vma);
899 
900 	if (vma->ufence) {
901 		xe_sync_ufence_put(vma->ufence);
902 		vma->ufence = NULL;
903 	}
904 
905 	if (xe_vma_is_userptr(vma)) {
906 		struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
907 
908 		if (userptr->sg) {
909 			dma_unmap_sgtable(xe->drm.dev,
910 					  userptr->sg,
911 					  read_only ? DMA_TO_DEVICE :
912 					  DMA_BIDIRECTIONAL, 0);
913 			sg_free_table(userptr->sg);
914 			userptr->sg = NULL;
915 		}
916 
917 		/*
918 		 * Since userptr pages are not pinned, we can't remove
919 		 * the notifier until we're sure the GPU is not accessing
920 		 * them anymore.
921 		 */
922 		mmu_interval_notifier_remove(&userptr->notifier);
923 		xe_vm_put(vm);
924 	} else if (xe_vma_is_null(vma)) {
925 		xe_vm_put(vm);
926 	} else {
927 		xe_bo_put(xe_vma_bo(vma));
928 	}
929 
930 	xe_vma_free(vma);
931 }
932 
933 static void vma_destroy_work_func(struct work_struct *w)
934 {
935 	struct xe_vma *vma =
936 		container_of(w, struct xe_vma, destroy_work);
937 
938 	xe_vma_destroy_late(vma);
939 }
940 
941 static void vma_destroy_cb(struct dma_fence *fence,
942 			   struct dma_fence_cb *cb)
943 {
944 	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
945 
946 	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
947 	queue_work(system_unbound_wq, &vma->destroy_work);
948 }
949 
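/*
 * Remove a VMA from the VM's bookkeeping and destroy it. If @fence is given,
 * the final teardown is deferred to a worker once the fence signals;
 * otherwise the VMA is destroyed immediately.
 */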
950 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
951 {
952 	struct xe_vm *vm = xe_vma_vm(vma);
953 
954 	lockdep_assert_held_write(&vm->lock);
955 	xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
956 
957 	if (xe_vma_is_userptr(vma)) {
958 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
959 
960 		spin_lock(&vm->userptr.invalidated_lock);
961 		list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
962 		spin_unlock(&vm->userptr.invalidated_lock);
963 	} else if (!xe_vma_is_null(vma)) {
964 		xe_bo_assert_held(xe_vma_bo(vma));
965 
966 		drm_gpuva_unlink(&vma->gpuva);
967 	}
968 
969 	xe_vm_assert_held(vm);
970 	if (fence) {
971 		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
972 						 vma_destroy_cb);
973 
974 		if (ret) {
975 			XE_WARN_ON(ret != -ENOENT);
976 			xe_vma_destroy_late(vma);
977 		}
978 	} else {
979 		xe_vma_destroy_late(vma);
980 	}
981 }
982 
983 /**
984  * xe_vm_prepare_vma() - drm_exec utility to lock a vma
985  * @exec: The drm_exec object we're currently locking for.
986  * @vma: The vma for which we want to lock the vm resv and any attached
987  * object's resv.
988  * @num_shared: The number of dma-fence slots to pre-allocate in the
989  * objects' reservation objects.
990  *
991  * Return: 0 on success, negative error code on error. In particular
992  * may return -EDEADLK on WW transaction contention and -EINTR if
993  * an interruptible wait is terminated by a signal.
994  */
995 int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
996 		      unsigned int num_shared)
997 {
998 	struct xe_vm *vm = xe_vma_vm(vma);
999 	struct xe_bo *bo = xe_vma_bo(vma);
1000 	int err;
1001 
1002 	XE_WARN_ON(!vm);
1003 	if (num_shared)
1004 		err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
1005 	else
1006 		err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
1007 	if (!err && bo && !bo->vm) {
1008 		if (num_shared)
1009 			err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
1010 		else
1011 			err = drm_exec_lock_obj(exec, &bo->ttm.base);
1012 	}
1013 
1014 	return err;
1015 }
1016 
1017 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1018 {
1019 	struct drm_exec exec;
1020 	int err;
1021 
1022 	drm_exec_init(&exec, 0, 0);
1023 	drm_exec_until_all_locked(&exec) {
1024 		err = xe_vm_prepare_vma(&exec, vma, 0);
1025 		drm_exec_retry_on_contention(&exec);
1026 		if (XE_WARN_ON(err))
1027 			break;
1028 	}
1029 
1030 	xe_vma_destroy(vma, NULL);
1031 
1032 	drm_exec_fini(&exec);
1033 }
1034 
1035 struct xe_vma *
1036 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1037 {
1038 	struct drm_gpuva *gpuva;
1039 
1040 	lockdep_assert_held(&vm->lock);
1041 
1042 	if (xe_vm_is_closed_or_banned(vm))
1043 		return NULL;
1044 
1045 	xe_assert(vm->xe, start + range <= vm->size);
1046 
1047 	gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1048 
1049 	return gpuva ? gpuva_to_vma(gpuva) : NULL;
1050 }
1051 
1052 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1053 {
1054 	int err;
1055 
1056 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1057 	lockdep_assert_held(&vm->lock);
1058 
1059 	err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1060 	XE_WARN_ON(err);	/* Shouldn't be possible */
1061 
1062 	return err;
1063 }
1064 
1065 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1066 {
1067 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1068 	lockdep_assert_held(&vm->lock);
1069 
1070 	drm_gpuva_remove(&vma->gpuva);
1071 	if (vm->usm.last_fault_vma == vma)
1072 		vm->usm.last_fault_vma = NULL;
1073 }
1074 
1075 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1076 {
1077 	struct xe_vma_op *op;
1078 
1079 	op = kzalloc(sizeof(*op), GFP_KERNEL);
1080 
1081 	if (unlikely(!op))
1082 		return NULL;
1083 
1084 	return &op->base;
1085 }
1086 
1087 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1088 
1089 static struct drm_gpuvm_ops gpuvm_ops = {
1090 	.op_alloc = xe_vm_op_alloc,
1091 	.vm_bo_validate = xe_gpuvm_validate,
1092 	.vm_free = xe_vm_free,
1093 };
1094 
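/*
 * Helpers translating a PAT index into the PAT selection bits of a PDE or
 * PTE. The PTE variant also handles the level-dependent PAT2 bit and the
 * Xe2 PAT4 bit.
 */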
1095 static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
1096 {
1097 	u64 pte = 0;
1098 
1099 	if (pat_index & BIT(0))
1100 		pte |= XE_PPGTT_PTE_PAT0;
1101 
1102 	if (pat_index & BIT(1))
1103 		pte |= XE_PPGTT_PTE_PAT1;
1104 
1105 	return pte;
1106 }
1107 
1108 static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
1109 				u32 pt_level)
1110 {
1111 	u64 pte = 0;
1112 
1113 	if (pat_index & BIT(0))
1114 		pte |= XE_PPGTT_PTE_PAT0;
1115 
1116 	if (pat_index & BIT(1))
1117 		pte |= XE_PPGTT_PTE_PAT1;
1118 
1119 	if (pat_index & BIT(2)) {
1120 		if (pt_level)
1121 			pte |= XE_PPGTT_PDE_PDPE_PAT2;
1122 		else
1123 			pte |= XE_PPGTT_PTE_PAT2;
1124 	}
1125 
1126 	if (pat_index & BIT(3))
1127 		pte |= XELPG_PPGTT_PTE_PAT3;
1128 
1129 	if (pat_index & (BIT(4)))
1130 		pte |= XE2_PPGTT_PTE_PAT4;
1131 
1132 	return pte;
1133 }
1134 
1135 static u64 pte_encode_ps(u32 pt_level)
1136 {
1137 	XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
1138 
1139 	if (pt_level == 1)
1140 		return XE_PDE_PS_2M;
1141 	else if (pt_level == 2)
1142 		return XE_PDPE_PS_1G;
1143 
1144 	return 0;
1145 }
1146 
1147 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
1148 			      const u16 pat_index)
1149 {
1150 	struct xe_device *xe = xe_bo_device(bo);
1151 	u64 pde;
1152 
1153 	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1154 	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1155 	pde |= pde_encode_pat_index(xe, pat_index);
1156 
1157 	return pde;
1158 }
1159 
1160 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1161 			      u16 pat_index, u32 pt_level)
1162 {
1163 	struct xe_device *xe = xe_bo_device(bo);
1164 	u64 pte;
1165 
1166 	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1167 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1168 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1169 	pte |= pte_encode_ps(pt_level);
1170 
1171 	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1172 		pte |= XE_PPGTT_PTE_DM;
1173 
1174 	return pte;
1175 }
1176 
1177 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1178 			       u16 pat_index, u32 pt_level)
1179 {
1180 	struct xe_device *xe = xe_vma_vm(vma)->xe;
1181 
1182 	pte |= XE_PAGE_PRESENT;
1183 
1184 	if (likely(!xe_vma_read_only(vma)))
1185 		pte |= XE_PAGE_RW;
1186 
1187 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1188 	pte |= pte_encode_ps(pt_level);
1189 
1190 	if (unlikely(xe_vma_is_null(vma)))
1191 		pte |= XE_PTE_NULL;
1192 
1193 	return pte;
1194 }
1195 
1196 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1197 				u16 pat_index,
1198 				u32 pt_level, bool devmem, u64 flags)
1199 {
1200 	u64 pte;
1201 
1202 	/* Avoid passing random bits directly as flags */
1203 	xe_assert(xe, !(flags & ~XE_PTE_PS64));
1204 
1205 	pte = addr;
1206 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1207 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1208 	pte |= pte_encode_ps(pt_level);
1209 
1210 	if (devmem)
1211 		pte |= XE_PPGTT_PTE_DM;
1212 
1213 	pte |= flags;
1214 
1215 	return pte;
1216 }
1217 
1218 static const struct xe_pt_ops xelp_pt_ops = {
1219 	.pte_encode_bo = xelp_pte_encode_bo,
1220 	.pte_encode_vma = xelp_pte_encode_vma,
1221 	.pte_encode_addr = xelp_pte_encode_addr,
1222 	.pde_encode_bo = xelp_pde_encode_bo,
1223 };
1224 
1225 static void vm_destroy_work_func(struct work_struct *w);
1226 
1227 /**
1228  * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
1229  * given tile and vm.
1230  * @xe: xe device.
1231  * @tile: tile to set up for.
1232  * @vm: vm to set up for.
1233  *
1234  * Sets up a pagetable tree with one page-table per level and a single
1235  * leaf PTE. All pagetable entries point to the single page-table or,
1236  * for MAX_HUGEPTE_LEVEL, a NULL huge PTE returning 0 on read and
1237  * writes become NOPs.
1238  *
1239  * Return: 0 on success, negative error code on error.
1240  */
1241 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1242 				struct xe_vm *vm)
1243 {
1244 	u8 id = tile->id;
1245 	int i;
1246 
1247 	for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
1248 		vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
1249 		if (IS_ERR(vm->scratch_pt[id][i]))
1250 			return PTR_ERR(vm->scratch_pt[id][i]);
1251 
1252 		xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
1253 	}
1254 
1255 	return 0;
1256 }
1257 
1258 static void xe_vm_free_scratch(struct xe_vm *vm)
1259 {
1260 	struct xe_tile *tile;
1261 	u8 id;
1262 
1263 	if (!xe_vm_has_scratch(vm))
1264 		return;
1265 
1266 	for_each_tile(tile, vm->xe, id) {
1267 		u32 i;
1268 
1269 		if (!vm->pt_root[id])
1270 			continue;
1271 
1272 		for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
1273 			if (vm->scratch_pt[id][i])
1274 				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
1275 	}
1276 }
1277 
1278 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1279 {
1280 	struct drm_gem_object *vm_resv_obj;
1281 	struct xe_vm *vm;
1282 	int err, number_tiles = 0;
1283 	struct xe_tile *tile;
1284 	u8 id;
1285 
1286 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1287 	if (!vm)
1288 		return ERR_PTR(-ENOMEM);
1289 
1290 	vm->xe = xe;
1291 
1292 	vm->size = 1ull << xe->info.va_bits;
1293 
1294 	vm->flags = flags;
1295 
1296 	init_rwsem(&vm->lock);
1297 
1298 	INIT_LIST_HEAD(&vm->rebind_list);
1299 
1300 	INIT_LIST_HEAD(&vm->userptr.repin_list);
1301 	INIT_LIST_HEAD(&vm->userptr.invalidated);
1302 	init_rwsem(&vm->userptr.notifier_lock);
1303 	spin_lock_init(&vm->userptr.invalidated_lock);
1304 
1305 	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1306 
1307 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
1308 	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
1309 
1310 	for_each_tile(tile, xe, id)
1311 		xe_range_fence_tree_init(&vm->rftree[id]);
1312 
1313 	vm->pt_ops = &xelp_pt_ops;
1314 
1315 	if (!(flags & XE_VM_FLAG_MIGRATION))
1316 		xe_device_mem_access_get(xe);
1317 
1318 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1319 	if (!vm_resv_obj) {
1320 		err = -ENOMEM;
1321 		goto err_no_resv;
1322 	}
1323 
1324 	drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1325 		       vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
1326 
1327 	drm_gem_object_put(vm_resv_obj);
1328 
1329 	err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
1330 	if (err)
1331 		goto err_close;
1332 
1333 	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1334 		vm->flags |= XE_VM_FLAG_64K;
1335 
1336 	for_each_tile(tile, xe, id) {
1337 		if (flags & XE_VM_FLAG_MIGRATION &&
1338 		    tile->id != XE_VM_FLAG_TILE_ID(flags))
1339 			continue;
1340 
1341 		vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1342 		if (IS_ERR(vm->pt_root[id])) {
1343 			err = PTR_ERR(vm->pt_root[id]);
1344 			vm->pt_root[id] = NULL;
1345 			goto err_unlock_close;
1346 		}
1347 	}
1348 
1349 	if (xe_vm_has_scratch(vm)) {
1350 		for_each_tile(tile, xe, id) {
1351 			if (!vm->pt_root[id])
1352 				continue;
1353 
1354 			err = xe_vm_create_scratch(xe, tile, vm);
1355 			if (err)
1356 				goto err_unlock_close;
1357 		}
1358 		vm->batch_invalidate_tlb = true;
1359 	}
1360 
1361 	if (flags & XE_VM_FLAG_LR_MODE) {
1362 		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1363 		vm->flags |= XE_VM_FLAG_LR_MODE;
1364 		vm->batch_invalidate_tlb = false;
1365 	}
1366 
1367 	/* Fill pt_root after allocating scratch tables */
1368 	for_each_tile(tile, xe, id) {
1369 		if (!vm->pt_root[id])
1370 			continue;
1371 
1372 		xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1373 	}
1374 	dma_resv_unlock(xe_vm_resv(vm));
1375 
1376 	/* The kernel migration VM shouldn't have a circular dependency on itself */
1377 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
1378 		for_each_tile(tile, xe, id) {
1379 			struct xe_gt *gt = tile->primary_gt;
1380 			struct xe_vm *migrate_vm;
1381 			struct xe_exec_queue *q;
1382 			u32 create_flags = EXEC_QUEUE_FLAG_VM;
1383 
1384 			if (!vm->pt_root[id])
1385 				continue;
1386 
1387 			migrate_vm = xe_migrate_get_vm(tile->migrate);
1388 			q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1389 						       XE_ENGINE_CLASS_COPY,
1390 						       create_flags);
1391 			xe_vm_put(migrate_vm);
1392 			if (IS_ERR(q)) {
1393 				err = PTR_ERR(q);
1394 				goto err_close;
1395 			}
1396 			vm->q[id] = q;
1397 			number_tiles++;
1398 		}
1399 	}
1400 
1401 	if (number_tiles > 1)
1402 		vm->composite_fence_ctx = dma_fence_context_alloc(1);
1403 
1404 	mutex_lock(&xe->usm.lock);
1405 	if (flags & XE_VM_FLAG_FAULT_MODE)
1406 		xe->usm.num_vm_in_fault_mode++;
1407 	else if (!(flags & XE_VM_FLAG_MIGRATION))
1408 		xe->usm.num_vm_in_non_fault_mode++;
1409 	mutex_unlock(&xe->usm.lock);
1410 
1411 	trace_xe_vm_create(vm);
1412 
1413 	return vm;
1414 
1415 err_unlock_close:
1416 	dma_resv_unlock(xe_vm_resv(vm));
1417 err_close:
1418 	xe_vm_close_and_put(vm);
1419 	return ERR_PTR(err);
1420 
1421 err_no_resv:
1422 	for_each_tile(tile, xe, id)
1423 		xe_range_fence_tree_fini(&vm->rftree[id]);
1424 	kfree(vm);
1425 	if (!(flags & XE_VM_FLAG_MIGRATION))
1426 		xe_device_mem_access_put(xe);
1427 	return ERR_PTR(err);
1428 }
1429 
1430 static void xe_vm_close(struct xe_vm *vm)
1431 {
1432 	down_write(&vm->lock);
1433 	vm->size = 0;
1434 	up_write(&vm->lock);
1435 }
1436 
1437 void xe_vm_close_and_put(struct xe_vm *vm)
1438 {
1439 	LIST_HEAD(contested);
1440 	struct xe_device *xe = vm->xe;
1441 	struct xe_tile *tile;
1442 	struct xe_vma *vma, *next_vma;
1443 	struct drm_gpuva *gpuva, *next;
1444 	u8 id;
1445 
1446 	xe_assert(xe, !vm->preempt.num_exec_queues);
1447 
1448 	xe_vm_close(vm);
1449 	if (xe_vm_in_preempt_fence_mode(vm))
1450 		flush_work(&vm->preempt.rebind_work);
1451 
1452 	down_write(&vm->lock);
1453 	for_each_tile(tile, xe, id) {
1454 		if (vm->q[id])
1455 			xe_exec_queue_last_fence_put(vm->q[id], vm);
1456 	}
1457 	up_write(&vm->lock);
1458 
1459 	for_each_tile(tile, xe, id) {
1460 		if (vm->q[id]) {
1461 			xe_exec_queue_kill(vm->q[id]);
1462 			xe_exec_queue_put(vm->q[id]);
1463 			vm->q[id] = NULL;
1464 		}
1465 	}
1466 
1467 	down_write(&vm->lock);
1468 	xe_vm_lock(vm, false);
1469 	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1470 		vma = gpuva_to_vma(gpuva);
1471 
1472 		if (xe_vma_has_no_bo(vma)) {
1473 			down_read(&vm->userptr.notifier_lock);
1474 			vma->gpuva.flags |= XE_VMA_DESTROYED;
1475 			up_read(&vm->userptr.notifier_lock);
1476 		}
1477 
1478 		xe_vm_remove_vma(vm, vma);
1479 
1480 		/* Easy case: no bo, or a vm-private bo, so destroy the vma immediately */
1481 		if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1482 			list_del_init(&vma->combined_links.rebind);
1483 			xe_vma_destroy(vma, NULL);
1484 			continue;
1485 		}
1486 
1487 		list_move_tail(&vma->combined_links.destroy, &contested);
1488 		vma->gpuva.flags |= XE_VMA_DESTROYED;
1489 	}
1490 
1491 	/*
1492 	 * All vm operations will add shared fences to resv.
1493 	 * The only exception is eviction for a shared object,
1494 	 * but even so, the unbind when evicted would still
1495 	 * install a fence to resv. Hence it's safe to
1496 	 * destroy the pagetables immediately.
1497 	 */
1498 	xe_vm_free_scratch(vm);
1499 
1500 	for_each_tile(tile, xe, id) {
1501 		if (vm->pt_root[id]) {
1502 			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1503 			vm->pt_root[id] = NULL;
1504 		}
1505 	}
1506 	xe_vm_unlock(vm);
1507 
1508 	/*
1509 	 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL.
1510 	 * Since we hold a refcount to the bo, we can remove and free
1511 	 * the members safely without locking.
1512 	 */
1513 	list_for_each_entry_safe(vma, next_vma, &contested,
1514 				 combined_links.destroy) {
1515 		list_del_init(&vma->combined_links.destroy);
1516 		xe_vma_destroy_unlocked(vma);
1517 	}
1518 
1519 	up_write(&vm->lock);
1520 
1521 	mutex_lock(&xe->usm.lock);
1522 	if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1523 		xe->usm.num_vm_in_fault_mode--;
1524 	else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1525 		xe->usm.num_vm_in_non_fault_mode--;
1526 	mutex_unlock(&xe->usm.lock);
1527 
1528 	for_each_tile(tile, xe, id)
1529 		xe_range_fence_tree_fini(&vm->rftree[id]);
1530 
1531 	xe_vm_put(vm);
1532 }
1533 
1534 static void vm_destroy_work_func(struct work_struct *w)
1535 {
1536 	struct xe_vm *vm =
1537 		container_of(w, struct xe_vm, destroy_work);
1538 	struct xe_device *xe = vm->xe;
1539 	struct xe_tile *tile;
1540 	u8 id;
1541 	void *lookup;
1542 
1543 	/* If this fires, xe_vm_close_and_put() was not called */
1544 	xe_assert(xe, !vm->size);
1545 
1546 	if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1547 		xe_device_mem_access_put(xe);
1548 
1549 		if (xe->info.has_asid && vm->usm.asid) {
1550 			mutex_lock(&xe->usm.lock);
1551 			lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1552 			xe_assert(xe, lookup == vm);
1553 			mutex_unlock(&xe->usm.lock);
1554 		}
1555 	}
1556 
1557 	for_each_tile(tile, xe, id)
1558 		XE_WARN_ON(vm->pt_root[id]);
1559 
1560 	trace_xe_vm_free(vm);
1561 	dma_fence_put(vm->rebind_fence);
1562 	kfree(vm);
1563 }
1564 
1565 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1566 {
1567 	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1568 
1569 	/* To destroy the VM we need to be able to sleep */
1570 	queue_work(system_unbound_wq, &vm->destroy_work);
1571 }
1572 
1573 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1574 {
1575 	struct xe_vm *vm;
1576 
1577 	mutex_lock(&xef->vm.lock);
1578 	vm = xa_load(&xef->vm.xa, id);
1579 	if (vm)
1580 		xe_vm_get(vm);
1581 	mutex_unlock(&xef->vm.lock);
1582 
1583 	return vm;
1584 }
1585 
1586 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1587 {
1588 	return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
1589 					 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
1590 }
1591 
1592 static struct xe_exec_queue *
1593 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
1594 {
1595 	return q ? q : vm->q[0];
1596 }
1597 
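/*
 * Unbind a VMA on every tile where it is currently present, refusing with
 * -EBUSY while an attached user fence is still pending. Per-tile unbind
 * fences are combined into a dma_fence_array when more than one tile is
 * involved, and syncs are signalled on the last operation.
 */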
1598 static struct dma_fence *
1599 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1600 		 struct xe_sync_entry *syncs, u32 num_syncs,
1601 		 bool first_op, bool last_op)
1602 {
1603 	struct xe_vm *vm = xe_vma_vm(vma);
1604 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1605 	struct xe_tile *tile;
1606 	struct dma_fence *fence = NULL;
1607 	struct dma_fence **fences = NULL;
1608 	struct dma_fence_array *cf = NULL;
1609 	int cur_fence = 0, i;
1610 	int number_tiles = hweight8(vma->tile_present);
1611 	int err;
1612 	u8 id;
1613 
1614 	trace_xe_vma_unbind(vma);
1615 
1616 	if (vma->ufence) {
1617 		struct xe_user_fence * const f = vma->ufence;
1618 
1619 		if (!xe_sync_ufence_get_status(f))
1620 			return ERR_PTR(-EBUSY);
1621 
1622 		vma->ufence = NULL;
1623 		xe_sync_ufence_put(f);
1624 	}
1625 
1626 	if (number_tiles > 1) {
1627 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1628 				       GFP_KERNEL);
1629 		if (!fences)
1630 			return ERR_PTR(-ENOMEM);
1631 	}
1632 
1633 	for_each_tile(tile, vm->xe, id) {
1634 		if (!(vma->tile_present & BIT(id)))
1635 			goto next;
1636 
1637 		fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
1638 					   first_op ? syncs : NULL,
1639 					   first_op ? num_syncs : 0);
1640 		if (IS_ERR(fence)) {
1641 			err = PTR_ERR(fence);
1642 			goto err_fences;
1643 		}
1644 
1645 		if (fences)
1646 			fences[cur_fence++] = fence;
1647 
1648 next:
1649 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1650 			q = list_next_entry(q, multi_gt_list);
1651 	}
1652 
1653 	if (fences) {
1654 		cf = dma_fence_array_create(number_tiles, fences,
1655 					    vm->composite_fence_ctx,
1656 					    vm->composite_fence_seqno++,
1657 					    false);
1658 		if (!cf) {
1659 			--vm->composite_fence_seqno;
1660 			err = -ENOMEM;
1661 			goto err_fences;
1662 		}
1663 	}
1664 
1665 	fence = cf ? &cf->base : !fence ?
1666 		xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
1667 	if (last_op) {
1668 		for (i = 0; i < num_syncs; i++)
1669 			xe_sync_entry_signal(&syncs[i], NULL, fence);
1670 	}
1671 
1672 	return fence;
1673 
1674 err_fences:
1675 	if (fences) {
1676 		while (cur_fence)
1677 			dma_fence_put(fences[--cur_fence]);
1678 		kfree(fences);
1679 	}
1680 
1681 	return ERR_PTR(err);
1682 }
1683 
1684 static struct dma_fence *
1685 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1686 	       struct xe_sync_entry *syncs, u32 num_syncs,
1687 	       bool first_op, bool last_op)
1688 {
1689 	struct xe_tile *tile;
1690 	struct dma_fence *fence;
1691 	struct dma_fence **fences = NULL;
1692 	struct dma_fence_array *cf = NULL;
1693 	struct xe_vm *vm = xe_vma_vm(vma);
1694 	int cur_fence = 0, i;
1695 	int number_tiles = hweight8(vma->tile_mask);
1696 	int err;
1697 	u8 id;
1698 
1699 	trace_xe_vma_bind(vma);
1700 
1701 	if (number_tiles > 1) {
1702 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1703 				       GFP_KERNEL);
1704 		if (!fences)
1705 			return ERR_PTR(-ENOMEM);
1706 	}
1707 
1708 	for_each_tile(tile, vm->xe, id) {
1709 		if (!(vma->tile_mask & BIT(id)))
1710 			goto next;
1711 
1712 		fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
1713 					 first_op ? syncs : NULL,
1714 					 first_op ? num_syncs : 0,
1715 					 vma->tile_present & BIT(id));
1716 		if (IS_ERR(fence)) {
1717 			err = PTR_ERR(fence);
1718 			goto err_fences;
1719 		}
1720 
1721 		if (fences)
1722 			fences[cur_fence++] = fence;
1723 
1724 next:
1725 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1726 			q = list_next_entry(q, multi_gt_list);
1727 	}
1728 
1729 	if (fences) {
1730 		cf = dma_fence_array_create(number_tiles, fences,
1731 					    vm->composite_fence_ctx,
1732 					    vm->composite_fence_seqno++,
1733 					    false);
1734 		if (!cf) {
1735 			--vm->composite_fence_seqno;
1736 			err = -ENOMEM;
1737 			goto err_fences;
1738 		}
1739 	}
1740 
1741 	if (last_op) {
1742 		for (i = 0; i < num_syncs; i++)
1743 			xe_sync_entry_signal(&syncs[i], NULL,
1744 					     cf ? &cf->base : fence);
1745 	}
1746 
1747 	return cf ? &cf->base : fence;
1748 
1749 err_fences:
1750 	if (fences) {
1751 		while (cur_fence)
1752 			dma_fence_put(fences[--cur_fence]);
1753 		kfree(fences);
1754 	}
1755 
1756 	return ERR_PTR(err);
1757 }
1758 
1759 static struct xe_user_fence *
1760 find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
1761 {
1762 	unsigned int i;
1763 
1764 	for (i = 0; i < num_syncs; i++) {
1765 		struct xe_sync_entry *e = &syncs[i];
1766 
1767 		if (xe_sync_is_ufence(e))
1768 			return xe_sync_ufence_get(e);
1769 	}
1770 
1771 	return NULL;
1772 }
1773 
1774 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1775 			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1776 			u32 num_syncs, bool immediate, bool first_op,
1777 			bool last_op)
1778 {
1779 	struct dma_fence *fence;
1780 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1781 	struct xe_user_fence *ufence;
1782 
1783 	xe_vm_assert_held(vm);
1784 
1785 	ufence = find_ufence_get(syncs, num_syncs);
1786 	if (vma->ufence && ufence)
1787 		xe_sync_ufence_put(vma->ufence);
1788 
1789 	vma->ufence = ufence ?: vma->ufence;
1790 
1791 	if (immediate) {
1792 		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
1793 				       last_op);
1794 		if (IS_ERR(fence))
1795 			return PTR_ERR(fence);
1796 	} else {
1797 		int i;
1798 
1799 		xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1800 
1801 		fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1802 		if (last_op) {
1803 			for (i = 0; i < num_syncs; i++)
1804 				xe_sync_entry_signal(&syncs[i], NULL, fence);
1805 		}
1806 	}
1807 
1808 	if (last_op)
1809 		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1810 	dma_fence_put(fence);
1811 
1812 	return 0;
1813 }
1814 
1815 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
1816 		      struct xe_bo *bo, struct xe_sync_entry *syncs,
1817 		      u32 num_syncs, bool immediate, bool first_op,
1818 		      bool last_op)
1819 {
1820 	int err;
1821 
1822 	xe_vm_assert_held(vm);
1823 	xe_bo_assert_held(bo);
1824 
1825 	if (bo && immediate) {
1826 		err = xe_bo_validate(bo, vm, true);
1827 		if (err)
1828 			return err;
1829 	}
1830 
1831 	return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op,
1832 			    last_op);
1833 }
1834 
1835 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1836 			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1837 			u32 num_syncs, bool first_op, bool last_op)
1838 {
1839 	struct dma_fence *fence;
1840 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1841 
1842 	xe_vm_assert_held(vm);
1843 	xe_bo_assert_held(xe_vma_bo(vma));
1844 
1845 	fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
1846 	if (IS_ERR(fence))
1847 		return PTR_ERR(fence);
1848 
1849 	xe_vma_destroy(vma, fence);
1850 	if (last_op)
1851 		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1852 	dma_fence_put(fence);
1853 
1854 	return 0;
1855 }
1856 
1857 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
1858 				    DRM_XE_VM_CREATE_FLAG_LR_MODE | \
1859 				    DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1860 
1861 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1862 		       struct drm_file *file)
1863 {
1864 	struct xe_device *xe = to_xe_device(dev);
1865 	struct xe_file *xef = to_xe_file(file);
1866 	struct drm_xe_vm_create *args = data;
1867 	struct xe_tile *tile;
1868 	struct xe_vm *vm;
1869 	u32 id, asid;
1870 	int err;
1871 	u32 flags = 0;
1872 
1873 	if (XE_IOCTL_DBG(xe, args->extensions))
1874 		return -EINVAL;
1875 
1876 	if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1877 		args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
1878 
1879 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1880 			 !xe->info.has_usm))
1881 		return -EINVAL;
1882 
1883 	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1884 		return -EINVAL;
1885 
1886 	if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1887 		return -EINVAL;
1888 
1889 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
1890 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1891 		return -EINVAL;
1892 
1893 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
1894 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1895 		return -EINVAL;
1896 
1897 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1898 			 xe_device_in_non_fault_mode(xe)))
1899 		return -EINVAL;
1900 
1901 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
1902 			 xe_device_in_fault_mode(xe)))
1903 		return -EINVAL;
1904 
1905 	if (XE_IOCTL_DBG(xe, args->extensions))
1906 		return -EINVAL;
1907 
1908 	if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
1909 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
1910 	if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
1911 		flags |= XE_VM_FLAG_LR_MODE;
1912 	if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1913 		flags |= XE_VM_FLAG_FAULT_MODE;
1914 
1915 	vm = xe_vm_create(xe, flags);
1916 	if (IS_ERR(vm))
1917 		return PTR_ERR(vm);
1918 
1919 	mutex_lock(&xef->vm.lock);
1920 	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1921 	mutex_unlock(&xef->vm.lock);
1922 	if (err)
1923 		goto err_close_and_put;
1924 
1925 	if (xe->info.has_asid) {
1926 		mutex_lock(&xe->usm.lock);
1927 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1928 				      XA_LIMIT(1, XE_MAX_ASID - 1),
1929 				      &xe->usm.next_asid, GFP_KERNEL);
1930 		mutex_unlock(&xe->usm.lock);
1931 		if (err < 0)
1932 			goto err_free_id;
1933 
1934 		vm->usm.asid = asid;
1935 	}
1936 
1937 	args->vm_id = id;
1938 	vm->xef = xef;
1939 
1940 	/* Record the VM pagetable BO memory against the client */
1941 	for_each_tile(tile, xe, id)
1942 		if (vm->pt_root[id])
1943 			xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
1944 
1945 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1946 	/* Warning: Security issue - never enable by default */
1947 	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
1948 #endif
1949 
1950 	return 0;
1951 
1952 err_free_id:
1953 	mutex_lock(&xef->vm.lock);
1954 	xa_erase(&xef->vm.xa, id);
1955 	mutex_unlock(&xef->vm.lock);
1956 err_close_and_put:
1957 	xe_vm_close_and_put(vm);
1958 
1959 	return err;
1960 }
1961 
1962 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1963 			struct drm_file *file)
1964 {
1965 	struct xe_device *xe = to_xe_device(dev);
1966 	struct xe_file *xef = to_xe_file(file);
1967 	struct drm_xe_vm_destroy *args = data;
1968 	struct xe_vm *vm;
1969 	int err = 0;
1970 
1971 	if (XE_IOCTL_DBG(xe, args->pad) ||
1972 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1973 		return -EINVAL;
1974 
1975 	mutex_lock(&xef->vm.lock);
1976 	vm = xa_load(&xef->vm.xa, args->vm_id);
1977 	if (XE_IOCTL_DBG(xe, !vm))
1978 		err = -ENOENT;
1979 	else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
1980 		err = -EBUSY;
1981 	else
1982 		xa_erase(&xef->vm.xa, args->vm_id);
1983 	mutex_unlock(&xef->vm.lock);
1984 
1985 	if (!err)
1986 		xe_vm_close_and_put(vm);
1987 
1988 	return err;
1989 }
1990 
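/*
 * Translation from a prefetch region index to the TTM placement used when
 * migrating the backing BO.
 */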
1991 static const u32 region_to_mem_type[] = {
1992 	XE_PL_TT,
1993 	XE_PL_VRAM0,
1994 	XE_PL_VRAM1,
1995 };
1996 
1997 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
1998 			  struct xe_exec_queue *q, u32 region,
1999 			  struct xe_sync_entry *syncs, u32 num_syncs,
2000 			  bool first_op, bool last_op)
2001 {
2002 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
2003 	int err;
2004 
2005 	xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
2006 
2007 	if (!xe_vma_has_no_bo(vma)) {
2008 		err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
2009 		if (err)
2010 			return err;
2011 	}
2012 
2013 	if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
2014 		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
2015 				  true, first_op, last_op);
2016 	} else {
2017 		int i;
2018 
2019 		/* Nothing to do, signal fences now */
2020 		if (last_op) {
2021 			for (i = 0; i < num_syncs; i++) {
2022 				struct dma_fence *fence =
2023 					xe_exec_queue_last_fence_get(wait_exec_queue, vm);
2024 
2025 				xe_sync_entry_signal(&syncs[i], NULL, fence);
2026 				dma_fence_put(fence);
2027 			}
2028 		}
2029 
2030 		return 0;
2031 	}
2032 }
2033 
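/*
 * Mark the VMA as destroyed under the userptr notifier lock so the userptr
 * invalidation path sees a consistent view, and remove it from the VM's GPUVA
 * tree if it had already been committed (post_commit).
 */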
2034 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
2035 			     bool post_commit)
2036 {
2037 	down_read(&vm->userptr.notifier_lock);
2038 	vma->gpuva.flags |= XE_VMA_DESTROYED;
2039 	up_read(&vm->userptr.notifier_lock);
2040 	if (post_commit)
2041 		xe_vm_remove_vma(vm, vma);
2042 }
2043 
2044 #undef ULL
2045 #define ULL	unsigned long long
2046 
2047 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2048 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2049 {
2050 	struct xe_vma *vma;
2051 
2052 	switch (op->op) {
2053 	case DRM_GPUVA_OP_MAP:
2054 		vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2055 		       (ULL)op->map.va.addr, (ULL)op->map.va.range);
2056 		break;
2057 	case DRM_GPUVA_OP_REMAP:
2058 		vma = gpuva_to_vma(op->remap.unmap->va);
2059 		vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2060 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2061 		       op->remap.unmap->keep ? 1 : 0);
2062 		if (op->remap.prev)
2063 			vm_dbg(&xe->drm,
2064 			       "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2065 			       (ULL)op->remap.prev->va.addr,
2066 			       (ULL)op->remap.prev->va.range);
2067 		if (op->remap.next)
2068 			vm_dbg(&xe->drm,
2069 			       "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2070 			       (ULL)op->remap.next->va.addr,
2071 			       (ULL)op->remap.next->va.range);
2072 		break;
2073 	case DRM_GPUVA_OP_UNMAP:
2074 		vma = gpuva_to_vma(op->unmap.va);
2075 		vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2076 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2077 		       op->unmap.keep ? 1 : 0);
2078 		break;
2079 	case DRM_GPUVA_OP_PREFETCH:
2080 		vma = gpuva_to_vma(op->prefetch.va);
2081 		vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2082 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2083 		break;
2084 	default:
2085 		drm_warn(&xe->drm, "NOT POSSIBLE");
2086 	}
2087 }
2088 #else
2089 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2090 {
2091 }
2092 #endif
2093 
2094 /*
2095  * Create the operations list from the IOCTL arguments and set up operation
2096  * fields so the parse and commit steps are decoupled from them. This step can fail.
2097  */
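/*
 * Rough bind flow (see xe_vm_bind_ioctl() below): vm_bind_ioctl_ops_create()
 * builds a GPUVA op list per bind op, vm_bind_ioctl_ops_parse() creates and
 * commits the VMAs into the VM tree, vm_bind_ioctl_ops_execute() performs the
 * actual (un)binds, and vm_bind_ioctl_ops_unwind() rolls committed ops back
 * on failure.
 */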
2098 static struct drm_gpuva_ops *
2099 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2100 			 u64 bo_offset_or_userptr, u64 addr, u64 range,
2101 			 u32 operation, u32 flags,
2102 			 u32 prefetch_region, u16 pat_index)
2103 {
2104 	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2105 	struct drm_gpuva_ops *ops;
2106 	struct drm_gpuva_op *__op;
2107 	struct drm_gpuvm_bo *vm_bo;
2108 	int err;
2109 
2110 	lockdep_assert_held_write(&vm->lock);
2111 
2112 	vm_dbg(&vm->xe->drm,
2113 	       "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2114 	       operation, (ULL)addr, (ULL)range,
2115 	       (ULL)bo_offset_or_userptr);
2116 
2117 	switch (operation) {
2118 	case DRM_XE_VM_BIND_OP_MAP:
2119 	case DRM_XE_VM_BIND_OP_MAP_USERPTR:
2120 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2121 						  obj, bo_offset_or_userptr);
2122 		break;
2123 	case DRM_XE_VM_BIND_OP_UNMAP:
2124 		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2125 		break;
2126 	case DRM_XE_VM_BIND_OP_PREFETCH:
2127 		ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2128 		break;
2129 	case DRM_XE_VM_BIND_OP_UNMAP_ALL:
2130 		xe_assert(vm->xe, bo);
2131 
2132 		err = xe_bo_lock(bo, true);
2133 		if (err)
2134 			return ERR_PTR(err);
2135 
2136 		vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
2137 		if (IS_ERR(vm_bo)) {
2138 			xe_bo_unlock(bo);
2139 			return ERR_CAST(vm_bo);
2140 		}
2141 
2142 		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2143 		drm_gpuvm_bo_put(vm_bo);
2144 		xe_bo_unlock(bo);
2145 		break;
2146 	default:
2147 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2148 		ops = ERR_PTR(-EINVAL);
2149 	}
2150 	if (IS_ERR(ops))
2151 		return ops;
2152 
2153 	drm_gpuva_for_each_op(__op, ops) {
2154 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2155 
2156 		if (__op->op == DRM_GPUVA_OP_MAP) {
2157 			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2158 			op->map.pat_index = pat_index;
2159 		} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
2160 			op->prefetch.region = prefetch_region;
2161 		}
2162 
2163 		print_op(vm->xe, __op);
2164 	}
2165 
2166 	return ops;
2167 }
2168 
2169 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2170 			      u16 pat_index, unsigned int flags)
2171 {
2172 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2173 	struct drm_exec exec;
2174 	struct xe_vma *vma;
2175 	int err;
2176 
2177 	lockdep_assert_held_write(&vm->lock);
2178 
2179 	if (bo) {
2180 		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2181 		drm_exec_until_all_locked(&exec) {
2182 			err = 0;
2183 			if (!bo->vm) {
2184 				err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
2185 				drm_exec_retry_on_contention(&exec);
2186 			}
2187 			if (!err) {
2188 				err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2189 				drm_exec_retry_on_contention(&exec);
2190 			}
2191 			if (err) {
2192 				drm_exec_fini(&exec);
2193 				return ERR_PTR(err);
2194 			}
2195 		}
2196 	}
2197 	vma = xe_vma_create(vm, bo, op->gem.offset,
2198 			    op->va.addr, op->va.addr +
2199 			    op->va.range - 1, pat_index, flags);
2200 	if (bo)
2201 		drm_exec_fini(&exec);
2202 
	if (IS_ERR(vma))
		return vma;

2203 	if (xe_vma_is_userptr(vma)) {
2204 		err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2205 		if (err) {
2206 			prep_vma_destroy(vm, vma, false);
2207 			xe_vma_destroy_unlocked(vma);
2208 			return ERR_PTR(err);
2209 		}
2210 	} else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
2211 		err = add_preempt_fences(vm, bo);
2212 		if (err) {
2213 			prep_vma_destroy(vm, vma, false);
2214 			xe_vma_destroy_unlocked(vma);
2215 			return ERR_PTR(err);
2216 		}
2217 	}
2218 
2219 	return vma;
2220 }
2221 
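/*
 * Largest page size this VMA is known to be bound with, derived from the
 * XE_VMA_PTE_* flags. The REMAP parsing below uses it to check whether an
 * untouched prev/next portion is still aligned to that size and can skip the
 * rebind (skip_prev/skip_next).
 */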
2222 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2223 {
2224 	if (vma->gpuva.flags & XE_VMA_PTE_1G)
2225 		return SZ_1G;
2226 	else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
2227 		return SZ_2M;
2228 	else if (vma->gpuva.flags & XE_VMA_PTE_64K)
2229 		return SZ_64K;
2230 	else if (vma->gpuva.flags & XE_VMA_PTE_4K)
2231 		return SZ_4K;
2232 
2233 	return SZ_1G;	/* Uninitialized, use max size */
2234 }
2235 
2236 static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2237 {
2238 	switch (size) {
2239 	case SZ_1G:
2240 		vma->gpuva.flags |= XE_VMA_PTE_1G;
2241 		break;
2242 	case SZ_2M:
2243 		vma->gpuva.flags |= XE_VMA_PTE_2M;
2244 		break;
2245 	case SZ_64K:
2246 		vma->gpuva.flags |= XE_VMA_PTE_64K;
2247 		break;
2248 	case SZ_4K:
2249 		vma->gpuva.flags |= XE_VMA_PTE_4K;
2250 		break;
2251 	}
2252 }
2253 
2254 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2255 {
2256 	int err = 0;
2257 
2258 	lockdep_assert_held_write(&vm->lock);
2259 
2260 	switch (op->base.op) {
2261 	case DRM_GPUVA_OP_MAP:
2262 		err |= xe_vm_insert_vma(vm, op->map.vma);
2263 		if (!err)
2264 			op->flags |= XE_VMA_OP_COMMITTED;
2265 		break;
2266 	case DRM_GPUVA_OP_REMAP:
2267 	{
2268 		u8 tile_present =
2269 			gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2270 
2271 		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2272 				 true);
2273 		op->flags |= XE_VMA_OP_COMMITTED;
2274 
2275 		if (op->remap.prev) {
2276 			err |= xe_vm_insert_vma(vm, op->remap.prev);
2277 			if (!err)
2278 				op->flags |= XE_VMA_OP_PREV_COMMITTED;
2279 			if (!err && op->remap.skip_prev) {
2280 				op->remap.prev->tile_present =
2281 					tile_present;
2282 				op->remap.prev = NULL;
2283 			}
2284 		}
2285 		if (op->remap.next) {
2286 			err |= xe_vm_insert_vma(vm, op->remap.next);
2287 			if (!err)
2288 				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2289 			if (!err && op->remap.skip_next) {
2290 				op->remap.next->tile_present =
2291 					tile_present;
2292 				op->remap.next = NULL;
2293 			}
2294 		}
2295 
2296 		/* Adjust for partial unbind after removing the VMA from the VM */
2297 		if (!err) {
2298 			op->base.remap.unmap->va->va.addr = op->remap.start;
2299 			op->base.remap.unmap->va->va.range = op->remap.range;
2300 		}
2301 		break;
2302 	}
2303 	case DRM_GPUVA_OP_UNMAP:
2304 		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2305 		op->flags |= XE_VMA_OP_COMMITTED;
2306 		break;
2307 	case DRM_GPUVA_OP_PREFETCH:
2308 		op->flags |= XE_VMA_OP_COMMITTED;
2309 		break;
2310 	default:
2311 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2312 	}
2313 
2314 	return err;
2315 }
2316 
2318 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2319 				   struct drm_gpuva_ops *ops,
2320 				   struct xe_sync_entry *syncs, u32 num_syncs,
2321 				   struct list_head *ops_list, bool last)
2322 {
2323 	struct xe_vma_op *last_op = NULL;
2324 	struct drm_gpuva_op *__op;
2325 	int err = 0;
2326 
2327 	lockdep_assert_held_write(&vm->lock);
2328 
2329 	drm_gpuva_for_each_op(__op, ops) {
2330 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2331 		struct xe_vma *vma;
2332 		bool first = list_empty(ops_list);
2333 		unsigned int flags = 0;
2334 
2335 		INIT_LIST_HEAD(&op->link);
2336 		list_add_tail(&op->link, ops_list);
2337 
2338 		if (first) {
2339 			op->flags |= XE_VMA_OP_FIRST;
2340 			op->num_syncs = num_syncs;
2341 			op->syncs = syncs;
2342 		}
2343 
2344 		op->q = q;
2345 
2346 		switch (op->base.op) {
2347 		case DRM_GPUVA_OP_MAP:
2348 		{
2349 			flags |= op->map.is_null ?
2350 				VMA_CREATE_FLAG_IS_NULL : 0;
2351 
2352 			vma = new_vma(vm, &op->base.map, op->map.pat_index,
2353 				      flags);
2354 			if (IS_ERR(vma))
2355 				return PTR_ERR(vma);
2356 
2357 			op->map.vma = vma;
2358 			break;
2359 		}
2360 		case DRM_GPUVA_OP_REMAP:
2361 		{
2362 			struct xe_vma *old =
2363 				gpuva_to_vma(op->base.remap.unmap->va);
2364 
2365 			op->remap.start = xe_vma_start(old);
2366 			op->remap.range = xe_vma_size(old);
2367 
2368 			if (op->base.remap.prev) {
2369 				flags |= op->base.remap.unmap->va->flags &
2370 					XE_VMA_READ_ONLY ?
2371 					VMA_CREATE_FLAG_READ_ONLY : 0;
2372 				flags |= op->base.remap.unmap->va->flags &
2373 					DRM_GPUVA_SPARSE ?
2374 					VMA_CREATE_FLAG_IS_NULL : 0;
2375 
2376 				vma = new_vma(vm, op->base.remap.prev,
2377 					      old->pat_index, flags);
2378 				if (IS_ERR(vma))
2379 					return PTR_ERR(vma);
2380 
2381 				op->remap.prev = vma;
2382 
2383 				/*
2384 				 * Userptr creates a new SG mapping so
2385 				 * we must also rebind.
2386 				 */
2387 				op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2388 					IS_ALIGNED(xe_vma_end(vma),
2389 						   xe_vma_max_pte_size(old));
2390 				if (op->remap.skip_prev) {
2391 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2392 					op->remap.range -=
2393 						xe_vma_end(vma) -
2394 						xe_vma_start(old);
2395 					op->remap.start = xe_vma_end(vma);
2396 				}
2397 			}
2398 
2399 			if (op->base.remap.next) {
2400 				flags |= op->base.remap.unmap->va->flags &
2401 					XE_VMA_READ_ONLY ?
2402 					VMA_CREATE_FLAG_READ_ONLY : 0;
2403 				flags |= op->base.remap.unmap->va->flags &
2404 					DRM_GPUVA_SPARSE ?
2405 					VMA_CREATE_FLAG_IS_NULL : 0;
2406 
2407 				vma = new_vma(vm, op->base.remap.next,
2408 					      old->pat_index, flags);
2409 				if (IS_ERR(vma))
2410 					return PTR_ERR(vma);
2411 
2412 				op->remap.next = vma;
2413 
2414 				/*
2415 				 * Userptr creates a new SG mapping so
2416 				 * we must also rebind.
2417 				 */
2418 				op->remap.skip_next = !xe_vma_is_userptr(old) &&
2419 					IS_ALIGNED(xe_vma_start(vma),
2420 						   xe_vma_max_pte_size(old));
2421 				if (op->remap.skip_next) {
2422 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2423 					op->remap.range -=
2424 						xe_vma_end(old) -
2425 						xe_vma_start(vma);
2426 				}
2427 			}
2428 			break;
2429 		}
2430 		case DRM_GPUVA_OP_UNMAP:
2431 		case DRM_GPUVA_OP_PREFETCH:
2432 			/* Nothing to do */
2433 			break;
2434 		default:
2435 			drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2436 		}
2437 
2438 		last_op = op;
2439 
2440 		err = xe_vma_op_commit(vm, op);
2441 		if (err)
2442 			return err;
2443 	}
2444 
2445 	/* FIXME: Unhandled corner case */
2446 	XE_WARN_ON(!last_op && last && !list_empty(ops_list));
2447 
2448 	if (!last_op)
2449 		return 0;
2450 
2451 	last_op->ops = ops;
2452 	if (last) {
2453 		last_op->flags |= XE_VMA_OP_LAST;
2454 		last_op->num_syncs = num_syncs;
2455 		last_op->syncs = syncs;
2456 	}
2457 
2458 	return 0;
2459 }
2460 
2461 static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
2462 		      struct xe_vma *vma, struct xe_vma_op *op)
2463 {
2464 	int err;
2465 
2466 	lockdep_assert_held_write(&vm->lock);
2467 
2468 	err = xe_vm_prepare_vma(exec, vma, 1);
2469 	if (err)
2470 		return err;
2471 
2472 	xe_vm_assert_held(vm);
2473 	xe_bo_assert_held(xe_vma_bo(vma));
2474 
2475 	switch (op->base.op) {
2476 	case DRM_GPUVA_OP_MAP:
2477 		err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2478 				 op->syncs, op->num_syncs,
2479 				 !xe_vm_in_fault_mode(vm),
2480 				 op->flags & XE_VMA_OP_FIRST,
2481 				 op->flags & XE_VMA_OP_LAST);
2482 		break;
2483 	case DRM_GPUVA_OP_REMAP:
2484 	{
2485 		bool prev = !!op->remap.prev;
2486 		bool next = !!op->remap.next;
2487 
2488 		if (!op->remap.unmap_done) {
2489 			if (prev || next)
2490 				vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2491 			err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2492 					   op->num_syncs,
2493 					   op->flags & XE_VMA_OP_FIRST,
2494 					   op->flags & XE_VMA_OP_LAST &&
2495 					   !prev && !next);
2496 			if (err)
2497 				break;
2498 			op->remap.unmap_done = true;
2499 		}
2500 
2501 		if (prev) {
2502 			op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2503 			err = xe_vm_bind(vm, op->remap.prev, op->q,
2504 					 xe_vma_bo(op->remap.prev), op->syncs,
2505 					 op->num_syncs, true, false,
2506 					 op->flags & XE_VMA_OP_LAST && !next);
2507 			op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2508 			if (err)
2509 				break;
2510 			op->remap.prev = NULL;
2511 		}
2512 
2513 		if (next) {
2514 			op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2515 			err = xe_vm_bind(vm, op->remap.next, op->q,
2516 					 xe_vma_bo(op->remap.next),
2517 					 op->syncs, op->num_syncs,
2518 					 true, false,
2519 					 op->flags & XE_VMA_OP_LAST);
2520 			op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2521 			if (err)
2522 				break;
2523 			op->remap.next = NULL;
2524 		}
2525 
2526 		break;
2527 	}
2528 	case DRM_GPUVA_OP_UNMAP:
2529 		err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2530 				   op->num_syncs, op->flags & XE_VMA_OP_FIRST,
2531 				   op->flags & XE_VMA_OP_LAST);
2532 		break;
2533 	case DRM_GPUVA_OP_PREFETCH:
2534 		err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
2535 				     op->syncs, op->num_syncs,
2536 				     op->flags & XE_VMA_OP_FIRST,
2537 				     op->flags & XE_VMA_OP_LAST);
2538 		break;
2539 	default:
2540 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2541 	}
2542 
2543 	if (err)
2544 		trace_xe_vma_fail(vma);
2545 
2546 	return err;
2547 }
2548 
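/*
 * Execute a single VMA operation under drm_exec. A -EAGAIN from op_execute()
 * indicates the userptr backing was invalidated while the bind was being set
 * up; repin the pages and retry.
 */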
2549 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2550 			       struct xe_vma_op *op)
2551 {
2552 	struct drm_exec exec;
2553 	int err;
2554 
2555 retry_userptr:
2556 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2557 	drm_exec_until_all_locked(&exec) {
2558 		err = op_execute(&exec, vm, vma, op);
2559 		drm_exec_retry_on_contention(&exec);
2560 		if (err)
2561 			break;
2562 	}
2563 	drm_exec_fini(&exec);
2564 
2565 	if (err == -EAGAIN) {
2566 		lockdep_assert_held_write(&vm->lock);
2567 
2568 		if (op->base.op == DRM_GPUVA_OP_REMAP) {
2569 			if (!op->remap.unmap_done)
2570 				vma = gpuva_to_vma(op->base.remap.unmap->va);
2571 			else if (op->remap.prev)
2572 				vma = op->remap.prev;
2573 			else
2574 				vma = op->remap.next;
2575 		}
2576 
2577 		if (xe_vma_is_userptr(vma)) {
2578 			err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2579 			if (!err)
2580 				goto retry_userptr;
2581 
2582 			trace_xe_vma_fail(vma);
2583 		}
2584 	}
2585 
2586 	return err;
2587 }
2588 
2589 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2590 {
2591 	int ret = 0;
2592 
2593 	lockdep_assert_held_write(&vm->lock);
2594 
2595 	switch (op->base.op) {
2596 	case DRM_GPUVA_OP_MAP:
2597 		ret = __xe_vma_op_execute(vm, op->map.vma, op);
2598 		break;
2599 	case DRM_GPUVA_OP_REMAP:
2600 	{
2601 		struct xe_vma *vma;
2602 
2603 		if (!op->remap.unmap_done)
2604 			vma = gpuva_to_vma(op->base.remap.unmap->va);
2605 		else if (op->remap.prev)
2606 			vma = op->remap.prev;
2607 		else
2608 			vma = op->remap.next;
2609 
2610 		ret = __xe_vma_op_execute(vm, vma, op);
2611 		break;
2612 	}
2613 	case DRM_GPUVA_OP_UNMAP:
2614 		ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2615 					  op);
2616 		break;
2617 	case DRM_GPUVA_OP_PREFETCH:
2618 		ret = __xe_vma_op_execute(vm,
2619 					  gpuva_to_vma(op->base.prefetch.va),
2620 					  op);
2621 		break;
2622 	default:
2623 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2624 	}
2625 
2626 	return ret;
2627 }
2628 
2629 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2630 {
2631 	bool last = op->flags & XE_VMA_OP_LAST;
2632 
2633 	if (last) {
2634 		while (op->num_syncs--)
2635 			xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2636 		kfree(op->syncs);
2637 		if (op->q)
2638 			xe_exec_queue_put(op->q);
2639 	}
2640 	if (!list_empty(&op->link))
2641 		list_del(&op->link);
2642 	if (op->ops)
2643 		drm_gpuva_ops_free(&vm->gpuvm, op->ops);
2644 	if (last)
2645 		xe_vm_put(vm);
2646 }
2647 
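/*
 * Undo a single (partially) committed operation, restoring the GPUVA state
 * that existed before xe_vma_op_commit().
 */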
2648 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2649 			     bool post_commit, bool prev_post_commit,
2650 			     bool next_post_commit)
2651 {
2652 	lockdep_assert_held_write(&vm->lock);
2653 
2654 	switch (op->base.op) {
2655 	case DRM_GPUVA_OP_MAP:
2656 		if (op->map.vma) {
2657 			prep_vma_destroy(vm, op->map.vma, post_commit);
2658 			xe_vma_destroy_unlocked(op->map.vma);
2659 		}
2660 		break;
2661 	case DRM_GPUVA_OP_UNMAP:
2662 	{
2663 		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2664 
2665 		if (vma) {
2666 			down_read(&vm->userptr.notifier_lock);
2667 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2668 			up_read(&vm->userptr.notifier_lock);
2669 			if (post_commit)
2670 				xe_vm_insert_vma(vm, vma);
2671 		}
2672 		break;
2673 	}
2674 	case DRM_GPUVA_OP_REMAP:
2675 	{
2676 		struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2677 
2678 		if (op->remap.prev) {
2679 			prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2680 			xe_vma_destroy_unlocked(op->remap.prev);
2681 		}
2682 		if (op->remap.next) {
2683 			prep_vma_destroy(vm, op->remap.next, next_post_commit);
2684 			xe_vma_destroy_unlocked(op->remap.next);
2685 		}
2686 		if (vma) {
2687 			down_read(&vm->userptr.notifier_lock);
2688 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2689 			up_read(&vm->userptr.notifier_lock);
2690 			if (post_commit)
2691 				xe_vm_insert_vma(vm, vma);
2692 		}
2693 		break;
2694 	}
2695 	case DRM_GPUVA_OP_PREFETCH:
2696 		/* Nothing to do */
2697 		break;
2698 	default:
2699 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2700 	}
2701 }
2702 
2703 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2704 				     struct drm_gpuva_ops **ops,
2705 				     int num_ops_list)
2706 {
2707 	int i;
2708 
2709 	for (i = num_ops_list - 1; i >= 0; --i) {
2710 		struct drm_gpuva_ops *__ops = ops[i];
2711 		struct drm_gpuva_op *__op;
2712 
2713 		if (!__ops)
2714 			continue;
2715 
2716 		drm_gpuva_for_each_op_reverse(__op, __ops) {
2717 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2718 
2719 			xe_vma_op_unwind(vm, op,
2720 					 op->flags & XE_VMA_OP_COMMITTED,
2721 					 op->flags & XE_VMA_OP_PREV_COMMITTED,
2722 					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2723 		}
2724 
2725 		drm_gpuva_ops_free(&vm->gpuvm, __ops);
2726 	}
2727 }
2728 
2729 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2730 				     struct list_head *ops_list)
2731 {
2732 	struct xe_vma_op *op, *next;
2733 	int err;
2734 
2735 	lockdep_assert_held_write(&vm->lock);
2736 
2737 	list_for_each_entry_safe(op, next, ops_list, link) {
2738 		err = xe_vma_op_execute(vm, op);
2739 		if (err) {
2740 			drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
2741 				 op->base.op, err);
2742 			/*
2743 			 * FIXME: Killing VM rather than proper error handling
2744 			 */
2745 			xe_vm_kill(vm);
2746 			return -ENOSPC;
2747 		}
2748 		xe_vma_op_cleanup(vm, op);
2749 	}
2750 
2751 	return 0;
2752 }
2753 
2754 #define SUPPORTED_FLAGS	(DRM_XE_VM_BIND_FLAG_NULL | \
2755 	 DRM_XE_VM_BIND_FLAG_DUMPABLE)
2756 #define XE_64K_PAGE_MASK 0xffffull
2757 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
2758 
2759 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2760 				    struct drm_xe_vm_bind *args,
2761 				    struct drm_xe_vm_bind_op **bind_ops)
2762 {
2763 	int err;
2764 	int i;
2765 
2766 	if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
2767 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2768 		return -EINVAL;
2769 
2770 	if (XE_IOCTL_DBG(xe, args->extensions))
2771 		return -EINVAL;
2772 
2773 	if (args->num_binds > 1) {
2774 		u64 __user *bind_user =
2775 			u64_to_user_ptr(args->vector_of_binds);
2776 
2777 		*bind_ops = kvmalloc_array(args->num_binds,
2778 					   sizeof(struct drm_xe_vm_bind_op),
2779 					   GFP_KERNEL | __GFP_ACCOUNT);
2780 		if (!*bind_ops)
2781 			return -ENOMEM;
2782 
2783 		err = copy_from_user(*bind_ops, bind_user,
2784 				     sizeof(struct drm_xe_vm_bind_op) *
2785 				     args->num_binds);
2786 		if (XE_IOCTL_DBG(xe, err)) {
2787 			err = -EFAULT;
2788 			goto free_bind_ops;
2789 		}
2790 	} else {
2791 		*bind_ops = &args->bind;
2792 	}
2793 
2794 	for (i = 0; i < args->num_binds; ++i) {
2795 		u64 range = (*bind_ops)[i].range;
2796 		u64 addr = (*bind_ops)[i].addr;
2797 		u32 op = (*bind_ops)[i].op;
2798 		u32 flags = (*bind_ops)[i].flags;
2799 		u32 obj = (*bind_ops)[i].obj;
2800 		u64 obj_offset = (*bind_ops)[i].obj_offset;
2801 		u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
2802 		bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2803 		u16 pat_index = (*bind_ops)[i].pat_index;
2804 		u16 coh_mode;
2805 
2806 		if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
2807 			err = -EINVAL;
2808 			goto free_bind_ops;
2809 		}
2810 
2811 		pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
2812 		(*bind_ops)[i].pat_index = pat_index;
2813 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2814 		if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
2815 			err = -EINVAL;
2816 			goto free_bind_ops;
2817 		}
2818 
2819 		if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
2820 			err = -EINVAL;
2821 			goto free_bind_ops;
2822 		}
2823 
2824 		if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
2825 		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
2826 		    XE_IOCTL_DBG(xe, obj && is_null) ||
2827 		    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
2828 		    XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
2829 				 is_null) ||
2830 		    XE_IOCTL_DBG(xe, !obj &&
2831 				 op == DRM_XE_VM_BIND_OP_MAP &&
2832 				 !is_null) ||
2833 		    XE_IOCTL_DBG(xe, !obj &&
2834 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2835 		    XE_IOCTL_DBG(xe, addr &&
2836 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2837 		    XE_IOCTL_DBG(xe, range &&
2838 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2839 		    XE_IOCTL_DBG(xe, obj &&
2840 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2841 		    XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2842 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2843 		    XE_IOCTL_DBG(xe, obj &&
2844 				 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
2845 		    XE_IOCTL_DBG(xe, prefetch_region &&
2846 				 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
2847 		    XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
2848 				       xe->info.mem_region_mask)) ||
2849 		    XE_IOCTL_DBG(xe, obj &&
2850 				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
2851 			err = -EINVAL;
2852 			goto free_bind_ops;
2853 		}
2854 
2855 		if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
2856 		    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
2857 		    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
2858 		    XE_IOCTL_DBG(xe, !range &&
2859 				 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
2860 			err = -EINVAL;
2861 			goto free_bind_ops;
2862 		}
2863 	}
2864 
2865 	return 0;
2866 
2867 free_bind_ops:
2868 	if (args->num_binds > 1)
2869 		kvfree(*bind_ops);
2870 	return err;
2871 }
2872 
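/*
 * Used on the "nothing to do" (-ENODATA) path: signal all user syncs against
 * the exec queue's last fence so waiters still complete.
 */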
2873 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
2874 				       struct xe_exec_queue *q,
2875 				       struct xe_sync_entry *syncs,
2876 				       int num_syncs)
2877 {
2878 	struct dma_fence *fence;
2879 	int i, err = 0;
2880 
2881 	fence = xe_sync_in_fence_get(syncs, num_syncs,
2882 				     to_wait_exec_queue(vm, q), vm);
2883 	if (IS_ERR(fence))
2884 		return PTR_ERR(fence);
2885 
2886 	for (i = 0; i < num_syncs; i++)
2887 		xe_sync_entry_signal(&syncs[i], NULL, fence);
2888 
2889 	xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
2890 				     fence);
2891 	dma_fence_put(fence);
2892 
2893 	return err;
2894 }
2895 
2896 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2897 {
2898 	struct xe_device *xe = to_xe_device(dev);
2899 	struct xe_file *xef = to_xe_file(file);
2900 	struct drm_xe_vm_bind *args = data;
2901 	struct drm_xe_sync __user *syncs_user;
2902 	struct xe_bo **bos = NULL;
2903 	struct drm_gpuva_ops **ops = NULL;
2904 	struct xe_vm *vm;
2905 	struct xe_exec_queue *q = NULL;
2906 	u32 num_syncs, num_ufence = 0;
2907 	struct xe_sync_entry *syncs = NULL;
2908 	struct drm_xe_vm_bind_op *bind_ops;
2909 	LIST_HEAD(ops_list);
2910 	int err;
2911 	int i;
2912 
2913 	err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
2914 	if (err)
2915 		return err;
2916 
2917 	if (args->exec_queue_id) {
2918 		q = xe_exec_queue_lookup(xef, args->exec_queue_id);
2919 		if (XE_IOCTL_DBG(xe, !q)) {
2920 			err = -ENOENT;
2921 			goto free_objs;
2922 		}
2923 
2924 		if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
2925 			err = -EINVAL;
2926 			goto put_exec_queue;
2927 		}
2928 	}
2929 
2930 	vm = xe_vm_lookup(xef, args->vm_id);
2931 	if (XE_IOCTL_DBG(xe, !vm)) {
2932 		err = -EINVAL;
2933 		goto put_exec_queue;
2934 	}
2935 
2936 	err = down_write_killable(&vm->lock);
2937 	if (err)
2938 		goto put_vm;
2939 
2940 	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
2941 		err = -ENOENT;
2942 		goto release_vm_lock;
2943 	}
2944 
2945 	for (i = 0; i < args->num_binds; ++i) {
2946 		u64 range = bind_ops[i].range;
2947 		u64 addr = bind_ops[i].addr;
2948 
2949 		if (XE_IOCTL_DBG(xe, range > vm->size) ||
2950 		    XE_IOCTL_DBG(xe, addr > vm->size - range)) {
2951 			err = -EINVAL;
2952 			goto release_vm_lock;
2953 		}
2954 	}
2955 
2956 	if (args->num_binds) {
2957 		bos = kvcalloc(args->num_binds, sizeof(*bos),
2958 			       GFP_KERNEL | __GFP_ACCOUNT);
2959 		if (!bos) {
2960 			err = -ENOMEM;
2961 			goto release_vm_lock;
2962 		}
2963 
2964 		ops = kvcalloc(args->num_binds, sizeof(*ops),
2965 			       GFP_KERNEL | __GFP_ACCOUNT);
2966 		if (!ops) {
2967 			err = -ENOMEM;
2968 			goto release_vm_lock;
2969 		}
2970 	}
2971 
2972 	for (i = 0; i < args->num_binds; ++i) {
2973 		struct drm_gem_object *gem_obj;
2974 		u64 range = bind_ops[i].range;
2975 		u64 addr = bind_ops[i].addr;
2976 		u32 obj = bind_ops[i].obj;
2977 		u64 obj_offset = bind_ops[i].obj_offset;
2978 		u16 pat_index = bind_ops[i].pat_index;
2979 		u16 coh_mode;
2980 
2981 		if (!obj)
2982 			continue;
2983 
2984 		gem_obj = drm_gem_object_lookup(file, obj);
2985 		if (XE_IOCTL_DBG(xe, !gem_obj)) {
2986 			err = -ENOENT;
2987 			goto put_obj;
2988 		}
2989 		bos[i] = gem_to_xe_bo(gem_obj);
2990 
2991 		if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
2992 		    XE_IOCTL_DBG(xe, obj_offset >
2993 				 bos[i]->size - range)) {
2994 			err = -EINVAL;
2995 			goto put_obj;
2996 		}
2997 
2998 		if (bos[i]->flags & XE_BO_INTERNAL_64K) {
2999 			if (XE_IOCTL_DBG(xe, obj_offset &
3000 					 XE_64K_PAGE_MASK) ||
3001 			    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3002 			    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3003 				err = -EINVAL;
3004 				goto put_obj;
3005 			}
3006 		}
3007 
3008 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
3009 		if (bos[i]->cpu_caching) {
3010 			if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
3011 					 bos[i]->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
3012 				err = -EINVAL;
3013 				goto put_obj;
3014 			}
3015 		} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
3016 			/*
3017 			 * An imported dma-buf from a different device should
3018 			 * require 1-way or 2-way coherency since we don't know
3019 			 * how it was mapped on the CPU. Just assume it is
3020 			 * potentially cached on the CPU side.
3021 			 */
3022 			err = -EINVAL;
3023 			goto put_obj;
3024 		}
3025 	}
3026 
3027 	if (args->num_syncs) {
3028 		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3029 		if (!syncs) {
3030 			err = -ENOMEM;
3031 			goto put_obj;
3032 		}
3033 	}
3034 
3035 	syncs_user = u64_to_user_ptr(args->syncs);
3036 	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3037 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3038 					  &syncs_user[num_syncs],
3039 					  (xe_vm_in_lr_mode(vm) ?
3040 					   SYNC_PARSE_FLAG_LR_MODE : 0) |
3041 					  (!args->num_binds ?
3042 					   SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
3043 		if (err)
3044 			goto free_syncs;
3045 
3046 		if (xe_sync_is_ufence(&syncs[num_syncs]))
3047 			num_ufence++;
3048 	}
3049 
3050 	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
3051 		err = -EINVAL;
3052 		goto free_syncs;
3053 	}
3054 
3055 	if (!args->num_binds) {
3056 		err = -ENODATA;
3057 		goto free_syncs;
3058 	}
3059 
3060 	for (i = 0; i < args->num_binds; ++i) {
3061 		u64 range = bind_ops[i].range;
3062 		u64 addr = bind_ops[i].addr;
3063 		u32 op = bind_ops[i].op;
3064 		u32 flags = bind_ops[i].flags;
3065 		u64 obj_offset = bind_ops[i].obj_offset;
3066 		u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
3067 		u16 pat_index = bind_ops[i].pat_index;
3068 
3069 		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3070 						  addr, range, op, flags,
3071 						  prefetch_region, pat_index);
3072 		if (IS_ERR(ops[i])) {
3073 			err = PTR_ERR(ops[i]);
3074 			ops[i] = NULL;
3075 			goto unwind_ops;
3076 		}
3077 
3078 		err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3079 					      &ops_list,
3080 					      i == args->num_binds - 1);
3081 		if (err)
3082 			goto unwind_ops;
3083 	}
3084 
3085 	/* Nothing to do */
3086 	if (list_empty(&ops_list)) {
3087 		err = -ENODATA;
3088 		goto unwind_ops;
3089 	}
3090 
3091 	xe_vm_get(vm);
3092 	if (q)
3093 		xe_exec_queue_get(q);
3094 
3095 	err = vm_bind_ioctl_ops_execute(vm, &ops_list);
3096 
3097 	up_write(&vm->lock);
3098 
3099 	if (q)
3100 		xe_exec_queue_put(q);
3101 	xe_vm_put(vm);
3102 
3103 	for (i = 0; bos && i < args->num_binds; ++i)
3104 		xe_bo_put(bos[i]);
3105 
3106 	kvfree(bos);
3107 	kvfree(ops);
3108 	if (args->num_binds > 1)
3109 		kvfree(bind_ops);
3110 
3111 	return err;
3112 
3113 unwind_ops:
3114 	vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3115 free_syncs:
3116 	if (err == -ENODATA)
3117 		err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3118 	while (num_syncs--)
3119 		xe_sync_entry_cleanup(&syncs[num_syncs]);
3120 
3121 	kfree(syncs);
3122 put_obj:
3123 	for (i = 0; i < args->num_binds; ++i)
3124 		xe_bo_put(bos[i]);
3125 release_vm_lock:
3126 	up_write(&vm->lock);
3127 put_vm:
3128 	xe_vm_put(vm);
3129 put_exec_queue:
3130 	if (q)
3131 		xe_exec_queue_put(q);
3132 free_objs:
3133 	kvfree(bos);
3134 	kvfree(ops);
3135 	if (args->num_binds > 1)
3136 		kvfree(bind_ops);
3137 	return err;
3138 }
3139 
3140 /**
3141  * xe_vm_lock() - Lock the vm's dma_resv object
3142  * @vm: The struct xe_vm whose lock is to be locked
3143  * @intr: Whether to perform the wait interruptibly
3144  *
3145  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3146  * contended lock was interrupted. If @intr is false, the function
3147  * always returns 0.
3148  */
3149 int xe_vm_lock(struct xe_vm *vm, bool intr)
3150 {
3151 	if (intr)
3152 		return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3153 
3154 	return dma_resv_lock(xe_vm_resv(vm), NULL);
3155 }
3156 
3157 /**
3158  * xe_vm_unlock() - Unlock the vm's dma_resv object
3159  * @vm: The struct xe_vm whose lock is to be released.
3160  *
3161  * Unlock the vm's dma_resv object that was previously locked by xe_vm_lock().
3162  */
3163 void xe_vm_unlock(struct xe_vm *vm)
3164 {
3165 	dma_resv_unlock(xe_vm_resv(vm));
3166 }
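
/*
 * Example pairing of the two helpers above (illustrative sketch only):
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *	... touch state protected by the VM's dma_resv ...
 *	xe_vm_unlock(vm);
 */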
3167 
3168 /**
3169  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3170  * @vma: VMA to invalidate
3171  *
3172  * Walks the page-table leaves, zeroing the entries owned by this VMA, then
3173  * invalidates the TLBs and blocks until the TLB invalidation is complete.
3175  *
3176  * Returns 0 for success, negative error code otherwise.
3177  * Return: 0 for success, negative error code otherwise.
3178 int xe_vm_invalidate_vma(struct xe_vma *vma)
3179 {
3180 	struct xe_device *xe = xe_vma_vm(vma)->xe;
3181 	struct xe_tile *tile;
3182 	u32 tile_needs_invalidate = 0;
3183 	int seqno[XE_MAX_TILES_PER_DEVICE];
3184 	u8 id;
3185 	int ret;
3186 
3187 	xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
3188 	xe_assert(xe, !xe_vma_is_null(vma));
3189 	trace_xe_vma_usm_invalidate(vma);
3190 
3191 	/* Check that we don't race with page-table updates */
3192 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3193 		if (xe_vma_is_userptr(vma)) {
3194 			WARN_ON_ONCE(!mmu_interval_check_retry
3195 				     (&to_userptr_vma(vma)->userptr.notifier,
3196 				      to_userptr_vma(vma)->userptr.notifier_seq));
3197 			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3198 							     DMA_RESV_USAGE_BOOKKEEP));
3199 
3200 		} else {
3201 			xe_bo_assert_held(xe_vma_bo(vma));
3202 		}
3203 	}
3204 
3205 	for_each_tile(tile, xe, id) {
3206 		if (xe_pt_zap_ptes(tile, vma)) {
3207 			tile_needs_invalidate |= BIT(id);
3208 			xe_device_wmb(xe);
3209 			/*
3210 			 * FIXME: We potentially need to invalidate multiple
3211 			 * GTs within the tile
3212 			 */
3213 			seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3214 			if (seqno[id] < 0)
3215 				return seqno[id];
3216 		}
3217 	}
3218 
3219 	for_each_tile(tile, xe, id) {
3220 		if (tile_needs_invalidate & BIT(id)) {
3221 			ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3222 			if (ret < 0)
3223 				return ret;
3224 		}
3225 	}
3226 
3227 	vma->usm.tile_invalidated = vma->tile_mask;
3228 
3229 	return 0;
3230 }
3231 
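/*
 * Dump the VM's page-table root and all GPUVA mappings, e.g. for error
 * capture. Best effort: bails out if the VM lock cannot be taken.
 */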
3232 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3233 {
3234 	struct drm_gpuva *gpuva;
3235 	bool is_vram;
3236 	u64 addr;
3237 
3238 	if (!down_read_trylock(&vm->lock)) {
3239 		drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3240 		return 0;
3241 	}
3242 	if (vm->pt_root[gt_id]) {
3243 		addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
3244 		is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
3245 		drm_printf(p, " VM root: A:0x%llx %s\n", addr,
3246 			   is_vram ? "VRAM" : "SYS");
3247 	}
3248 
3249 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3250 		struct xe_vma *vma = gpuva_to_vma(gpuva);
3251 		bool is_userptr = xe_vma_is_userptr(vma);
3252 		bool is_null = xe_vma_is_null(vma);
3253 
3254 		if (is_null) {
3255 			addr = 0;
3256 		} else if (is_userptr) {
3257 			struct sg_table *sg = to_userptr_vma(vma)->userptr.sg;
3258 			struct xe_res_cursor cur;
3259 
3260 			if (sg) {
3261 				xe_res_first_sg(sg, 0, XE_PAGE_SIZE, &cur);
3262 				addr = xe_res_dma(&cur);
3263 			} else {
3264 				addr = 0;
3265 			}
3266 		} else {
3267 			addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
3268 			is_vram = xe_bo_is_vram(xe_vma_bo(vma));
3269 		}
3270 		drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3271 			   xe_vma_start(vma), xe_vma_end(vma) - 1,
3272 			   xe_vma_size(vma),
3273 			   addr, is_null ? "NULL" : is_userptr ? "USR" :
3274 			   is_vram ? "VRAM" : "SYS");
3275 	}
3276 	up_read(&vm->lock);
3277 
3278 	return 0;
3279 }
3280