xref: /linux/drivers/gpu/drm/xe/xe_vm.c (revision 3e0bc2855b573bcffa2a52955a878f537f5ac0cd)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_vm.h"
7 
8 #include <linux/dma-fence-array.h>
9 #include <linux/nospec.h>
10 
11 #include <drm/drm_exec.h>
12 #include <drm/drm_print.h>
13 #include <drm/ttm/ttm_execbuf_util.h>
14 #include <drm/ttm/ttm_tt.h>
15 #include <drm/xe_drm.h>
16 #include <linux/delay.h>
17 #include <linux/kthread.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 
21 #include "xe_assert.h"
22 #include "xe_bo.h"
23 #include "xe_device.h"
24 #include "xe_drm_client.h"
25 #include "xe_exec_queue.h"
26 #include "xe_gt.h"
27 #include "xe_gt_pagefault.h"
28 #include "xe_gt_tlb_invalidation.h"
29 #include "xe_migrate.h"
30 #include "xe_pat.h"
31 #include "xe_pm.h"
32 #include "xe_preempt_fence.h"
33 #include "xe_pt.h"
34 #include "xe_res_cursor.h"
35 #include "xe_sync.h"
36 #include "xe_trace.h"
37 #include "generated/xe_wa_oob.h"
38 #include "xe_wa.h"
39 
40 #define TEST_VM_ASYNC_OPS_ERROR
41 
42 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
43 {
44 	return vm->gpuvm.r_obj;
45 }
46 
47 /**
48  * xe_vma_userptr_check_repin() - Advisory check for repin needed
49  * @vma: The userptr vma
50  *
51  * Check if the userptr vma has been invalidated since last successful
52  * repin. The check is advisory only and the function can be called
53  * without the vm->userptr.notifier_lock held. There is no guarantee that the
54  * vma userptr will remain valid after a lockless check, so typically
55  * the call needs to be followed by a proper check under the notifier_lock.
56  *
57  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
58  */
59 int xe_vma_userptr_check_repin(struct xe_vma *vma)
60 {
61 	return mmu_interval_check_retry(&vma->userptr.notifier,
62 					vma->userptr.notifier_seq) ?
63 		-EAGAIN : 0;
64 }
65 
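/**
 * xe_vma_userptr_pin_pages() - Pin the pages backing a userptr vma
 * @vma: The userptr vma
 *
 * Pins the backing pages with get_user_pages_fast(), temporarily adopting
 * the notifier's mm when called from a kernel thread, then builds and
 * DMA-maps an sg table and marks the pages accessed (and dirty for writable
 * mappings). The notifier sequence number read before pinning is recorded on
 * success, and the pin is retried if the userptr was invalidated again in
 * the meantime. Requires vm->lock to be held. Returns early if the vma has
 * been destroyed or is still valid.
 *
 * Return: 0 on success, negative error code on failure.
 */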
66 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
67 {
68 	struct xe_vm *vm = xe_vma_vm(vma);
69 	struct xe_device *xe = vm->xe;
70 	const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
71 	struct page **pages;
72 	bool in_kthread = !current->mm;
73 	unsigned long notifier_seq;
74 	int pinned, ret, i;
75 	bool read_only = xe_vma_read_only(vma);
76 
77 	lockdep_assert_held(&vm->lock);
78 	xe_assert(xe, xe_vma_is_userptr(vma));
79 retry:
80 	if (vma->gpuva.flags & XE_VMA_DESTROYED)
81 		return 0;
82 
83 	notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
84 	if (notifier_seq == vma->userptr.notifier_seq)
85 		return 0;
86 
87 	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
88 	if (!pages)
89 		return -ENOMEM;
90 
91 	if (vma->userptr.sg) {
92 		dma_unmap_sgtable(xe->drm.dev,
93 				  vma->userptr.sg,
94 				  read_only ? DMA_TO_DEVICE :
95 				  DMA_BIDIRECTIONAL, 0);
96 		sg_free_table(vma->userptr.sg);
97 		vma->userptr.sg = NULL;
98 	}
99 
100 	pinned = ret = 0;
101 	if (in_kthread) {
102 		if (!mmget_not_zero(vma->userptr.notifier.mm)) {
103 			ret = -EFAULT;
104 			goto mm_closed;
105 		}
106 		kthread_use_mm(vma->userptr.notifier.mm);
107 	}
108 
109 	while (pinned < num_pages) {
110 		ret = get_user_pages_fast(xe_vma_userptr(vma) +
111 					  pinned * PAGE_SIZE,
112 					  num_pages - pinned,
113 					  read_only ? 0 : FOLL_WRITE,
114 					  &pages[pinned]);
115 		if (ret < 0)
116 			break;
120 
121 		pinned += ret;
122 		ret = 0;
123 	}
124 
125 	if (in_kthread) {
126 		kthread_unuse_mm(vma->userptr.notifier.mm);
127 		mmput(vma->userptr.notifier.mm);
128 	}
129 mm_closed:
130 	if (ret)
131 		goto out;
132 
133 	ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
134 						pinned, 0,
135 						(u64)pinned << PAGE_SHIFT,
136 						xe_sg_segment_size(xe->drm.dev),
137 						GFP_KERNEL);
138 	if (ret) {
139 		vma->userptr.sg = NULL;
140 		goto out;
141 	}
142 	vma->userptr.sg = &vma->userptr.sgt;
143 
144 	ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
145 			      read_only ? DMA_TO_DEVICE :
146 			      DMA_BIDIRECTIONAL,
147 			      DMA_ATTR_SKIP_CPU_SYNC |
148 			      DMA_ATTR_NO_KERNEL_MAPPING);
149 	if (ret) {
150 		sg_free_table(vma->userptr.sg);
151 		vma->userptr.sg = NULL;
152 		goto out;
153 	}
154 
155 	for (i = 0; i < pinned; ++i) {
156 		if (!read_only) {
157 			lock_page(pages[i]);
158 			set_page_dirty(pages[i]);
159 			unlock_page(pages[i]);
160 		}
161 
162 		mark_page_accessed(pages[i]);
163 	}
164 
165 out:
166 	release_pages(pages, pinned);
167 	kvfree(pages);
168 
169 	if (!(ret < 0)) {
170 		vma->userptr.notifier_seq = notifier_seq;
171 		if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
172 			goto retry;
173 	}
174 
175 	return ret < 0 ? ret : 0;
176 }
177 
178 static bool preempt_fences_waiting(struct xe_vm *vm)
179 {
180 	struct xe_exec_queue *q;
181 
182 	lockdep_assert_held(&vm->lock);
183 	xe_vm_assert_held(vm);
184 
185 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
186 		if (!q->compute.pfence ||
187 		    test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
188 			     &q->compute.pfence->flags)) {
189 			return true;
190 		}
191 	}
192 
193 	return false;
194 }
195 
196 static void free_preempt_fences(struct list_head *list)
197 {
198 	struct list_head *link, *next;
199 
200 	list_for_each_safe(link, next, list)
201 		xe_preempt_fence_free(to_preempt_fence_from_link(link));
202 }
203 
204 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
205 				unsigned int *count)
206 {
207 	lockdep_assert_held(&vm->lock);
208 	xe_vm_assert_held(vm);
209 
210 	if (*count >= vm->preempt.num_exec_queues)
211 		return 0;
212 
213 	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
214 		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
215 
216 		if (IS_ERR(pfence))
217 			return PTR_ERR(pfence);
218 
219 		list_move_tail(xe_preempt_fence_link(pfence), list);
220 	}
221 
222 	return 0;
223 }
224 
225 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
226 {
227 	struct xe_exec_queue *q;
228 
229 	xe_vm_assert_held(vm);
230 
231 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
232 		if (q->compute.pfence) {
233 			long timeout = dma_fence_wait(q->compute.pfence, false);
234 
235 			if (timeout < 0)
236 				return -ETIME;
237 			dma_fence_put(q->compute.pfence);
238 			q->compute.pfence = NULL;
239 		}
240 	}
241 
242 	return 0;
243 }
244 
245 static bool xe_vm_is_idle(struct xe_vm *vm)
246 {
247 	struct xe_exec_queue *q;
248 
249 	xe_vm_assert_held(vm);
250 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
251 		if (!xe_exec_queue_is_idle(q))
252 			return false;
253 	}
254 
255 	return true;
256 }
257 
258 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
259 {
260 	struct list_head *link;
261 	struct xe_exec_queue *q;
262 
263 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
264 		struct dma_fence *fence;
265 
266 		link = list->next;
267 		xe_assert(vm->xe, link != list);
268 
269 		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
270 					     q, q->compute.context,
271 					     ++q->compute.seqno);
272 		dma_fence_put(q->compute.pfence);
273 		q->compute.pfence = fence;
274 	}
275 }
276 
277 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
278 {
279 	struct xe_exec_queue *q;
280 	int err;
281 
282 	if (!vm->preempt.num_exec_queues)
283 		return 0;
284 
285 	err = xe_bo_lock(bo, true);
286 	if (err)
287 		return err;
288 
289 	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
290 	if (err)
291 		goto out_unlock;
292 
293 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
294 		if (q->compute.pfence) {
295 			dma_resv_add_fence(bo->ttm.base.resv,
296 					   q->compute.pfence,
297 					   DMA_RESV_USAGE_BOOKKEEP);
298 		}
299 
300 out_unlock:
301 	xe_bo_unlock(bo);
302 	return err;
303 }
304 
305 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
306 						struct drm_exec *exec)
307 {
308 	struct xe_exec_queue *q;
309 
310 	lockdep_assert_held(&vm->lock);
311 	xe_vm_assert_held(vm);
312 
313 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
314 		q->ops->resume(q);
315 
316 		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->compute.pfence,
317 					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
318 	}
319 }
320 
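/**
 * xe_vm_add_compute_exec_queue() - Add a compute exec queue to the VM
 * @vm: The VM.
 * @q: The exec_queue
 *
 * Creates a preempt fence for the queue, installs it in the VM's
 * reservation object and adds the queue to the VM's preempt fence
 * tracking. If a preemption or userptr invalidation is already in flight,
 * software signaling is enabled on the new fence so that it syncs state
 * with the other preempt fences on the VM.
 *
 * Return: 0 on success, negative error code on failure.
 */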
321 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
322 {
323 	struct drm_gpuvm_exec vm_exec = {
324 		.vm = &vm->gpuvm,
325 		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
326 		.num_fences = 1,
327 	};
328 	struct drm_exec *exec = &vm_exec.exec;
329 	struct dma_fence *pfence;
330 	int err;
331 	bool wait;
332 
333 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
334 
335 	down_write(&vm->lock);
336 	err = drm_gpuvm_exec_lock(&vm_exec);
337 	if (err) {
338 		up_write(&vm->lock);
		return err;
	}
339 
340 	pfence = xe_preempt_fence_create(q, q->compute.context,
341 					 ++q->compute.seqno);
342 	if (!pfence) {
343 		err = -ENOMEM;
344 		goto out_unlock;
345 	}
346 
347 	list_add(&q->compute.link, &vm->preempt.exec_queues);
348 	++vm->preempt.num_exec_queues;
349 	q->compute.pfence = pfence;
350 
351 	down_read(&vm->userptr.notifier_lock);
352 
353 	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
354 				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
355 
356 	/*
357 	 * Check to see if a preemption on the VM or a userptr invalidation is
358 	 * in flight; if so, trigger this preempt fence to sync state with the
359 	 * other preempt fences on the VM.
360 	 */
361 	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
362 	if (wait)
363 		dma_fence_enable_sw_signaling(pfence);
364 
365 	up_read(&vm->userptr.notifier_lock);
366 
367 out_unlock:
368 	drm_exec_fini(exec);
369 	up_write(&vm->lock);
370 
371 	return err;
372 }
373 
374 /**
375  * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
376  * @vm: The VM.
377  * @q: The exec_queue
378  */
379 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
380 {
381 	if (!xe_vm_in_preempt_fence_mode(vm))
382 		return;
383 
384 	down_write(&vm->lock);
385 	list_del(&q->compute.link);
386 	--vm->preempt.num_exec_queues;
387 	if (q->compute.pfence) {
388 		dma_fence_enable_sw_signaling(q->compute.pfence);
389 		dma_fence_put(q->compute.pfence);
390 		q->compute.pfence = NULL;
391 	}
392 	up_write(&vm->lock);
393 }
394 
395 /**
396  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
397  * that need repinning.
398  * @vm: The VM.
399  *
400  * This function checks for whether the VM has userptrs that need repinning,
401  * and provides a release-type barrier on the userptr.notifier_lock after
402  * checking.
403  *
404  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
405  */
406 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
407 {
408 	lockdep_assert_held_read(&vm->userptr.notifier_lock);
409 
410 	return (list_empty(&vm->userptr.repin_list) &&
411 		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
412 }
413 
414 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
415 
416 static void xe_vm_kill(struct xe_vm *vm)
417 {
418 	struct xe_exec_queue *q;
419 
420 	lockdep_assert_held(&vm->lock);
421 
422 	xe_vm_lock(vm, false);
423 	vm->flags |= XE_VM_FLAG_BANNED;
424 	trace_xe_vm_kill(vm);
425 
426 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
427 		q->ops->kill(q);
428 	xe_vm_unlock(vm);
429 
430 	/* TODO: Inform user the VM is banned */
431 }
432 
433 /**
434  * xe_vm_validate_should_retry() - Whether to retry after a validate error.
435  * @exec: The drm_exec object used for locking before validation.
436  * @err: The error returned from ttm_bo_validate().
437  * @end: A ktime_t cookie that should be set to 0 before first use and
438  * that should be reused on subsequent calls.
439  *
440  * With multiple active VMs, under memory pressure, it is possible that
441  * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
442  * Until ttm properly handles locking in such scenarios, the best thing the
443  * driver can do is retry with a timeout. Check if that is necessary, and
444  * if so unlock the drm_exec's objects while keeping the ticket to prepare
445  * for a rerun.
446  *
447  * Return: true if a retry after drm_exec_init() is recommended;
448  * false otherwise.
449  */
450 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
451 {
452 	ktime_t cur;
453 
454 	if (err != -ENOMEM)
455 		return false;
456 
457 	cur = ktime_get();
458 	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
459 	if (!ktime_before(cur, *end))
460 		return false;
461 
462 	msleep(20);
463 	return true;
464 }
465 
466 static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
467 {
468 	struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
469 	struct drm_gpuva *gpuva;
470 	int ret;
471 
472 	lockdep_assert_held(&vm->lock);
473 	drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
474 		list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
475 			       &vm->rebind_list);
476 
477 	ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
478 	if (ret)
479 		return ret;
480 
481 	vm_bo->evicted = false;
482 	return 0;
483 }
484 
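/*
 * Lock and prepare the VM's objects for the rebind worker. Sets *done and
 * returns early if the VM is idle (rebind can be deactivated) or if no
 * preempt fences are waiting; otherwise waits for the existing preempt
 * fences to signal and validates any evicted objects.
 */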
485 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
486 				 bool *done)
487 {
488 	int err;
489 
490 	/*
491 	 * 1 fence for each preempt fence plus a fence for each tile from a
492 	 * possible rebind
493 	 */
494 	err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, vm->preempt.num_exec_queues +
495 				   vm->xe->info.tile_count);
496 	if (err)
497 		return err;
498 
499 	if (xe_vm_is_idle(vm)) {
500 		vm->preempt.rebind_deactivated = true;
501 		*done = true;
502 		return 0;
503 	}
504 
505 	if (!preempt_fences_waiting(vm)) {
506 		*done = true;
507 		return 0;
508 	}
509 
510 	err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, vm->preempt.num_exec_queues);
511 	if (err)
512 		return err;
513 
514 	err = wait_for_existing_preempt_fences(vm);
515 	if (err)
516 		return err;
517 
518 	return drm_gpuvm_validate(&vm->gpuvm, exec);
519 }
520 
521 static void preempt_rebind_work_func(struct work_struct *w)
522 {
523 	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
524 	struct drm_exec exec;
525 	struct dma_fence *rebind_fence;
526 	unsigned int fence_count = 0;
527 	LIST_HEAD(preempt_fences);
528 	ktime_t end = 0;
529 	int err = 0;
530 	long wait;
531 	int __maybe_unused tries = 0;
532 
533 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
534 	trace_xe_vm_rebind_worker_enter(vm);
535 
536 	down_write(&vm->lock);
537 
538 	if (xe_vm_is_closed_or_banned(vm)) {
539 		up_write(&vm->lock);
540 		trace_xe_vm_rebind_worker_exit(vm);
541 		return;
542 	}
543 
544 retry:
545 	if (xe_vm_userptr_check_repin(vm)) {
546 		err = xe_vm_userptr_pin(vm);
547 		if (err)
548 			goto out_unlock_outer;
549 	}
550 
551 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
552 
553 	drm_exec_until_all_locked(&exec) {
554 		bool done = false;
555 
556 		err = xe_preempt_work_begin(&exec, vm, &done);
557 		drm_exec_retry_on_contention(&exec);
558 		if (err || done) {
559 			drm_exec_fini(&exec);
560 			if (err && xe_vm_validate_should_retry(&exec, err, &end))
561 				err = -EAGAIN;
562 
563 			goto out_unlock_outer;
564 		}
565 	}
566 
567 	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
568 	if (err)
569 		goto out_unlock;
570 
571 	rebind_fence = xe_vm_rebind(vm, true);
572 	if (IS_ERR(rebind_fence)) {
573 		err = PTR_ERR(rebind_fence);
574 		goto out_unlock;
575 	}
576 
577 	if (rebind_fence) {
578 		dma_fence_wait(rebind_fence, false);
579 		dma_fence_put(rebind_fence);
580 	}
581 
582 	/* Wait on munmap style VM unbinds */
583 	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
584 				     DMA_RESV_USAGE_KERNEL,
585 				     false, MAX_SCHEDULE_TIMEOUT);
586 	if (wait <= 0) {
587 		err = -ETIME;
588 		goto out_unlock;
589 	}
590 
591 #define retry_required(__tries, __vm) \
592 	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
593 	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
594 	__xe_vm_userptr_needs_repin(__vm))
595 
596 	down_read(&vm->userptr.notifier_lock);
597 	if (retry_required(tries, vm)) {
598 		up_read(&vm->userptr.notifier_lock);
599 		err = -EAGAIN;
600 		goto out_unlock;
601 	}
602 
603 #undef retry_required
604 
605 	spin_lock(&vm->xe->ttm.lru_lock);
606 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
607 	spin_unlock(&vm->xe->ttm.lru_lock);
608 
609 	/* Point of no return. */
610 	arm_preempt_fences(vm, &preempt_fences);
611 	resume_and_reinstall_preempt_fences(vm, &exec);
612 	up_read(&vm->userptr.notifier_lock);
613 
614 out_unlock:
615 	drm_exec_fini(&exec);
616 out_unlock_outer:
617 	if (err == -EAGAIN) {
618 		trace_xe_vm_rebind_worker_retry(vm);
619 		goto retry;
620 	}
621 
622 	if (err) {
623 		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
624 		xe_vm_kill(vm);
625 	}
626 	up_write(&vm->lock);
627 
628 	free_preempt_fences(&preempt_fences);
629 
630 	trace_xe_vm_rebind_worker_exit(vm);
631 }
632 
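/*
 * MMU interval notifier callback for userptr vmas. Bumps the notifier
 * sequence number, queues the vma for repin/rebind (or invalidates it
 * directly in fault mode) and waits for the fences tracked in the VM's
 * reservation object before returning.
 */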
633 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
634 				   const struct mmu_notifier_range *range,
635 				   unsigned long cur_seq)
636 {
637 	struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
638 	struct xe_vm *vm = xe_vma_vm(vma);
639 	struct dma_resv_iter cursor;
640 	struct dma_fence *fence;
641 	long err;
642 
643 	xe_assert(vm->xe, xe_vma_is_userptr(vma));
644 	trace_xe_vma_userptr_invalidate(vma);
645 
646 	if (!mmu_notifier_range_blockable(range))
647 		return false;
648 
649 	down_write(&vm->userptr.notifier_lock);
650 	mmu_interval_set_seq(mni, cur_seq);
651 
652 	/* No need to stop gpu access if the userptr is not yet bound. */
653 	if (!vma->userptr.initial_bind) {
654 		up_write(&vm->userptr.notifier_lock);
655 		return true;
656 	}
657 
658 	/*
659 	 * Tell exec and rebind worker they need to repin and rebind this
660 	 * userptr.
661 	 */
662 	if (!xe_vm_in_fault_mode(vm) &&
663 	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
664 		spin_lock(&vm->userptr.invalidated_lock);
665 		list_move_tail(&vma->userptr.invalidate_link,
666 			       &vm->userptr.invalidated);
667 		spin_unlock(&vm->userptr.invalidated_lock);
668 	}
669 
670 	up_write(&vm->userptr.notifier_lock);
671 
672 	/*
673 	 * Preempt fences turn into schedule disables, pipeline these.
674 	 * Note that even in fault mode, we need to wait for binds and
675 	 * unbinds to complete, and those are attached as BOOKKEEP fences
676 	 * to the vm.
677 	 */
678 	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
679 			    DMA_RESV_USAGE_BOOKKEEP);
680 	dma_resv_for_each_fence_unlocked(&cursor, fence)
681 		dma_fence_enable_sw_signaling(fence);
682 	dma_resv_iter_end(&cursor);
683 
684 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
685 				    DMA_RESV_USAGE_BOOKKEEP,
686 				    false, MAX_SCHEDULE_TIMEOUT);
687 	XE_WARN_ON(err <= 0);
688 
689 	if (xe_vm_in_fault_mode(vm)) {
690 		err = xe_vm_invalidate_vma(vma);
691 		XE_WARN_ON(err);
692 	}
693 
694 	trace_xe_vma_userptr_invalidate_complete(vma);
695 
696 	return true;
697 }
698 
699 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
700 	.invalidate = vma_userptr_invalidate,
701 };
702 
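/**
 * xe_vm_userptr_pin() - Pin all invalidated userptrs in a VM
 * @vm: The VM.
 *
 * Collects the vmas from the invalidated list, repins their pages and
 * moves them to the rebind list. Requires vm->lock held for write.
 *
 * Return: 0 on success, negative error code on the first pin failure.
 */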
703 int xe_vm_userptr_pin(struct xe_vm *vm)
704 {
705 	struct xe_vma *vma, *next;
706 	int err = 0;
707 	LIST_HEAD(tmp_evict);
708 
709 	lockdep_assert_held_write(&vm->lock);
710 
711 	/* Collect invalidated userptrs */
712 	spin_lock(&vm->userptr.invalidated_lock);
713 	list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
714 				 userptr.invalidate_link) {
715 		list_del_init(&vma->userptr.invalidate_link);
716 		list_move_tail(&vma->combined_links.userptr,
717 			       &vm->userptr.repin_list);
718 	}
719 	spin_unlock(&vm->userptr.invalidated_lock);
720 
721 	/* Pin and move to temporary list */
722 	list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
723 				 combined_links.userptr) {
724 		err = xe_vma_userptr_pin_pages(vma);
725 		if (err < 0)
726 			return err;
727 
728 		list_move_tail(&vma->combined_links.userptr, &vm->rebind_list);
729 	}
730 
731 	return 0;
732 }
733 
734 /**
735  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
736  * that need repinning.
737  * @vm: The VM.
738  *
739  * This function does an advisory check for whether the VM has userptrs that
740  * need repinning.
741  *
742  * Return: 0 if there are no indications of userptrs needing repinning,
743  * -EAGAIN if there are.
744  */
745 int xe_vm_userptr_check_repin(struct xe_vm *vm)
746 {
747 	return (list_empty_careful(&vm->userptr.repin_list) &&
748 		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
749 }
750 
751 static struct dma_fence *
752 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
753 	       struct xe_sync_entry *syncs, u32 num_syncs,
754 	       bool first_op, bool last_op);
755 
756 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
757 {
758 	struct dma_fence *fence = NULL;
759 	struct xe_vma *vma, *next;
760 
761 	lockdep_assert_held(&vm->lock);
762 	if (xe_vm_in_lr_mode(vm) && !rebind_worker)
763 		return NULL;
764 
765 	xe_vm_assert_held(vm);
766 	list_for_each_entry_safe(vma, next, &vm->rebind_list,
767 				 combined_links.rebind) {
768 		xe_assert(vm->xe, vma->tile_present);
769 
770 		list_del_init(&vma->combined_links.rebind);
771 		dma_fence_put(fence);
772 		if (rebind_worker)
773 			trace_xe_vma_rebind_worker(vma);
774 		else
775 			trace_xe_vma_rebind_exec(vma);
776 		fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
777 		if (IS_ERR(fence))
778 			return fence;
779 	}
780 
781 	return fence;
782 }
783 
784 #define VMA_CREATE_FLAG_READ_ONLY	BIT(0)
785 #define VMA_CREATE_FLAG_IS_NULL		BIT(1)
786 
787 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
788 				    struct xe_bo *bo,
789 				    u64 bo_offset_or_userptr,
790 				    u64 start, u64 end,
791 				    u16 pat_index, unsigned int flags)
792 {
793 	struct xe_vma *vma;
794 	struct xe_tile *tile;
795 	u8 id;
796 	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
797 	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
798 
799 	xe_assert(vm->xe, start < end);
800 	xe_assert(vm->xe, end < vm->size);
801 
802 	if (!bo && !is_null)	/* userptr */
803 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
804 	else
805 		vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
806 			      GFP_KERNEL);
807 	if (!vma) {
808 		vma = ERR_PTR(-ENOMEM);
809 		return vma;
810 	}
811 
812 	INIT_LIST_HEAD(&vma->combined_links.rebind);
813 
814 	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
815 	vma->gpuva.vm = &vm->gpuvm;
816 	vma->gpuva.va.addr = start;
817 	vma->gpuva.va.range = end - start + 1;
818 	if (read_only)
819 		vma->gpuva.flags |= XE_VMA_READ_ONLY;
820 	if (is_null)
821 		vma->gpuva.flags |= DRM_GPUVA_SPARSE;
822 
823 	for_each_tile(tile, vm->xe, id)
824 		vma->tile_mask |= 0x1 << id;
825 
826 	if (GRAPHICS_VER(vm->xe) >= 20 || vm->xe->info.platform == XE_PVC)
827 		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
828 
829 	vma->pat_index = pat_index;
830 
831 	if (bo) {
832 		struct drm_gpuvm_bo *vm_bo;
833 
834 		xe_bo_assert_held(bo);
835 
836 		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
837 		if (IS_ERR(vm_bo)) {
838 			kfree(vma);
839 			return ERR_CAST(vm_bo);
840 		}
841 
842 		drm_gpuvm_bo_extobj_add(vm_bo);
843 		drm_gem_object_get(&bo->ttm.base);
844 		vma->gpuva.gem.obj = &bo->ttm.base;
845 		vma->gpuva.gem.offset = bo_offset_or_userptr;
846 		drm_gpuva_link(&vma->gpuva, vm_bo);
847 		drm_gpuvm_bo_put(vm_bo);
848 	} else /* userptr or null */ {
849 		if (!is_null) {
850 			u64 size = end - start + 1;
851 			int err;
852 
853 			INIT_LIST_HEAD(&vma->userptr.invalidate_link);
854 			vma->gpuva.gem.offset = bo_offset_or_userptr;
855 
856 			err = mmu_interval_notifier_insert(&vma->userptr.notifier,
857 							   current->mm,
858 							   xe_vma_userptr(vma), size,
859 							   &vma_userptr_notifier_ops);
860 			if (err) {
861 				kfree(vma);
862 				vma = ERR_PTR(err);
863 				return vma;
864 			}
865 
866 			vma->userptr.notifier_seq = LONG_MAX;
867 		}
868 
869 		xe_vm_get(vm);
870 	}
871 
872 	return vma;
873 }
874 
875 static void xe_vma_destroy_late(struct xe_vma *vma)
876 {
877 	struct xe_vm *vm = xe_vma_vm(vma);
878 	struct xe_device *xe = vm->xe;
879 	bool read_only = xe_vma_read_only(vma);
880 
881 	if (xe_vma_is_userptr(vma)) {
882 		if (vma->userptr.sg) {
883 			dma_unmap_sgtable(xe->drm.dev,
884 					  vma->userptr.sg,
885 					  read_only ? DMA_TO_DEVICE :
886 					  DMA_BIDIRECTIONAL, 0);
887 			sg_free_table(vma->userptr.sg);
888 			vma->userptr.sg = NULL;
889 		}
890 
891 		/*
892 		 * Since userptr pages are not pinned, we can't remove
893 		 * the notifier until we're sure the GPU is not accessing
894 		 * them anymore
895 		 */
896 		mmu_interval_notifier_remove(&vma->userptr.notifier);
897 		xe_vm_put(vm);
898 	} else if (xe_vma_is_null(vma)) {
899 		xe_vm_put(vm);
900 	} else {
901 		xe_bo_put(xe_vma_bo(vma));
902 	}
903 
904 	kfree(vma);
905 }
906 
907 static void vma_destroy_work_func(struct work_struct *w)
908 {
909 	struct xe_vma *vma =
910 		container_of(w, struct xe_vma, destroy_work);
911 
912 	xe_vma_destroy_late(vma);
913 }
914 
915 static void vma_destroy_cb(struct dma_fence *fence,
916 			   struct dma_fence_cb *cb)
917 {
918 	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
919 
920 	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
921 	queue_work(system_unbound_wq, &vma->destroy_work);
922 }
923 
924 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
925 {
926 	struct xe_vm *vm = xe_vma_vm(vma);
927 
928 	lockdep_assert_held_write(&vm->lock);
929 	xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
930 
931 	if (xe_vma_is_userptr(vma)) {
932 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
933 
934 		spin_lock(&vm->userptr.invalidated_lock);
935 		list_del(&vma->userptr.invalidate_link);
936 		spin_unlock(&vm->userptr.invalidated_lock);
937 	} else if (!xe_vma_is_null(vma)) {
938 		xe_bo_assert_held(xe_vma_bo(vma));
939 
940 		drm_gpuva_unlink(&vma->gpuva);
941 	}
942 
943 	xe_vm_assert_held(vm);
944 	if (fence) {
945 		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
946 						 vma_destroy_cb);
947 
948 		if (ret) {
949 			XE_WARN_ON(ret != -ENOENT);
950 			xe_vma_destroy_late(vma);
951 		}
952 	} else {
953 		xe_vma_destroy_late(vma);
954 	}
955 }
956 
957 /**
958  * xe_vm_prepare_vma() - drm_exec utility to lock a vma
959  * @exec: The drm_exec object we're currently locking for.
960  * @vma: The vma for which we want to lock the vm resv and any attached
961  * object's resv.
962  * @num_shared: The number of dma-fence slots to pre-allocate in the
963  * objects' reservation objects.
964  *
965  * Return: 0 on success, negative error code on error. In particular
966  * may return -EDEADLK on WW transaction contention and -EINTR if
967  * an interruptible wait is terminated by a signal.
968  */
969 int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
970 		      unsigned int num_shared)
971 {
972 	struct xe_vm *vm = xe_vma_vm(vma);
973 	struct xe_bo *bo = xe_vma_bo(vma);
974 	int err;
975 
976 	XE_WARN_ON(!vm);
977 	err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
978 	if (!err && bo && !bo->vm)
979 		err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
980 
981 	return err;
982 }
983 
984 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
985 {
986 	struct drm_exec exec;
987 	int err;
988 
989 	drm_exec_init(&exec, 0, 0);
990 	drm_exec_until_all_locked(&exec) {
991 		err = xe_vm_prepare_vma(&exec, vma, 0);
992 		drm_exec_retry_on_contention(&exec);
993 		if (XE_WARN_ON(err))
994 			break;
995 	}
996 
997 	xe_vma_destroy(vma, NULL);
998 
999 	drm_exec_fini(&exec);
1000 }
1001 
1002 struct xe_vma *
1003 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1004 {
1005 	struct drm_gpuva *gpuva;
1006 
1007 	lockdep_assert_held(&vm->lock);
1008 
1009 	if (xe_vm_is_closed_or_banned(vm))
1010 		return NULL;
1011 
1012 	xe_assert(vm->xe, start + range <= vm->size);
1013 
1014 	gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1015 
1016 	return gpuva ? gpuva_to_vma(gpuva) : NULL;
1017 }
1018 
1019 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1020 {
1021 	int err;
1022 
1023 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1024 	lockdep_assert_held(&vm->lock);
1025 
1026 	err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1027 	XE_WARN_ON(err);	/* Shouldn't be possible */
1028 
1029 	return err;
1030 }
1031 
1032 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1033 {
1034 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1035 	lockdep_assert_held(&vm->lock);
1036 
1037 	drm_gpuva_remove(&vma->gpuva);
1038 	if (vm->usm.last_fault_vma == vma)
1039 		vm->usm.last_fault_vma = NULL;
1040 }
1041 
1042 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1043 {
1044 	struct xe_vma_op *op;
1045 
1046 	op = kzalloc(sizeof(*op), GFP_KERNEL);
1047 
1048 	if (unlikely(!op))
1049 		return NULL;
1050 
1051 	return &op->base;
1052 }
1053 
1054 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1055 
1056 static struct drm_gpuvm_ops gpuvm_ops = {
1057 	.op_alloc = xe_vm_op_alloc,
1058 	.vm_bo_validate = xe_gpuvm_validate,
1059 	.vm_free = xe_vm_free,
1060 };
1061 
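/*
 * Translate a PAT index into the PAT bits of a page-table entry. PDEs only
 * carry the PAT0/PAT1 bits; PTEs additionally encode PAT2-PAT4 depending on
 * the page-table level and platform.
 */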
1062 static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
1063 {
1064 	u64 pte = 0;
1065 
1066 	if (pat_index & BIT(0))
1067 		pte |= XE_PPGTT_PTE_PAT0;
1068 
1069 	if (pat_index & BIT(1))
1070 		pte |= XE_PPGTT_PTE_PAT1;
1071 
1072 	return pte;
1073 }
1074 
1075 static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
1076 				u32 pt_level)
1077 {
1078 	u64 pte = 0;
1079 
1080 	if (pat_index & BIT(0))
1081 		pte |= XE_PPGTT_PTE_PAT0;
1082 
1083 	if (pat_index & BIT(1))
1084 		pte |= XE_PPGTT_PTE_PAT1;
1085 
1086 	if (pat_index & BIT(2)) {
1087 		if (pt_level)
1088 			pte |= XE_PPGTT_PDE_PDPE_PAT2;
1089 		else
1090 			pte |= XE_PPGTT_PTE_PAT2;
1091 	}
1092 
1093 	if (pat_index & BIT(3))
1094 		pte |= XELPG_PPGTT_PTE_PAT3;
1095 
1096 	if (pat_index & (BIT(4)))
1097 		pte |= XE2_PPGTT_PTE_PAT4;
1098 
1099 	return pte;
1100 }
1101 
1102 static u64 pte_encode_ps(u32 pt_level)
1103 {
1104 	XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
1105 
1106 	if (pt_level == 1)
1107 		return XE_PDE_PS_2M;
1108 	else if (pt_level == 2)
1109 		return XE_PDPE_PS_1G;
1110 
1111 	return 0;
1112 }
1113 
1114 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
1115 			      const u16 pat_index)
1116 {
1117 	struct xe_device *xe = xe_bo_device(bo);
1118 	u64 pde;
1119 
1120 	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1121 	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1122 	pde |= pde_encode_pat_index(xe, pat_index);
1123 
1124 	return pde;
1125 }
1126 
1127 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1128 			      u16 pat_index, u32 pt_level)
1129 {
1130 	struct xe_device *xe = xe_bo_device(bo);
1131 	u64 pte;
1132 
1133 	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1134 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1135 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1136 	pte |= pte_encode_ps(pt_level);
1137 
1138 	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1139 		pte |= XE_PPGTT_PTE_DM;
1140 
1141 	return pte;
1142 }
1143 
1144 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1145 			       u16 pat_index, u32 pt_level)
1146 {
1147 	struct xe_device *xe = xe_vma_vm(vma)->xe;
1148 
1149 	pte |= XE_PAGE_PRESENT;
1150 
1151 	if (likely(!xe_vma_read_only(vma)))
1152 		pte |= XE_PAGE_RW;
1153 
1154 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1155 	pte |= pte_encode_ps(pt_level);
1156 
1157 	if (unlikely(xe_vma_is_null(vma)))
1158 		pte |= XE_PTE_NULL;
1159 
1160 	return pte;
1161 }
1162 
1163 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1164 				u16 pat_index,
1165 				u32 pt_level, bool devmem, u64 flags)
1166 {
1167 	u64 pte;
1168 
1169 	/* Avoid passing random bits directly as flags */
1170 	xe_assert(xe, !(flags & ~XE_PTE_PS64));
1171 
1172 	pte = addr;
1173 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1174 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1175 	pte |= pte_encode_ps(pt_level);
1176 
1177 	if (devmem)
1178 		pte |= XE_PPGTT_PTE_DM;
1179 
1180 	pte |= flags;
1181 
1182 	return pte;
1183 }
1184 
1185 static const struct xe_pt_ops xelp_pt_ops = {
1186 	.pte_encode_bo = xelp_pte_encode_bo,
1187 	.pte_encode_vma = xelp_pte_encode_vma,
1188 	.pte_encode_addr = xelp_pte_encode_addr,
1189 	.pde_encode_bo = xelp_pde_encode_bo,
1190 };
1191 
1192 static void vm_destroy_work_func(struct work_struct *w);
1193 
1194 /**
1195  * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
1196  * given tile and vm.
1197  * @xe: xe device.
1198  * @tile: tile to set up for.
1199  * @vm: vm to set up for.
1200  *
1201  * Sets up a pagetable tree with one page-table per level and a single
1202  * leaf PTE. All pagetable entries point to the single page-table or,
1203  * for MAX_HUGEPTE_LEVEL, a NULL huge PTE returning 0 on read and
1204  * writes become NOPs.
1205  *
1206  * Return: 0 on success, negative error code on error.
1207  */
1208 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1209 				struct xe_vm *vm)
1210 {
1211 	u8 id = tile->id;
1212 	int i;
1213 
1214 	for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
1215 		vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
1216 		if (IS_ERR(vm->scratch_pt[id][i]))
1217 			return PTR_ERR(vm->scratch_pt[id][i]);
1218 
1219 		xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
1220 	}
1221 
1222 	return 0;
1223 }
1224 
1225 static void xe_vm_free_scratch(struct xe_vm *vm)
1226 {
1227 	struct xe_tile *tile;
1228 	u8 id;
1229 
1230 	if (!xe_vm_has_scratch(vm))
1231 		return;
1232 
1233 	for_each_tile(tile, vm->xe, id) {
1234 		u32 i;
1235 
1236 		if (!vm->pt_root[id])
1237 			continue;
1238 
1239 		for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
1240 			if (vm->scratch_pt[id][i])
1241 				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
1242 	}
1243 }
1244 
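/**
 * xe_vm_create() - Create and initialize a VM
 * @xe: xe device.
 * @flags: XE_VM_FLAG_* flags for the VM.
 *
 * Allocates the VM, initializes the underlying GPUVM and per-tile
 * page-table roots, sets up scratch page tables when requested and, for
 * non-migration VMs, creates a default bind exec queue per tile.
 *
 * Return: Pointer to the new VM on success, ERR_PTR() on failure.
 */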
1245 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1246 {
1247 	struct drm_gem_object *vm_resv_obj;
1248 	struct xe_vm *vm;
1249 	int err, number_tiles = 0;
1250 	struct xe_tile *tile;
1251 	u8 id;
1252 
1253 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1254 	if (!vm)
1255 		return ERR_PTR(-ENOMEM);
1256 
1257 	vm->xe = xe;
1258 
1259 	vm->size = 1ull << xe->info.va_bits;
1260 
1261 	vm->flags = flags;
1262 
1263 	init_rwsem(&vm->lock);
1264 
1265 	INIT_LIST_HEAD(&vm->rebind_list);
1266 
1267 	INIT_LIST_HEAD(&vm->userptr.repin_list);
1268 	INIT_LIST_HEAD(&vm->userptr.invalidated);
1269 	init_rwsem(&vm->userptr.notifier_lock);
1270 	spin_lock_init(&vm->userptr.invalidated_lock);
1271 
1272 	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1273 
1274 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
1275 	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
1276 
1277 	for_each_tile(tile, xe, id)
1278 		xe_range_fence_tree_init(&vm->rftree[id]);
1279 
1280 	vm->pt_ops = &xelp_pt_ops;
1281 
1282 	if (!(flags & XE_VM_FLAG_MIGRATION))
1283 		xe_device_mem_access_get(xe);
1284 
1285 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1286 	if (!vm_resv_obj) {
1287 		err = -ENOMEM;
1288 		goto err_no_resv;
1289 	}
1290 
1291 	drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1292 		       vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
1293 
1294 	drm_gem_object_put(vm_resv_obj);
1295 
1296 	err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
1297 	if (err)
1298 		goto err_close;
1299 
1300 	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1301 		vm->flags |= XE_VM_FLAG_64K;
1302 
1303 	for_each_tile(tile, xe, id) {
1304 		if (flags & XE_VM_FLAG_MIGRATION &&
1305 		    tile->id != XE_VM_FLAG_TILE_ID(flags))
1306 			continue;
1307 
1308 		vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1309 		if (IS_ERR(vm->pt_root[id])) {
1310 			err = PTR_ERR(vm->pt_root[id]);
1311 			vm->pt_root[id] = NULL;
1312 			goto err_unlock_close;
1313 		}
1314 	}
1315 
1316 	if (xe_vm_has_scratch(vm)) {
1317 		for_each_tile(tile, xe, id) {
1318 			if (!vm->pt_root[id])
1319 				continue;
1320 
1321 			err = xe_vm_create_scratch(xe, tile, vm);
1322 			if (err)
1323 				goto err_unlock_close;
1324 		}
1325 		vm->batch_invalidate_tlb = true;
1326 	}
1327 
1328 	if (flags & XE_VM_FLAG_LR_MODE) {
1329 		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1330 		vm->flags |= XE_VM_FLAG_LR_MODE;
1331 		vm->batch_invalidate_tlb = false;
1332 	}
1333 
1334 	/* Fill pt_root after allocating scratch tables */
1335 	for_each_tile(tile, xe, id) {
1336 		if (!vm->pt_root[id])
1337 			continue;
1338 
1339 		xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1340 	}
1341 	dma_resv_unlock(xe_vm_resv(vm));
1342 
1343 	/* Kernel migration VM shouldn't have a circular loop.. */
1344 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
1345 		for_each_tile(tile, xe, id) {
1346 			struct xe_gt *gt = tile->primary_gt;
1347 			struct xe_vm *migrate_vm;
1348 			struct xe_exec_queue *q;
1349 			u32 create_flags = EXEC_QUEUE_FLAG_VM;
1350 
1351 			if (!vm->pt_root[id])
1352 				continue;
1353 
1354 			migrate_vm = xe_migrate_get_vm(tile->migrate);
1355 			q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1356 						       XE_ENGINE_CLASS_COPY,
1357 						       create_flags);
1358 			xe_vm_put(migrate_vm);
1359 			if (IS_ERR(q)) {
1360 				err = PTR_ERR(q);
1361 				goto err_close;
1362 			}
1363 			vm->q[id] = q;
1364 			number_tiles++;
1365 		}
1366 	}
1367 
1368 	if (number_tiles > 1)
1369 		vm->composite_fence_ctx = dma_fence_context_alloc(1);
1370 
1371 	mutex_lock(&xe->usm.lock);
1372 	if (flags & XE_VM_FLAG_FAULT_MODE)
1373 		xe->usm.num_vm_in_fault_mode++;
1374 	else if (!(flags & XE_VM_FLAG_MIGRATION))
1375 		xe->usm.num_vm_in_non_fault_mode++;
1376 	mutex_unlock(&xe->usm.lock);
1377 
1378 	trace_xe_vm_create(vm);
1379 
1380 	return vm;
1381 
1382 err_unlock_close:
1383 	dma_resv_unlock(xe_vm_resv(vm));
1384 err_close:
1385 	xe_vm_close_and_put(vm);
1386 	return ERR_PTR(err);
1387 
1388 err_no_resv:
1389 	for_each_tile(tile, xe, id)
1390 		xe_range_fence_tree_fini(&vm->rftree[id]);
1391 	kfree(vm);
1392 	if (!(flags & XE_VM_FLAG_MIGRATION))
1393 		xe_device_mem_access_put(xe);
1394 	return ERR_PTR(err);
1395 }
1396 
1397 static void xe_vm_close(struct xe_vm *vm)
1398 {
1399 	down_write(&vm->lock);
1400 	vm->size = 0;
1401 	up_write(&vm->lock);
1402 }
1403 
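/**
 * xe_vm_close_and_put() - Close a VM and drop a reference
 * @vm: The VM.
 *
 * Marks the VM closed, flushes the rebind worker, kills and releases the
 * VM's exec queues, destroys all vmas and page tables and finally drops
 * a reference so the VM can be freed.
 */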
1404 void xe_vm_close_and_put(struct xe_vm *vm)
1405 {
1406 	LIST_HEAD(contested);
1407 	struct xe_device *xe = vm->xe;
1408 	struct xe_tile *tile;
1409 	struct xe_vma *vma, *next_vma;
1410 	struct drm_gpuva *gpuva, *next;
1411 	u8 id;
1412 
1413 	xe_assert(xe, !vm->preempt.num_exec_queues);
1414 
1415 	xe_vm_close(vm);
1416 	if (xe_vm_in_preempt_fence_mode(vm))
1417 		flush_work(&vm->preempt.rebind_work);
1418 
1419 	down_write(&vm->lock);
1420 	for_each_tile(tile, xe, id) {
1421 		if (vm->q[id])
1422 			xe_exec_queue_last_fence_put(vm->q[id], vm);
1423 	}
1424 	up_write(&vm->lock);
1425 
1426 	for_each_tile(tile, xe, id) {
1427 		if (vm->q[id]) {
1428 			xe_exec_queue_kill(vm->q[id]);
1429 			xe_exec_queue_put(vm->q[id]);
1430 			vm->q[id] = NULL;
1431 		}
1432 	}
1433 
1434 	down_write(&vm->lock);
1435 	xe_vm_lock(vm, false);
1436 	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1437 		vma = gpuva_to_vma(gpuva);
1438 
1439 		if (xe_vma_has_no_bo(vma)) {
1440 			down_read(&vm->userptr.notifier_lock);
1441 			vma->gpuva.flags |= XE_VMA_DESTROYED;
1442 			up_read(&vm->userptr.notifier_lock);
1443 		}
1444 
1445 		xe_vm_remove_vma(vm, vma);
1446 
1447 		/* easy case, remove from VMA? */
1448 		if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1449 			list_del_init(&vma->combined_links.rebind);
1450 			xe_vma_destroy(vma, NULL);
1451 			continue;
1452 		}
1453 
1454 		list_move_tail(&vma->combined_links.destroy, &contested);
1455 		vma->gpuva.flags |= XE_VMA_DESTROYED;
1456 	}
1457 
1458 	/*
1459 	 * All vm operations will add shared fences to resv.
1460 	 * The only exception is eviction for a shared object,
1461 	 * but even so, the unbind when evicted would still
1462 	 * install a fence to resv. Hence it's safe to
1463 	 * destroy the pagetables immediately.
1464 	 */
1465 	xe_vm_free_scratch(vm);
1466 
1467 	for_each_tile(tile, xe, id) {
1468 		if (vm->pt_root[id]) {
1469 			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1470 			vm->pt_root[id] = NULL;
1471 		}
1472 	}
1473 	xe_vm_unlock(vm);
1474 
1475 	/*
1476 	 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1477 	 * Since we hold a refcount to the bo, we can remove and free
1478 	 * the members safely without locking.
1479 	 */
1480 	list_for_each_entry_safe(vma, next_vma, &contested,
1481 				 combined_links.destroy) {
1482 		list_del_init(&vma->combined_links.destroy);
1483 		xe_vma_destroy_unlocked(vma);
1484 	}
1485 
1486 	up_write(&vm->lock);
1487 
1488 	mutex_lock(&xe->usm.lock);
1489 	if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1490 		xe->usm.num_vm_in_fault_mode--;
1491 	else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1492 		xe->usm.num_vm_in_non_fault_mode--;
1493 	mutex_unlock(&xe->usm.lock);
1494 
1495 	for_each_tile(tile, xe, id)
1496 		xe_range_fence_tree_fini(&vm->rftree[id]);
1497 
1498 	xe_vm_put(vm);
1499 }
1500 
1501 static void vm_destroy_work_func(struct work_struct *w)
1502 {
1503 	struct xe_vm *vm =
1504 		container_of(w, struct xe_vm, destroy_work);
1505 	struct xe_device *xe = vm->xe;
1506 	struct xe_tile *tile;
1507 	u8 id;
1508 	void *lookup;
1509 
1510 	/* xe_vm_close_and_put was not called? */
1511 	xe_assert(xe, !vm->size);
1512 
1513 	if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1514 		xe_device_mem_access_put(xe);
1515 
1516 		if (xe->info.has_asid && vm->usm.asid) {
1517 			mutex_lock(&xe->usm.lock);
1518 			lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1519 			xe_assert(xe, lookup == vm);
1520 			mutex_unlock(&xe->usm.lock);
1521 		}
1522 	}
1523 
1524 	for_each_tile(tile, xe, id)
1525 		XE_WARN_ON(vm->pt_root[id]);
1526 
1527 	trace_xe_vm_free(vm);
1528 	dma_fence_put(vm->rebind_fence);
1529 	kfree(vm);
1530 }
1531 
1532 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1533 {
1534 	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1535 
1536 	/* To destroy the VM we need to be able to sleep */
1537 	queue_work(system_unbound_wq, &vm->destroy_work);
1538 }
1539 
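/**
 * xe_vm_lookup() - Look up a VM by handle
 * @xef: The file the VM handle belongs to.
 * @id: The VM handle.
 *
 * Return: A referenced VM on success (release with xe_vm_put()), NULL if
 * no VM with that handle exists.
 */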
1540 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1541 {
1542 	struct xe_vm *vm;
1543 
1544 	mutex_lock(&xef->vm.lock);
1545 	vm = xa_load(&xef->vm.xa, id);
1546 	if (vm)
1547 		xe_vm_get(vm);
1548 	mutex_unlock(&xef->vm.lock);
1549 
1550 	return vm;
1551 }
1552 
1553 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
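/**
 * xe_vm_pdp4_descriptor() - Encode the page-directory descriptor for a tile
 * @vm: The VM.
 * @tile: The tile whose page-table root to encode.
 *
 * Return: The PDE-encoded address of the tile's page-table root, using the
 * device's XE_CACHE_WB PAT index.
 */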
1554 {
1555 	return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
1556 					 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
1557 }
1558 
1559 static struct xe_exec_queue *
1560 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
1561 {
1562 	return q ? q : vm->q[0];
1563 }
1564 
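/*
 * Unbind a vma on every tile where it is present. With more than one tile
 * the per-tile fences are combined into a dma_fence_array; on the last
 * operation the resulting fence is also used to signal the sync entries.
 */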
1565 static struct dma_fence *
1566 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1567 		 struct xe_sync_entry *syncs, u32 num_syncs,
1568 		 bool first_op, bool last_op)
1569 {
1570 	struct xe_vm *vm = xe_vma_vm(vma);
1571 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1572 	struct xe_tile *tile;
1573 	struct dma_fence *fence = NULL;
1574 	struct dma_fence **fences = NULL;
1575 	struct dma_fence_array *cf = NULL;
1576 	int cur_fence = 0, i;
1577 	int number_tiles = hweight8(vma->tile_present);
1578 	int err;
1579 	u8 id;
1580 
1581 	trace_xe_vma_unbind(vma);
1582 
1583 	if (number_tiles > 1) {
1584 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1585 				       GFP_KERNEL);
1586 		if (!fences)
1587 			return ERR_PTR(-ENOMEM);
1588 	}
1589 
1590 	for_each_tile(tile, vm->xe, id) {
1591 		if (!(vma->tile_present & BIT(id)))
1592 			goto next;
1593 
1594 		fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
1595 					   first_op ? syncs : NULL,
1596 					   first_op ? num_syncs : 0);
1597 		if (IS_ERR(fence)) {
1598 			err = PTR_ERR(fence);
1599 			goto err_fences;
1600 		}
1601 
1602 		if (fences)
1603 			fences[cur_fence++] = fence;
1604 
1605 next:
1606 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1607 			q = list_next_entry(q, multi_gt_list);
1608 	}
1609 
1610 	if (fences) {
1611 		cf = dma_fence_array_create(number_tiles, fences,
1612 					    vm->composite_fence_ctx,
1613 					    vm->composite_fence_seqno++,
1614 					    false);
1615 		if (!cf) {
1616 			--vm->composite_fence_seqno;
1617 			err = -ENOMEM;
1618 			goto err_fences;
1619 		}
1620 	}
1621 
1622 	fence = cf ? &cf->base : !fence ?
1623 		xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
1624 	if (last_op) {
1625 		for (i = 0; i < num_syncs; i++)
1626 			xe_sync_entry_signal(&syncs[i], NULL, fence);
1627 	}
1628 
1629 	return fence;
1630 
1631 err_fences:
1632 	if (fences) {
1633 		while (cur_fence)
1634 			dma_fence_put(fences[--cur_fence]);
1635 		kfree(fences);
1636 	}
1637 
1638 	return ERR_PTR(err);
1639 }
1640 
1641 static struct dma_fence *
1642 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1643 	       struct xe_sync_entry *syncs, u32 num_syncs,
1644 	       bool first_op, bool last_op)
1645 {
1646 	struct xe_tile *tile;
1647 	struct dma_fence *fence;
1648 	struct dma_fence **fences = NULL;
1649 	struct dma_fence_array *cf = NULL;
1650 	struct xe_vm *vm = xe_vma_vm(vma);
1651 	int cur_fence = 0, i;
1652 	int number_tiles = hweight8(vma->tile_mask);
1653 	int err;
1654 	u8 id;
1655 
1656 	trace_xe_vma_bind(vma);
1657 
1658 	if (number_tiles > 1) {
1659 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1660 				       GFP_KERNEL);
1661 		if (!fences)
1662 			return ERR_PTR(-ENOMEM);
1663 	}
1664 
1665 	for_each_tile(tile, vm->xe, id) {
1666 		if (!(vma->tile_mask & BIT(id)))
1667 			goto next;
1668 
1669 		fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
1670 					 first_op ? syncs : NULL,
1671 					 first_op ? num_syncs : 0,
1672 					 vma->tile_present & BIT(id));
1673 		if (IS_ERR(fence)) {
1674 			err = PTR_ERR(fence);
1675 			goto err_fences;
1676 		}
1677 
1678 		if (fences)
1679 			fences[cur_fence++] = fence;
1680 
1681 next:
1682 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1683 			q = list_next_entry(q, multi_gt_list);
1684 	}
1685 
1686 	if (fences) {
1687 		cf = dma_fence_array_create(number_tiles, fences,
1688 					    vm->composite_fence_ctx,
1689 					    vm->composite_fence_seqno++,
1690 					    false);
1691 		if (!cf) {
1692 			--vm->composite_fence_seqno;
1693 			err = -ENOMEM;
1694 			goto err_fences;
1695 		}
1696 	}
1697 
1698 	if (last_op) {
1699 		for (i = 0; i < num_syncs; i++)
1700 			xe_sync_entry_signal(&syncs[i], NULL,
1701 					     cf ? &cf->base : fence);
1702 	}
1703 
1704 	return cf ? &cf->base : fence;
1705 
1706 err_fences:
1707 	if (fences) {
1708 		while (cur_fence)
1709 			dma_fence_put(fences[--cur_fence]);
1710 		kfree(fences);
1711 	}
1712 
1713 	return ERR_PTR(err);
1714 }
1715 
1716 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1717 			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1718 			u32 num_syncs, bool immediate, bool first_op,
1719 			bool last_op)
1720 {
1721 	struct dma_fence *fence;
1722 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1723 
1724 	xe_vm_assert_held(vm);
1725 
1726 	if (immediate) {
1727 		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
1728 				       last_op);
1729 		if (IS_ERR(fence))
1730 			return PTR_ERR(fence);
1731 	} else {
1732 		int i;
1733 
1734 		xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1735 
1736 		fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1737 		if (last_op) {
1738 			for (i = 0; i < num_syncs; i++)
1739 				xe_sync_entry_signal(&syncs[i], NULL, fence);
1740 		}
1741 	}
1742 
1743 	if (last_op)
1744 		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1745 	dma_fence_put(fence);
1746 
1747 	return 0;
1748 }
1749 
1750 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
1751 		      struct xe_bo *bo, struct xe_sync_entry *syncs,
1752 		      u32 num_syncs, bool immediate, bool first_op,
1753 		      bool last_op)
1754 {
1755 	int err;
1756 
1757 	xe_vm_assert_held(vm);
1758 	xe_bo_assert_held(bo);
1759 
1760 	if (bo && immediate) {
1761 		err = xe_bo_validate(bo, vm, true);
1762 		if (err)
1763 			return err;
1764 	}
1765 
1766 	return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op,
1767 			    last_op);
1768 }
1769 
1770 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1771 			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1772 			u32 num_syncs, bool first_op, bool last_op)
1773 {
1774 	struct dma_fence *fence;
1775 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1776 
1777 	xe_vm_assert_held(vm);
1778 	xe_bo_assert_held(xe_vma_bo(vma));
1779 
1780 	fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
1781 	if (IS_ERR(fence))
1782 		return PTR_ERR(fence);
1783 
1784 	xe_vma_destroy(vma, fence);
1785 	if (last_op)
1786 		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1787 	dma_fence_put(fence);
1788 
1789 	return 0;
1790 }
1791 
1792 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
1793 				    DRM_XE_VM_CREATE_FLAG_LR_MODE | \
1794 				    DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1795 
1796 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1797 		       struct drm_file *file)
1798 {
1799 	struct xe_device *xe = to_xe_device(dev);
1800 	struct xe_file *xef = to_xe_file(file);
1801 	struct drm_xe_vm_create *args = data;
1802 	struct xe_tile *tile;
1803 	struct xe_vm *vm;
1804 	u32 id, asid;
1805 	int err;
1806 	u32 flags = 0;
1807 
1808 	if (XE_IOCTL_DBG(xe, args->extensions))
1809 		return -EINVAL;
1810 
1811 	if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1812 		args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
1813 
1814 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1815 			 !xe->info.has_usm))
1816 		return -EINVAL;
1817 
1818 	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1819 		return -EINVAL;
1820 
1821 	if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1822 		return -EINVAL;
1823 
1824 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
1825 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1826 		return -EINVAL;
1827 
1828 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
1829 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1830 		return -EINVAL;
1831 
1832 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1833 			 xe_device_in_non_fault_mode(xe)))
1834 		return -EINVAL;
1835 
1836 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
1837 			 xe_device_in_fault_mode(xe)))
1838 		return -EINVAL;
1839 
1843 	if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
1844 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
1845 	if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
1846 		flags |= XE_VM_FLAG_LR_MODE;
1847 	if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1848 		flags |= XE_VM_FLAG_FAULT_MODE;
1849 
1850 	vm = xe_vm_create(xe, flags);
1851 	if (IS_ERR(vm))
1852 		return PTR_ERR(vm);
1853 
1854 	mutex_lock(&xef->vm.lock);
1855 	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1856 	mutex_unlock(&xef->vm.lock);
1857 	if (err) {
1858 		xe_vm_close_and_put(vm);
1859 		return err;
1860 	}
1861 
1862 	if (xe->info.has_asid) {
1863 		mutex_lock(&xe->usm.lock);
1864 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1865 				      XA_LIMIT(1, XE_MAX_ASID - 1),
1866 				      &xe->usm.next_asid, GFP_KERNEL);
1867 		mutex_unlock(&xe->usm.lock);
1868 		if (err < 0) {
1869 			xe_vm_close_and_put(vm);
1870 			return err;
1871 		}
1872 		err = 0;
1873 		vm->usm.asid = asid;
1874 	}
1875 
1876 	args->vm_id = id;
1877 	vm->xef = xef;
1878 
1879 	/* Record BO memory for VM pagetable created against client */
1880 	for_each_tile(tile, xe, id)
1881 		if (vm->pt_root[id])
1882 			xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
1883 
1884 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1885 	/* Warning: Security issue - never enable by default */
1886 	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
1887 #endif
1888 
1889 	return 0;
1890 }
1891 
1892 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1893 			struct drm_file *file)
1894 {
1895 	struct xe_device *xe = to_xe_device(dev);
1896 	struct xe_file *xef = to_xe_file(file);
1897 	struct drm_xe_vm_destroy *args = data;
1898 	struct xe_vm *vm;
1899 	int err = 0;
1900 
1901 	if (XE_IOCTL_DBG(xe, args->pad) ||
1902 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1903 		return -EINVAL;
1904 
1905 	mutex_lock(&xef->vm.lock);
1906 	vm = xa_load(&xef->vm.xa, args->vm_id);
1907 	if (XE_IOCTL_DBG(xe, !vm))
1908 		err = -ENOENT;
1909 	else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
1910 		err = -EBUSY;
1911 	else
1912 		xa_erase(&xef->vm.xa, args->vm_id);
1913 	mutex_unlock(&xef->vm.lock);
1914 
1915 	if (!err)
1916 		xe_vm_close_and_put(vm);
1917 
1918 	return err;
1919 }
1920 
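/* Map uAPI prefetch region indices to TTM memory types. */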
1921 static const u32 region_to_mem_type[] = {
1922 	XE_PL_TT,
1923 	XE_PL_VRAM0,
1924 	XE_PL_VRAM1,
1925 };
1926 
1927 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
1928 			  struct xe_exec_queue *q, u32 region,
1929 			  struct xe_sync_entry *syncs, u32 num_syncs,
1930 			  bool first_op, bool last_op)
1931 {
1932 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1933 	int err;
1934 
1935 	xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
1936 
1937 	if (!xe_vma_has_no_bo(vma)) {
1938 		err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
1939 		if (err)
1940 			return err;
1941 	}
1942 
1943 	if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
1944 		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
1945 				  true, first_op, last_op);
1946 	} else {
1947 		int i;
1948 
1949 		/* Nothing to do, signal fences now */
1950 		if (last_op) {
1951 			for (i = 0; i < num_syncs; i++) {
1952 				struct dma_fence *fence =
1953 					xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1954 
1955 				xe_sync_entry_signal(&syncs[i], NULL, fence);
1956 			}
1957 		}
1958 
1959 		return 0;
1960 	}
1961 }
1962 
1963 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
1964 			     bool post_commit)
1965 {
1966 	down_read(&vm->userptr.notifier_lock);
1967 	vma->gpuva.flags |= XE_VMA_DESTROYED;
1968 	up_read(&vm->userptr.notifier_lock);
1969 	if (post_commit)
1970 		xe_vm_remove_vma(vm, vma);
1971 }
1972 
1973 #undef ULL
1974 #define ULL	unsigned long long
1975 
1976 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
1977 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
1978 {
1979 	struct xe_vma *vma;
1980 
1981 	switch (op->op) {
1982 	case DRM_GPUVA_OP_MAP:
1983 		vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
1984 		       (ULL)op->map.va.addr, (ULL)op->map.va.range);
1985 		break;
1986 	case DRM_GPUVA_OP_REMAP:
1987 		vma = gpuva_to_vma(op->remap.unmap->va);
1988 		vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
1989 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
1990 		       op->remap.unmap->keep ? 1 : 0);
1991 		if (op->remap.prev)
1992 			vm_dbg(&xe->drm,
1993 			       "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
1994 			       (ULL)op->remap.prev->va.addr,
1995 			       (ULL)op->remap.prev->va.range);
1996 		if (op->remap.next)
1997 			vm_dbg(&xe->drm,
1998 			       "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
1999 			       (ULL)op->remap.next->va.addr,
2000 			       (ULL)op->remap.next->va.range);
2001 		break;
2002 	case DRM_GPUVA_OP_UNMAP:
2003 		vma = gpuva_to_vma(op->unmap.va);
2004 		vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2005 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2006 		       op->unmap.keep ? 1 : 0);
2007 		break;
2008 	case DRM_GPUVA_OP_PREFETCH:
2009 		vma = gpuva_to_vma(op->prefetch.va);
2010 		vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2011 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2012 		break;
2013 	default:
2014 		drm_warn(&xe->drm, "NOT POSSIBLE");
2015 	}
2016 }
2017 #else
2018 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2019 {
2020 }
2021 #endif
2022 
2023 /*
2024  * Create operations list from IOCTL arguments, set up operation fields so the
2025  * parse and commit steps are decoupled from IOCTL arguments. This step can fail.
2026  */
2027 static struct drm_gpuva_ops *
2028 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2029 			 u64 bo_offset_or_userptr, u64 addr, u64 range,
2030 			 u32 operation, u32 flags,
2031 			 u32 prefetch_region, u16 pat_index)
2032 {
2033 	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2034 	struct drm_gpuva_ops *ops;
2035 	struct drm_gpuva_op *__op;
2036 	struct xe_vma_op *op;
2037 	struct drm_gpuvm_bo *vm_bo;
2038 	int err;
2039 
2040 	lockdep_assert_held_write(&vm->lock);
2041 
2042 	vm_dbg(&vm->xe->drm,
2043 	       "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2044 	       operation, (ULL)addr, (ULL)range,
2045 	       (ULL)bo_offset_or_userptr);
2046 
2047 	switch (operation) {
2048 	case DRM_XE_VM_BIND_OP_MAP:
2049 	case DRM_XE_VM_BIND_OP_MAP_USERPTR:
2050 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2051 						  obj, bo_offset_or_userptr);
2052 		break;
2053 	case DRM_XE_VM_BIND_OP_UNMAP:
2054 		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2055 		break;
2056 	case DRM_XE_VM_BIND_OP_PREFETCH:
2057 		ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2058 		break;
2059 	case DRM_XE_VM_BIND_OP_UNMAP_ALL:
2060 		xe_assert(vm->xe, bo);
2061 
2062 		err = xe_bo_lock(bo, true);
2063 		if (err)
2064 			return ERR_PTR(err);
2065 
2066 		vm_bo = drm_gpuvm_bo_find(&vm->gpuvm, obj);
2067 		if (!vm_bo) {
			xe_bo_unlock(bo);
			return ERR_PTR(-ENODATA);
		}
2069 
2070 		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2071 		drm_gpuvm_bo_put(vm_bo);
2072 		xe_bo_unlock(bo);
2073 		break;
2074 	default:
2075 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2076 		ops = ERR_PTR(-EINVAL);
2077 	}
2078 	if (IS_ERR(ops))
2079 		return ops;
2080 
2081 #ifdef TEST_VM_ASYNC_OPS_ERROR
2082 	if (operation & FORCE_ASYNC_OP_ERROR) {
2083 		op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
2084 					      base.entry);
2085 		if (op)
2086 			op->inject_error = true;
2087 	}
2088 #endif
2089 
2090 	drm_gpuva_for_each_op(__op, ops) {
2091 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2092 
2093 		if (__op->op == DRM_GPUVA_OP_MAP) {
2094 			op->map.immediate =
2095 				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
2096 			op->map.read_only =
2097 				flags & DRM_XE_VM_BIND_FLAG_READONLY;
2098 			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2099 			op->map.pat_index = pat_index;
2100 		} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
2101 			op->prefetch.region = prefetch_region;
2102 		}
2103 
2104 		print_op(vm->xe, __op);
2105 	}
2106 
2107 	return ops;
2108 }
2109 
2110 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2111 			      u16 pat_index, unsigned int flags)
2112 {
2113 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2114 	struct drm_exec exec;
2115 	struct xe_vma *vma;
2116 	int err;
2117 
2118 	lockdep_assert_held_write(&vm->lock);
2119 
2120 	if (bo) {
2121 		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2122 		drm_exec_until_all_locked(&exec) {
2123 			err = 0;
2124 			if (!bo->vm) {
2125 				err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
2126 				drm_exec_retry_on_contention(&exec);
2127 			}
2128 			if (!err) {
2129 				err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2130 				drm_exec_retry_on_contention(&exec);
2131 			}
2132 			if (err) {
2133 				drm_exec_fini(&exec);
2134 				return ERR_PTR(err);
2135 			}
2136 		}
2137 	}
2138 	vma = xe_vma_create(vm, bo, op->gem.offset,
2139 			    op->va.addr, op->va.addr +
2140 			    op->va.range - 1, pat_index, flags);
2141 	if (bo)
2142 		drm_exec_fini(&exec);
2143 
	if (IS_ERR(vma))
		return vma;

2144 	if (xe_vma_is_userptr(vma)) {
2145 		err = xe_vma_userptr_pin_pages(vma);
2146 		if (err) {
2147 			prep_vma_destroy(vm, vma, false);
2148 			xe_vma_destroy_unlocked(vma);
2149 			return ERR_PTR(err);
2150 		}
2151 	} else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
2152 		err = add_preempt_fences(vm, bo);
2153 		if (err) {
2154 			prep_vma_destroy(vm, vma, false);
2155 			xe_vma_destroy_unlocked(vma);
2156 			return ERR_PTR(err);
2157 		}
2158 	}
2159 
2160 	return vma;
2161 }
2162 
2163 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2164 {
2165 	if (vma->gpuva.flags & XE_VMA_PTE_1G)
2166 		return SZ_1G;
2167 	else if (vma->gpuva.flags & XE_VMA_PTE_2M)
2168 		return SZ_2M;
2169 
2170 	return SZ_4K;
2171 }
2172 
2173 static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2174 {
2175 	switch (size) {
2176 	case SZ_1G:
2177 		vma->gpuva.flags |= XE_VMA_PTE_1G;
2178 		break;
2179 	case SZ_2M:
2180 		vma->gpuva.flags |= XE_VMA_PTE_2M;
2181 		break;
2182 	}
2183 
2184 	return SZ_4K;
2185 }
2186 
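/*
 * Commit an operation to the VM's GPUVA state: insert newly created VMAs,
 * mark unmapped VMAs as destroyed and trim the remapped VMA to the range
 * that actually needs unbinding. Whatever is committed here is what
 * xe_vma_op_unwind() rolls back on failure.
 */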
2187 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2188 {
2189 	int err = 0;
2190 
2191 	lockdep_assert_held_write(&vm->lock);
2192 
2193 	switch (op->base.op) {
2194 	case DRM_GPUVA_OP_MAP:
2195 		err |= xe_vm_insert_vma(vm, op->map.vma);
2196 		if (!err)
2197 			op->flags |= XE_VMA_OP_COMMITTED;
2198 		break;
2199 	case DRM_GPUVA_OP_REMAP:
2200 	{
2201 		u8 tile_present =
2202 			gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2203 
2204 		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2205 				 true);
2206 		op->flags |= XE_VMA_OP_COMMITTED;
2207 
2208 		if (op->remap.prev) {
2209 			err |= xe_vm_insert_vma(vm, op->remap.prev);
2210 			if (!err)
2211 				op->flags |= XE_VMA_OP_PREV_COMMITTED;
2212 			if (!err && op->remap.skip_prev) {
2213 				op->remap.prev->tile_present =
2214 					tile_present;
2215 				op->remap.prev = NULL;
2216 			}
2217 		}
2218 		if (op->remap.next) {
2219 			err |= xe_vm_insert_vma(vm, op->remap.next);
2220 			if (!err)
2221 				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2222 			if (!err && op->remap.skip_next) {
2223 				op->remap.next->tile_present =
2224 					tile_present;
2225 				op->remap.next = NULL;
2226 			}
2227 		}
2228 
2229 		/* Adjust for partial unbind after removing VMA from VM */
2230 		if (!err) {
2231 			op->base.remap.unmap->va->va.addr = op->remap.start;
2232 			op->base.remap.unmap->va->va.range = op->remap.range;
2233 		}
2234 		break;
2235 	}
2236 	case DRM_GPUVA_OP_UNMAP:
2237 		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2238 		op->flags |= XE_VMA_OP_COMMITTED;
2239 		break;
2240 	case DRM_GPUVA_OP_PREFETCH:
2241 		op->flags |= XE_VMA_OP_COMMITTED;
2242 		break;
2243 	default:
2244 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2245 	}
2246 
2247 	return err;
2248 }
2249 
2250 
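/*
 * Parse a list of GPUVA operations into xe_vma_ops: allocate new VMAs for
 * MAP and for the PREV/NEXT remnants of a REMAP, attach the syncs to the
 * first and last operation, and commit each operation to the VM's GPUVA
 * state so a later failure can be unwound.
 */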
2251 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2252 				   struct drm_gpuva_ops *ops,
2253 				   struct xe_sync_entry *syncs, u32 num_syncs,
2254 				   struct list_head *ops_list, bool last)
2255 {
2256 	struct xe_vma_op *last_op = NULL;
2257 	struct drm_gpuva_op *__op;
2258 	int err = 0;
2259 
2260 	lockdep_assert_held_write(&vm->lock);
2261 
2262 	drm_gpuva_for_each_op(__op, ops) {
2263 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2264 		struct xe_vma *vma;
2265 		bool first = list_empty(ops_list);
2266 		unsigned int flags = 0;
2267 
2268 		INIT_LIST_HEAD(&op->link);
2269 		list_add_tail(&op->link, ops_list);
2270 
2271 		if (first) {
2272 			op->flags |= XE_VMA_OP_FIRST;
2273 			op->num_syncs = num_syncs;
2274 			op->syncs = syncs;
2275 		}
2276 
2277 		op->q = q;
2278 
2279 		switch (op->base.op) {
2280 		case DRM_GPUVA_OP_MAP:
2281 		{
2282 			flags |= op->map.read_only ?
2283 				VMA_CREATE_FLAG_READ_ONLY : 0;
2284 			flags |= op->map.is_null ?
2285 				VMA_CREATE_FLAG_IS_NULL : 0;
2286 
2287 			vma = new_vma(vm, &op->base.map, op->map.pat_index,
2288 				      flags);
2289 			if (IS_ERR(vma))
2290 				return PTR_ERR(vma);
2291 
2292 			op->map.vma = vma;
2293 			break;
2294 		}
2295 		case DRM_GPUVA_OP_REMAP:
2296 		{
2297 			struct xe_vma *old =
2298 				gpuva_to_vma(op->base.remap.unmap->va);
2299 
2300 			op->remap.start = xe_vma_start(old);
2301 			op->remap.range = xe_vma_size(old);
2302 
2303 			if (op->base.remap.prev) {
2304 				flags |= op->base.remap.unmap->va->flags &
2305 					XE_VMA_READ_ONLY ?
2306 					VMA_CREATE_FLAG_READ_ONLY : 0;
2307 				flags |= op->base.remap.unmap->va->flags &
2308 					DRM_GPUVA_SPARSE ?
2309 					VMA_CREATE_FLAG_IS_NULL : 0;
2310 
2311 				vma = new_vma(vm, op->base.remap.prev,
2312 					      old->pat_index, flags);
2313 				if (IS_ERR(vma))
2314 					return PTR_ERR(vma);
2315 
2316 				op->remap.prev = vma;
2317 
2318 				/*
2319 				 * Userptr creates a new SG mapping so
2320 				 * we must also rebind.
2321 				 */
2322 				op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2323 					IS_ALIGNED(xe_vma_end(vma),
2324 						   xe_vma_max_pte_size(old));
2325 				if (op->remap.skip_prev) {
2326 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2327 					op->remap.range -=
2328 						xe_vma_end(vma) -
2329 						xe_vma_start(old);
2330 					op->remap.start = xe_vma_end(vma);
2331 				}
2332 			}
2333 
2334 			if (op->base.remap.next) {
2335 				flags |= op->base.remap.unmap->va->flags &
2336 					XE_VMA_READ_ONLY ?
2337 					VMA_CREATE_FLAG_READ_ONLY : 0;
2338 				flags |= op->base.remap.unmap->va->flags &
2339 					DRM_GPUVA_SPARSE ?
2340 					VMA_CREATE_FLAG_IS_NULL : 0;
2341 
2342 				vma = new_vma(vm, op->base.remap.next,
2343 					      old->pat_index, flags);
2344 				if (IS_ERR(vma))
2345 					return PTR_ERR(vma);
2346 
2347 				op->remap.next = vma;
2348 
2349 				/*
2350 				 * Userptr creates a new SG mapping so
2351 				 * we must also rebind.
2352 				 */
2353 				op->remap.skip_next = !xe_vma_is_userptr(old) &&
2354 					IS_ALIGNED(xe_vma_start(vma),
2355 						   xe_vma_max_pte_size(old));
2356 				if (op->remap.skip_next) {
2357 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2358 					op->remap.range -=
2359 						xe_vma_end(old) -
2360 						xe_vma_start(vma);
2361 				}
2362 			}
2363 			break;
2364 		}
2365 		case DRM_GPUVA_OP_UNMAP:
2366 		case DRM_GPUVA_OP_PREFETCH:
2367 			/* Nothing to do */
2368 			break;
2369 		default:
2370 			drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2371 		}
2372 
2373 		last_op = op;
2374 
2375 		err = xe_vma_op_commit(vm, op);
2376 		if (err)
2377 			return err;
2378 	}
2379 
2380 	/* FIXME: Unhandled corner case */
2381 	XE_WARN_ON(!last_op && last && !list_empty(ops_list));
2382 
2383 	if (!last_op)
2384 		return 0;
2385 
2386 	last_op->ops = ops;
2387 	if (last) {
2388 		last_op->flags |= XE_VMA_OP_LAST;
2389 		last_op->num_syncs = num_syncs;
2390 		last_op->syncs = syncs;
2391 	}
2392 
2393 	return 0;
2394 }
2395 
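/*
 * Execute a single operation with the VM and the VMA's BO locked via
 * xe_vm_prepare_vma(): MAP/UNMAP/PREFETCH are dispatched directly, while
 * REMAP is carried out as an unbind of the old VMA followed by rebinds of
 * the surviving PREV/NEXT pieces.
 */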
2396 static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
2397 		      struct xe_vma *vma, struct xe_vma_op *op)
2398 {
2399 	int err;
2400 
2401 	lockdep_assert_held_write(&vm->lock);
2402 
2403 	err = xe_vm_prepare_vma(exec, vma, 1);
2404 	if (err)
2405 		return err;
2406 
2407 	xe_vm_assert_held(vm);
2408 	xe_bo_assert_held(xe_vma_bo(vma));
2409 
2410 	switch (op->base.op) {
2411 	case DRM_GPUVA_OP_MAP:
2412 		err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2413 				 op->syncs, op->num_syncs,
2414 				 op->map.immediate || !xe_vm_in_fault_mode(vm),
2415 				 op->flags & XE_VMA_OP_FIRST,
2416 				 op->flags & XE_VMA_OP_LAST);
2417 		break;
2418 	case DRM_GPUVA_OP_REMAP:
2419 	{
2420 		bool prev = !!op->remap.prev;
2421 		bool next = !!op->remap.next;
2422 
2423 		if (!op->remap.unmap_done) {
2424 			if (prev || next)
2425 				vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2426 			err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2427 					   op->num_syncs,
2428 					   op->flags & XE_VMA_OP_FIRST,
2429 					   op->flags & XE_VMA_OP_LAST &&
2430 					   !prev && !next);
2431 			if (err)
2432 				break;
2433 			op->remap.unmap_done = true;
2434 		}
2435 
2436 		if (prev) {
2437 			op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2438 			err = xe_vm_bind(vm, op->remap.prev, op->q,
2439 					 xe_vma_bo(op->remap.prev), op->syncs,
2440 					 op->num_syncs, true, false,
2441 					 op->flags & XE_VMA_OP_LAST && !next);
2442 			op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2443 			if (err)
2444 				break;
2445 			op->remap.prev = NULL;
2446 		}
2447 
2448 		if (next) {
2449 			op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2450 			err = xe_vm_bind(vm, op->remap.next, op->q,
2451 					 xe_vma_bo(op->remap.next),
2452 					 op->syncs, op->num_syncs,
2453 					 true, false,
2454 					 op->flags & XE_VMA_OP_LAST);
2455 			op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2456 			if (err)
2457 				break;
2458 			op->remap.next = NULL;
2459 		}
2460 
2461 		break;
2462 	}
2463 	case DRM_GPUVA_OP_UNMAP:
2464 		err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2465 				   op->num_syncs, op->flags & XE_VMA_OP_FIRST,
2466 				   op->flags & XE_VMA_OP_LAST);
2467 		break;
2468 	case DRM_GPUVA_OP_PREFETCH:
2469 		err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
2470 				     op->syncs, op->num_syncs,
2471 				     op->flags & XE_VMA_OP_FIRST,
2472 				     op->flags & XE_VMA_OP_LAST);
2473 		break;
2474 	default:
2475 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2476 	}
2477 
2478 	if (err)
2479 		trace_xe_vma_fail(vma);
2480 
2481 	return err;
2482 }
2483 
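/*
 * Wrapper that runs op_execute() inside a drm_exec transaction, retrying on
 * object-lock contention. A userptr VMA whose pages were invalidated
 * (-EAGAIN) is repinned and the whole operation is retried.
 */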
2484 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2485 			       struct xe_vma_op *op)
2486 {
2487 	struct drm_exec exec;
2488 	int err;
2489 
2490 retry_userptr:
2491 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2492 	drm_exec_until_all_locked(&exec) {
2493 		err = op_execute(&exec, vm, vma, op);
2494 		drm_exec_retry_on_contention(&exec);
2495 		if (err)
2496 			break;
2497 	}
2498 	drm_exec_fini(&exec);
2499 
2500 	if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2501 		lockdep_assert_held_write(&vm->lock);
2502 		err = xe_vma_userptr_pin_pages(vma);
2503 		if (!err)
2504 			goto retry_userptr;
2505 
2506 		trace_xe_vma_fail(vma);
2507 	}
2508 
2509 	return err;
2510 }
2511 
2512 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2513 {
2514 	int ret = 0;
2515 
2516 	lockdep_assert_held_write(&vm->lock);
2517 
2518 #ifdef TEST_VM_ASYNC_OPS_ERROR
2519 	if (op->inject_error) {
2520 		op->inject_error = false;
2521 		return -ENOMEM;
2522 	}
2523 #endif
2524 
2525 	switch (op->base.op) {
2526 	case DRM_GPUVA_OP_MAP:
2527 		ret = __xe_vma_op_execute(vm, op->map.vma, op);
2528 		break;
2529 	case DRM_GPUVA_OP_REMAP:
2530 	{
2531 		struct xe_vma *vma;
2532 
2533 		if (!op->remap.unmap_done)
2534 			vma = gpuva_to_vma(op->base.remap.unmap->va);
2535 		else if (op->remap.prev)
2536 			vma = op->remap.prev;
2537 		else
2538 			vma = op->remap.next;
2539 
2540 		ret = __xe_vma_op_execute(vm, vma, op);
2541 		break;
2542 	}
2543 	case DRM_GPUVA_OP_UNMAP:
2544 		ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2545 					  op);
2546 		break;
2547 	case DRM_GPUVA_OP_PREFETCH:
2548 		ret = __xe_vma_op_execute(vm,
2549 					  gpuva_to_vma(op->base.prefetch.va),
2550 					  op);
2551 		break;
2552 	default:
2553 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2554 	}
2555 
2556 	return ret;
2557 }
2558 
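/*
 * Per-operation cleanup: the last operation of an IOCTL owns the syncs, the
 * exec queue reference and the VM reference taken at submission, and the
 * last operation parsed from each drm_gpuva_ops list owns that list; release
 * whatever this operation owns and drop it from the ops list.
 */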
2559 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2560 {
2561 	bool last = op->flags & XE_VMA_OP_LAST;
2562 
2563 	if (last) {
2564 		while (op->num_syncs--)
2565 			xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2566 		kfree(op->syncs);
2567 		if (op->q)
2568 			xe_exec_queue_put(op->q);
2569 	}
2570 	if (!list_empty(&op->link))
2571 		list_del(&op->link);
2572 	if (op->ops)
2573 		drm_gpuva_ops_free(&vm->gpuvm, op->ops);
2574 	if (last)
2575 		xe_vm_put(vm);
2576 }
2577 
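/*
 * Undo a single (possibly partially) committed operation: destroy VMAs that
 * were created for MAP/REMAP, and re-insert and clear the DESTROYED flag on
 * VMAs that an UNMAP or REMAP had removed from the VM.
 */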
2578 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2579 			     bool post_commit, bool prev_post_commit,
2580 			     bool next_post_commit)
2581 {
2582 	lockdep_assert_held_write(&vm->lock);
2583 
2584 	switch (op->base.op) {
2585 	case DRM_GPUVA_OP_MAP:
2586 		if (op->map.vma) {
2587 			prep_vma_destroy(vm, op->map.vma, post_commit);
2588 			xe_vma_destroy_unlocked(op->map.vma);
2589 		}
2590 		break;
2591 	case DRM_GPUVA_OP_UNMAP:
2592 	{
2593 		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2594 
2595 		if (vma) {
2596 			down_read(&vm->userptr.notifier_lock);
2597 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2598 			up_read(&vm->userptr.notifier_lock);
2599 			if (post_commit)
2600 				xe_vm_insert_vma(vm, vma);
2601 		}
2602 		break;
2603 	}
2604 	case DRM_GPUVA_OP_REMAP:
2605 	{
2606 		struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2607 
2608 		if (op->remap.prev) {
2609 			prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2610 			xe_vma_destroy_unlocked(op->remap.prev);
2611 		}
2612 		if (op->remap.next) {
2613 			prep_vma_destroy(vm, op->remap.next, next_post_commit);
2614 			xe_vma_destroy_unlocked(op->remap.next);
2615 		}
2616 		if (vma) {
2617 			down_read(&vm->userptr.notifier_lock);
2618 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2619 			up_read(&vm->userptr.notifier_lock);
2620 			if (post_commit)
2621 				xe_vm_insert_vma(vm, vma);
2622 		}
2623 		break;
2624 	}
2625 	case DRM_GPUVA_OP_PREFETCH:
2626 		/* Nothing to do */
2627 		break;
2628 	default:
2629 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2630 	}
2631 }
2632 
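/*
 * Unwind every ops list in reverse order after a failure, undoing whatever
 * each operation had committed, and free the GPUVA ops lists themselves.
 */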
2633 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2634 				     struct drm_gpuva_ops **ops,
2635 				     int num_ops_list)
2636 {
2637 	int i;
2638 
2639 	for (i = num_ops_list - 1; i >= 0; --i) {
2640 		struct drm_gpuva_ops *__ops = ops[i];
2641 		struct drm_gpuva_op *__op;
2642 
2643 		if (!__ops)
2644 			continue;
2645 
2646 		drm_gpuva_for_each_op_reverse(__op, __ops) {
2647 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2648 
2649 			xe_vma_op_unwind(vm, op,
2650 					 op->flags & XE_VMA_OP_COMMITTED,
2651 					 op->flags & XE_VMA_OP_PREV_COMMITTED,
2652 					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2653 		}
2654 
2655 		drm_gpuva_ops_free(&vm->gpuvm, __ops);
2656 	}
2657 }
2658 
2659 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2660 				     struct list_head *ops_list)
2661 {
2662 	struct xe_vma_op *op, *next;
2663 	int err;
2664 
2665 	lockdep_assert_held_write(&vm->lock);
2666 
2667 	list_for_each_entry_safe(op, next, ops_list, link) {
2668 		err = xe_vma_op_execute(vm, op);
2669 		if (err) {
2670 			drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
2671 				 op->base.op, err);
2672 			/*
2673 			 * FIXME: Killing VM rather than proper error handling
2674 			 */
2675 			xe_vm_kill(vm);
2676 			return -ENOSPC;
2677 		}
2678 		xe_vma_op_cleanup(vm, op);
2679 	}
2680 
2681 	return 0;
2682 }
2683 
2684 #ifdef TEST_VM_ASYNC_OPS_ERROR
2685 #define SUPPORTED_FLAGS	\
2686 	(FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_READONLY | \
2687 	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | 0xffff)
2688 #else
2689 #define SUPPORTED_FLAGS	\
2690 	(DRM_XE_VM_BIND_FLAG_READONLY | \
2691 	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
2692 	 0xffff)
2693 #endif
2694 #define XE_64K_PAGE_MASK 0xffffull
2695 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
2696 
2697 #define MAX_BINDS	512	/* FIXME: Picking random upper limit */
2698 
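/*
 * Validate the bind IOCTL arguments and pull the bind-op array out of
 * userspace: a single bind is embedded in the IOCTL struct, multiple binds
 * arrive as a user pointer to an array. Each op is checked for supported
 * flags, page alignment, a valid PAT index and an op/flags combination that
 * makes sense before any VM state is touched.
 */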
2699 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2700 				    struct drm_xe_vm_bind *args,
2701 				    struct drm_xe_vm_bind_op **bind_ops)
2702 {
2703 	int err;
2704 	int i;
2705 
2706 	if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
2707 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2708 		return -EINVAL;
2709 
2710 	if (XE_IOCTL_DBG(xe, args->extensions) ||
2711 	    XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
2712 		return -EINVAL;
2713 
2714 	if (args->num_binds > 1) {
2715 		u64 __user *bind_user =
2716 			u64_to_user_ptr(args->vector_of_binds);
2717 
2718 		*bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
2719 				    args->num_binds, GFP_KERNEL);
2720 		if (!*bind_ops)
2721 			return -ENOMEM;
2722 
2723 		err = __copy_from_user(*bind_ops, bind_user,
2724 				       sizeof(struct drm_xe_vm_bind_op) *
2725 				       args->num_binds);
2726 		if (XE_IOCTL_DBG(xe, err)) {
2727 			err = -EFAULT;
2728 			goto free_bind_ops;
2729 		}
2730 	} else {
2731 		*bind_ops = &args->bind;
2732 	}
2733 
2734 	for (i = 0; i < args->num_binds; ++i) {
2735 		u64 range = (*bind_ops)[i].range;
2736 		u64 addr = (*bind_ops)[i].addr;
2737 		u32 op = (*bind_ops)[i].op;
2738 		u32 flags = (*bind_ops)[i].flags;
2739 		u32 obj = (*bind_ops)[i].obj;
2740 		u64 obj_offset = (*bind_ops)[i].obj_offset;
2741 		u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
2742 		bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2743 		u16 pat_index = (*bind_ops)[i].pat_index;
2744 		u16 coh_mode;
2745 
2746 		if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
2747 			err = -EINVAL;
2748 			goto free_bind_ops;
2749 		}
2750 
2751 		pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
2752 		(*bind_ops)[i].pat_index = pat_index;
2753 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2754 		if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
2755 			err = -EINVAL;
2756 			goto free_bind_ops;
2757 		}
2758 
2759 		if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
2760 			err = -EINVAL;
2761 			goto free_bind_ops;
2762 		}
2763 
2764 		if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
2765 		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
2766 		    XE_IOCTL_DBG(xe, obj && is_null) ||
2767 		    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
2768 		    XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
2769 				 is_null) ||
2770 		    XE_IOCTL_DBG(xe, !obj &&
2771 				 op == DRM_XE_VM_BIND_OP_MAP &&
2772 				 !is_null) ||
2773 		    XE_IOCTL_DBG(xe, !obj &&
2774 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2775 		    XE_IOCTL_DBG(xe, addr &&
2776 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2777 		    XE_IOCTL_DBG(xe, range &&
2778 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2779 		    XE_IOCTL_DBG(xe, obj &&
2780 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2781 		    XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2782 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2783 		    XE_IOCTL_DBG(xe, obj &&
2784 				 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
2785 		    XE_IOCTL_DBG(xe, prefetch_region &&
2786 				 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
2787 		    XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
2788 				       xe->info.mem_region_mask)) ||
2789 		    XE_IOCTL_DBG(xe, obj &&
2790 				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
2791 			err = -EINVAL;
2792 			goto free_bind_ops;
2793 		}
2794 
2795 		if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
2796 		    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
2797 		    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
2798 		    XE_IOCTL_DBG(xe, !range &&
2799 				 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
2800 			err = -EINVAL;
2801 			goto free_bind_ops;
2802 		}
2803 	}
2804 
2805 	return 0;
2806 
2807 free_bind_ops:
2808 	if (args->num_binds > 1)
2809 		kfree(*bind_ops);
2810 	return err;
2811 }
2812 
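/*
 * Nothing to (un)bind, but the IOCTL still carried syncs: build a fence from
 * the wait exec queue and attach it to the out-syncs so userspace ordering
 * is preserved, then record it as the queue's last fence.
 */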
2813 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
2814 				       struct xe_exec_queue *q,
2815 				       struct xe_sync_entry *syncs,
2816 				       int num_syncs)
2817 {
2818 	struct dma_fence *fence;
2819 	int i, err = 0;
2820 
2821 	fence = xe_sync_in_fence_get(syncs, num_syncs,
2822 				     to_wait_exec_queue(vm, q), vm);
2823 	if (IS_ERR(fence))
2824 		return PTR_ERR(fence);
2825 
2826 	for (i = 0; i < num_syncs; i++)
2827 		xe_sync_entry_signal(&syncs[i], NULL, fence);
2828 
2829 	xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
2830 				     fence);
2831 	dma_fence_put(fence);
2832 
2833 	return err;
2834 }
2835 
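/*
 * Entry point for the VM bind IOCTL: validate the arguments, look up the
 * exec queue, VM and GEM objects, parse the syncs, then build, parse and
 * execute the GPUVA operations for every bind op, unwinding anything that
 * was committed if a later step fails.
 */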
2836 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2837 {
2838 	struct xe_device *xe = to_xe_device(dev);
2839 	struct xe_file *xef = to_xe_file(file);
2840 	struct drm_xe_vm_bind *args = data;
2841 	struct drm_xe_sync __user *syncs_user;
2842 	struct xe_bo **bos = NULL;
2843 	struct drm_gpuva_ops **ops = NULL;
2844 	struct xe_vm *vm;
2845 	struct xe_exec_queue *q = NULL;
2846 	u32 num_syncs;
2847 	struct xe_sync_entry *syncs = NULL;
2848 	struct drm_xe_vm_bind_op *bind_ops;
2849 	LIST_HEAD(ops_list);
2850 	int err;
2851 	int i;
2852 
2853 	err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
2854 	if (err)
2855 		return err;
2856 
2857 	if (args->exec_queue_id) {
2858 		q = xe_exec_queue_lookup(xef, args->exec_queue_id);
2859 		if (XE_IOCTL_DBG(xe, !q)) {
2860 			err = -ENOENT;
2861 			goto free_objs;
2862 		}
2863 
2864 		if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
2865 			err = -EINVAL;
2866 			goto put_exec_queue;
2867 		}
2868 	}
2869 
2870 	vm = xe_vm_lookup(xef, args->vm_id);
2871 	if (XE_IOCTL_DBG(xe, !vm)) {
2872 		err = -EINVAL;
2873 		goto put_exec_queue;
2874 	}
2875 
2876 	err = down_write_killable(&vm->lock);
2877 	if (err)
2878 		goto put_vm;
2879 
2880 	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
2881 		err = -ENOENT;
2882 		goto release_vm_lock;
2883 	}
2884 
2885 	for (i = 0; i < args->num_binds; ++i) {
2886 		u64 range = bind_ops[i].range;
2887 		u64 addr = bind_ops[i].addr;
2888 
2889 		if (XE_IOCTL_DBG(xe, range > vm->size) ||
2890 		    XE_IOCTL_DBG(xe, addr > vm->size - range)) {
2891 			err = -EINVAL;
2892 			goto release_vm_lock;
2893 		}
2894 	}
2895 
2896 	if (args->num_binds) {
2897 		bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
2898 		if (!bos) {
2899 			err = -ENOMEM;
2900 			goto release_vm_lock;
2901 		}
2902 
2903 		ops = kcalloc(args->num_binds, sizeof(*ops), GFP_KERNEL);
2904 		if (!ops) {
2905 			err = -ENOMEM;
2906 			goto release_vm_lock;
2907 		}
2908 	}
2909 
2910 	for (i = 0; i < args->num_binds; ++i) {
2911 		struct drm_gem_object *gem_obj;
2912 		u64 range = bind_ops[i].range;
2913 		u64 addr = bind_ops[i].addr;
2914 		u32 obj = bind_ops[i].obj;
2915 		u64 obj_offset = bind_ops[i].obj_offset;
2916 		u16 pat_index = bind_ops[i].pat_index;
2917 		u16 coh_mode;
2918 
2919 		if (!obj)
2920 			continue;
2921 
2922 		gem_obj = drm_gem_object_lookup(file, obj);
2923 		if (XE_IOCTL_DBG(xe, !gem_obj)) {
2924 			err = -ENOENT;
2925 			goto put_obj;
2926 		}
2927 		bos[i] = gem_to_xe_bo(gem_obj);
2928 
2929 		if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
2930 		    XE_IOCTL_DBG(xe, obj_offset >
2931 				 bos[i]->size - range)) {
2932 			err = -EINVAL;
2933 			goto put_obj;
2934 		}
2935 
2936 		if (bos[i]->flags & XE_BO_INTERNAL_64K) {
2937 			if (XE_IOCTL_DBG(xe, obj_offset &
2938 					 XE_64K_PAGE_MASK) ||
2939 			    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
2940 			    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
2941 				err = -EINVAL;
2942 				goto put_obj;
2943 			}
2944 		}
2945 
2946 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2947 		if (bos[i]->cpu_caching) {
2948 			if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2949 					 bos[i]->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
2950 				err = -EINVAL;
2951 				goto put_obj;
2952 			}
2953 		} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
2954 			/*
2955 			 * Imported dma-buf from a different device should
2956 			 * require 1way or 2way coherency since we don't know
2957 			 * how it was mapped on the CPU. Just assume it is
2958 			 * potentially cached on the CPU side.
2959 			 */
2960 			err = -EINVAL;
2961 			goto put_obj;
2962 		}
2963 	}
2964 
2965 	if (args->num_syncs) {
2966 		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
2967 		if (!syncs) {
2968 			err = -ENOMEM;
2969 			goto put_obj;
2970 		}
2971 	}
2972 
2973 	syncs_user = u64_to_user_ptr(args->syncs);
2974 	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
2975 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
2976 					  &syncs_user[num_syncs],
2977 					  (xe_vm_in_lr_mode(vm) ?
2978 					   SYNC_PARSE_FLAG_LR_MODE : 0) |
2979 					  (!args->num_binds ?
2980 					   SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
2981 		if (err)
2982 			goto free_syncs;
2983 	}
2984 
2985 	if (!args->num_binds) {
2986 		err = -ENODATA;
2987 		goto free_syncs;
2988 	}
2989 
2990 	for (i = 0; i < args->num_binds; ++i) {
2991 		u64 range = bind_ops[i].range;
2992 		u64 addr = bind_ops[i].addr;
2993 		u32 op = bind_ops[i].op;
2994 		u32 flags = bind_ops[i].flags;
2995 		u64 obj_offset = bind_ops[i].obj_offset;
2996 		u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
2997 		u16 pat_index = bind_ops[i].pat_index;
2998 
2999 		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3000 						  addr, range, op, flags,
3001 						  prefetch_region, pat_index);
3002 		if (IS_ERR(ops[i])) {
3003 			err = PTR_ERR(ops[i]);
3004 			ops[i] = NULL;
3005 			goto unwind_ops;
3006 		}
3007 
3008 		err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3009 					      &ops_list,
3010 					      i == args->num_binds - 1);
3011 		if (err)
3012 			goto unwind_ops;
3013 	}
3014 
3015 	/* Nothing to do */
3016 	if (list_empty(&ops_list)) {
3017 		err = -ENODATA;
3018 		goto unwind_ops;
3019 	}
3020 
3021 	xe_vm_get(vm);
3022 	if (q)
3023 		xe_exec_queue_get(q);
3024 
3025 	err = vm_bind_ioctl_ops_execute(vm, &ops_list);
3026 
3027 	up_write(&vm->lock);
3028 
3029 	if (q)
3030 		xe_exec_queue_put(q);
3031 	xe_vm_put(vm);
3032 
3033 	for (i = 0; bos && i < args->num_binds; ++i)
3034 		xe_bo_put(bos[i]);
3035 
3036 	kfree(bos);
3037 	kfree(ops);
3038 	if (args->num_binds > 1)
3039 		kfree(bind_ops);
3040 
3041 	return err;
3042 
3043 unwind_ops:
3044 	vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3045 free_syncs:
3046 	if (err == -ENODATA)
3047 		err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3048 	while (num_syncs--)
3049 		xe_sync_entry_cleanup(&syncs[num_syncs]);
3050 
3051 	kfree(syncs);
3052 put_obj:
3053 	for (i = 0; i < args->num_binds; ++i)
3054 		xe_bo_put(bos[i]);
3055 release_vm_lock:
3056 	up_write(&vm->lock);
3057 put_vm:
3058 	xe_vm_put(vm);
3059 put_exec_queue:
3060 	if (q)
3061 		xe_exec_queue_put(q);
3062 free_objs:
3063 	kfree(bos);
3064 	kfree(ops);
3065 	if (args->num_binds > 1)
3066 		kfree(bind_ops);
3067 	return err;
3068 }
3069 
3070 /**
3071  * xe_vm_lock() - Lock the vm's dma_resv object
3072  * @vm: The struct xe_vm whose lock is to be locked
3073  * @intr: Whether to perform any wait interruptible
3074  *
3075  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3076  * contended lock was interrupted. If @intr is false, the function
3077  * always returns 0.
3078  */
3079 int xe_vm_lock(struct xe_vm *vm, bool intr)
3080 {
3081 	if (intr)
3082 		return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3083 
3084 	return dma_resv_lock(xe_vm_resv(vm), NULL);
3085 }
3086 
3087 /**
3088  * xe_vm_unlock() - Unlock the vm's dma_resv object
3089  * @vm: The struct xe_vm whose lock is to be released.
3090  *
3091  * Unlock a buffer object lock that was locked by xe_vm_lock().
3092  */
3093 void xe_vm_unlock(struct xe_vm *vm)
3094 {
3095 	dma_resv_unlock(xe_vm_resv(vm));
3096 }
3097 
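/*
 * Typical xe_vm_lock()/xe_vm_unlock() usage (sketch):
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *	... touch state protected by the VM's dma_resv ...
 *	xe_vm_unlock(vm);
 */
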
3098 /**
3099  * xe_vm_invalidate_vma() - invalidate GPU mappings for VMA without a lock
3100  * @vma: VMA to invalidate
3101  *
3102  * Walks a list of page table leaves, zeroing the entries owned by this
3103  * VMA, invalidates the TLBs and blocks until the TLB invalidation is
3104  * complete.
3105  *
3106  * Return: 0 for success, negative error code otherwise.
3107  */
3108 int xe_vm_invalidate_vma(struct xe_vma *vma)
3109 {
3110 	struct xe_device *xe = xe_vma_vm(vma)->xe;
3111 	struct xe_tile *tile;
3112 	u32 tile_needs_invalidate = 0;
3113 	int seqno[XE_MAX_TILES_PER_DEVICE];
3114 	u8 id;
3115 	int ret;
3116 
3117 	xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
3118 	xe_assert(xe, !xe_vma_is_null(vma));
3119 	trace_xe_vma_usm_invalidate(vma);
3120 
3121 	/* Check that we don't race with page-table updates */
3122 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3123 		if (xe_vma_is_userptr(vma)) {
3124 			WARN_ON_ONCE(!mmu_interval_check_retry
3125 				     (&vma->userptr.notifier,
3126 				      vma->userptr.notifier_seq));
3127 			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3128 							     DMA_RESV_USAGE_BOOKKEEP));
3129 
3130 		} else {
3131 			xe_bo_assert_held(xe_vma_bo(vma));
3132 		}
3133 	}
3134 
3135 	for_each_tile(tile, xe, id) {
3136 		if (xe_pt_zap_ptes(tile, vma)) {
3137 			tile_needs_invalidate |= BIT(id);
3138 			xe_device_wmb(xe);
3139 			/*
3140 			 * FIXME: We potentially need to invalidate multiple
3141 			 * GTs within the tile
3142 			 */
3143 			seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3144 			if (seqno[id] < 0)
3145 				return seqno[id];
3146 		}
3147 	}
3148 
3149 	for_each_tile(tile, xe, id) {
3150 		if (tile_needs_invalidate & BIT(id)) {
3151 			ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3152 			if (ret < 0)
3153 				return ret;
3154 		}
3155 	}
3156 
3157 	vma->usm.tile_invalidated = vma->tile_mask;
3158 
3159 	return 0;
3160 }
3161 
3162 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3163 {
3164 	struct drm_gpuva *gpuva;
3165 	bool is_vram;
3166 	uint64_t addr;
3167 
3168 	if (!down_read_trylock(&vm->lock)) {
3169 		drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3170 		return 0;
3171 	}
3172 	if (vm->pt_root[gt_id]) {
3173 		addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
3174 		is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
3175 		drm_printf(p, " VM root: A:0x%llx %s\n", addr,
3176 			   is_vram ? "VRAM" : "SYS");
3177 	}
3178 
3179 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3180 		struct xe_vma *vma = gpuva_to_vma(gpuva);
3181 		bool is_userptr = xe_vma_is_userptr(vma);
3182 		bool is_null = xe_vma_is_null(vma);
3183 
3184 		if (is_null) {
3185 			addr = 0;
3186 		} else if (is_userptr) {
3187 			struct xe_res_cursor cur;
3188 
3189 			if (vma->userptr.sg) {
3190 				xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
3191 						&cur);
3192 				addr = xe_res_dma(&cur);
3193 			} else {
3194 				addr = 0;
3195 			}
3196 		} else {
3197 			addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
3198 			is_vram = xe_bo_is_vram(xe_vma_bo(vma));
3199 		}
3200 		drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3201 			   xe_vma_start(vma), xe_vma_end(vma) - 1,
3202 			   xe_vma_size(vma),
3203 			   addr, is_null ? "NULL" : is_userptr ? "USR" :
3204 			   is_vram ? "VRAM" : "SYS");
3205 	}
3206 	up_read(&vm->lock);
3207 
3208 	return 0;
3209 }
3210