xref: /linux/drivers/gpu/drm/xe/xe_vm.c (revision 8f8d74ee110c02137f5b78ca0a2bd6c10331f267)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_vm.h"
7 
8 #include <linux/dma-fence-array.h>
9 #include <linux/nospec.h>
10 
11 #include <drm/drm_exec.h>
12 #include <drm/drm_print.h>
13 #include <drm/ttm/ttm_execbuf_util.h>
14 #include <drm/ttm/ttm_tt.h>
15 #include <drm/xe_drm.h>
16 #include <linux/ascii85.h>
17 #include <linux/delay.h>
18 #include <linux/kthread.h>
19 #include <linux/mm.h>
20 #include <linux/swap.h>
21 
22 #include <generated/xe_wa_oob.h>
23 
24 #include "xe_assert.h"
25 #include "xe_bo.h"
26 #include "xe_device.h"
27 #include "xe_drm_client.h"
28 #include "xe_exec_queue.h"
29 #include "xe_gt.h"
30 #include "xe_gt_pagefault.h"
31 #include "xe_gt_tlb_invalidation.h"
32 #include "xe_migrate.h"
33 #include "xe_pat.h"
34 #include "xe_pm.h"
35 #include "xe_preempt_fence.h"
36 #include "xe_pt.h"
37 #include "xe_res_cursor.h"
38 #include "xe_sync.h"
39 #include "xe_trace.h"
40 #include "xe_wa.h"
41 
42 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
43 {
44 	return vm->gpuvm.r_obj;
45 }
46 
47 /**
48  * xe_vma_userptr_check_repin() - Advisory check for repin needed
49  * @uvma: The userptr vma
50  *
51  * Check if the userptr vma has been invalidated since last successful
52  * repin. The check is advisory only and the function can be called
53  * without the vm->userptr.notifier_lock held. There is no guarantee that the
54  * vma userptr will remain valid after a lockless check, so typically
55  * the call needs to be followed by a proper check under the notifier_lock.
56  *
57  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
58  */
59 int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
60 {
61 	return mmu_interval_check_retry(&uvma->userptr.notifier,
62 					uvma->userptr.notifier_seq) ?
63 		-EAGAIN : 0;
64 }
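
/*
 * Illustrative sketch (names that do not appear in this file, such as
 * commit_bindings(), are hypothetical): how a caller typically pairs the
 * lockless advisory check above with the authoritative re-check under the
 * notifier lock before committing page-table state.
 *
 *	if (xe_vma_userptr_check_repin(uvma) == -EAGAIN) {
 *		err = xe_vma_userptr_pin_pages(uvma);
 *		if (err)
 *			return err;
 *	}
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	if (__xe_vm_userptr_needs_repin(vm)) {
 *		up_read(&vm->userptr.notifier_lock);
 *		goto retry;			// raced with an invalidation
 *	}
 *	commit_bindings(vm);			// hypothetical
 *	up_read(&vm->userptr.notifier_lock);
 */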
65 
66 int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
67 {
68 	struct xe_userptr *userptr = &uvma->userptr;
69 	struct xe_vma *vma = &uvma->vma;
70 	struct xe_vm *vm = xe_vma_vm(vma);
71 	struct xe_device *xe = vm->xe;
72 	const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
73 	struct page **pages;
74 	bool in_kthread = !current->mm;
75 	unsigned long notifier_seq;
76 	int pinned, ret, i;
77 	bool read_only = xe_vma_read_only(vma);
78 
79 	lockdep_assert_held(&vm->lock);
80 	xe_assert(xe, xe_vma_is_userptr(vma));
81 retry:
82 	if (vma->gpuva.flags & XE_VMA_DESTROYED)
83 		return 0;
84 
85 	notifier_seq = mmu_interval_read_begin(&userptr->notifier);
86 	if (notifier_seq == userptr->notifier_seq)
87 		return 0;
88 
89 	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
90 	if (!pages)
91 		return -ENOMEM;
92 
93 	if (userptr->sg) {
94 		dma_unmap_sgtable(xe->drm.dev,
95 				  userptr->sg,
96 				  read_only ? DMA_TO_DEVICE :
97 				  DMA_BIDIRECTIONAL, 0);
98 		sg_free_table(userptr->sg);
99 		userptr->sg = NULL;
100 	}
101 
102 	pinned = ret = 0;
103 	if (in_kthread) {
104 		if (!mmget_not_zero(userptr->notifier.mm)) {
105 			ret = -EFAULT;
106 			goto mm_closed;
107 		}
108 		kthread_use_mm(userptr->notifier.mm);
109 	}
110 
111 	while (pinned < num_pages) {
112 		ret = get_user_pages_fast(xe_vma_userptr(vma) +
113 					  pinned * PAGE_SIZE,
114 					  num_pages - pinned,
115 					  read_only ? 0 : FOLL_WRITE,
116 					  &pages[pinned]);
117 		if (ret < 0)
118 			break;
119 
120 		pinned += ret;
121 		ret = 0;
122 	}
123 
124 	if (in_kthread) {
125 		kthread_unuse_mm(userptr->notifier.mm);
126 		mmput(userptr->notifier.mm);
127 	}
128 mm_closed:
129 	if (ret)
130 		goto out;
131 
132 	ret = sg_alloc_table_from_pages_segment(&userptr->sgt, pages,
133 						pinned, 0,
134 						(u64)pinned << PAGE_SHIFT,
135 						xe_sg_segment_size(xe->drm.dev),
136 						GFP_KERNEL);
137 	if (ret) {
138 		userptr->sg = NULL;
139 		goto out;
140 	}
141 	userptr->sg = &userptr->sgt;
142 
143 	ret = dma_map_sgtable(xe->drm.dev, userptr->sg,
144 			      read_only ? DMA_TO_DEVICE :
145 			      DMA_BIDIRECTIONAL,
146 			      DMA_ATTR_SKIP_CPU_SYNC |
147 			      DMA_ATTR_NO_KERNEL_MAPPING);
148 	if (ret) {
149 		sg_free_table(userptr->sg);
150 		userptr->sg = NULL;
151 		goto out;
152 	}
153 
154 	for (i = 0; i < pinned; ++i) {
155 		if (!read_only) {
156 			lock_page(pages[i]);
157 			set_page_dirty(pages[i]);
158 			unlock_page(pages[i]);
159 		}
160 
161 		mark_page_accessed(pages[i]);
162 	}
163 
164 out:
165 	release_pages(pages, pinned);
166 	kvfree(pages);
167 
168 	if (!(ret < 0)) {
169 		userptr->notifier_seq = notifier_seq;
170 		if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
171 			goto retry;
172 	}
173 
174 	return ret < 0 ? ret : 0;
175 }
176 
177 static bool preempt_fences_waiting(struct xe_vm *vm)
178 {
179 	struct xe_exec_queue *q;
180 
181 	lockdep_assert_held(&vm->lock);
182 	xe_vm_assert_held(vm);
183 
184 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
185 		if (!q->compute.pfence ||
186 		    (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
187 						   &q->compute.pfence->flags))) {
188 			return true;
189 		}
190 	}
191 
192 	return false;
193 }
194 
195 static void free_preempt_fences(struct list_head *list)
196 {
197 	struct list_head *link, *next;
198 
199 	list_for_each_safe(link, next, list)
200 		xe_preempt_fence_free(to_preempt_fence_from_link(link));
201 }
202 
203 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
204 				unsigned int *count)
205 {
206 	lockdep_assert_held(&vm->lock);
207 	xe_vm_assert_held(vm);
208 
209 	if (*count >= vm->preempt.num_exec_queues)
210 		return 0;
211 
212 	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
213 		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
214 
215 		if (IS_ERR(pfence))
216 			return PTR_ERR(pfence);
217 
218 		list_move_tail(xe_preempt_fence_link(pfence), list);
219 	}
220 
221 	return 0;
222 }
223 
224 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
225 {
226 	struct xe_exec_queue *q;
227 
228 	xe_vm_assert_held(vm);
229 
230 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
231 		if (q->compute.pfence) {
232 			long timeout = dma_fence_wait(q->compute.pfence, false);
233 
234 			if (timeout < 0)
235 				return -ETIME;
236 			dma_fence_put(q->compute.pfence);
237 			q->compute.pfence = NULL;
238 		}
239 	}
240 
241 	return 0;
242 }
243 
244 static bool xe_vm_is_idle(struct xe_vm *vm)
245 {
246 	struct xe_exec_queue *q;
247 
248 	xe_vm_assert_held(vm);
249 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
250 		if (!xe_exec_queue_is_idle(q))
251 			return false;
252 	}
253 
254 	return true;
255 }
256 
257 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
258 {
259 	struct list_head *link;
260 	struct xe_exec_queue *q;
261 
262 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
263 		struct dma_fence *fence;
264 
265 		link = list->next;
266 		xe_assert(vm->xe, link != list);
267 
268 		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
269 					     q, q->compute.context,
270 					     ++q->compute.seqno);
271 		dma_fence_put(q->compute.pfence);
272 		q->compute.pfence = fence;
273 	}
274 }
275 
276 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
277 {
278 	struct xe_exec_queue *q;
279 	int err;
280 
281 	if (!vm->preempt.num_exec_queues)
282 		return 0;
283 
284 	err = xe_bo_lock(bo, true);
285 	if (err)
286 		return err;
287 
288 	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
289 	if (err)
290 		goto out_unlock;
291 
292 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
293 		if (q->compute.pfence) {
294 			dma_resv_add_fence(bo->ttm.base.resv,
295 					   q->compute.pfence,
296 					   DMA_RESV_USAGE_BOOKKEEP);
297 		}
298 
299 out_unlock:
300 	xe_bo_unlock(bo);
301 	return err;
302 }
303 
304 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
305 						struct drm_exec *exec)
306 {
307 	struct xe_exec_queue *q;
308 
309 	lockdep_assert_held(&vm->lock);
310 	xe_vm_assert_held(vm);
311 
312 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
313 		q->ops->resume(q);
314 
315 		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->compute.pfence,
316 					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
317 	}
318 }
319 
320 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
321 {
322 	struct drm_gpuvm_exec vm_exec = {
323 		.vm = &vm->gpuvm,
324 		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
325 		.num_fences = 1,
326 	};
327 	struct drm_exec *exec = &vm_exec.exec;
328 	struct dma_fence *pfence;
329 	int err;
330 	bool wait;
331 
332 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
333 
334 	down_write(&vm->lock);
335 	err = drm_gpuvm_exec_lock(&vm_exec);
336 	if (err)
337 		goto out_up_write;
338 
339 	pfence = xe_preempt_fence_create(q, q->compute.context,
340 					 ++q->compute.seqno);
341 	if (!pfence) {
342 		err = -ENOMEM;
343 		goto out_fini;
344 	}
345 
346 	list_add(&q->compute.link, &vm->preempt.exec_queues);
347 	++vm->preempt.num_exec_queues;
348 	q->compute.pfence = pfence;
349 
350 	down_read(&vm->userptr.notifier_lock);
351 
352 	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
353 				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
354 
355 	/*
356 	 * Check to see if a preemption on the VM or a userptr invalidation is
357 	 * in flight; if so, trigger this preempt fence to sync state with the
358 	 * other preempt fences on the VM.
359 	 */
360 	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
361 	if (wait)
362 		dma_fence_enable_sw_signaling(pfence);
363 
364 	up_read(&vm->userptr.notifier_lock);
365 
366 out_fini:
367 	drm_exec_fini(exec);
368 out_up_write:
369 	up_write(&vm->lock);
370 
371 	return err;
372 }
373 
374 /**
375  * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
376  * @vm: The VM.
377  * @q: The exec_queue
378  */
379 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
380 {
381 	if (!xe_vm_in_preempt_fence_mode(vm))
382 		return;
383 
384 	down_write(&vm->lock);
385 	list_del(&q->compute.link);
386 	--vm->preempt.num_exec_queues;
387 	if (q->compute.pfence) {
388 		dma_fence_enable_sw_signaling(q->compute.pfence);
389 		dma_fence_put(q->compute.pfence);
390 		q->compute.pfence = NULL;
391 	}
392 	up_write(&vm->lock);
393 }
394 
395 /**
396  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
397  * that need repinning.
398  * @vm: The VM.
399  *
400  * This function checks whether the VM has userptrs that need repinning,
401  * and provides a release-type barrier on the userptr.notifier_lock after
402  * checking.
403  *
404  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
405  */
406 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
407 {
408 	lockdep_assert_held_read(&vm->userptr.notifier_lock);
409 
410 	return (list_empty(&vm->userptr.repin_list) &&
411 		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
412 }
413 
414 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
415 
416 static void xe_vm_kill(struct xe_vm *vm)
417 {
418 	struct xe_exec_queue *q;
419 
420 	lockdep_assert_held(&vm->lock);
421 
422 	xe_vm_lock(vm, false);
423 	vm->flags |= XE_VM_FLAG_BANNED;
424 	trace_xe_vm_kill(vm);
425 
426 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
427 		q->ops->kill(q);
428 	xe_vm_unlock(vm);
429 
430 	/* TODO: Inform user the VM is banned */
431 }
432 
433 /**
434  * xe_vm_validate_should_retry() - Whether to retry after a validate error.
435  * @exec: The drm_exec object used for locking before validation.
436  * @err: The error returned from ttm_bo_validate().
437  * @end: A ktime_t cookie that should be set to 0 before first use and
438  * that should be reused on subsequent calls.
439  *
440  * With multiple active VMs, under memory pressure, it is possible that
441  * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
442  * Until ttm properly handles locking in such scenarios, the best thing the
443  * driver can do is retry with a timeout. Check if that is necessary, and
444  * if so unlock the drm_exec's objects while keeping the ticket to prepare
445  * for a rerun.
446  *
447  * Return: true if a retry after drm_exec_init() is recommended;
448  * false otherwise.
449  */
450 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
451 {
452 	ktime_t cur;
453 
454 	if (err != -ENOMEM)
455 		return false;
456 
457 	cur = ktime_get();
458 	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
459 	if (!ktime_before(cur, *end))
460 		return false;
461 
462 	msleep(20);
463 	return true;
464 }
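
/*
 * Illustrative sketch of the retry pattern this helper supports: keep a
 * single ktime_t cookie across attempts and rerun the whole drm_exec
 * locking/validation pass on -ENOMEM until the timeout expires. The
 * lock_and_validate() helper is a hypothetical stand-in for the caller's
 * drm_exec loop (see preempt_rebind_work_func() below for the real thing).
 *
 *	ktime_t end = 0;
 *	int err;
 *
 *	do {
 *		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *		err = lock_and_validate(&exec, vm);	// hypothetical
 *		drm_exec_fini(&exec);
 *	} while (err && xe_vm_validate_should_retry(&exec, err, &end));
 */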
465 
466 static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
467 {
468 	struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
469 	struct drm_gpuva *gpuva;
470 	int ret;
471 
472 	lockdep_assert_held(&vm->lock);
473 	drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
474 		list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
475 			       &vm->rebind_list);
476 
477 	ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
478 	if (ret)
479 		return ret;
480 
481 	vm_bo->evicted = false;
482 	return 0;
483 }
484 
485 /**
486  * xe_vm_validate_rebind() - Validate buffer objects and rebind vmas
487  * @vm: The vm for which we are rebinding.
488  * @exec: The struct drm_exec with the locked GEM objects.
489  * @num_fences: The number of fences to reserve for the operation, not
490  * including rebinds and validations.
491  *
492  * Validates all evicted gem objects and rebinds their vmas. Note that
493  * rebindings may cause evictions and hence the validation-rebind
494  * sequence is rerun until there are no more objects to validate.
495  *
496  * Return: 0 on success, negative error code on error. In particular,
497  * may return -EINTR or -ERESTARTSYS if interrupted, and -EDEADLK if
498  * the drm_exec transaction needs to be restarted.
499  */
500 int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
501 			  unsigned int num_fences)
502 {
503 	struct drm_gem_object *obj;
504 	unsigned long index;
505 	int ret;
506 
507 	do {
508 		ret = drm_gpuvm_validate(&vm->gpuvm, exec);
509 		if (ret)
510 			return ret;
511 
512 		ret = xe_vm_rebind(vm, false);
513 		if (ret)
514 			return ret;
515 	} while (!list_empty(&vm->gpuvm.evict.list));
516 
517 	drm_exec_for_each_locked_object(exec, index, obj) {
518 		ret = dma_resv_reserve_fences(obj->resv, num_fences);
519 		if (ret)
520 			return ret;
521 	}
522 
523 	return 0;
524 }
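
/*
 * Illustrative caller sketch: lock the VM's objects inside a drm_exec loop,
 * then validate and rebind while reserving fence slots for the fences the
 * caller adds afterwards. num_job_fences is a hypothetical count; on ww
 * contention drm_exec_retry_on_contention() restarts the loop.
 *
 *	drm_exec_until_all_locked(&exec) {
 *		err = drm_gpuvm_prepare_objects(&vm->gpuvm, &exec, 0);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err)
 *			break;
 *
 *		err = xe_vm_validate_rebind(vm, &exec, num_job_fences);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err)
 *			break;
 *	}
 */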
525 
526 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
527 				 bool *done)
528 {
529 	int err;
530 
531 	err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0);
532 	if (err)
533 		return err;
534 
535 	if (xe_vm_is_idle(vm)) {
536 		vm->preempt.rebind_deactivated = true;
537 		*done = true;
538 		return 0;
539 	}
540 
541 	if (!preempt_fences_waiting(vm)) {
542 		*done = true;
543 		return 0;
544 	}
545 
546 	err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0);
547 	if (err)
548 		return err;
549 
550 	err = wait_for_existing_preempt_fences(vm);
551 	if (err)
552 		return err;
553 
554 	/*
555 	 * Add validation and rebinding to the locking loop since both can
556 	 * cause evictions which may require blocking dma_resv locks.
557 	 * The fence reservation here is intended for the new preempt fences
558 	 * we attach at the end of the rebind work.
559 	 */
560 	return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
561 }
562 
563 static void preempt_rebind_work_func(struct work_struct *w)
564 {
565 	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
566 	struct drm_exec exec;
567 	unsigned int fence_count = 0;
568 	LIST_HEAD(preempt_fences);
569 	ktime_t end = 0;
570 	int err = 0;
571 	long wait;
572 	int __maybe_unused tries = 0;
573 
574 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
575 	trace_xe_vm_rebind_worker_enter(vm);
576 
577 	down_write(&vm->lock);
578 
579 	if (xe_vm_is_closed_or_banned(vm)) {
580 		up_write(&vm->lock);
581 		trace_xe_vm_rebind_worker_exit(vm);
582 		return;
583 	}
584 
585 retry:
586 	if (xe_vm_userptr_check_repin(vm)) {
587 		err = xe_vm_userptr_pin(vm);
588 		if (err)
589 			goto out_unlock_outer;
590 	}
591 
592 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
593 
594 	drm_exec_until_all_locked(&exec) {
595 		bool done = false;
596 
597 		err = xe_preempt_work_begin(&exec, vm, &done);
598 		drm_exec_retry_on_contention(&exec);
599 		if (err || done) {
600 			drm_exec_fini(&exec);
601 			if (err && xe_vm_validate_should_retry(&exec, err, &end))
602 				err = -EAGAIN;
603 
604 			goto out_unlock_outer;
605 		}
606 	}
607 
608 	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
609 	if (err)
610 		goto out_unlock;
611 
612 	err = xe_vm_rebind(vm, true);
613 	if (err)
614 		goto out_unlock;
615 
616 	/* Wait on rebinds and munmap style VM unbinds */
617 	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
618 				     DMA_RESV_USAGE_KERNEL,
619 				     false, MAX_SCHEDULE_TIMEOUT);
620 	if (wait <= 0) {
621 		err = -ETIME;
622 		goto out_unlock;
623 	}
624 
625 #define retry_required(__tries, __vm) \
626 	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
627 	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
628 	__xe_vm_userptr_needs_repin(__vm))
629 
630 	down_read(&vm->userptr.notifier_lock);
631 	if (retry_required(tries, vm)) {
632 		up_read(&vm->userptr.notifier_lock);
633 		err = -EAGAIN;
634 		goto out_unlock;
635 	}
636 
637 #undef retry_required
638 
639 	spin_lock(&vm->xe->ttm.lru_lock);
640 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
641 	spin_unlock(&vm->xe->ttm.lru_lock);
642 
643 	/* Point of no return. */
644 	arm_preempt_fences(vm, &preempt_fences);
645 	resume_and_reinstall_preempt_fences(vm, &exec);
646 	up_read(&vm->userptr.notifier_lock);
647 
648 out_unlock:
649 	drm_exec_fini(&exec);
650 out_unlock_outer:
651 	if (err == -EAGAIN) {
652 		trace_xe_vm_rebind_worker_retry(vm);
653 		goto retry;
654 	}
655 
656 	if (err) {
657 		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
658 		xe_vm_kill(vm);
659 	}
660 	up_write(&vm->lock);
661 
662 	free_preempt_fences(&preempt_fences);
663 
664 	trace_xe_vm_rebind_worker_exit(vm);
665 }
666 
667 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
668 				   const struct mmu_notifier_range *range,
669 				   unsigned long cur_seq)
670 {
671 	struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
672 	struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
673 	struct xe_vma *vma = &uvma->vma;
674 	struct xe_vm *vm = xe_vma_vm(vma);
675 	struct dma_resv_iter cursor;
676 	struct dma_fence *fence;
677 	long err;
678 
679 	xe_assert(vm->xe, xe_vma_is_userptr(vma));
680 	trace_xe_vma_userptr_invalidate(vma);
681 
682 	if (!mmu_notifier_range_blockable(range))
683 		return false;
684 
685 	down_write(&vm->userptr.notifier_lock);
686 	mmu_interval_set_seq(mni, cur_seq);
687 
688 	/* No need to stop gpu access if the userptr is not yet bound. */
689 	if (!userptr->initial_bind) {
690 		up_write(&vm->userptr.notifier_lock);
691 		return true;
692 	}
693 
694 	/*
695 	 * Tell exec and rebind worker they need to repin and rebind this
696 	 * userptr.
697 	 */
698 	if (!xe_vm_in_fault_mode(vm) &&
699 	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
700 		spin_lock(&vm->userptr.invalidated_lock);
701 		list_move_tail(&userptr->invalidate_link,
702 			       &vm->userptr.invalidated);
703 		spin_unlock(&vm->userptr.invalidated_lock);
704 	}
705 
706 	up_write(&vm->userptr.notifier_lock);
707 
708 	/*
709 	 * Preempt fences turn into schedule disables, pipeline these.
710 	 * Note that even in fault mode, we need to wait for binds and
711 	 * unbinds to complete, and those are attached as BOOKKEEP fences
712 	 * to the vm.
713 	 */
714 	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
715 			    DMA_RESV_USAGE_BOOKKEEP);
716 	dma_resv_for_each_fence_unlocked(&cursor, fence)
717 		dma_fence_enable_sw_signaling(fence);
718 	dma_resv_iter_end(&cursor);
719 
720 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
721 				    DMA_RESV_USAGE_BOOKKEEP,
722 				    false, MAX_SCHEDULE_TIMEOUT);
723 	XE_WARN_ON(err <= 0);
724 
725 	if (xe_vm_in_fault_mode(vm)) {
726 		err = xe_vm_invalidate_vma(vma);
727 		XE_WARN_ON(err);
728 	}
729 
730 	trace_xe_vma_userptr_invalidate_complete(vma);
731 
732 	return true;
733 }
734 
735 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
736 	.invalidate = vma_userptr_invalidate,
737 };
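
/*
 * Illustrative sketch of the mmu_interval sequence protocol the notifier
 * above participates in; program_ptes() is a hypothetical stand-in for the
 * page-table update performed elsewhere in the driver.
 *
 *	again:
 *	seq = mmu_interval_read_begin(&userptr->notifier);
 *	// pin pages, build and map the sg table
 *	down_read(&vm->userptr.notifier_lock);
 *	if (mmu_interval_read_retry(&userptr->notifier, seq)) {
 *		up_read(&vm->userptr.notifier_lock);
 *		goto again;			// invalidation ran, repin
 *	}
 *	program_ptes();				// hypothetical
 *	up_read(&vm->userptr.notifier_lock);
 */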
738 
739 int xe_vm_userptr_pin(struct xe_vm *vm)
740 {
741 	struct xe_userptr_vma *uvma, *next;
742 	int err = 0;
743 	LIST_HEAD(tmp_evict);
744 
745 	xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
746 	lockdep_assert_held_write(&vm->lock);
747 
748 	/* Collect invalidated userptrs */
749 	spin_lock(&vm->userptr.invalidated_lock);
750 	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
751 				 userptr.invalidate_link) {
752 		list_del_init(&uvma->userptr.invalidate_link);
753 		list_move_tail(&uvma->userptr.repin_link,
754 			       &vm->userptr.repin_list);
755 	}
756 	spin_unlock(&vm->userptr.invalidated_lock);
757 
758 	/* Pin and move to temporary list */
759 	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
760 				 userptr.repin_link) {
761 		err = xe_vma_userptr_pin_pages(uvma);
762 		if (err == -EFAULT) {
763 			list_del_init(&uvma->userptr.repin_link);
764 
765 			/* Wait for pending binds */
766 			xe_vm_lock(vm, false);
767 			dma_resv_wait_timeout(xe_vm_resv(vm),
768 					      DMA_RESV_USAGE_BOOKKEEP,
769 					      false, MAX_SCHEDULE_TIMEOUT);
770 
771 			err = xe_vm_invalidate_vma(&uvma->vma);
772 			xe_vm_unlock(vm);
773 			if (err)
774 				return err;
775 		} else {
776 			if (err < 0)
777 				return err;
778 
779 			list_del_init(&uvma->userptr.repin_link);
780 			list_move_tail(&uvma->vma.combined_links.rebind,
781 				       &vm->rebind_list);
782 		}
783 	}
784 
785 	return 0;
786 }
787 
788 /**
789  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
790  * that need repinning.
791  * @vm: The VM.
792  *
793  * This function does an advisory check for whether the VM has userptrs that
794  * need repinning.
795  *
796  * Return: 0 if there are no indications of userptrs needing repinning,
797  * -EAGAIN if there are.
798  */
799 int xe_vm_userptr_check_repin(struct xe_vm *vm)
800 {
801 	return (list_empty_careful(&vm->userptr.repin_list) &&
802 		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
803 }
804 
805 static struct dma_fence *
806 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
807 	       struct xe_sync_entry *syncs, u32 num_syncs,
808 	       bool first_op, bool last_op);
809 
810 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
811 {
812 	struct dma_fence *fence;
813 	struct xe_vma *vma, *next;
814 
815 	lockdep_assert_held(&vm->lock);
816 	if (xe_vm_in_lr_mode(vm) && !rebind_worker)
817 		return 0;
818 
819 	xe_vm_assert_held(vm);
820 	list_for_each_entry_safe(vma, next, &vm->rebind_list,
821 				 combined_links.rebind) {
822 		xe_assert(vm->xe, vma->tile_present);
823 
824 		list_del_init(&vma->combined_links.rebind);
825 		if (rebind_worker)
826 			trace_xe_vma_rebind_worker(vma);
827 		else
828 			trace_xe_vma_rebind_exec(vma);
829 		fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
830 		if (IS_ERR(fence))
831 			return PTR_ERR(fence);
832 		dma_fence_put(fence);
833 	}
834 
835 	return 0;
836 }
837 
838 static void xe_vma_free(struct xe_vma *vma)
839 {
840 	if (xe_vma_is_userptr(vma))
841 		kfree(to_userptr_vma(vma));
842 	else
843 		kfree(vma);
844 }
845 
846 #define VMA_CREATE_FLAG_READ_ONLY	BIT(0)
847 #define VMA_CREATE_FLAG_IS_NULL		BIT(1)
848 #define VMA_CREATE_FLAG_DUMPABLE	BIT(2)
849 
850 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
851 				    struct xe_bo *bo,
852 				    u64 bo_offset_or_userptr,
853 				    u64 start, u64 end,
854 				    u16 pat_index, unsigned int flags)
855 {
856 	struct xe_vma *vma;
857 	struct xe_tile *tile;
858 	u8 id;
859 	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
860 	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
861 	bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
862 
863 	xe_assert(vm->xe, start < end);
864 	xe_assert(vm->xe, end < vm->size);
865 
866 	/*
867 	 * Allocate and ensure that the xe_vma_is_userptr() return
868 	 * matches what was allocated.
869 	 */
870 	if (!bo && !is_null) {
871 		struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
872 
873 		if (!uvma)
874 			return ERR_PTR(-ENOMEM);
875 
876 		vma = &uvma->vma;
877 	} else {
878 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
879 		if (!vma)
880 			return ERR_PTR(-ENOMEM);
881 
882 		if (is_null)
883 			vma->gpuva.flags |= DRM_GPUVA_SPARSE;
884 		if (bo)
885 			vma->gpuva.gem.obj = &bo->ttm.base;
886 	}
887 
888 	INIT_LIST_HEAD(&vma->combined_links.rebind);
889 
890 	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
891 	vma->gpuva.vm = &vm->gpuvm;
892 	vma->gpuva.va.addr = start;
893 	vma->gpuva.va.range = end - start + 1;
894 	if (read_only)
895 		vma->gpuva.flags |= XE_VMA_READ_ONLY;
896 	if (dumpable)
897 		vma->gpuva.flags |= XE_VMA_DUMPABLE;
898 
899 	for_each_tile(tile, vm->xe, id)
900 		vma->tile_mask |= 0x1 << id;
901 
902 	if (GRAPHICS_VER(vm->xe) >= 20 || vm->xe->info.platform == XE_PVC)
903 		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
904 
905 	vma->pat_index = pat_index;
906 
907 	if (bo) {
908 		struct drm_gpuvm_bo *vm_bo;
909 
910 		xe_bo_assert_held(bo);
911 
912 		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
913 		if (IS_ERR(vm_bo)) {
914 			xe_vma_free(vma);
915 			return ERR_CAST(vm_bo);
916 		}
917 
918 		drm_gpuvm_bo_extobj_add(vm_bo);
919 		drm_gem_object_get(&bo->ttm.base);
920 		vma->gpuva.gem.offset = bo_offset_or_userptr;
921 		drm_gpuva_link(&vma->gpuva, vm_bo);
922 		drm_gpuvm_bo_put(vm_bo);
923 	} else /* userptr or null */ {
924 		if (!is_null) {
925 			struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
926 			u64 size = end - start + 1;
927 			int err;
928 
929 			INIT_LIST_HEAD(&userptr->invalidate_link);
930 			INIT_LIST_HEAD(&userptr->repin_link);
931 			vma->gpuva.gem.offset = bo_offset_or_userptr;
932 
933 			err = mmu_interval_notifier_insert(&userptr->notifier,
934 							   current->mm,
935 							   xe_vma_userptr(vma), size,
936 							   &vma_userptr_notifier_ops);
937 			if (err) {
938 				xe_vma_free(vma);
939 				return ERR_PTR(err);
940 			}
941 
942 			userptr->notifier_seq = LONG_MAX;
943 		}
944 
945 		xe_vm_get(vm);
946 	}
947 
948 	return vma;
949 }
950 
951 static void xe_vma_destroy_late(struct xe_vma *vma)
952 {
953 	struct xe_vm *vm = xe_vma_vm(vma);
954 	struct xe_device *xe = vm->xe;
955 	bool read_only = xe_vma_read_only(vma);
956 
957 	if (vma->ufence) {
958 		xe_sync_ufence_put(vma->ufence);
959 		vma->ufence = NULL;
960 	}
961 
962 	if (xe_vma_is_userptr(vma)) {
963 		struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
964 
965 		if (userptr->sg) {
966 			dma_unmap_sgtable(xe->drm.dev,
967 					  userptr->sg,
968 					  read_only ? DMA_TO_DEVICE :
969 					  DMA_BIDIRECTIONAL, 0);
970 			sg_free_table(userptr->sg);
971 			userptr->sg = NULL;
972 		}
973 
974 		/*
975 		 * Since userptr pages are not pinned, we can't remove
976 		 * the notifier until we're sure the GPU is not accessing
977 		 * them anymore
978 		 */
979 		mmu_interval_notifier_remove(&userptr->notifier);
980 		xe_vm_put(vm);
981 	} else if (xe_vma_is_null(vma)) {
982 		xe_vm_put(vm);
983 	} else {
984 		xe_bo_put(xe_vma_bo(vma));
985 	}
986 
987 	xe_vma_free(vma);
988 }
989 
990 static void vma_destroy_work_func(struct work_struct *w)
991 {
992 	struct xe_vma *vma =
993 		container_of(w, struct xe_vma, destroy_work);
994 
995 	xe_vma_destroy_late(vma);
996 }
997 
998 static void vma_destroy_cb(struct dma_fence *fence,
999 			   struct dma_fence_cb *cb)
1000 {
1001 	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1002 
1003 	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1004 	queue_work(system_unbound_wq, &vma->destroy_work);
1005 }
1006 
1007 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1008 {
1009 	struct xe_vm *vm = xe_vma_vm(vma);
1010 
1011 	lockdep_assert_held_write(&vm->lock);
1012 	xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
1013 
1014 	if (xe_vma_is_userptr(vma)) {
1015 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
1016 
1017 		spin_lock(&vm->userptr.invalidated_lock);
1018 		list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
1019 		spin_unlock(&vm->userptr.invalidated_lock);
1020 	} else if (!xe_vma_is_null(vma)) {
1021 		xe_bo_assert_held(xe_vma_bo(vma));
1022 
1023 		drm_gpuva_unlink(&vma->gpuva);
1024 	}
1025 
1026 	xe_vm_assert_held(vm);
1027 	if (fence) {
1028 		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1029 						 vma_destroy_cb);
1030 
1031 		if (ret) {
1032 			XE_WARN_ON(ret != -ENOENT);
1033 			xe_vma_destroy_late(vma);
1034 		}
1035 	} else {
1036 		xe_vma_destroy_late(vma);
1037 	}
1038 }
1039 
1040 /**
1041  * xe_vm_lock_vma() - drm_exec utility to lock a vma
1042  * @exec: The drm_exec object we're currently locking for.
1043  * @vma: The vma for which we want to lock the vm resv and any attached
1044  * object's resv.
1045  *
1046  * Return: 0 on success, negative error code on error. In particular
1047  * may return -EDEADLK on WW transaction contention and -EINTR if
1048  * an interruptible wait is terminated by a signal.
1049  */
1050 int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
1051 {
1052 	struct xe_vm *vm = xe_vma_vm(vma);
1053 	struct xe_bo *bo = xe_vma_bo(vma);
1054 	int err;
1055 
1056 	XE_WARN_ON(!vm);
1057 
1058 	err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
1059 	if (!err && bo && !bo->vm)
1060 		err = drm_exec_lock_obj(exec, &bo->ttm.base);
1061 
1062 	return err;
1063 }
1064 
1065 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1066 {
1067 	struct drm_exec exec;
1068 	int err;
1069 
1070 	drm_exec_init(&exec, 0, 0);
1071 	drm_exec_until_all_locked(&exec) {
1072 		err = xe_vm_lock_vma(&exec, vma);
1073 		drm_exec_retry_on_contention(&exec);
1074 		if (XE_WARN_ON(err))
1075 			break;
1076 	}
1077 
1078 	xe_vma_destroy(vma, NULL);
1079 
1080 	drm_exec_fini(&exec);
1081 }
1082 
1083 struct xe_vma *
1084 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1085 {
1086 	struct drm_gpuva *gpuva;
1087 
1088 	lockdep_assert_held(&vm->lock);
1089 
1090 	if (xe_vm_is_closed_or_banned(vm))
1091 		return NULL;
1092 
1093 	xe_assert(vm->xe, start + range <= vm->size);
1094 
1095 	gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1096 
1097 	return gpuva ? gpuva_to_vma(gpuva) : NULL;
1098 }
1099 
1100 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1101 {
1102 	int err;
1103 
1104 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1105 	lockdep_assert_held(&vm->lock);
1106 
1107 	mutex_lock(&vm->snap_mutex);
1108 	err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1109 	mutex_unlock(&vm->snap_mutex);
1110 	XE_WARN_ON(err);	/* Shouldn't be possible */
1111 
1112 	return err;
1113 }
1114 
1115 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1116 {
1117 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1118 	lockdep_assert_held(&vm->lock);
1119 
1120 	mutex_lock(&vm->snap_mutex);
1121 	drm_gpuva_remove(&vma->gpuva);
1122 	mutex_unlock(&vm->snap_mutex);
1123 	if (vm->usm.last_fault_vma == vma)
1124 		vm->usm.last_fault_vma = NULL;
1125 }
1126 
1127 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1128 {
1129 	struct xe_vma_op *op;
1130 
1131 	op = kzalloc(sizeof(*op), GFP_KERNEL);
1132 
1133 	if (unlikely(!op))
1134 		return NULL;
1135 
1136 	return &op->base;
1137 }
1138 
1139 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1140 
1141 static const struct drm_gpuvm_ops gpuvm_ops = {
1142 	.op_alloc = xe_vm_op_alloc,
1143 	.vm_bo_validate = xe_gpuvm_validate,
1144 	.vm_free = xe_vm_free,
1145 };
1146 
1147 static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
1148 {
1149 	u64 pte = 0;
1150 
1151 	if (pat_index & BIT(0))
1152 		pte |= XE_PPGTT_PTE_PAT0;
1153 
1154 	if (pat_index & BIT(1))
1155 		pte |= XE_PPGTT_PTE_PAT1;
1156 
1157 	return pte;
1158 }
1159 
1160 static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
1161 				u32 pt_level)
1162 {
1163 	u64 pte = 0;
1164 
1165 	if (pat_index & BIT(0))
1166 		pte |= XE_PPGTT_PTE_PAT0;
1167 
1168 	if (pat_index & BIT(1))
1169 		pte |= XE_PPGTT_PTE_PAT1;
1170 
1171 	if (pat_index & BIT(2)) {
1172 		if (pt_level)
1173 			pte |= XE_PPGTT_PDE_PDPE_PAT2;
1174 		else
1175 			pte |= XE_PPGTT_PTE_PAT2;
1176 	}
1177 
1178 	if (pat_index & BIT(3))
1179 		pte |= XELPG_PPGTT_PTE_PAT3;
1180 
1181 	if (pat_index & (BIT(4)))
1182 		pte |= XE2_PPGTT_PTE_PAT4;
1183 
1184 	return pte;
1185 }
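
/*
 * Worked example of the decomposition above: pat_index = 0b00101 (5) sets
 * XE_PPGTT_PTE_PAT0 and, depending on pt_level, XE_PPGTT_PTE_PAT2 or
 * XE_PPGTT_PDE_PDPE_PAT2, while PAT1, PAT3 and PAT4 stay clear. Which
 * pat_index values are meaningful depends on the platform's PAT table.
 */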
1186 
1187 static u64 pte_encode_ps(u32 pt_level)
1188 {
1189 	XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
1190 
1191 	if (pt_level == 1)
1192 		return XE_PDE_PS_2M;
1193 	else if (pt_level == 2)
1194 		return XE_PDPE_PS_1G;
1195 
1196 	return 0;
1197 }
1198 
1199 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
1200 			      const u16 pat_index)
1201 {
1202 	struct xe_device *xe = xe_bo_device(bo);
1203 	u64 pde;
1204 
1205 	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1206 	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1207 	pde |= pde_encode_pat_index(xe, pat_index);
1208 
1209 	return pde;
1210 }
1211 
1212 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1213 			      u16 pat_index, u32 pt_level)
1214 {
1215 	struct xe_device *xe = xe_bo_device(bo);
1216 	u64 pte;
1217 
1218 	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1219 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1220 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1221 	pte |= pte_encode_ps(pt_level);
1222 
1223 	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1224 		pte |= XE_PPGTT_PTE_DM;
1225 
1226 	return pte;
1227 }
1228 
1229 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1230 			       u16 pat_index, u32 pt_level)
1231 {
1232 	struct xe_device *xe = xe_vma_vm(vma)->xe;
1233 
1234 	pte |= XE_PAGE_PRESENT;
1235 
1236 	if (likely(!xe_vma_read_only(vma)))
1237 		pte |= XE_PAGE_RW;
1238 
1239 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1240 	pte |= pte_encode_ps(pt_level);
1241 
1242 	if (unlikely(xe_vma_is_null(vma)))
1243 		pte |= XE_PTE_NULL;
1244 
1245 	return pte;
1246 }
1247 
1248 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1249 				u16 pat_index,
1250 				u32 pt_level, bool devmem, u64 flags)
1251 {
1252 	u64 pte;
1253 
1254 	/* Avoid passing random bits directly as flags */
1255 	xe_assert(xe, !(flags & ~XE_PTE_PS64));
1256 
1257 	pte = addr;
1258 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1259 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1260 	pte |= pte_encode_ps(pt_level);
1261 
1262 	if (devmem)
1263 		pte |= XE_PPGTT_PTE_DM;
1264 
1265 	pte |= flags;
1266 
1267 	return pte;
1268 }
1269 
1270 static const struct xe_pt_ops xelp_pt_ops = {
1271 	.pte_encode_bo = xelp_pte_encode_bo,
1272 	.pte_encode_vma = xelp_pte_encode_vma,
1273 	.pte_encode_addr = xelp_pte_encode_addr,
1274 	.pde_encode_bo = xelp_pde_encode_bo,
1275 };
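
/*
 * Illustrative sketch: encoding a leaf PTE for a 2 MiB device-memory page
 * through the ops table above. The address and pat_index are made-up
 * example values.
 *
 *	u64 pte = vm->pt_ops->pte_encode_addr(vm->xe, 0x1800000, pat_index,
 *					      1, true, 0);
 *
 * The result carries XE_PAGE_PRESENT | XE_PAGE_RW, the PAT bits for
 * pat_index, XE_PDE_PS_2M from pte_encode_ps(1) and XE_PPGTT_PTE_DM.
 */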
1276 
1277 static void vm_destroy_work_func(struct work_struct *w);
1278 
1279 /**
1280  * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
1281  * given tile and vm.
1282  * @xe: xe device.
1283  * @tile: tile to set up for.
1284  * @vm: vm to set up for.
1285  *
1286  * Sets up a pagetable tree with one page-table per level and a single
1287  * leaf PTE. All pagetable entries point to the single page-table or,
1288  * for MAX_HUGEPTE_LEVEL, a NULL huge PTE returning 0 on read and
1289  * writes become NOPs.
1290  *
1291  * Return: 0 on success, negative error code on error.
1292  */
1293 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1294 				struct xe_vm *vm)
1295 {
1296 	u8 id = tile->id;
1297 	int i;
1298 
1299 	for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
1300 		vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
1301 		if (IS_ERR(vm->scratch_pt[id][i]))
1302 			return PTR_ERR(vm->scratch_pt[id][i]);
1303 
1304 		xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
1305 	}
1306 
1307 	return 0;
1308 }
1309 
1310 static void xe_vm_free_scratch(struct xe_vm *vm)
1311 {
1312 	struct xe_tile *tile;
1313 	u8 id;
1314 
1315 	if (!xe_vm_has_scratch(vm))
1316 		return;
1317 
1318 	for_each_tile(tile, vm->xe, id) {
1319 		u32 i;
1320 
1321 		if (!vm->pt_root[id])
1322 			continue;
1323 
1324 		for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
1325 			if (vm->scratch_pt[id][i])
1326 				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
1327 	}
1328 }
1329 
1330 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1331 {
1332 	struct drm_gem_object *vm_resv_obj;
1333 	struct xe_vm *vm;
1334 	int err, number_tiles = 0;
1335 	struct xe_tile *tile;
1336 	u8 id;
1337 
1338 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1339 	if (!vm)
1340 		return ERR_PTR(-ENOMEM);
1341 
1342 	vm->xe = xe;
1343 
1344 	vm->size = 1ull << xe->info.va_bits;
1345 
1346 	vm->flags = flags;
1347 
1348 	init_rwsem(&vm->lock);
1349 	mutex_init(&vm->snap_mutex);
1350 
1351 	INIT_LIST_HEAD(&vm->rebind_list);
1352 
1353 	INIT_LIST_HEAD(&vm->userptr.repin_list);
1354 	INIT_LIST_HEAD(&vm->userptr.invalidated);
1355 	init_rwsem(&vm->userptr.notifier_lock);
1356 	spin_lock_init(&vm->userptr.invalidated_lock);
1357 
1358 	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1359 
1360 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
1361 	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
1362 
1363 	for_each_tile(tile, xe, id)
1364 		xe_range_fence_tree_init(&vm->rftree[id]);
1365 
1366 	vm->pt_ops = &xelp_pt_ops;
1367 
1368 	if (!(flags & XE_VM_FLAG_MIGRATION))
1369 		xe_device_mem_access_get(xe);
1370 
1371 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1372 	if (!vm_resv_obj) {
1373 		err = -ENOMEM;
1374 		goto err_no_resv;
1375 	}
1376 
1377 	drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1378 		       vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
1379 
1380 	drm_gem_object_put(vm_resv_obj);
1381 
1382 	err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
1383 	if (err)
1384 		goto err_close;
1385 
1386 	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1387 		vm->flags |= XE_VM_FLAG_64K;
1388 
1389 	for_each_tile(tile, xe, id) {
1390 		if (flags & XE_VM_FLAG_MIGRATION &&
1391 		    tile->id != XE_VM_FLAG_TILE_ID(flags))
1392 			continue;
1393 
1394 		vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1395 		if (IS_ERR(vm->pt_root[id])) {
1396 			err = PTR_ERR(vm->pt_root[id]);
1397 			vm->pt_root[id] = NULL;
1398 			goto err_unlock_close;
1399 		}
1400 	}
1401 
1402 	if (xe_vm_has_scratch(vm)) {
1403 		for_each_tile(tile, xe, id) {
1404 			if (!vm->pt_root[id])
1405 				continue;
1406 
1407 			err = xe_vm_create_scratch(xe, tile, vm);
1408 			if (err)
1409 				goto err_unlock_close;
1410 		}
1411 		vm->batch_invalidate_tlb = true;
1412 	}
1413 
1414 	if (flags & XE_VM_FLAG_LR_MODE) {
1415 		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1416 		vm->flags |= XE_VM_FLAG_LR_MODE;
1417 		vm->batch_invalidate_tlb = false;
1418 	}
1419 
1420 	/* Fill pt_root after allocating scratch tables */
1421 	for_each_tile(tile, xe, id) {
1422 		if (!vm->pt_root[id])
1423 			continue;
1424 
1425 		xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1426 	}
1427 	dma_resv_unlock(xe_vm_resv(vm));
1428 
1429 	/* Kernel migration VM shouldn't have a circular loop. */
1430 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
1431 		for_each_tile(tile, xe, id) {
1432 			struct xe_gt *gt = tile->primary_gt;
1433 			struct xe_vm *migrate_vm;
1434 			struct xe_exec_queue *q;
1435 			u32 create_flags = EXEC_QUEUE_FLAG_VM;
1436 
1437 			if (!vm->pt_root[id])
1438 				continue;
1439 
1440 			migrate_vm = xe_migrate_get_vm(tile->migrate);
1441 			q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1442 						       XE_ENGINE_CLASS_COPY,
1443 						       create_flags);
1444 			xe_vm_put(migrate_vm);
1445 			if (IS_ERR(q)) {
1446 				err = PTR_ERR(q);
1447 				goto err_close;
1448 			}
1449 			vm->q[id] = q;
1450 			number_tiles++;
1451 		}
1452 	}
1453 
1454 	if (number_tiles > 1)
1455 		vm->composite_fence_ctx = dma_fence_context_alloc(1);
1456 
1457 	mutex_lock(&xe->usm.lock);
1458 	if (flags & XE_VM_FLAG_FAULT_MODE)
1459 		xe->usm.num_vm_in_fault_mode++;
1460 	else if (!(flags & XE_VM_FLAG_MIGRATION))
1461 		xe->usm.num_vm_in_non_fault_mode++;
1462 	mutex_unlock(&xe->usm.lock);
1463 
1464 	trace_xe_vm_create(vm);
1465 
1466 	return vm;
1467 
1468 err_unlock_close:
1469 	dma_resv_unlock(xe_vm_resv(vm));
1470 err_close:
1471 	xe_vm_close_and_put(vm);
1472 	return ERR_PTR(err);
1473 
1474 err_no_resv:
1475 	mutex_destroy(&vm->snap_mutex);
1476 	for_each_tile(tile, xe, id)
1477 		xe_range_fence_tree_fini(&vm->rftree[id]);
1478 	kfree(vm);
1479 	if (!(flags & XE_VM_FLAG_MIGRATION))
1480 		xe_device_mem_access_put(xe);
1481 	return ERR_PTR(err);
1482 }
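
/*
 * Illustrative in-driver usage sketch: create a long-running VM with
 * scratch pages and tear it down again once no longer needed.
 *
 *	struct xe_vm *vm;
 *
 *	vm = xe_vm_create(xe, XE_VM_FLAG_LR_MODE | XE_VM_FLAG_SCRATCH_PAGE);
 *	if (IS_ERR(vm))
 *		return PTR_ERR(vm);
 *	...
 *	xe_vm_close_and_put(vm);
 */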
1483 
1484 static void xe_vm_close(struct xe_vm *vm)
1485 {
1486 	down_write(&vm->lock);
1487 	vm->size = 0;
1488 	up_write(&vm->lock);
1489 }
1490 
1491 void xe_vm_close_and_put(struct xe_vm *vm)
1492 {
1493 	LIST_HEAD(contested);
1494 	struct xe_device *xe = vm->xe;
1495 	struct xe_tile *tile;
1496 	struct xe_vma *vma, *next_vma;
1497 	struct drm_gpuva *gpuva, *next;
1498 	u8 id;
1499 
1500 	xe_assert(xe, !vm->preempt.num_exec_queues);
1501 
1502 	xe_vm_close(vm);
1503 	if (xe_vm_in_preempt_fence_mode(vm))
1504 		flush_work(&vm->preempt.rebind_work);
1505 
1506 	down_write(&vm->lock);
1507 	for_each_tile(tile, xe, id) {
1508 		if (vm->q[id])
1509 			xe_exec_queue_last_fence_put(vm->q[id], vm);
1510 	}
1511 	up_write(&vm->lock);
1512 
1513 	for_each_tile(tile, xe, id) {
1514 		if (vm->q[id]) {
1515 			xe_exec_queue_kill(vm->q[id]);
1516 			xe_exec_queue_put(vm->q[id]);
1517 			vm->q[id] = NULL;
1518 		}
1519 	}
1520 
1521 	down_write(&vm->lock);
1522 	xe_vm_lock(vm, false);
1523 	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1524 		vma = gpuva_to_vma(gpuva);
1525 
1526 		if (xe_vma_has_no_bo(vma)) {
1527 			down_read(&vm->userptr.notifier_lock);
1528 			vma->gpuva.flags |= XE_VMA_DESTROYED;
1529 			up_read(&vm->userptr.notifier_lock);
1530 		}
1531 
1532 		xe_vm_remove_vma(vm, vma);
1533 
1534 		/* easy case, remove from VMA? */
1535 		if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1536 			list_del_init(&vma->combined_links.rebind);
1537 			xe_vma_destroy(vma, NULL);
1538 			continue;
1539 		}
1540 
1541 		list_move_tail(&vma->combined_links.destroy, &contested);
1542 		vma->gpuva.flags |= XE_VMA_DESTROYED;
1543 	}
1544 
1545 	/*
1546 	 * All vm operations will add shared fences to resv.
1547 	 * The only exception is eviction for a shared object,
1548 	 * but even so, the unbind when evicted would still
1549 	 * install a fence to resv. Hence it's safe to
1550 	 * destroy the pagetables immediately.
1551 	 */
1552 	xe_vm_free_scratch(vm);
1553 
1554 	for_each_tile(tile, xe, id) {
1555 		if (vm->pt_root[id]) {
1556 			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1557 			vm->pt_root[id] = NULL;
1558 		}
1559 	}
1560 	xe_vm_unlock(vm);
1561 
1562 	/*
1563 	 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1564 	 * Since we hold a refcount to the bo, we can remove and free
1565 	 * the members safely without locking.
1566 	 */
1567 	list_for_each_entry_safe(vma, next_vma, &contested,
1568 				 combined_links.destroy) {
1569 		list_del_init(&vma->combined_links.destroy);
1570 		xe_vma_destroy_unlocked(vma);
1571 	}
1572 
1573 	up_write(&vm->lock);
1574 
1575 	mutex_lock(&xe->usm.lock);
1576 	if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1577 		xe->usm.num_vm_in_fault_mode--;
1578 	else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1579 		xe->usm.num_vm_in_non_fault_mode--;
1580 
1581 	if (vm->usm.asid) {
1582 		void *lookup;
1583 
1584 		xe_assert(xe, xe->info.has_asid);
1585 		xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
1586 
1587 		lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1588 		xe_assert(xe, lookup == vm);
1589 	}
1590 	mutex_unlock(&xe->usm.lock);
1591 
1592 	for_each_tile(tile, xe, id)
1593 		xe_range_fence_tree_fini(&vm->rftree[id]);
1594 
1595 	xe_vm_put(vm);
1596 }
1597 
1598 static void vm_destroy_work_func(struct work_struct *w)
1599 {
1600 	struct xe_vm *vm =
1601 		container_of(w, struct xe_vm, destroy_work);
1602 	struct xe_device *xe = vm->xe;
1603 	struct xe_tile *tile;
1604 	u8 id;
1605 
1606 	/* xe_vm_close_and_put was not called? */
1607 	xe_assert(xe, !vm->size);
1608 
1609 	if (xe_vm_in_preempt_fence_mode(vm))
1610 		flush_work(&vm->preempt.rebind_work);
1611 
1612 	mutex_destroy(&vm->snap_mutex);
1613 
1614 	if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1615 		xe_device_mem_access_put(xe);
1616 
1617 	for_each_tile(tile, xe, id)
1618 		XE_WARN_ON(vm->pt_root[id]);
1619 
1620 	trace_xe_vm_free(vm);
1621 	kfree(vm);
1622 }
1623 
1624 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1625 {
1626 	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1627 
1628 	/* To destroy the VM we need to be able to sleep */
1629 	queue_work(system_unbound_wq, &vm->destroy_work);
1630 }
1631 
1632 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1633 {
1634 	struct xe_vm *vm;
1635 
1636 	mutex_lock(&xef->vm.lock);
1637 	vm = xa_load(&xef->vm.xa, id);
1638 	if (vm)
1639 		xe_vm_get(vm);
1640 	mutex_unlock(&xef->vm.lock);
1641 
1642 	return vm;
1643 }
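
/*
 * Illustrative sketch of the lookup/reference pattern: a successful
 * xe_vm_lookup() takes a reference that must be balanced with xe_vm_put()
 * once the caller is done with the VM.
 *
 *	vm = xe_vm_lookup(xef, args->vm_id);
 *	if (!vm)
 *		return -ENOENT;
 *	// ... use vm under the appropriate locks ...
 *	xe_vm_put(vm);
 */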
1644 
1645 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1646 {
1647 	return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
1648 					 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
1649 }
1650 
1651 static struct xe_exec_queue *
1652 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
1653 {
1654 	return q ? q : vm->q[0];
1655 }
1656 
1657 static struct dma_fence *
1658 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1659 		 struct xe_sync_entry *syncs, u32 num_syncs,
1660 		 bool first_op, bool last_op)
1661 {
1662 	struct xe_vm *vm = xe_vma_vm(vma);
1663 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1664 	struct xe_tile *tile;
1665 	struct dma_fence *fence = NULL;
1666 	struct dma_fence **fences = NULL;
1667 	struct dma_fence_array *cf = NULL;
1668 	int cur_fence = 0, i;
1669 	int number_tiles = hweight8(vma->tile_present);
1670 	int err;
1671 	u8 id;
1672 
1673 	trace_xe_vma_unbind(vma);
1674 
1675 	if (vma->ufence) {
1676 		struct xe_user_fence * const f = vma->ufence;
1677 
1678 		if (!xe_sync_ufence_get_status(f))
1679 			return ERR_PTR(-EBUSY);
1680 
1681 		vma->ufence = NULL;
1682 		xe_sync_ufence_put(f);
1683 	}
1684 
1685 	if (number_tiles > 1) {
1686 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1687 				       GFP_KERNEL);
1688 		if (!fences)
1689 			return ERR_PTR(-ENOMEM);
1690 	}
1691 
1692 	for_each_tile(tile, vm->xe, id) {
1693 		if (!(vma->tile_present & BIT(id)))
1694 			goto next;
1695 
1696 		fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
1697 					   first_op ? syncs : NULL,
1698 					   first_op ? num_syncs : 0);
1699 		if (IS_ERR(fence)) {
1700 			err = PTR_ERR(fence);
1701 			goto err_fences;
1702 		}
1703 
1704 		if (fences)
1705 			fences[cur_fence++] = fence;
1706 
1707 next:
1708 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1709 			q = list_next_entry(q, multi_gt_list);
1710 	}
1711 
1712 	if (fences) {
1713 		cf = dma_fence_array_create(number_tiles, fences,
1714 					    vm->composite_fence_ctx,
1715 					    vm->composite_fence_seqno++,
1716 					    false);
1717 		if (!cf) {
1718 			--vm->composite_fence_seqno;
1719 			err = -ENOMEM;
1720 			goto err_fences;
1721 		}
1722 	}
1723 
1724 	fence = cf ? &cf->base : !fence ?
1725 		xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
1726 	if (last_op) {
1727 		for (i = 0; i < num_syncs; i++)
1728 			xe_sync_entry_signal(&syncs[i], NULL, fence);
1729 	}
1730 
1731 	return fence;
1732 
1733 err_fences:
1734 	if (fences) {
1735 		while (cur_fence)
1736 			dma_fence_put(fences[--cur_fence]);
1737 		kfree(fences);
1738 	}
1739 
1740 	return ERR_PTR(err);
1741 }
1742 
1743 static struct dma_fence *
1744 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1745 	       struct xe_sync_entry *syncs, u32 num_syncs,
1746 	       bool first_op, bool last_op)
1747 {
1748 	struct xe_tile *tile;
1749 	struct dma_fence *fence;
1750 	struct dma_fence **fences = NULL;
1751 	struct dma_fence_array *cf = NULL;
1752 	struct xe_vm *vm = xe_vma_vm(vma);
1753 	int cur_fence = 0, i;
1754 	int number_tiles = hweight8(vma->tile_mask);
1755 	int err;
1756 	u8 id;
1757 
1758 	trace_xe_vma_bind(vma);
1759 
1760 	if (number_tiles > 1) {
1761 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1762 				       GFP_KERNEL);
1763 		if (!fences)
1764 			return ERR_PTR(-ENOMEM);
1765 	}
1766 
1767 	for_each_tile(tile, vm->xe, id) {
1768 		if (!(vma->tile_mask & BIT(id)))
1769 			goto next;
1770 
1771 		fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
1772 					 first_op ? syncs : NULL,
1773 					 first_op ? num_syncs : 0,
1774 					 vma->tile_present & BIT(id));
1775 		if (IS_ERR(fence)) {
1776 			err = PTR_ERR(fence);
1777 			goto err_fences;
1778 		}
1779 
1780 		if (fences)
1781 			fences[cur_fence++] = fence;
1782 
1783 next:
1784 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1785 			q = list_next_entry(q, multi_gt_list);
1786 	}
1787 
1788 	if (fences) {
1789 		cf = dma_fence_array_create(number_tiles, fences,
1790 					    vm->composite_fence_ctx,
1791 					    vm->composite_fence_seqno++,
1792 					    false);
1793 		if (!cf) {
1794 			--vm->composite_fence_seqno;
1795 			err = -ENOMEM;
1796 			goto err_fences;
1797 		}
1798 	}
1799 
1800 	if (last_op) {
1801 		for (i = 0; i < num_syncs; i++)
1802 			xe_sync_entry_signal(&syncs[i], NULL,
1803 					     cf ? &cf->base : fence);
1804 	}
1805 
1806 	return cf ? &cf->base : fence;
1807 
1808 err_fences:
1809 	if (fences) {
1810 		while (cur_fence)
1811 			dma_fence_put(fences[--cur_fence]);
1812 		kfree(fences);
1813 	}
1814 
1815 	return ERR_PTR(err);
1816 }
1817 
1818 static struct xe_user_fence *
1819 find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
1820 {
1821 	unsigned int i;
1822 
1823 	for (i = 0; i < num_syncs; i++) {
1824 		struct xe_sync_entry *e = &syncs[i];
1825 
1826 		if (xe_sync_is_ufence(e))
1827 			return xe_sync_ufence_get(e);
1828 	}
1829 
1830 	return NULL;
1831 }
1832 
1833 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1834 			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1835 			u32 num_syncs, bool immediate, bool first_op,
1836 			bool last_op)
1837 {
1838 	struct dma_fence *fence;
1839 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1840 	struct xe_user_fence *ufence;
1841 
1842 	xe_vm_assert_held(vm);
1843 
1844 	ufence = find_ufence_get(syncs, num_syncs);
1845 	if (vma->ufence && ufence)
1846 		xe_sync_ufence_put(vma->ufence);
1847 
1848 	vma->ufence = ufence ?: vma->ufence;
1849 
1850 	if (immediate) {
1851 		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
1852 				       last_op);
1853 		if (IS_ERR(fence))
1854 			return PTR_ERR(fence);
1855 	} else {
1856 		int i;
1857 
1858 		xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1859 
1860 		fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1861 		if (last_op) {
1862 			for (i = 0; i < num_syncs; i++)
1863 				xe_sync_entry_signal(&syncs[i], NULL, fence);
1864 		}
1865 	}
1866 
1867 	if (last_op)
1868 		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1869 	dma_fence_put(fence);
1870 
1871 	return 0;
1872 }
1873 
1874 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
1875 		      struct xe_bo *bo, struct xe_sync_entry *syncs,
1876 		      u32 num_syncs, bool immediate, bool first_op,
1877 		      bool last_op)
1878 {
1879 	int err;
1880 
1881 	xe_vm_assert_held(vm);
1882 	xe_bo_assert_held(bo);
1883 
1884 	if (bo && immediate) {
1885 		err = xe_bo_validate(bo, vm, true);
1886 		if (err)
1887 			return err;
1888 	}
1889 
1890 	return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op,
1891 			    last_op);
1892 }
1893 
1894 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1895 			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1896 			u32 num_syncs, bool first_op, bool last_op)
1897 {
1898 	struct dma_fence *fence;
1899 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1900 
1901 	xe_vm_assert_held(vm);
1902 	xe_bo_assert_held(xe_vma_bo(vma));
1903 
1904 	fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
1905 	if (IS_ERR(fence))
1906 		return PTR_ERR(fence);
1907 
1908 	xe_vma_destroy(vma, fence);
1909 	if (last_op)
1910 		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1911 	dma_fence_put(fence);
1912 
1913 	return 0;
1914 }
1915 
1916 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
1917 				    DRM_XE_VM_CREATE_FLAG_LR_MODE | \
1918 				    DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1919 
1920 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1921 		       struct drm_file *file)
1922 {
1923 	struct xe_device *xe = to_xe_device(dev);
1924 	struct xe_file *xef = to_xe_file(file);
1925 	struct drm_xe_vm_create *args = data;
1926 	struct xe_tile *tile;
1927 	struct xe_vm *vm;
1928 	u32 id, asid;
1929 	int err;
1930 	u32 flags = 0;
1931 
1932 	if (XE_IOCTL_DBG(xe, args->extensions))
1933 		return -EINVAL;
1934 
1935 	if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1936 		args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
1937 
1938 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1939 			 !xe->info.has_usm))
1940 		return -EINVAL;
1941 
1942 	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1943 		return -EINVAL;
1944 
1945 	if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1946 		return -EINVAL;
1947 
1948 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
1949 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1950 		return -EINVAL;
1951 
1952 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
1953 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1954 		return -EINVAL;
1955 
1956 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1957 			 xe_device_in_non_fault_mode(xe)))
1958 		return -EINVAL;
1959 
1960 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
1961 			 xe_device_in_fault_mode(xe)))
1962 		return -EINVAL;
1963 
1964 	if (XE_IOCTL_DBG(xe, args->extensions))
1965 		return -EINVAL;
1966 
1967 	if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
1968 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
1969 	if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
1970 		flags |= XE_VM_FLAG_LR_MODE;
1971 	if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1972 		flags |= XE_VM_FLAG_FAULT_MODE;
1973 
1974 	vm = xe_vm_create(xe, flags);
1975 	if (IS_ERR(vm))
1976 		return PTR_ERR(vm);
1977 
1978 	mutex_lock(&xef->vm.lock);
1979 	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1980 	mutex_unlock(&xef->vm.lock);
1981 	if (err)
1982 		goto err_close_and_put;
1983 
1984 	if (xe->info.has_asid) {
1985 		mutex_lock(&xe->usm.lock);
1986 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1987 				      XA_LIMIT(1, XE_MAX_ASID - 1),
1988 				      &xe->usm.next_asid, GFP_KERNEL);
1989 		mutex_unlock(&xe->usm.lock);
1990 		if (err < 0)
1991 			goto err_free_id;
1992 
1993 		vm->usm.asid = asid;
1994 	}
1995 
1996 	args->vm_id = id;
1997 	vm->xef = xef;
1998 
1999 	/* Record BO memory for VM pagetable created against client */
2000 	for_each_tile(tile, xe, id)
2001 		if (vm->pt_root[id])
2002 			xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
2003 
2004 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
2005 	/* Warning: Security issue - never enable by default */
2006 	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
2007 #endif
2008 
2009 	return 0;
2010 
2011 err_free_id:
2012 	mutex_lock(&xef->vm.lock);
2013 	xa_erase(&xef->vm.xa, id);
2014 	mutex_unlock(&xef->vm.lock);
2015 err_close_and_put:
2016 	xe_vm_close_and_put(vm);
2017 
2018 	return err;
2019 }
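
/*
 * Illustrative userspace sketch (assuming the drm_xe_vm_create layout and
 * DRM_IOCTL_XE_VM_CREATE number from the xe uAPI headers): creating a
 * scratch-page VM. As validated above, FAULT_MODE requires LR_MODE and is
 * mutually exclusive with SCRATCH_PAGE.
 *
 *	struct drm_xe_vm_create create = {
 *		.flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
 *		return -errno;
 *	// create.vm_id now identifies the new VM
 */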
2020 
2021 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
2022 			struct drm_file *file)
2023 {
2024 	struct xe_device *xe = to_xe_device(dev);
2025 	struct xe_file *xef = to_xe_file(file);
2026 	struct drm_xe_vm_destroy *args = data;
2027 	struct xe_vm *vm;
2028 	int err = 0;
2029 
2030 	if (XE_IOCTL_DBG(xe, args->pad) ||
2031 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2032 		return -EINVAL;
2033 
2034 	mutex_lock(&xef->vm.lock);
2035 	vm = xa_load(&xef->vm.xa, args->vm_id);
2036 	if (XE_IOCTL_DBG(xe, !vm))
2037 		err = -ENOENT;
2038 	else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
2039 		err = -EBUSY;
2040 	else
2041 		xa_erase(&xef->vm.xa, args->vm_id);
2042 	mutex_unlock(&xef->vm.lock);
2043 
2044 	if (!err)
2045 		xe_vm_close_and_put(vm);
2046 
2047 	return err;
2048 }
2049 
2050 static const u32 region_to_mem_type[] = {
2051 	XE_PL_TT,
2052 	XE_PL_VRAM0,
2053 	XE_PL_VRAM1,
2054 };
2055 
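/*
 * Prefetch: migrate the backing BO (if any) to the requested memory region,
 * then rebind the VMA if any of its tiles are unbound or invalidated;
 * otherwise there is nothing to do and the syncs are signalled immediately.
 */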
2056 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
2057 			  struct xe_exec_queue *q, u32 region,
2058 			  struct xe_sync_entry *syncs, u32 num_syncs,
2059 			  bool first_op, bool last_op)
2060 {
2061 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
2062 	int err;
2063 
2064	xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
2065 
2066 	if (!xe_vma_has_no_bo(vma)) {
2067 		err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
2068 		if (err)
2069 			return err;
2070 	}
2071 
2072 	if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
2073 		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
2074 				  true, first_op, last_op);
2075 	} else {
2076 		int i;
2077 
2078 		/* Nothing to do, signal fences now */
2079 		if (last_op) {
2080 			for (i = 0; i < num_syncs; i++) {
2081 				struct dma_fence *fence =
2082 					xe_exec_queue_last_fence_get(wait_exec_queue, vm);
2083 
2084 				xe_sync_entry_signal(&syncs[i], NULL, fence);
2085 				dma_fence_put(fence);
2086 			}
2087 		}
2088 
2089 		return 0;
2090 	}
2091 }
2092 
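/*
 * Mark a VMA as destroyed under the userptr notifier lock so invalidation
 * notifiers ignore it, and optionally remove it from the VM's GPUVA tree.
 */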
2093 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
2094 			     bool post_commit)
2095 {
2096 	down_read(&vm->userptr.notifier_lock);
2097 	vma->gpuva.flags |= XE_VMA_DESTROYED;
2098 	up_read(&vm->userptr.notifier_lock);
2099 	if (post_commit)
2100 		xe_vm_remove_vma(vm, vma);
2101 }
2102 
2103 #undef ULL
2104 #define ULL	unsigned long long
2105 
2106 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2107 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2108 {
2109 	struct xe_vma *vma;
2110 
2111 	switch (op->op) {
2112 	case DRM_GPUVA_OP_MAP:
2113 		vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2114 		       (ULL)op->map.va.addr, (ULL)op->map.va.range);
2115 		break;
2116 	case DRM_GPUVA_OP_REMAP:
2117 		vma = gpuva_to_vma(op->remap.unmap->va);
2118 		vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2119 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2120 		       op->remap.unmap->keep ? 1 : 0);
2121 		if (op->remap.prev)
2122 			vm_dbg(&xe->drm,
2123 			       "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2124 			       (ULL)op->remap.prev->va.addr,
2125 			       (ULL)op->remap.prev->va.range);
2126 		if (op->remap.next)
2127 			vm_dbg(&xe->drm,
2128 			       "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2129 			       (ULL)op->remap.next->va.addr,
2130 			       (ULL)op->remap.next->va.range);
2131 		break;
2132 	case DRM_GPUVA_OP_UNMAP:
2133 		vma = gpuva_to_vma(op->unmap.va);
2134 		vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2135 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2136 		       op->unmap.keep ? 1 : 0);
2137 		break;
2138 	case DRM_GPUVA_OP_PREFETCH:
2139 		vma = gpuva_to_vma(op->prefetch.va);
2140 		vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2141 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2142 		break;
2143 	default:
2144 		drm_warn(&xe->drm, "NOT POSSIBLE");
2145 	}
2146 }
2147 #else
2148 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2149 {
2150 }
2151 #endif
2152 
2153 /*
2154  * Create an operations list from the IOCTL arguments and set up the operation
2155  * fields so the parse and commit steps are decoupled from them. This step can fail.
2156  */
2157 static struct drm_gpuva_ops *
2158 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2159 			 u64 bo_offset_or_userptr, u64 addr, u64 range,
2160 			 u32 operation, u32 flags,
2161 			 u32 prefetch_region, u16 pat_index)
2162 {
2163 	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2164 	struct drm_gpuva_ops *ops;
2165 	struct drm_gpuva_op *__op;
2166 	struct drm_gpuvm_bo *vm_bo;
2167 	int err;
2168 
2169 	lockdep_assert_held_write(&vm->lock);
2170 
2171 	vm_dbg(&vm->xe->drm,
2172 	       "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2173 	       operation, (ULL)addr, (ULL)range,
2174 	       (ULL)bo_offset_or_userptr);
2175 
2176 	switch (operation) {
2177 	case DRM_XE_VM_BIND_OP_MAP:
2178 	case DRM_XE_VM_BIND_OP_MAP_USERPTR:
2179 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2180 						  obj, bo_offset_or_userptr);
2181 		break;
2182 	case DRM_XE_VM_BIND_OP_UNMAP:
2183 		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2184 		break;
2185 	case DRM_XE_VM_BIND_OP_PREFETCH:
2186 		ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2187 		break;
2188 	case DRM_XE_VM_BIND_OP_UNMAP_ALL:
2189 		xe_assert(vm->xe, bo);
2190 
2191 		err = xe_bo_lock(bo, true);
2192 		if (err)
2193 			return ERR_PTR(err);
2194 
2195 		vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
2196 		if (IS_ERR(vm_bo)) {
2197 			xe_bo_unlock(bo);
2198 			return ERR_CAST(vm_bo);
2199 		}
2200 
2201 		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2202 		drm_gpuvm_bo_put(vm_bo);
2203 		xe_bo_unlock(bo);
2204 		break;
2205 	default:
2206 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2207 		ops = ERR_PTR(-EINVAL);
2208 	}
2209 	if (IS_ERR(ops))
2210 		return ops;
2211 
2212 	drm_gpuva_for_each_op(__op, ops) {
2213 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2214 
2215 		if (__op->op == DRM_GPUVA_OP_MAP) {
2216 			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2217 			op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
2218 			op->map.pat_index = pat_index;
2219 		} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
2220 			op->prefetch.region = prefetch_region;
2221 		}
2222 
2223 		print_op(vm->xe, __op);
2224 	}
2225 
2226 	return ops;
2227 }
2228 
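/*
 * Create a VMA for a MAP operation. For BO-backed mappings the BO resv (and
 * the VM resv for BOs external to the VM) is held while the VMA is created;
 * userptr VMAs get their pages pinned and external BOs get the VM's preempt
 * fences added to their resv.
 */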
2229 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2230 			      u16 pat_index, unsigned int flags)
2231 {
2232 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2233 	struct drm_exec exec;
2234 	struct xe_vma *vma;
2235 	int err;
2236 
2237 	lockdep_assert_held_write(&vm->lock);
2238 
2239 	if (bo) {
2240 		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2241 		drm_exec_until_all_locked(&exec) {
2242 			err = 0;
2243 			if (!bo->vm) {
2244 				err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
2245 				drm_exec_retry_on_contention(&exec);
2246 			}
2247 			if (!err) {
2248 				err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2249 				drm_exec_retry_on_contention(&exec);
2250 			}
2251 			if (err) {
2252 				drm_exec_fini(&exec);
2253 				return ERR_PTR(err);
2254 			}
2255 		}
2256 	}
2257 	vma = xe_vma_create(vm, bo, op->gem.offset,
2258 			    op->va.addr, op->va.addr +
2259 			    op->va.range - 1, pat_index, flags);
2260 	if (bo)
2261 		drm_exec_fini(&exec);
2262 
2263 	if (xe_vma_is_userptr(vma)) {
2264 		err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2265 		if (err) {
2266 			prep_vma_destroy(vm, vma, false);
2267 			xe_vma_destroy_unlocked(vma);
2268 			return ERR_PTR(err);
2269 		}
2270 	} else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
2271 		err = add_preempt_fences(vm, bo);
2272 		if (err) {
2273 			prep_vma_destroy(vm, vma, false);
2274 			xe_vma_destroy_unlocked(vma);
2275 			return ERR_PTR(err);
2276 		}
2277 	}
2278 
2279 	return vma;
2280 }
2281 
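/* Largest page-table entry size recorded for this VMA, 1G if none recorded. */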
2282 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2283 {
2284 	if (vma->gpuva.flags & XE_VMA_PTE_1G)
2285 		return SZ_1G;
2286 	else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
2287 		return SZ_2M;
2288 	else if (vma->gpuva.flags & XE_VMA_PTE_64K)
2289 		return SZ_64K;
2290 	else if (vma->gpuva.flags & XE_VMA_PTE_4K)
2291 		return SZ_4K;
2292 
2293	return SZ_1G;	/* Uninitialized, use max size */
2294 }
2295 
2296 static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2297 {
2298 	switch (size) {
2299 	case SZ_1G:
2300 		vma->gpuva.flags |= XE_VMA_PTE_1G;
2301 		break;
2302 	case SZ_2M:
2303 		vma->gpuva.flags |= XE_VMA_PTE_2M;
2304 		break;
2305 	case SZ_64K:
2306 		vma->gpuva.flags |= XE_VMA_PTE_64K;
2307 		break;
2308 	case SZ_4K:
2309 		vma->gpuva.flags |= XE_VMA_PTE_4K;
2310 		break;
2311 	}
2312 }
2313 
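/*
 * Commit a parsed operation to the VM's GPUVA tree: insert new VMAs, mark
 * unmapped VMAs as destroyed and shrink the unmap range of partial unbinds.
 * The *_COMMITTED flags recorded here drive the unwind path on error.
 */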
2314 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2315 {
2316 	int err = 0;
2317 
2318 	lockdep_assert_held_write(&vm->lock);
2319 
2320 	switch (op->base.op) {
2321 	case DRM_GPUVA_OP_MAP:
2322 		err |= xe_vm_insert_vma(vm, op->map.vma);
2323 		if (!err)
2324 			op->flags |= XE_VMA_OP_COMMITTED;
2325 		break;
2326 	case DRM_GPUVA_OP_REMAP:
2327 	{
2328 		u8 tile_present =
2329 			gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2330 
2331 		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2332 				 true);
2333 		op->flags |= XE_VMA_OP_COMMITTED;
2334 
2335 		if (op->remap.prev) {
2336 			err |= xe_vm_insert_vma(vm, op->remap.prev);
2337 			if (!err)
2338 				op->flags |= XE_VMA_OP_PREV_COMMITTED;
2339 			if (!err && op->remap.skip_prev) {
2340 				op->remap.prev->tile_present =
2341 					tile_present;
2342 				op->remap.prev = NULL;
2343 			}
2344 		}
2345 		if (op->remap.next) {
2346 			err |= xe_vm_insert_vma(vm, op->remap.next);
2347 			if (!err)
2348 				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2349 			if (!err && op->remap.skip_next) {
2350 				op->remap.next->tile_present =
2351 					tile_present;
2352 				op->remap.next = NULL;
2353 			}
2354 		}
2355 
2356		/* Adjust for partial unbind after removing VMA from VM */
2357 		if (!err) {
2358 			op->base.remap.unmap->va->va.addr = op->remap.start;
2359 			op->base.remap.unmap->va->va.range = op->remap.range;
2360 		}
2361 		break;
2362 	}
2363 	case DRM_GPUVA_OP_UNMAP:
2364 		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2365 		op->flags |= XE_VMA_OP_COMMITTED;
2366 		break;
2367 	case DRM_GPUVA_OP_PREFETCH:
2368 		op->flags |= XE_VMA_OP_COMMITTED;
2369 		break;
2370 	default:
2371 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2372 	}
2373 
2374 	return err;
2375 }
2377 
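/*
 * Turn a drm_gpuva_ops list into xe_vma_ops: create the VMAs required by MAP
 * and REMAP operations, append the ops to @ops_list and commit them to the
 * VM. Syncs are attached to the first and last op of the whole IOCTL.
 */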
2378 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2379 				   struct drm_gpuva_ops *ops,
2380 				   struct xe_sync_entry *syncs, u32 num_syncs,
2381 				   struct list_head *ops_list, bool last)
2382 {
2383 	struct xe_device *xe = vm->xe;
2384 	struct xe_vma_op *last_op = NULL;
2385 	struct drm_gpuva_op *__op;
2386 	int err = 0;
2387 
2388 	lockdep_assert_held_write(&vm->lock);
2389 
2390 	drm_gpuva_for_each_op(__op, ops) {
2391 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2392 		struct xe_vma *vma;
2393 		bool first = list_empty(ops_list);
2394 		unsigned int flags = 0;
2395 
2396 		INIT_LIST_HEAD(&op->link);
2397 		list_add_tail(&op->link, ops_list);
2398 
2399 		if (first) {
2400 			op->flags |= XE_VMA_OP_FIRST;
2401 			op->num_syncs = num_syncs;
2402 			op->syncs = syncs;
2403 		}
2404 
2405 		op->q = q;
2406 
2407 		switch (op->base.op) {
2408 		case DRM_GPUVA_OP_MAP:
2409 		{
2410 			flags |= op->map.is_null ?
2411 				VMA_CREATE_FLAG_IS_NULL : 0;
2412 			flags |= op->map.dumpable ?
2413 				VMA_CREATE_FLAG_DUMPABLE : 0;
2414 
2415 			vma = new_vma(vm, &op->base.map, op->map.pat_index,
2416 				      flags);
2417 			if (IS_ERR(vma))
2418 				return PTR_ERR(vma);
2419 
2420 			op->map.vma = vma;
2421 			break;
2422 		}
2423 		case DRM_GPUVA_OP_REMAP:
2424 		{
2425 			struct xe_vma *old =
2426 				gpuva_to_vma(op->base.remap.unmap->va);
2427 
2428 			op->remap.start = xe_vma_start(old);
2429 			op->remap.range = xe_vma_size(old);
2430 
2431 			if (op->base.remap.prev) {
2432 				flags |= op->base.remap.unmap->va->flags &
2433 					XE_VMA_READ_ONLY ?
2434 					VMA_CREATE_FLAG_READ_ONLY : 0;
2435 				flags |= op->base.remap.unmap->va->flags &
2436 					DRM_GPUVA_SPARSE ?
2437 					VMA_CREATE_FLAG_IS_NULL : 0;
2438 				flags |= op->base.remap.unmap->va->flags &
2439 					XE_VMA_DUMPABLE ?
2440 					VMA_CREATE_FLAG_DUMPABLE : 0;
2441 
2442 				vma = new_vma(vm, op->base.remap.prev,
2443 					      old->pat_index, flags);
2444 				if (IS_ERR(vma))
2445 					return PTR_ERR(vma);
2446 
2447 				op->remap.prev = vma;
2448 
2449 				/*
2450 				 * Userptr creates a new SG mapping so
2451 				 * we must also rebind.
2452 				 */
2453 				op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2454 					IS_ALIGNED(xe_vma_end(vma),
2455 						   xe_vma_max_pte_size(old));
2456 				if (op->remap.skip_prev) {
2457 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2458 					op->remap.range -=
2459 						xe_vma_end(vma) -
2460 						xe_vma_start(old);
2461 					op->remap.start = xe_vma_end(vma);
2462 					vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
2463 					       (ULL)op->remap.start,
2464 					       (ULL)op->remap.range);
2465 				}
2466 			}
2467 
2468 			if (op->base.remap.next) {
2469 				flags |= op->base.remap.unmap->va->flags &
2470 					XE_VMA_READ_ONLY ?
2471 					VMA_CREATE_FLAG_READ_ONLY : 0;
2472 				flags |= op->base.remap.unmap->va->flags &
2473 					DRM_GPUVA_SPARSE ?
2474 					VMA_CREATE_FLAG_IS_NULL : 0;
2475 				flags |= op->base.remap.unmap->va->flags &
2476 					XE_VMA_DUMPABLE ?
2477 					VMA_CREATE_FLAG_DUMPABLE : 0;
2478 
2479 				vma = new_vma(vm, op->base.remap.next,
2480 					      old->pat_index, flags);
2481 				if (IS_ERR(vma))
2482 					return PTR_ERR(vma);
2483 
2484 				op->remap.next = vma;
2485 
2486 				/*
2487 				 * Userptr creates a new SG mapping so
2488 				 * we must also rebind.
2489 				 */
2490 				op->remap.skip_next = !xe_vma_is_userptr(old) &&
2491 					IS_ALIGNED(xe_vma_start(vma),
2492 						   xe_vma_max_pte_size(old));
2493 				if (op->remap.skip_next) {
2494 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2495 					op->remap.range -=
2496 						xe_vma_end(old) -
2497 						xe_vma_start(vma);
2498 					vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
2499 					       (ULL)op->remap.start,
2500 					       (ULL)op->remap.range);
2501 				}
2502 			}
2503 			break;
2504 		}
2505 		case DRM_GPUVA_OP_UNMAP:
2506 		case DRM_GPUVA_OP_PREFETCH:
2507 			/* Nothing to do */
2508 			break;
2509 		default:
2510 			drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2511 		}
2512 
2513 		last_op = op;
2514 
2515 		err = xe_vma_op_commit(vm, op);
2516 		if (err)
2517 			return err;
2518 	}
2519 
2520 	/* FIXME: Unhandled corner case */
2521 	XE_WARN_ON(!last_op && last && !list_empty(ops_list));
2522 
2523 	if (!last_op)
2524 		return 0;
2525 
2526 	last_op->ops = ops;
2527 	if (last) {
2528 		last_op->flags |= XE_VMA_OP_LAST;
2529 		last_op->num_syncs = num_syncs;
2530 		last_op->syncs = syncs;
2531 	}
2532 
2533 	return 0;
2534 }
2535 
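/*
 * Execute a single operation with the VMA's dma-resv locks held via @exec,
 * translating it into xe_vm_bind()/xe_vm_unbind()/xe_vm_prefetch() calls.
 */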
2536 static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
2537 		      struct xe_vma *vma, struct xe_vma_op *op)
2538 {
2539 	int err;
2540 
2541 	lockdep_assert_held_write(&vm->lock);
2542 
2543 	err = xe_vm_lock_vma(exec, vma);
2544 	if (err)
2545 		return err;
2546 
2547 	xe_vm_assert_held(vm);
2548 	xe_bo_assert_held(xe_vma_bo(vma));
2549 
2550 	switch (op->base.op) {
2551 	case DRM_GPUVA_OP_MAP:
2552 		err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2553 				 op->syncs, op->num_syncs,
2554 				 !xe_vm_in_fault_mode(vm),
2555 				 op->flags & XE_VMA_OP_FIRST,
2556 				 op->flags & XE_VMA_OP_LAST);
2557 		break;
2558 	case DRM_GPUVA_OP_REMAP:
2559 	{
2560 		bool prev = !!op->remap.prev;
2561 		bool next = !!op->remap.next;
2562 
2563 		if (!op->remap.unmap_done) {
2564 			if (prev || next)
2565 				vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2566 			err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2567 					   op->num_syncs,
2568 					   op->flags & XE_VMA_OP_FIRST,
2569 					   op->flags & XE_VMA_OP_LAST &&
2570 					   !prev && !next);
2571 			if (err)
2572 				break;
2573 			op->remap.unmap_done = true;
2574 		}
2575 
2576 		if (prev) {
2577 			op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2578 			err = xe_vm_bind(vm, op->remap.prev, op->q,
2579 					 xe_vma_bo(op->remap.prev), op->syncs,
2580 					 op->num_syncs, true, false,
2581 					 op->flags & XE_VMA_OP_LAST && !next);
2582 			op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2583 			if (err)
2584 				break;
2585 			op->remap.prev = NULL;
2586 		}
2587 
2588 		if (next) {
2589 			op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2590 			err = xe_vm_bind(vm, op->remap.next, op->q,
2591 					 xe_vma_bo(op->remap.next),
2592 					 op->syncs, op->num_syncs,
2593 					 true, false,
2594 					 op->flags & XE_VMA_OP_LAST);
2595 			op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2596 			if (err)
2597 				break;
2598 			op->remap.next = NULL;
2599 		}
2600 
2601 		break;
2602 	}
2603 	case DRM_GPUVA_OP_UNMAP:
2604 		err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2605 				   op->num_syncs, op->flags & XE_VMA_OP_FIRST,
2606 				   op->flags & XE_VMA_OP_LAST);
2607 		break;
2608 	case DRM_GPUVA_OP_PREFETCH:
2609 		err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
2610 				     op->syncs, op->num_syncs,
2611 				     op->flags & XE_VMA_OP_FIRST,
2612 				     op->flags & XE_VMA_OP_LAST);
2613 		break;
2614 	default:
2615 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2616 	}
2617 
2618 	if (err)
2619 		trace_xe_vma_fail(vma);
2620 
2621 	return err;
2622 }
2623 
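/*
 * Lock the needed objects with drm_exec and run the operation, repinning
 * userptr pages and retrying if they were invalidated (-EAGAIN).
 */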
2624 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2625 			       struct xe_vma_op *op)
2626 {
2627 	struct drm_exec exec;
2628 	int err;
2629 
2630 retry_userptr:
2631 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2632 	drm_exec_until_all_locked(&exec) {
2633 		err = op_execute(&exec, vm, vma, op);
2634 		drm_exec_retry_on_contention(&exec);
2635 		if (err)
2636 			break;
2637 	}
2638 	drm_exec_fini(&exec);
2639 
2640 	if (err == -EAGAIN) {
2641 		lockdep_assert_held_write(&vm->lock);
2642 
2643 		if (op->base.op == DRM_GPUVA_OP_REMAP) {
2644 			if (!op->remap.unmap_done)
2645 				vma = gpuva_to_vma(op->base.remap.unmap->va);
2646 			else if (op->remap.prev)
2647 				vma = op->remap.prev;
2648 			else
2649 				vma = op->remap.next;
2650 		}
2651 
2652 		if (xe_vma_is_userptr(vma)) {
2653 			err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2654 			if (!err)
2655 				goto retry_userptr;
2656 
2657 			trace_xe_vma_fail(vma);
2658 		}
2659 	}
2660 
2661 	return err;
2662 }
2663 
2664 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2665 {
2666 	int ret = 0;
2667 
2668 	lockdep_assert_held_write(&vm->lock);
2669 
2670 	switch (op->base.op) {
2671 	case DRM_GPUVA_OP_MAP:
2672 		ret = __xe_vma_op_execute(vm, op->map.vma, op);
2673 		break;
2674 	case DRM_GPUVA_OP_REMAP:
2675 	{
2676 		struct xe_vma *vma;
2677 
2678 		if (!op->remap.unmap_done)
2679 			vma = gpuva_to_vma(op->base.remap.unmap->va);
2680 		else if (op->remap.prev)
2681 			vma = op->remap.prev;
2682 		else
2683 			vma = op->remap.next;
2684 
2685 		ret = __xe_vma_op_execute(vm, vma, op);
2686 		break;
2687 	}
2688 	case DRM_GPUVA_OP_UNMAP:
2689 		ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2690 					  op);
2691 		break;
2692 	case DRM_GPUVA_OP_PREFETCH:
2693 		ret = __xe_vma_op_execute(vm,
2694 					  gpuva_to_vma(op->base.prefetch.va),
2695 					  op);
2696 		break;
2697 	default:
2698 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2699 	}
2700 
2701 	return ret;
2702 }
2703 
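/*
 * Release the resources held by an operation: the syncs and exec queue
 * reference on the last op, the op's list link and GPUVA ops, and the VM
 * reference taken for the bind on the last op.
 */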
2704 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2705 {
2706 	bool last = op->flags & XE_VMA_OP_LAST;
2707 
2708 	if (last) {
2709 		while (op->num_syncs--)
2710 			xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2711 		kfree(op->syncs);
2712 		if (op->q)
2713 			xe_exec_queue_put(op->q);
2714 	}
2715 	if (!list_empty(&op->link))
2716 		list_del(&op->link);
2717 	if (op->ops)
2718 		drm_gpuva_ops_free(&vm->gpuvm, op->ops);
2719 	if (last)
2720 		xe_vm_put(vm);
2721 }
2722 
2723 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2724 			     bool post_commit, bool prev_post_commit,
2725 			     bool next_post_commit)
2726 {
2727 	lockdep_assert_held_write(&vm->lock);
2728 
2729 	switch (op->base.op) {
2730 	case DRM_GPUVA_OP_MAP:
2731 		if (op->map.vma) {
2732 			prep_vma_destroy(vm, op->map.vma, post_commit);
2733 			xe_vma_destroy_unlocked(op->map.vma);
2734 		}
2735 		break;
2736 	case DRM_GPUVA_OP_UNMAP:
2737 	{
2738 		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2739 
2740 		if (vma) {
2741 			down_read(&vm->userptr.notifier_lock);
2742 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2743 			up_read(&vm->userptr.notifier_lock);
2744 			if (post_commit)
2745 				xe_vm_insert_vma(vm, vma);
2746 		}
2747 		break;
2748 	}
2749 	case DRM_GPUVA_OP_REMAP:
2750 	{
2751 		struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2752 
2753 		if (op->remap.prev) {
2754 			prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2755 			xe_vma_destroy_unlocked(op->remap.prev);
2756 		}
2757 		if (op->remap.next) {
2758 			prep_vma_destroy(vm, op->remap.next, next_post_commit);
2759 			xe_vma_destroy_unlocked(op->remap.next);
2760 		}
2761 		if (vma) {
2762 			down_read(&vm->userptr.notifier_lock);
2763 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2764 			up_read(&vm->userptr.notifier_lock);
2765 			if (post_commit)
2766 				xe_vm_insert_vma(vm, vma);
2767 		}
2768 		break;
2769 	}
2770 	case DRM_GPUVA_OP_PREFETCH:
2771 		/* Nothing to do */
2772 		break;
2773 	default:
2774 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2775 	}
2776 }
2777 
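/*
 * Unwind, in reverse order, every operation committed before a failure while
 * creating or parsing the bind operations, restoring the GPUVA tree.
 */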
2778 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2779 				     struct drm_gpuva_ops **ops,
2780 				     int num_ops_list)
2781 {
2782 	int i;
2783 
2784 	for (i = num_ops_list - 1; i >= 0; --i) {
2785 		struct drm_gpuva_ops *__ops = ops[i];
2786 		struct drm_gpuva_op *__op;
2787 
2788 		if (!__ops)
2789 			continue;
2790 
2791 		drm_gpuva_for_each_op_reverse(__op, __ops) {
2792 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2793 
2794 			xe_vma_op_unwind(vm, op,
2795 					 op->flags & XE_VMA_OP_COMMITTED,
2796 					 op->flags & XE_VMA_OP_PREV_COMMITTED,
2797 					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2798 		}
2799 
2800 		drm_gpuva_ops_free(&vm->gpuvm, __ops);
2801 	}
2802 }
2803 
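/*
 * Execute the committed operations in order. A failure here currently kills
 * the VM instead of unwinding (see the FIXME below).
 */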
2804 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2805 				     struct list_head *ops_list)
2806 {
2807 	struct xe_vma_op *op, *next;
2808 	int err;
2809 
2810 	lockdep_assert_held_write(&vm->lock);
2811 
2812 	list_for_each_entry_safe(op, next, ops_list, link) {
2813 		err = xe_vma_op_execute(vm, op);
2814 		if (err) {
2815 			drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
2816 				 op->base.op, err);
2817 			/*
2818 			 * FIXME: Killing VM rather than proper error handling
2819 			 */
2820 			xe_vm_kill(vm);
2821 			return -ENOSPC;
2822 		}
2823 		xe_vma_op_cleanup(vm, op);
2824 	}
2825 
2826 	return 0;
2827 }
2828 
2829 #define SUPPORTED_FLAGS	(DRM_XE_VM_BIND_FLAG_NULL | \
2830 	 DRM_XE_VM_BIND_FLAG_DUMPABLE)
2831 #define XE_64K_PAGE_MASK 0xffffull
2832 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
2833 
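/*
 * Validate the bind IOCTL arguments. With more than one bind the array of
 * bind ops is copied from userspace and must be kvfree()d by the caller;
 * otherwise *bind_ops points at the single op embedded in @args.
 */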
2834 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2835 				    struct drm_xe_vm_bind *args,
2836 				    struct drm_xe_vm_bind_op **bind_ops)
2837 {
2838 	int err;
2839 	int i;
2840 
2841 	if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
2842 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2843 		return -EINVAL;
2844 
2845 	if (XE_IOCTL_DBG(xe, args->extensions))
2846 		return -EINVAL;
2847 
2848 	if (args->num_binds > 1) {
2849 		u64 __user *bind_user =
2850 			u64_to_user_ptr(args->vector_of_binds);
2851 
2852 		*bind_ops = kvmalloc_array(args->num_binds,
2853 					   sizeof(struct drm_xe_vm_bind_op),
2854 					   GFP_KERNEL | __GFP_ACCOUNT);
2855 		if (!*bind_ops)
2856 			return -ENOMEM;
2857 
2858		err = copy_from_user(*bind_ops, bind_user,
2859				     sizeof(struct drm_xe_vm_bind_op) *
2860				     args->num_binds);
2861 		if (XE_IOCTL_DBG(xe, err)) {
2862 			err = -EFAULT;
2863 			goto free_bind_ops;
2864 		}
2865 	} else {
2866 		*bind_ops = &args->bind;
2867 	}
2868 
2869 	for (i = 0; i < args->num_binds; ++i) {
2870 		u64 range = (*bind_ops)[i].range;
2871 		u64 addr = (*bind_ops)[i].addr;
2872 		u32 op = (*bind_ops)[i].op;
2873 		u32 flags = (*bind_ops)[i].flags;
2874 		u32 obj = (*bind_ops)[i].obj;
2875 		u64 obj_offset = (*bind_ops)[i].obj_offset;
2876 		u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
2877 		bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2878 		u16 pat_index = (*bind_ops)[i].pat_index;
2879 		u16 coh_mode;
2880 
2881 		if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
2882 			err = -EINVAL;
2883 			goto free_bind_ops;
2884 		}
2885 
2886 		pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
2887 		(*bind_ops)[i].pat_index = pat_index;
2888 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2889 		if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
2890 			err = -EINVAL;
2891 			goto free_bind_ops;
2892 		}
2893 
2894 		if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
2895 			err = -EINVAL;
2896 			goto free_bind_ops;
2897 		}
2898 
2899 		if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
2900 		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
2901 		    XE_IOCTL_DBG(xe, obj && is_null) ||
2902 		    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
2903 		    XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
2904 				 is_null) ||
2905 		    XE_IOCTL_DBG(xe, !obj &&
2906 				 op == DRM_XE_VM_BIND_OP_MAP &&
2907 				 !is_null) ||
2908 		    XE_IOCTL_DBG(xe, !obj &&
2909 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2910 		    XE_IOCTL_DBG(xe, addr &&
2911 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2912 		    XE_IOCTL_DBG(xe, range &&
2913 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2914 		    XE_IOCTL_DBG(xe, obj &&
2915 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2916 		    XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2917 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2918 		    XE_IOCTL_DBG(xe, obj &&
2919 				 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
2920 		    XE_IOCTL_DBG(xe, prefetch_region &&
2921 				 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
2922 		    XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
2923 				       xe->info.mem_region_mask)) ||
2924 		    XE_IOCTL_DBG(xe, obj &&
2925 				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
2926 			err = -EINVAL;
2927 			goto free_bind_ops;
2928 		}
2929 
2930 		if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
2931 		    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
2932 		    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
2933 		    XE_IOCTL_DBG(xe, !range &&
2934 				 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
2935 			err = -EINVAL;
2936 			goto free_bind_ops;
2937 		}
2938 	}
2939 
2940 	return 0;
2941 
2942 free_bind_ops:
2943 	if (args->num_binds > 1)
2944 		kvfree(*bind_ops);
2945 	return err;
2946 }
2947 
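/*
 * Signal the syncs when the IOCTL ends up with no bind work to execute,
 * keeping ordering against the wait exec queue's last fence.
 */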
2948 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
2949 				       struct xe_exec_queue *q,
2950 				       struct xe_sync_entry *syncs,
2951 				       int num_syncs)
2952 {
2953 	struct dma_fence *fence;
2954 	int i, err = 0;
2955 
2956 	fence = xe_sync_in_fence_get(syncs, num_syncs,
2957 				     to_wait_exec_queue(vm, q), vm);
2958 	if (IS_ERR(fence))
2959 		return PTR_ERR(fence);
2960 
2961 	for (i = 0; i < num_syncs; i++)
2962 		xe_sync_entry_signal(&syncs[i], NULL, fence);
2963 
2964 	xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
2965 				     fence);
2966 	dma_fence_put(fence);
2967 
2968 	return err;
2969 }
2970 
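/**
 * xe_vm_bind_ioctl() - Bind, unbind or prefetch address ranges of a VM
 * @dev: DRM device
 * @data: Pointer to struct drm_xe_vm_bind from userspace
 * @file: DRM file private
 *
 * Validates the arguments, looks up the VM, exec queue, BOs and syncs,
 * builds GPUVA operations for every bind op, commits them to the VM and
 * executes them, signalling the syncs as the work completes.
 *
 * Return: 0 on success, negative error code on failure.
 */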
2971 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2972 {
2973 	struct xe_device *xe = to_xe_device(dev);
2974 	struct xe_file *xef = to_xe_file(file);
2975 	struct drm_xe_vm_bind *args = data;
2976 	struct drm_xe_sync __user *syncs_user;
2977 	struct xe_bo **bos = NULL;
2978 	struct drm_gpuva_ops **ops = NULL;
2979 	struct xe_vm *vm;
2980 	struct xe_exec_queue *q = NULL;
2981 	u32 num_syncs, num_ufence = 0;
2982 	struct xe_sync_entry *syncs = NULL;
2983 	struct drm_xe_vm_bind_op *bind_ops;
2984 	LIST_HEAD(ops_list);
2985 	int err;
2986 	int i;
2987 
2988 	err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
2989 	if (err)
2990 		return err;
2991 
2992 	if (args->exec_queue_id) {
2993 		q = xe_exec_queue_lookup(xef, args->exec_queue_id);
2994 		if (XE_IOCTL_DBG(xe, !q)) {
2995 			err = -ENOENT;
2996 			goto free_objs;
2997 		}
2998 
2999 		if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
3000 			err = -EINVAL;
3001 			goto put_exec_queue;
3002 		}
3003 	}
3004 
3005 	vm = xe_vm_lookup(xef, args->vm_id);
3006 	if (XE_IOCTL_DBG(xe, !vm)) {
3007 		err = -EINVAL;
3008 		goto put_exec_queue;
3009 	}
3010 
3011 	err = down_write_killable(&vm->lock);
3012 	if (err)
3013 		goto put_vm;
3014 
3015 	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
3016 		err = -ENOENT;
3017 		goto release_vm_lock;
3018 	}
3019 
3020 	for (i = 0; i < args->num_binds; ++i) {
3021 		u64 range = bind_ops[i].range;
3022 		u64 addr = bind_ops[i].addr;
3023 
3024 		if (XE_IOCTL_DBG(xe, range > vm->size) ||
3025 		    XE_IOCTL_DBG(xe, addr > vm->size - range)) {
3026 			err = -EINVAL;
3027 			goto release_vm_lock;
3028 		}
3029 	}
3030 
3031 	if (args->num_binds) {
3032 		bos = kvcalloc(args->num_binds, sizeof(*bos),
3033 			       GFP_KERNEL | __GFP_ACCOUNT);
3034 		if (!bos) {
3035 			err = -ENOMEM;
3036 			goto release_vm_lock;
3037 		}
3038 
3039 		ops = kvcalloc(args->num_binds, sizeof(*ops),
3040 			       GFP_KERNEL | __GFP_ACCOUNT);
3041 		if (!ops) {
3042 			err = -ENOMEM;
3043 			goto release_vm_lock;
3044 		}
3045 	}
3046 
3047 	for (i = 0; i < args->num_binds; ++i) {
3048 		struct drm_gem_object *gem_obj;
3049 		u64 range = bind_ops[i].range;
3050 		u64 addr = bind_ops[i].addr;
3051 		u32 obj = bind_ops[i].obj;
3052 		u64 obj_offset = bind_ops[i].obj_offset;
3053 		u16 pat_index = bind_ops[i].pat_index;
3054 		u16 coh_mode;
3055 
3056 		if (!obj)
3057 			continue;
3058 
3059 		gem_obj = drm_gem_object_lookup(file, obj);
3060 		if (XE_IOCTL_DBG(xe, !gem_obj)) {
3061 			err = -ENOENT;
3062 			goto put_obj;
3063 		}
3064 		bos[i] = gem_to_xe_bo(gem_obj);
3065 
3066 		if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
3067 		    XE_IOCTL_DBG(xe, obj_offset >
3068 				 bos[i]->size - range)) {
3069 			err = -EINVAL;
3070 			goto put_obj;
3071 		}
3072 
3073 		if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3074 			if (XE_IOCTL_DBG(xe, obj_offset &
3075 					 XE_64K_PAGE_MASK) ||
3076 			    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3077 			    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3078 				err = -EINVAL;
3079 				goto put_obj;
3080 			}
3081 		}
3082 
3083 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
3084 		if (bos[i]->cpu_caching) {
3085 			if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
3086 					 bos[i]->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
3087 				err = -EINVAL;
3088 				goto put_obj;
3089 			}
3090 		} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
3091 			/*
3092 			 * Imported dma-buf from a different device should
3093 			 * require 1way or 2way coherency since we don't know
3094			 * how it was mapped on the CPU. Just assume it is
3095			 * potentially cached on the CPU side.
3096 			 */
3097 			err = -EINVAL;
3098 			goto put_obj;
3099 		}
3100 	}
3101 
3102 	if (args->num_syncs) {
3103 		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3104 		if (!syncs) {
3105 			err = -ENOMEM;
3106 			goto put_obj;
3107 		}
3108 	}
3109 
3110 	syncs_user = u64_to_user_ptr(args->syncs);
3111 	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3112 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3113 					  &syncs_user[num_syncs],
3114 					  (xe_vm_in_lr_mode(vm) ?
3115 					   SYNC_PARSE_FLAG_LR_MODE : 0) |
3116 					  (!args->num_binds ?
3117 					   SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
3118 		if (err)
3119 			goto free_syncs;
3120 
3121 		if (xe_sync_is_ufence(&syncs[num_syncs]))
3122 			num_ufence++;
3123 	}
3124 
3125 	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
3126 		err = -EINVAL;
3127 		goto free_syncs;
3128 	}
3129 
3130 	if (!args->num_binds) {
3131 		err = -ENODATA;
3132 		goto free_syncs;
3133 	}
3134 
3135 	for (i = 0; i < args->num_binds; ++i) {
3136 		u64 range = bind_ops[i].range;
3137 		u64 addr = bind_ops[i].addr;
3138 		u32 op = bind_ops[i].op;
3139 		u32 flags = bind_ops[i].flags;
3140 		u64 obj_offset = bind_ops[i].obj_offset;
3141 		u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
3142 		u16 pat_index = bind_ops[i].pat_index;
3143 
3144 		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3145 						  addr, range, op, flags,
3146 						  prefetch_region, pat_index);
3147 		if (IS_ERR(ops[i])) {
3148 			err = PTR_ERR(ops[i]);
3149 			ops[i] = NULL;
3150 			goto unwind_ops;
3151 		}
3152 
3153 		err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3154 					      &ops_list,
3155 					      i == args->num_binds - 1);
3156 		if (err)
3157 			goto unwind_ops;
3158 	}
3159 
3160 	/* Nothing to do */
3161 	if (list_empty(&ops_list)) {
3162 		err = -ENODATA;
3163 		goto unwind_ops;
3164 	}
3165 
3166 	xe_vm_get(vm);
3167 	if (q)
3168 		xe_exec_queue_get(q);
3169 
3170 	err = vm_bind_ioctl_ops_execute(vm, &ops_list);
3171 
3172 	up_write(&vm->lock);
3173 
3174 	if (q)
3175 		xe_exec_queue_put(q);
3176 	xe_vm_put(vm);
3177 
3178 	for (i = 0; bos && i < args->num_binds; ++i)
3179 		xe_bo_put(bos[i]);
3180 
3181 	kvfree(bos);
3182 	kvfree(ops);
3183 	if (args->num_binds > 1)
3184 		kvfree(bind_ops);
3185 
3186 	return err;
3187 
3188 unwind_ops:
3189 	vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3190 free_syncs:
3191 	if (err == -ENODATA)
3192 		err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3193 	while (num_syncs--)
3194 		xe_sync_entry_cleanup(&syncs[num_syncs]);
3195 
3196 	kfree(syncs);
3197 put_obj:
3198 	for (i = 0; i < args->num_binds; ++i)
3199 		xe_bo_put(bos[i]);
3200 release_vm_lock:
3201 	up_write(&vm->lock);
3202 put_vm:
3203 	xe_vm_put(vm);
3204 put_exec_queue:
3205 	if (q)
3206 		xe_exec_queue_put(q);
3207 free_objs:
3208 	kvfree(bos);
3209 	kvfree(ops);
3210 	if (args->num_binds > 1)
3211 		kvfree(bind_ops);
3212 	return err;
3213 }
3214 
3215 /**
3216  * xe_vm_lock() - Lock the vm's dma_resv object
3217  * @vm: The struct xe_vm whose lock is to be locked
3218  * @intr: Whether any wait for a contended lock should be interruptible
3219  *
3220  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3221  * contended lock was interrupted. If @intr is false, the function
3222  * always returns 0.
3223  */
3224 int xe_vm_lock(struct xe_vm *vm, bool intr)
3225 {
3226 	if (intr)
3227 		return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3228 
3229 	return dma_resv_lock(xe_vm_resv(vm), NULL);
3230 }
3231 
3232 /**
3233  * xe_vm_unlock() - Unlock the vm's dma_resv object
3234  * @vm: The struct xe_vm whose lock is to be released.
3235  *
3236  * Unlock a buffer object lock that was locked by xe_vm_lock().
3237  */
3238 void xe_vm_unlock(struct xe_vm *vm)
3239 {
3240 	dma_resv_unlock(xe_vm_resv(vm));
3241 }
3242 
3243 /**
3244  * xe_vm_invalidate_vma() - Invalidate GPU mappings for a VMA without a lock
3245  * @vma: VMA to invalidate
3246  *
3247  * Walks the page-table leaves, zeroing the entries owned by this VMA,
3248  * invalidates the TLBs and blocks until the TLB invalidation has
3249  * completed.
3250  *
3251  * Return: 0 on success, negative error code otherwise.
3252  */
3253 int xe_vm_invalidate_vma(struct xe_vma *vma)
3254 {
3255 	struct xe_device *xe = xe_vma_vm(vma)->xe;
3256 	struct xe_tile *tile;
3257 	u32 tile_needs_invalidate = 0;
3258 	int seqno[XE_MAX_TILES_PER_DEVICE];
3259 	u8 id;
3260 	int ret;
3261 
3262 	xe_assert(xe, !xe_vma_is_null(vma));
3263 	trace_xe_vma_invalidate(vma);
3264 
3265 	/* Check that we don't race with page-table updates */
3266 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3267 		if (xe_vma_is_userptr(vma)) {
3268 			WARN_ON_ONCE(!mmu_interval_check_retry
3269 				     (&to_userptr_vma(vma)->userptr.notifier,
3270 				      to_userptr_vma(vma)->userptr.notifier_seq));
3271 			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3272 							     DMA_RESV_USAGE_BOOKKEEP));
3273 
3274 		} else {
3275 			xe_bo_assert_held(xe_vma_bo(vma));
3276 		}
3277 	}
3278 
3279 	for_each_tile(tile, xe, id) {
3280 		if (xe_pt_zap_ptes(tile, vma)) {
3281 			tile_needs_invalidate |= BIT(id);
3282 			xe_device_wmb(xe);
3283 			/*
3284 			 * FIXME: We potentially need to invalidate multiple
3285 			 * GTs within the tile
3286 			 */
3287 			seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3288 			if (seqno[id] < 0)
3289 				return seqno[id];
3290 		}
3291 	}
3292 
3293 	for_each_tile(tile, xe, id) {
3294 		if (tile_needs_invalidate & BIT(id)) {
3295 			ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3296 			if (ret < 0)
3297 				return ret;
3298 		}
3299 	}
3300 
3301 	vma->tile_invalidated = vma->tile_mask;
3302 
3303 	return 0;
3304 }
3305 
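/*
 * Dump the VM's root page-table address and every VMA to @p for debug and
 * error capture, marking each mapping as VRAM, system memory, userptr or
 * NULL.
 */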
3306 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3307 {
3308 	struct drm_gpuva *gpuva;
3309 	bool is_vram;
3310 	uint64_t addr;
3311 
3312 	if (!down_read_trylock(&vm->lock)) {
3313		drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3314 		return 0;
3315 	}
3316 	if (vm->pt_root[gt_id]) {
3317 		addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
3318 		is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
3319 		drm_printf(p, " VM root: A:0x%llx %s\n", addr,
3320 			   is_vram ? "VRAM" : "SYS");
3321 	}
3322 
3323 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3324 		struct xe_vma *vma = gpuva_to_vma(gpuva);
3325 		bool is_userptr = xe_vma_is_userptr(vma);
3326 		bool is_null = xe_vma_is_null(vma);
3327 
3328 		if (is_null) {
3329 			addr = 0;
3330 		} else if (is_userptr) {
3331 			struct sg_table *sg = to_userptr_vma(vma)->userptr.sg;
3332 			struct xe_res_cursor cur;
3333 
3334 			if (sg) {
3335 				xe_res_first_sg(sg, 0, XE_PAGE_SIZE, &cur);
3336 				addr = xe_res_dma(&cur);
3337 			} else {
3338 				addr = 0;
3339 			}
3340 		} else {
3341 			addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
3342 			is_vram = xe_bo_is_vram(xe_vma_bo(vma));
3343 		}
3344 		drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3345 			   xe_vma_start(vma), xe_vma_end(vma) - 1,
3346 			   xe_vma_size(vma),
3347 			   addr, is_null ? "NULL" : is_userptr ? "USR" :
3348 			   is_vram ? "VRAM" : "SYS");
3349 	}
3350 	up_read(&vm->lock);
3351 
3352 	return 0;
3353 }
3354 
3355 struct xe_vm_snapshot {
3356 	unsigned long num_snaps;
3357 	struct {
3358 		u64 ofs, bo_ofs;
3359 		unsigned long len;
3360 		struct xe_bo *bo;
3361 		void *data;
3362 		struct mm_struct *mm;
3363 	} snap[];
3364 };
3365 
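/*
 * Capture metadata for all dumpable VMAs under the snap_mutex without
 * sleeping for memory; the contents are copied later by
 * xe_vm_snapshot_capture_delayed().
 */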
3366 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
3367 {
3368 	unsigned long num_snaps = 0, i;
3369 	struct xe_vm_snapshot *snap = NULL;
3370 	struct drm_gpuva *gpuva;
3371 
3372 	if (!vm)
3373 		return NULL;
3374 
3375 	mutex_lock(&vm->snap_mutex);
3376 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3377 		if (gpuva->flags & XE_VMA_DUMPABLE)
3378 			num_snaps++;
3379 	}
3380 
3381 	if (num_snaps)
3382 		snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
3383 	if (!snap)
3384 		goto out_unlock;
3385 
3386 	snap->num_snaps = num_snaps;
3387 	i = 0;
3388 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3389 		struct xe_vma *vma = gpuva_to_vma(gpuva);
3390 		struct xe_bo *bo = vma->gpuva.gem.obj ?
3391 			gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
3392 
3393 		if (!(gpuva->flags & XE_VMA_DUMPABLE))
3394 			continue;
3395 
3396 		snap->snap[i].ofs = xe_vma_start(vma);
3397 		snap->snap[i].len = xe_vma_size(vma);
3398 		if (bo) {
3399 			snap->snap[i].bo = xe_bo_get(bo);
3400 			snap->snap[i].bo_ofs = xe_vma_bo_offset(vma);
3401 		} else if (xe_vma_is_userptr(vma)) {
3402 			struct mm_struct *mm =
3403 				to_userptr_vma(vma)->userptr.notifier.mm;
3404 
3405 			if (mmget_not_zero(mm))
3406 				snap->snap[i].mm = mm;
3407 			else
3408 				snap->snap[i].data = ERR_PTR(-EFAULT);
3409 
3410 			snap->snap[i].bo_ofs = xe_vma_userptr(vma);
3411 		} else {
3412 			snap->snap[i].data = ERR_PTR(-ENOENT);
3413 		}
3414 		i++;
3415 	}
3416 
3417 out_unlock:
3418 	mutex_unlock(&vm->snap_mutex);
3419 	return snap;
3420 }
3421 
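/*
 * Copy the contents of each captured range, either by vmapping the BO or by
 * reading the userptr through its mm, and drop the references taken during
 * capture.
 */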
3422 void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
3423 {
3424 	for (int i = 0; i < snap->num_snaps; i++) {
3425 		struct xe_bo *bo = snap->snap[i].bo;
3426 		struct iosys_map src;
3427 		int err;
3428 
3429 		if (IS_ERR(snap->snap[i].data))
3430 			continue;
3431 
3432 		snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER);
3433 		if (!snap->snap[i].data) {
3434 			snap->snap[i].data = ERR_PTR(-ENOMEM);
3435 			goto cleanup_bo;
3436 		}
3437 
3438 		if (bo) {
3439 			dma_resv_lock(bo->ttm.base.resv, NULL);
3440 			err = ttm_bo_vmap(&bo->ttm, &src);
3441 			if (!err) {
3442 				xe_map_memcpy_from(xe_bo_device(bo),
3443 						   snap->snap[i].data,
3444 						   &src, snap->snap[i].bo_ofs,
3445 						   snap->snap[i].len);
3446 				ttm_bo_vunmap(&bo->ttm, &src);
3447 			}
3448 			dma_resv_unlock(bo->ttm.base.resv);
3449 		} else {
3450 			void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
3451 
3452 			kthread_use_mm(snap->snap[i].mm);
3453 			if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len))
3454 				err = 0;
3455 			else
3456 				err = -EFAULT;
3457 			kthread_unuse_mm(snap->snap[i].mm);
3458 
3459 			mmput(snap->snap[i].mm);
3460 			snap->snap[i].mm = NULL;
3461 		}
3462 
3463 		if (err) {
3464 			kvfree(snap->snap[i].data);
3465 			snap->snap[i].data = ERR_PTR(err);
3466 		}
3467 
3468 cleanup_bo:
3469 		xe_bo_put(bo);
3470 		snap->snap[i].bo = NULL;
3471 	}
3472 }
3473 
3474 void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
3475 {
3476 	unsigned long i, j;
3477 
3478 	for (i = 0; i < snap->num_snaps; i++) {
3479 		if (IS_ERR(snap->snap[i].data))
3480 			goto uncaptured;
3481 
3482 		drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
3483 		drm_printf(p, "[%llx].data: ",
3484 			   snap->snap[i].ofs);
3485 
3486 		for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
3487 			u32 *val = snap->snap[i].data + j;
3488 			char dumped[ASCII85_BUFSZ];
3489 
3490 			drm_puts(p, ascii85_encode(*val, dumped));
3491 		}
3492 
3493 		drm_puts(p, "\n");
3494 		continue;
3495 
3496 uncaptured:
3497 		drm_printf(p, "Unable to capture range [%llx-%llx]: %li\n",
3498 			   snap->snap[i].ofs, snap->snap[i].ofs + snap->snap[i].len - 1,
3499 			   PTR_ERR(snap->snap[i].data));
3500 	}
3501 }
3502 
3503 void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
3504 {
3505 	unsigned long i;
3506 
3507 	if (!snap)
3508 		return;
3509 
3510 	for (i = 0; i < snap->num_snaps; i++) {
3511 		if (!IS_ERR(snap->snap[i].data))
3512 			kvfree(snap->snap[i].data);
3513 		xe_bo_put(snap->snap[i].bo);
3514 		if (snap->snap[i].mm)
3515 			mmput(snap->snap[i].mm);
3516 	}
3517 	kvfree(snap);
3518 }
3519