xref: /linux/drivers/gpu/drm/xe/xe_vm.c (revision 6d9b262afe0ec1d6e0ef99321ca9d6b921310471)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_vm.h"
7 
8 #include <linux/dma-fence-array.h>
9 #include <linux/nospec.h>
10 
11 #include <drm/drm_exec.h>
12 #include <drm/drm_print.h>
13 #include <drm/ttm/ttm_execbuf_util.h>
14 #include <drm/ttm/ttm_tt.h>
15 #include <drm/xe_drm.h>
16 #include <linux/ascii85.h>
17 #include <linux/delay.h>
18 #include <linux/kthread.h>
19 #include <linux/mm.h>
20 #include <linux/swap.h>
21 
22 #include <generated/xe_wa_oob.h>
23 
24 #include "xe_assert.h"
25 #include "xe_bo.h"
26 #include "xe_device.h"
27 #include "xe_drm_client.h"
28 #include "xe_exec_queue.h"
29 #include "xe_gt.h"
30 #include "xe_gt_pagefault.h"
31 #include "xe_gt_tlb_invalidation.h"
32 #include "xe_migrate.h"
33 #include "xe_pat.h"
34 #include "xe_pm.h"
35 #include "xe_preempt_fence.h"
36 #include "xe_pt.h"
37 #include "xe_res_cursor.h"
38 #include "xe_sync.h"
39 #include "xe_trace.h"
40 #include "xe_wa.h"
41 
42 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
43 {
44 	return vm->gpuvm.r_obj;
45 }
46 
47 /**
48  * xe_vma_userptr_check_repin() - Advisory check for repin needed
49  * @uvma: The userptr vma
50  *
51  * Check if the userptr vma has been invalidated since last successful
52  * repin. The check is advisory only and the function can be called
53  * without the vm->userptr.notifier_lock held. There is no guarantee that the
54  * vma userptr will remain valid after a lockless check, so typically
55  * the call needs to be followed by a proper check under the notifier_lock.
56  *
57  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
58  */
59 int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
60 {
61 	return mmu_interval_check_retry(&uvma->userptr.notifier,
62 					uvma->userptr.notifier_seq) ?
63 		-EAGAIN : 0;
64 }
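
/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * advisory check is meant to be paired with a repin and a later recheck
 * under the notifier lock before any new binding is published, roughly:
 *
 *	if (xe_vma_userptr_check_repin(uvma))
 *		err = xe_vma_userptr_pin_pages(uvma);	// lockless hint -> repin
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	if (__xe_vm_userptr_needs_repin(vm))
 *		err = -EAGAIN;		// raced with an invalidation, restart
 *	// otherwise publish the new bindings before dropping the lock
 *	up_read(&vm->userptr.notifier_lock);
 *
 * This mirrors what the exec path and the rebind worker below do.
 */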
65 
66 int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
67 {
68 	struct xe_userptr *userptr = &uvma->userptr;
69 	struct xe_vma *vma = &uvma->vma;
70 	struct xe_vm *vm = xe_vma_vm(vma);
71 	struct xe_device *xe = vm->xe;
72 	const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
73 	struct page **pages;
74 	bool in_kthread = !current->mm;
75 	unsigned long notifier_seq;
76 	int pinned, ret, i;
77 	bool read_only = xe_vma_read_only(vma);
78 
79 	lockdep_assert_held(&vm->lock);
80 	xe_assert(xe, xe_vma_is_userptr(vma));
81 retry:
82 	if (vma->gpuva.flags & XE_VMA_DESTROYED)
83 		return 0;
84 
85 	notifier_seq = mmu_interval_read_begin(&userptr->notifier);
86 	if (notifier_seq == userptr->notifier_seq)
87 		return 0;
88 
89 	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
90 	if (!pages)
91 		return -ENOMEM;
92 
93 	if (userptr->sg) {
94 		dma_unmap_sgtable(xe->drm.dev,
95 				  userptr->sg,
96 				  read_only ? DMA_TO_DEVICE :
97 				  DMA_BIDIRECTIONAL, 0);
98 		sg_free_table(userptr->sg);
99 		userptr->sg = NULL;
100 	}
101 
102 	pinned = ret = 0;
103 	if (in_kthread) {
104 		if (!mmget_not_zero(userptr->notifier.mm)) {
105 			ret = -EFAULT;
106 			goto mm_closed;
107 		}
108 		kthread_use_mm(userptr->notifier.mm);
109 	}
110 
111 	while (pinned < num_pages) {
112 		ret = get_user_pages_fast(xe_vma_userptr(vma) +
113 					  pinned * PAGE_SIZE,
114 					  num_pages - pinned,
115 					  read_only ? 0 : FOLL_WRITE,
116 					  &pages[pinned]);
117 		if (ret < 0)
118 			break;
119 
120 		pinned += ret;
121 		ret = 0;
122 	}
123 
124 	if (in_kthread) {
125 		kthread_unuse_mm(userptr->notifier.mm);
126 		mmput(userptr->notifier.mm);
127 	}
128 mm_closed:
129 	if (ret)
130 		goto out;
131 
132 	ret = sg_alloc_table_from_pages_segment(&userptr->sgt, pages,
133 						pinned, 0,
134 						(u64)pinned << PAGE_SHIFT,
135 						xe_sg_segment_size(xe->drm.dev),
136 						GFP_KERNEL);
137 	if (ret) {
138 		userptr->sg = NULL;
139 		goto out;
140 	}
141 	userptr->sg = &userptr->sgt;
142 
143 	ret = dma_map_sgtable(xe->drm.dev, userptr->sg,
144 			      read_only ? DMA_TO_DEVICE :
145 			      DMA_BIDIRECTIONAL,
146 			      DMA_ATTR_SKIP_CPU_SYNC |
147 			      DMA_ATTR_NO_KERNEL_MAPPING);
148 	if (ret) {
149 		sg_free_table(userptr->sg);
150 		userptr->sg = NULL;
151 		goto out;
152 	}
153 
154 	for (i = 0; i < pinned; ++i) {
155 		if (!read_only) {
156 			lock_page(pages[i]);
157 			set_page_dirty(pages[i]);
158 			unlock_page(pages[i]);
159 		}
160 
161 		mark_page_accessed(pages[i]);
162 	}
163 
164 out:
165 	release_pages(pages, pinned);
166 	kvfree(pages);
167 
168 	if (!(ret < 0)) {
169 		userptr->notifier_seq = notifier_seq;
170 		if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
171 			goto retry;
172 	}
173 
174 	return ret < 0 ? ret : 0;
175 }
176 
177 static bool preempt_fences_waiting(struct xe_vm *vm)
178 {
179 	struct xe_exec_queue *q;
180 
181 	lockdep_assert_held(&vm->lock);
182 	xe_vm_assert_held(vm);
183 
184 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
185 		if (!q->compute.pfence ||
186 		    test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
187 			     &q->compute.pfence->flags)) {
188 			return true;
189 		}
190 	}
191 
192 	return false;
193 }
194 
195 static void free_preempt_fences(struct list_head *list)
196 {
197 	struct list_head *link, *next;
198 
199 	list_for_each_safe(link, next, list)
200 		xe_preempt_fence_free(to_preempt_fence_from_link(link));
201 }
202 
203 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
204 				unsigned int *count)
205 {
206 	lockdep_assert_held(&vm->lock);
207 	xe_vm_assert_held(vm);
208 
209 	if (*count >= vm->preempt.num_exec_queues)
210 		return 0;
211 
212 	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
213 		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
214 
215 		if (IS_ERR(pfence))
216 			return PTR_ERR(pfence);
217 
218 		list_move_tail(xe_preempt_fence_link(pfence), list);
219 	}
220 
221 	return 0;
222 }
223 
224 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
225 {
226 	struct xe_exec_queue *q;
227 
228 	xe_vm_assert_held(vm);
229 
230 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
231 		if (q->compute.pfence) {
232 			long timeout = dma_fence_wait(q->compute.pfence, false);
233 
234 			if (timeout < 0)
235 				return -ETIME;
236 			dma_fence_put(q->compute.pfence);
237 			q->compute.pfence = NULL;
238 		}
239 	}
240 
241 	return 0;
242 }
243 
244 static bool xe_vm_is_idle(struct xe_vm *vm)
245 {
246 	struct xe_exec_queue *q;
247 
248 	xe_vm_assert_held(vm);
249 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
250 		if (!xe_exec_queue_is_idle(q))
251 			return false;
252 	}
253 
254 	return true;
255 }
256 
257 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
258 {
259 	struct list_head *link;
260 	struct xe_exec_queue *q;
261 
262 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
263 		struct dma_fence *fence;
264 
265 		link = list->next;
266 		xe_assert(vm->xe, link != list);
267 
268 		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
269 					     q, q->compute.context,
270 					     ++q->compute.seqno);
271 		dma_fence_put(q->compute.pfence);
272 		q->compute.pfence = fence;
273 	}
274 }
275 
276 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
277 {
278 	struct xe_exec_queue *q;
279 	int err;
280 
281 	if (!vm->preempt.num_exec_queues)
282 		return 0;
283 
284 	err = xe_bo_lock(bo, true);
285 	if (err)
286 		return err;
287 
288 	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
289 	if (err)
290 		goto out_unlock;
291 
292 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
293 		if (q->compute.pfence) {
294 			dma_resv_add_fence(bo->ttm.base.resv,
295 					   q->compute.pfence,
296 					   DMA_RESV_USAGE_BOOKKEEP);
297 		}
298 
299 out_unlock:
300 	xe_bo_unlock(bo);
301 	return err;
302 }
303 
304 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
305 						struct drm_exec *exec)
306 {
307 	struct xe_exec_queue *q;
308 
309 	lockdep_assert_held(&vm->lock);
310 	xe_vm_assert_held(vm);
311 
312 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
313 		q->ops->resume(q);
314 
315 		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->compute.pfence,
316 					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
317 	}
318 }
319 
320 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
321 {
322 	struct drm_gpuvm_exec vm_exec = {
323 		.vm = &vm->gpuvm,
324 		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
325 		.num_fences = 1,
326 	};
327 	struct drm_exec *exec = &vm_exec.exec;
328 	struct dma_fence *pfence;
329 	int err;
330 	bool wait;
331 
332 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
333 
334 	down_write(&vm->lock);
335 	err = drm_gpuvm_exec_lock(&vm_exec);
336 	if (err)
337 		goto out_up_write;
338 
339 	pfence = xe_preempt_fence_create(q, q->compute.context,
340 					 ++q->compute.seqno);
341 	if (!pfence) {
342 		err = -ENOMEM;
343 		goto out_fini;
344 	}
345 
346 	list_add(&q->compute.link, &vm->preempt.exec_queues);
347 	++vm->preempt.num_exec_queues;
348 	q->compute.pfence = pfence;
349 
350 	down_read(&vm->userptr.notifier_lock);
351 
352 	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
353 				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
354 
355 	/*
356 	 * Check to see if a preemption on the VM or a userptr invalidation
357 	 * is in flight; if so, trigger this preempt fence to sync state with
358 	 * the other preempt fences on the VM.
359 	 */
360 	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
361 	if (wait)
362 		dma_fence_enable_sw_signaling(pfence);
363 
364 	up_read(&vm->userptr.notifier_lock);
365 
366 out_fini:
367 	drm_exec_fini(exec);
368 out_up_write:
369 	up_write(&vm->lock);
370 
371 	return err;
372 }
373 
374 /**
375  * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
376  * @vm: The VM.
377  * @q: The exec_queue
378  */
379 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
380 {
381 	if (!xe_vm_in_preempt_fence_mode(vm))
382 		return;
383 
384 	down_write(&vm->lock);
385 	list_del(&q->compute.link);
386 	--vm->preempt.num_exec_queues;
387 	if (q->compute.pfence) {
388 		dma_fence_enable_sw_signaling(q->compute.pfence);
389 		dma_fence_put(q->compute.pfence);
390 		q->compute.pfence = NULL;
391 	}
392 	up_write(&vm->lock);
393 }
394 
395 /**
396  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
397  * that need repinning.
398  * @vm: The VM.
399  *
400  * This function checks whether the VM has userptrs that need repinning,
401  * and provides a release-type barrier on the userptr.notifier_lock after
402  * checking.
403  *
404  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
405  */
406 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
407 {
408 	lockdep_assert_held_read(&vm->userptr.notifier_lock);
409 
410 	return (list_empty(&vm->userptr.repin_list) &&
411 		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
412 }
413 
414 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
415 
416 static void xe_vm_kill(struct xe_vm *vm)
417 {
418 	struct xe_exec_queue *q;
419 
420 	lockdep_assert_held(&vm->lock);
421 
422 	xe_vm_lock(vm, false);
423 	vm->flags |= XE_VM_FLAG_BANNED;
424 	trace_xe_vm_kill(vm);
425 
426 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
427 		q->ops->kill(q);
428 	xe_vm_unlock(vm);
429 
430 	/* TODO: Inform user the VM is banned */
431 }
432 
433 /**
434  * xe_vm_validate_should_retry() - Whether to retry after a validate error.
435  * @exec: The drm_exec object used for locking before validation.
436  * @err: The error returned from ttm_bo_validate().
437  * @end: A ktime_t cookie that should be set to 0 before first use and
438  * that should be reused on subsequent calls.
439  *
440  * With multiple active VMs, under memory pressure, it is possible that
441  * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
442  * Until ttm properly handles locking in such scenarios, the best thing the
443  * driver can do is retry with a timeout. Check if that is necessary, and
444  * if so unlock the drm_exec's objects while keeping the ticket to prepare
445  * for a rerun.
446  *
447  * Return: true if a retry after drm_exec_init() is recommended;
448  * false otherwise.
449  */
450 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
451 {
452 	ktime_t cur;
453 
454 	if (err != -ENOMEM)
455 		return false;
456 
457 	cur = ktime_get();
458 	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
459 	if (!ktime_before(cur, *end))
460 		return false;
461 
462 	msleep(20);
463 	return true;
464 }
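
/*
 * Illustrative sketch (editor's addition, not part of the driver): a
 * typical retry loop around a drm_exec transaction using the ktime cookie
 * described above. "bo" and "vm" are placeholders for whatever is being
 * validated, and error handling is trimmed:
 *
 *	struct drm_exec exec;
 *	ktime_t end = 0;
 *	int err;
 *
 *	retry:
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		err = xe_bo_validate(bo, vm, false);	// or any validating step
 *		drm_exec_retry_on_contention(&exec);
 *	}
 *	drm_exec_fini(&exec);
 *	if (err && xe_vm_validate_should_retry(&exec, err, &end))
 *		goto retry;
 *
 * The same pattern appears in preempt_rebind_work_func() below.
 */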
465 
466 static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
467 {
468 	struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
469 	struct drm_gpuva *gpuva;
470 	int ret;
471 
472 	lockdep_assert_held(&vm->lock);
473 	drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
474 		list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
475 			       &vm->rebind_list);
476 
477 	ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
478 	if (ret)
479 		return ret;
480 
481 	vm_bo->evicted = false;
482 	return 0;
483 }
484 
485 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
486 				 bool *done)
487 {
488 	int err;
489 
490 	/*
491 	 * 1 fence for each preempt fence plus a fence for each tile from a
492 	 * possible rebind
493 	 */
494 	err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, vm->preempt.num_exec_queues +
495 				   vm->xe->info.tile_count);
496 	if (err)
497 		return err;
498 
499 	if (xe_vm_is_idle(vm)) {
500 		vm->preempt.rebind_deactivated = true;
501 		*done = true;
502 		return 0;
503 	}
504 
505 	if (!preempt_fences_waiting(vm)) {
506 		*done = true;
507 		return 0;
508 	}
509 
510 	err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, vm->preempt.num_exec_queues);
511 	if (err)
512 		return err;
513 
514 	err = wait_for_existing_preempt_fences(vm);
515 	if (err)
516 		return err;
517 
518 	return drm_gpuvm_validate(&vm->gpuvm, exec);
519 }
520 
521 static void preempt_rebind_work_func(struct work_struct *w)
522 {
523 	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
524 	struct drm_exec exec;
525 	struct dma_fence *rebind_fence;
526 	unsigned int fence_count = 0;
527 	LIST_HEAD(preempt_fences);
528 	ktime_t end = 0;
529 	int err = 0;
530 	long wait;
531 	int __maybe_unused tries = 0;
532 
533 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
534 	trace_xe_vm_rebind_worker_enter(vm);
535 
536 	down_write(&vm->lock);
537 
538 	if (xe_vm_is_closed_or_banned(vm)) {
539 		up_write(&vm->lock);
540 		trace_xe_vm_rebind_worker_exit(vm);
541 		return;
542 	}
543 
544 retry:
545 	if (xe_vm_userptr_check_repin(vm)) {
546 		err = xe_vm_userptr_pin(vm);
547 		if (err)
548 			goto out_unlock_outer;
549 	}
550 
551 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
552 
553 	drm_exec_until_all_locked(&exec) {
554 		bool done = false;
555 
556 		err = xe_preempt_work_begin(&exec, vm, &done);
557 		drm_exec_retry_on_contention(&exec);
558 		if (err || done) {
559 			drm_exec_fini(&exec);
560 			if (err && xe_vm_validate_should_retry(&exec, err, &end))
561 				err = -EAGAIN;
562 
563 			goto out_unlock_outer;
564 		}
565 	}
566 
567 	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
568 	if (err)
569 		goto out_unlock;
570 
571 	rebind_fence = xe_vm_rebind(vm, true);
572 	if (IS_ERR(rebind_fence)) {
573 		err = PTR_ERR(rebind_fence);
574 		goto out_unlock;
575 	}
576 
577 	if (rebind_fence) {
578 		dma_fence_wait(rebind_fence, false);
579 		dma_fence_put(rebind_fence);
580 	}
581 
582 	/* Wait on munmap style VM unbinds */
583 	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
584 				     DMA_RESV_USAGE_KERNEL,
585 				     false, MAX_SCHEDULE_TIMEOUT);
586 	if (wait <= 0) {
587 		err = -ETIME;
588 		goto out_unlock;
589 	}
590 
591 #define retry_required(__tries, __vm) \
592 	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
593 	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
594 	__xe_vm_userptr_needs_repin(__vm))
595 
596 	down_read(&vm->userptr.notifier_lock);
597 	if (retry_required(tries, vm)) {
598 		up_read(&vm->userptr.notifier_lock);
599 		err = -EAGAIN;
600 		goto out_unlock;
601 	}
602 
603 #undef retry_required
604 
605 	spin_lock(&vm->xe->ttm.lru_lock);
606 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
607 	spin_unlock(&vm->xe->ttm.lru_lock);
608 
609 	/* Point of no return. */
610 	arm_preempt_fences(vm, &preempt_fences);
611 	resume_and_reinstall_preempt_fences(vm, &exec);
612 	up_read(&vm->userptr.notifier_lock);
613 
614 out_unlock:
615 	drm_exec_fini(&exec);
616 out_unlock_outer:
617 	if (err == -EAGAIN) {
618 		trace_xe_vm_rebind_worker_retry(vm);
619 		goto retry;
620 	}
621 
622 	if (err) {
623 		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
624 		xe_vm_kill(vm);
625 	}
626 	up_write(&vm->lock);
627 
628 	free_preempt_fences(&preempt_fences);
629 
630 	trace_xe_vm_rebind_worker_exit(vm);
631 }
632 
633 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
634 				   const struct mmu_notifier_range *range,
635 				   unsigned long cur_seq)
636 {
637 	struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
638 	struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
639 	struct xe_vma *vma = &uvma->vma;
640 	struct xe_vm *vm = xe_vma_vm(vma);
641 	struct dma_resv_iter cursor;
642 	struct dma_fence *fence;
643 	long err;
644 
645 	xe_assert(vm->xe, xe_vma_is_userptr(vma));
646 	trace_xe_vma_userptr_invalidate(vma);
647 
648 	if (!mmu_notifier_range_blockable(range))
649 		return false;
650 
651 	down_write(&vm->userptr.notifier_lock);
652 	mmu_interval_set_seq(mni, cur_seq);
653 
654 	/* No need to stop gpu access if the userptr is not yet bound. */
655 	if (!userptr->initial_bind) {
656 		up_write(&vm->userptr.notifier_lock);
657 		return true;
658 	}
659 
660 	/*
661 	 * Tell exec and rebind worker they need to repin and rebind this
662 	 * userptr.
663 	 */
664 	if (!xe_vm_in_fault_mode(vm) &&
665 	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
666 		spin_lock(&vm->userptr.invalidated_lock);
667 		list_move_tail(&userptr->invalidate_link,
668 			       &vm->userptr.invalidated);
669 		spin_unlock(&vm->userptr.invalidated_lock);
670 	}
671 
672 	up_write(&vm->userptr.notifier_lock);
673 
674 	/*
675 	 * Preempt fences turn into schedule disables; pipeline these.
676 	 * Note that even in fault mode, we need to wait for binds and
677 	 * unbinds to complete, and those are attached as BOOKKEEP fences
678 	 * to the vm.
679 	 */
680 	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
681 			    DMA_RESV_USAGE_BOOKKEEP);
682 	dma_resv_for_each_fence_unlocked(&cursor, fence)
683 		dma_fence_enable_sw_signaling(fence);
684 	dma_resv_iter_end(&cursor);
685 
686 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
687 				    DMA_RESV_USAGE_BOOKKEEP,
688 				    false, MAX_SCHEDULE_TIMEOUT);
689 	XE_WARN_ON(err <= 0);
690 
691 	if (xe_vm_in_fault_mode(vm)) {
692 		err = xe_vm_invalidate_vma(vma);
693 		XE_WARN_ON(err);
694 	}
695 
696 	trace_xe_vma_userptr_invalidate_complete(vma);
697 
698 	return true;
699 }
700 
701 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
702 	.invalidate = vma_userptr_invalidate,
703 };
704 
705 int xe_vm_userptr_pin(struct xe_vm *vm)
706 {
707 	struct xe_userptr_vma *uvma, *next;
708 	int err = 0;
709 	LIST_HEAD(tmp_evict);
710 
711 	xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
712 	lockdep_assert_held_write(&vm->lock);
713 
714 	/* Collect invalidated userptrs */
715 	spin_lock(&vm->userptr.invalidated_lock);
716 	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
717 				 userptr.invalidate_link) {
718 		list_del_init(&uvma->userptr.invalidate_link);
719 		list_move_tail(&uvma->userptr.repin_link,
720 			       &vm->userptr.repin_list);
721 	}
722 	spin_unlock(&vm->userptr.invalidated_lock);
723 
724 	/* Pin and move to temporary list */
725 	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
726 				 userptr.repin_link) {
727 		err = xe_vma_userptr_pin_pages(uvma);
728 		if (err == -EFAULT) {
729 			list_del_init(&uvma->userptr.repin_link);
730 
731 			/* Wait for pending binds */
732 			xe_vm_lock(vm, false);
733 			dma_resv_wait_timeout(xe_vm_resv(vm),
734 					      DMA_RESV_USAGE_BOOKKEEP,
735 					      false, MAX_SCHEDULE_TIMEOUT);
736 
737 			err = xe_vm_invalidate_vma(&uvma->vma);
738 			xe_vm_unlock(vm);
739 			if (err)
740 				return err;
741 		} else {
742 			if (err < 0)
743 				return err;
744 
745 			list_del_init(&uvma->userptr.repin_link);
746 			list_move_tail(&uvma->vma.combined_links.rebind,
747 				       &vm->rebind_list);
748 		}
749 	}
750 
751 	return 0;
752 }
753 
754 /**
755  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
756  * that need repinning.
757  * @vm: The VM.
758  *
759  * This function does an advisory check for whether the VM has userptrs that
760  * need repinning.
761  *
762  * Return: 0 if there are no indications of userptrs needing repinning,
763  * -EAGAIN if there are.
764  */
765 int xe_vm_userptr_check_repin(struct xe_vm *vm)
766 {
767 	return (list_empty_careful(&vm->userptr.repin_list) &&
768 		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
769 }
770 
771 static struct dma_fence *
772 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
773 	       struct xe_sync_entry *syncs, u32 num_syncs,
774 	       bool first_op, bool last_op);
775 
776 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
777 {
778 	struct dma_fence *fence = NULL;
779 	struct xe_vma *vma, *next;
780 
781 	lockdep_assert_held(&vm->lock);
782 	if (xe_vm_in_lr_mode(vm) && !rebind_worker)
783 		return NULL;
784 
785 	xe_vm_assert_held(vm);
786 	list_for_each_entry_safe(vma, next, &vm->rebind_list,
787 				 combined_links.rebind) {
788 		xe_assert(vm->xe, vma->tile_present);
789 
790 		list_del_init(&vma->combined_links.rebind);
791 		dma_fence_put(fence);
792 		if (rebind_worker)
793 			trace_xe_vma_rebind_worker(vma);
794 		else
795 			trace_xe_vma_rebind_exec(vma);
796 		fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
797 		if (IS_ERR(fence))
798 			return fence;
799 	}
800 
801 	return fence;
802 }
803 
804 static void xe_vma_free(struct xe_vma *vma)
805 {
806 	if (xe_vma_is_userptr(vma))
807 		kfree(to_userptr_vma(vma));
808 	else
809 		kfree(vma);
810 }
811 
812 #define VMA_CREATE_FLAG_READ_ONLY	BIT(0)
813 #define VMA_CREATE_FLAG_IS_NULL		BIT(1)
814 #define VMA_CREATE_FLAG_DUMPABLE	BIT(2)
815 
816 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
817 				    struct xe_bo *bo,
818 				    u64 bo_offset_or_userptr,
819 				    u64 start, u64 end,
820 				    u16 pat_index, unsigned int flags)
821 {
822 	struct xe_vma *vma;
823 	struct xe_tile *tile;
824 	u8 id;
825 	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
826 	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
827 	bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
828 
829 	xe_assert(vm->xe, start < end);
830 	xe_assert(vm->xe, end < vm->size);
831 
832 	/*
833 	 * Allocate and ensure that the xe_vma_is_userptr() return value
834 	 * matches what was allocated.
835 	 */
836 	if (!bo && !is_null) {
837 		struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
838 
839 		if (!uvma)
840 			return ERR_PTR(-ENOMEM);
841 
842 		vma = &uvma->vma;
843 	} else {
844 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
845 		if (!vma)
846 			return ERR_PTR(-ENOMEM);
847 
848 		if (is_null)
849 			vma->gpuva.flags |= DRM_GPUVA_SPARSE;
850 		if (bo)
851 			vma->gpuva.gem.obj = &bo->ttm.base;
852 	}
853 
854 	INIT_LIST_HEAD(&vma->combined_links.rebind);
855 
856 	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
857 	vma->gpuva.vm = &vm->gpuvm;
858 	vma->gpuva.va.addr = start;
859 	vma->gpuva.va.range = end - start + 1;
860 	if (read_only)
861 		vma->gpuva.flags |= XE_VMA_READ_ONLY;
862 	if (dumpable)
863 		vma->gpuva.flags |= XE_VMA_DUMPABLE;
864 
865 	for_each_tile(tile, vm->xe, id)
866 		vma->tile_mask |= 0x1 << id;
867 
868 	if (GRAPHICS_VER(vm->xe) >= 20 || vm->xe->info.platform == XE_PVC)
869 		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
870 
871 	vma->pat_index = pat_index;
872 
873 	if (bo) {
874 		struct drm_gpuvm_bo *vm_bo;
875 
876 		xe_bo_assert_held(bo);
877 
878 		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
879 		if (IS_ERR(vm_bo)) {
880 			xe_vma_free(vma);
881 			return ERR_CAST(vm_bo);
882 		}
883 
884 		drm_gpuvm_bo_extobj_add(vm_bo);
885 		drm_gem_object_get(&bo->ttm.base);
886 		vma->gpuva.gem.offset = bo_offset_or_userptr;
887 		drm_gpuva_link(&vma->gpuva, vm_bo);
888 		drm_gpuvm_bo_put(vm_bo);
889 	} else /* userptr or null */ {
890 		if (!is_null) {
891 			struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
892 			u64 size = end - start + 1;
893 			int err;
894 
895 			INIT_LIST_HEAD(&userptr->invalidate_link);
896 			INIT_LIST_HEAD(&userptr->repin_link);
897 			vma->gpuva.gem.offset = bo_offset_or_userptr;
898 
899 			err = mmu_interval_notifier_insert(&userptr->notifier,
900 							   current->mm,
901 							   xe_vma_userptr(vma), size,
902 							   &vma_userptr_notifier_ops);
903 			if (err) {
904 				xe_vma_free(vma);
905 				return ERR_PTR(err);
906 			}
907 
908 			userptr->notifier_seq = LONG_MAX;
909 		}
910 
911 		xe_vm_get(vm);
912 	}
913 
914 	return vma;
915 }
916 
917 static void xe_vma_destroy_late(struct xe_vma *vma)
918 {
919 	struct xe_vm *vm = xe_vma_vm(vma);
920 	struct xe_device *xe = vm->xe;
921 	bool read_only = xe_vma_read_only(vma);
922 
923 	if (vma->ufence) {
924 		xe_sync_ufence_put(vma->ufence);
925 		vma->ufence = NULL;
926 	}
927 
928 	if (xe_vma_is_userptr(vma)) {
929 		struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
930 
931 		if (userptr->sg) {
932 			dma_unmap_sgtable(xe->drm.dev,
933 					  userptr->sg,
934 					  read_only ? DMA_TO_DEVICE :
935 					  DMA_BIDIRECTIONAL, 0);
936 			sg_free_table(userptr->sg);
937 			userptr->sg = NULL;
938 		}
939 
940 		/*
941 		 * Since userptr pages are not pinned, we can't remove
942 		 * the notifier until we're sure the GPU is not accessing
943 		 * them anymore.
944 		 */
945 		mmu_interval_notifier_remove(&userptr->notifier);
946 		xe_vm_put(vm);
947 	} else if (xe_vma_is_null(vma)) {
948 		xe_vm_put(vm);
949 	} else {
950 		xe_bo_put(xe_vma_bo(vma));
951 	}
952 
953 	xe_vma_free(vma);
954 }
955 
956 static void vma_destroy_work_func(struct work_struct *w)
957 {
958 	struct xe_vma *vma =
959 		container_of(w, struct xe_vma, destroy_work);
960 
961 	xe_vma_destroy_late(vma);
962 }
963 
964 static void vma_destroy_cb(struct dma_fence *fence,
965 			   struct dma_fence_cb *cb)
966 {
967 	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
968 
969 	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
970 	queue_work(system_unbound_wq, &vma->destroy_work);
971 }
972 
973 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
974 {
975 	struct xe_vm *vm = xe_vma_vm(vma);
976 
977 	lockdep_assert_held_write(&vm->lock);
978 	xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
979 
980 	if (xe_vma_is_userptr(vma)) {
981 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
982 
983 		spin_lock(&vm->userptr.invalidated_lock);
984 		list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
985 		spin_unlock(&vm->userptr.invalidated_lock);
986 	} else if (!xe_vma_is_null(vma)) {
987 		xe_bo_assert_held(xe_vma_bo(vma));
988 
989 		drm_gpuva_unlink(&vma->gpuva);
990 	}
991 
992 	xe_vm_assert_held(vm);
993 	if (fence) {
994 		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
995 						 vma_destroy_cb);
996 
997 		if (ret) {
998 			XE_WARN_ON(ret != -ENOENT);
999 			xe_vma_destroy_late(vma);
1000 		}
1001 	} else {
1002 		xe_vma_destroy_late(vma);
1003 	}
1004 }
1005 
1006 /**
1007  * xe_vm_prepare_vma() - drm_exec utility to lock a vma
1008  * @exec: The drm_exec object we're currently locking for.
1009  * @vma: The vma for which we want to lock the vm resv and any attached
1010  * object's resv.
1011  * @num_shared: The number of dma-fence slots to pre-allocate in the
1012  * objects' reservation objects.
1013  *
1014  * Return: 0 on success, negative error code on error. In particular
1015  * may return -EDEADLK on WW transaction contention and -EINTR if
1016  * an interruptible wait is terminated by a signal.
1017  */
1018 int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
1019 		      unsigned int num_shared)
1020 {
1021 	struct xe_vm *vm = xe_vma_vm(vma);
1022 	struct xe_bo *bo = xe_vma_bo(vma);
1023 	int err;
1024 
1025 	XE_WARN_ON(!vm);
1026 	if (num_shared)
1027 		err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
1028 	else
1029 		err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
1030 	if (!err && bo && !bo->vm) {
1031 		if (num_shared)
1032 			err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
1033 		else
1034 			err = drm_exec_lock_obj(exec, &bo->ttm.base);
1035 	}
1036 
1037 	return err;
1038 }
1039 
1040 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1041 {
1042 	struct drm_exec exec;
1043 	int err;
1044 
1045 	drm_exec_init(&exec, 0, 0);
1046 	drm_exec_until_all_locked(&exec) {
1047 		err = xe_vm_prepare_vma(&exec, vma, 0);
1048 		drm_exec_retry_on_contention(&exec);
1049 		if (XE_WARN_ON(err))
1050 			break;
1051 	}
1052 
1053 	xe_vma_destroy(vma, NULL);
1054 
1055 	drm_exec_fini(&exec);
1056 }
1057 
1058 struct xe_vma *
1059 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1060 {
1061 	struct drm_gpuva *gpuva;
1062 
1063 	lockdep_assert_held(&vm->lock);
1064 
1065 	if (xe_vm_is_closed_or_banned(vm))
1066 		return NULL;
1067 
1068 	xe_assert(vm->xe, start + range <= vm->size);
1069 
1070 	gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1071 
1072 	return gpuva ? gpuva_to_vma(gpuva) : NULL;
1073 }
1074 
1075 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1076 {
1077 	int err;
1078 
1079 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1080 	lockdep_assert_held(&vm->lock);
1081 
1082 	mutex_lock(&vm->snap_mutex);
1083 	err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1084 	mutex_unlock(&vm->snap_mutex);
1085 	XE_WARN_ON(err);	/* Shouldn't be possible */
1086 
1087 	return err;
1088 }
1089 
1090 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1091 {
1092 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1093 	lockdep_assert_held(&vm->lock);
1094 
1095 	mutex_lock(&vm->snap_mutex);
1096 	drm_gpuva_remove(&vma->gpuva);
1097 	mutex_unlock(&vm->snap_mutex);
1098 	if (vm->usm.last_fault_vma == vma)
1099 		vm->usm.last_fault_vma = NULL;
1100 }
1101 
1102 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1103 {
1104 	struct xe_vma_op *op;
1105 
1106 	op = kzalloc(sizeof(*op), GFP_KERNEL);
1107 
1108 	if (unlikely(!op))
1109 		return NULL;
1110 
1111 	return &op->base;
1112 }
1113 
1114 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1115 
1116 static const struct drm_gpuvm_ops gpuvm_ops = {
1117 	.op_alloc = xe_vm_op_alloc,
1118 	.vm_bo_validate = xe_gpuvm_validate,
1119 	.vm_free = xe_vm_free,
1120 };
1121 
1122 static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
1123 {
1124 	u64 pte = 0;
1125 
1126 	if (pat_index & BIT(0))
1127 		pte |= XE_PPGTT_PTE_PAT0;
1128 
1129 	if (pat_index & BIT(1))
1130 		pte |= XE_PPGTT_PTE_PAT1;
1131 
1132 	return pte;
1133 }
1134 
1135 static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
1136 				u32 pt_level)
1137 {
1138 	u64 pte = 0;
1139 
1140 	if (pat_index & BIT(0))
1141 		pte |= XE_PPGTT_PTE_PAT0;
1142 
1143 	if (pat_index & BIT(1))
1144 		pte |= XE_PPGTT_PTE_PAT1;
1145 
1146 	if (pat_index & BIT(2)) {
1147 		if (pt_level)
1148 			pte |= XE_PPGTT_PDE_PDPE_PAT2;
1149 		else
1150 			pte |= XE_PPGTT_PTE_PAT2;
1151 	}
1152 
1153 	if (pat_index & BIT(3))
1154 		pte |= XELPG_PPGTT_PTE_PAT3;
1155 
1156 	if (pat_index & (BIT(4)))
1157 		pte |= XE2_PPGTT_PTE_PAT4;
1158 
1159 	return pte;
1160 }
1161 
1162 static u64 pte_encode_ps(u32 pt_level)
1163 {
1164 	XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
1165 
1166 	if (pt_level == 1)
1167 		return XE_PDE_PS_2M;
1168 	else if (pt_level == 2)
1169 		return XE_PDPE_PS_1G;
1170 
1171 	return 0;
1172 }
1173 
1174 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
1175 			      const u16 pat_index)
1176 {
1177 	struct xe_device *xe = xe_bo_device(bo);
1178 	u64 pde;
1179 
1180 	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1181 	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1182 	pde |= pde_encode_pat_index(xe, pat_index);
1183 
1184 	return pde;
1185 }
1186 
1187 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1188 			      u16 pat_index, u32 pt_level)
1189 {
1190 	struct xe_device *xe = xe_bo_device(bo);
1191 	u64 pte;
1192 
1193 	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1194 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1195 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1196 	pte |= pte_encode_ps(pt_level);
1197 
1198 	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1199 		pte |= XE_PPGTT_PTE_DM;
1200 
1201 	return pte;
1202 }
1203 
1204 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1205 			       u16 pat_index, u32 pt_level)
1206 {
1207 	struct xe_device *xe = xe_vma_vm(vma)->xe;
1208 
1209 	pte |= XE_PAGE_PRESENT;
1210 
1211 	if (likely(!xe_vma_read_only(vma)))
1212 		pte |= XE_PAGE_RW;
1213 
1214 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1215 	pte |= pte_encode_ps(pt_level);
1216 
1217 	if (unlikely(xe_vma_is_null(vma)))
1218 		pte |= XE_PTE_NULL;
1219 
1220 	return pte;
1221 }
1222 
1223 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1224 				u16 pat_index,
1225 				u32 pt_level, bool devmem, u64 flags)
1226 {
1227 	u64 pte;
1228 
1229 	/* Avoid passing random bits directly as flags */
1230 	xe_assert(xe, !(flags & ~XE_PTE_PS64));
1231 
1232 	pte = addr;
1233 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1234 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1235 	pte |= pte_encode_ps(pt_level);
1236 
1237 	if (devmem)
1238 		pte |= XE_PPGTT_PTE_DM;
1239 
1240 	pte |= flags;
1241 
1242 	return pte;
1243 }
1244 
1245 static const struct xe_pt_ops xelp_pt_ops = {
1246 	.pte_encode_bo = xelp_pte_encode_bo,
1247 	.pte_encode_vma = xelp_pte_encode_vma,
1248 	.pte_encode_addr = xelp_pte_encode_addr,
1249 	.pde_encode_bo = xelp_pde_encode_bo,
1250 };
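
/*
 * Worked example (editor's addition, not part of the driver): with the
 * helpers above, encoding a 2M device-memory PTE at page-table level 1
 * composes as
 *
 *	xelp_pte_encode_addr(xe, addr, pat_index, 1, true, 0) ==
 *		addr | XE_PAGE_PRESENT | XE_PAGE_RW |
 *		pte_encode_pat_index(xe, pat_index, 1) |
 *		XE_PDE_PS_2M | XE_PPGTT_PTE_DM;
 *
 * i.e. the address bits, present/RW flags, PAT selection bits and the
 * page-size/device-memory bits are simply OR'ed together.
 */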
1251 
1252 static void vm_destroy_work_func(struct work_struct *w);
1253 
1254 /**
1255  * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
1256  * given tile and vm.
1257  * @xe: xe device.
1258  * @tile: tile to set up for.
1259  * @vm: vm to set up for.
1260  *
1261  * Sets up a pagetable tree with one page-table per level and a single
1262  * leaf PTE. All pagetable entries point to the single page-table or,
1263  * for MAX_HUGEPTE_LEVEL, to a NULL huge PTE that returns 0 on reads and
1264  * turns writes into NOPs.
1265  *
1266  * Return: 0 on success, negative error code on error.
1267  */
1268 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1269 				struct xe_vm *vm)
1270 {
1271 	u8 id = tile->id;
1272 	int i;
1273 
1274 	for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
1275 		vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
1276 		if (IS_ERR(vm->scratch_pt[id][i]))
1277 			return PTR_ERR(vm->scratch_pt[id][i]);
1278 
1279 		xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
1280 	}
1281 
1282 	return 0;
1283 }
1284 
1285 static void xe_vm_free_scratch(struct xe_vm *vm)
1286 {
1287 	struct xe_tile *tile;
1288 	u8 id;
1289 
1290 	if (!xe_vm_has_scratch(vm))
1291 		return;
1292 
1293 	for_each_tile(tile, vm->xe, id) {
1294 		u32 i;
1295 
1296 		if (!vm->pt_root[id])
1297 			continue;
1298 
1299 		for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
1300 			if (vm->scratch_pt[id][i])
1301 				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
1302 	}
1303 }
1304 
1305 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1306 {
1307 	struct drm_gem_object *vm_resv_obj;
1308 	struct xe_vm *vm;
1309 	int err, number_tiles = 0;
1310 	struct xe_tile *tile;
1311 	u8 id;
1312 
1313 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1314 	if (!vm)
1315 		return ERR_PTR(-ENOMEM);
1316 
1317 	vm->xe = xe;
1318 
1319 	vm->size = 1ull << xe->info.va_bits;
1320 
1321 	vm->flags = flags;
1322 
1323 	init_rwsem(&vm->lock);
1324 	mutex_init(&vm->snap_mutex);
1325 
1326 	INIT_LIST_HEAD(&vm->rebind_list);
1327 
1328 	INIT_LIST_HEAD(&vm->userptr.repin_list);
1329 	INIT_LIST_HEAD(&vm->userptr.invalidated);
1330 	init_rwsem(&vm->userptr.notifier_lock);
1331 	spin_lock_init(&vm->userptr.invalidated_lock);
1332 
1333 	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1334 
1335 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
1336 	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
1337 
1338 	for_each_tile(tile, xe, id)
1339 		xe_range_fence_tree_init(&vm->rftree[id]);
1340 
1341 	vm->pt_ops = &xelp_pt_ops;
1342 
1343 	if (!(flags & XE_VM_FLAG_MIGRATION))
1344 		xe_device_mem_access_get(xe);
1345 
1346 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1347 	if (!vm_resv_obj) {
1348 		err = -ENOMEM;
1349 		goto err_no_resv;
1350 	}
1351 
1352 	drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1353 		       vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
1354 
1355 	drm_gem_object_put(vm_resv_obj);
1356 
1357 	err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
1358 	if (err)
1359 		goto err_close;
1360 
1361 	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1362 		vm->flags |= XE_VM_FLAG_64K;
1363 
1364 	for_each_tile(tile, xe, id) {
1365 		if (flags & XE_VM_FLAG_MIGRATION &&
1366 		    tile->id != XE_VM_FLAG_TILE_ID(flags))
1367 			continue;
1368 
1369 		vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1370 		if (IS_ERR(vm->pt_root[id])) {
1371 			err = PTR_ERR(vm->pt_root[id]);
1372 			vm->pt_root[id] = NULL;
1373 			goto err_unlock_close;
1374 		}
1375 	}
1376 
1377 	if (xe_vm_has_scratch(vm)) {
1378 		for_each_tile(tile, xe, id) {
1379 			if (!vm->pt_root[id])
1380 				continue;
1381 
1382 			err = xe_vm_create_scratch(xe, tile, vm);
1383 			if (err)
1384 				goto err_unlock_close;
1385 		}
1386 		vm->batch_invalidate_tlb = true;
1387 	}
1388 
1389 	if (flags & XE_VM_FLAG_LR_MODE) {
1390 		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1391 		vm->flags |= XE_VM_FLAG_LR_MODE;
1392 		vm->batch_invalidate_tlb = false;
1393 	}
1394 
1395 	/* Fill pt_root after allocating scratch tables */
1396 	for_each_tile(tile, xe, id) {
1397 		if (!vm->pt_root[id])
1398 			continue;
1399 
1400 		xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1401 	}
1402 	dma_resv_unlock(xe_vm_resv(vm));
1403 
1404 	/* Kernel migration VM shouldn't have a circular loop. */
1405 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
1406 		for_each_tile(tile, xe, id) {
1407 			struct xe_gt *gt = tile->primary_gt;
1408 			struct xe_vm *migrate_vm;
1409 			struct xe_exec_queue *q;
1410 			u32 create_flags = EXEC_QUEUE_FLAG_VM;
1411 
1412 			if (!vm->pt_root[id])
1413 				continue;
1414 
1415 			migrate_vm = xe_migrate_get_vm(tile->migrate);
1416 			q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1417 						       XE_ENGINE_CLASS_COPY,
1418 						       create_flags);
1419 			xe_vm_put(migrate_vm);
1420 			if (IS_ERR(q)) {
1421 				err = PTR_ERR(q);
1422 				goto err_close;
1423 			}
1424 			vm->q[id] = q;
1425 			number_tiles++;
1426 		}
1427 	}
1428 
1429 	if (number_tiles > 1)
1430 		vm->composite_fence_ctx = dma_fence_context_alloc(1);
1431 
1432 	mutex_lock(&xe->usm.lock);
1433 	if (flags & XE_VM_FLAG_FAULT_MODE)
1434 		xe->usm.num_vm_in_fault_mode++;
1435 	else if (!(flags & XE_VM_FLAG_MIGRATION))
1436 		xe->usm.num_vm_in_non_fault_mode++;
1437 	mutex_unlock(&xe->usm.lock);
1438 
1439 	trace_xe_vm_create(vm);
1440 
1441 	return vm;
1442 
1443 err_unlock_close:
1444 	dma_resv_unlock(xe_vm_resv(vm));
1445 err_close:
1446 	xe_vm_close_and_put(vm);
1447 	return ERR_PTR(err);
1448 
1449 err_no_resv:
1450 	mutex_destroy(&vm->snap_mutex);
1451 	for_each_tile(tile, xe, id)
1452 		xe_range_fence_tree_fini(&vm->rftree[id]);
1453 	kfree(vm);
1454 	if (!(flags & XE_VM_FLAG_MIGRATION))
1455 		xe_device_mem_access_put(xe);
1456 	return ERR_PTR(err);
1457 }
1458 
1459 static void xe_vm_close(struct xe_vm *vm)
1460 {
1461 	down_write(&vm->lock);
1462 	vm->size = 0;
1463 	up_write(&vm->lock);
1464 }
1465 
1466 void xe_vm_close_and_put(struct xe_vm *vm)
1467 {
1468 	LIST_HEAD(contested);
1469 	struct xe_device *xe = vm->xe;
1470 	struct xe_tile *tile;
1471 	struct xe_vma *vma, *next_vma;
1472 	struct drm_gpuva *gpuva, *next;
1473 	u8 id;
1474 
1475 	xe_assert(xe, !vm->preempt.num_exec_queues);
1476 
1477 	xe_vm_close(vm);
1478 	if (xe_vm_in_preempt_fence_mode(vm))
1479 		flush_work(&vm->preempt.rebind_work);
1480 
1481 	down_write(&vm->lock);
1482 	for_each_tile(tile, xe, id) {
1483 		if (vm->q[id])
1484 			xe_exec_queue_last_fence_put(vm->q[id], vm);
1485 	}
1486 	up_write(&vm->lock);
1487 
1488 	for_each_tile(tile, xe, id) {
1489 		if (vm->q[id]) {
1490 			xe_exec_queue_kill(vm->q[id]);
1491 			xe_exec_queue_put(vm->q[id]);
1492 			vm->q[id] = NULL;
1493 		}
1494 	}
1495 
1496 	down_write(&vm->lock);
1497 	xe_vm_lock(vm, false);
1498 	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1499 		vma = gpuva_to_vma(gpuva);
1500 
1501 		if (xe_vma_has_no_bo(vma)) {
1502 			down_read(&vm->userptr.notifier_lock);
1503 			vma->gpuva.flags |= XE_VMA_DESTROYED;
1504 			up_read(&vm->userptr.notifier_lock);
1505 		}
1506 
1507 		xe_vm_remove_vma(vm, vma);
1508 
1509 		/* easy case: no external BO, so destroy the VMA immediately */
1510 		if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1511 			list_del_init(&vma->combined_links.rebind);
1512 			xe_vma_destroy(vma, NULL);
1513 			continue;
1514 		}
1515 
1516 		list_move_tail(&vma->combined_links.destroy, &contested);
1517 		vma->gpuva.flags |= XE_VMA_DESTROYED;
1518 	}
1519 
1520 	/*
1521 	 * All vm operations will add shared fences to resv.
1522 	 * The only exception is eviction for a shared object,
1523 	 * but even so, the unbind when evicted would still
1524 	 * install a fence to resv. Hence it's safe to
1525 	 * destroy the pagetables immediately.
1526 	 */
1527 	xe_vm_free_scratch(vm);
1528 
1529 	for_each_tile(tile, xe, id) {
1530 		if (vm->pt_root[id]) {
1531 			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1532 			vm->pt_root[id] = NULL;
1533 		}
1534 	}
1535 	xe_vm_unlock(vm);
1536 
1537 	/*
1538 	 * The VM is now dead, so no new mappings can be added to it.
1539 	 * Since we hold a refcount to the bo, we can remove and free
1540 	 * the members safely without locking.
1541 	 */
1542 	list_for_each_entry_safe(vma, next_vma, &contested,
1543 				 combined_links.destroy) {
1544 		list_del_init(&vma->combined_links.destroy);
1545 		xe_vma_destroy_unlocked(vma);
1546 	}
1547 
1548 	up_write(&vm->lock);
1549 
1550 	mutex_lock(&xe->usm.lock);
1551 	if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1552 		xe->usm.num_vm_in_fault_mode--;
1553 	else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1554 		xe->usm.num_vm_in_non_fault_mode--;
1555 	mutex_unlock(&xe->usm.lock);
1556 
1557 	for_each_tile(tile, xe, id)
1558 		xe_range_fence_tree_fini(&vm->rftree[id]);
1559 
1560 	xe_vm_put(vm);
1561 }
1562 
1563 static void vm_destroy_work_func(struct work_struct *w)
1564 {
1565 	struct xe_vm *vm =
1566 		container_of(w, struct xe_vm, destroy_work);
1567 	struct xe_device *xe = vm->xe;
1568 	struct xe_tile *tile;
1569 	u8 id;
1570 	void *lookup;
1571 
1572 	/* xe_vm_close_and_put was not called? */
1573 	xe_assert(xe, !vm->size);
1574 
1575 	mutex_destroy(&vm->snap_mutex);
1576 
1577 	if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1578 		xe_device_mem_access_put(xe);
1579 
1580 		if (xe->info.has_asid && vm->usm.asid) {
1581 			mutex_lock(&xe->usm.lock);
1582 			lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1583 			xe_assert(xe, lookup == vm);
1584 			mutex_unlock(&xe->usm.lock);
1585 		}
1586 	}
1587 
1588 	for_each_tile(tile, xe, id)
1589 		XE_WARN_ON(vm->pt_root[id]);
1590 
1591 	trace_xe_vm_free(vm);
1592 	dma_fence_put(vm->rebind_fence);
1593 	kfree(vm);
1594 }
1595 
1596 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1597 {
1598 	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1599 
1600 	/* To destroy the VM we need to be able to sleep */
1601 	queue_work(system_unbound_wq, &vm->destroy_work);
1602 }
1603 
1604 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1605 {
1606 	struct xe_vm *vm;
1607 
1608 	mutex_lock(&xef->vm.lock);
1609 	vm = xa_load(&xef->vm.xa, id);
1610 	if (vm)
1611 		xe_vm_get(vm);
1612 	mutex_unlock(&xef->vm.lock);
1613 
1614 	return vm;
1615 }
1616 
1617 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1618 {
1619 	return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
1620 					 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
1621 }
1622 
1623 static struct xe_exec_queue *
1624 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
1625 {
1626 	return q ? q : vm->q[0];
1627 }
1628 
1629 static struct dma_fence *
1630 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1631 		 struct xe_sync_entry *syncs, u32 num_syncs,
1632 		 bool first_op, bool last_op)
1633 {
1634 	struct xe_vm *vm = xe_vma_vm(vma);
1635 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1636 	struct xe_tile *tile;
1637 	struct dma_fence *fence = NULL;
1638 	struct dma_fence **fences = NULL;
1639 	struct dma_fence_array *cf = NULL;
1640 	int cur_fence = 0, i;
1641 	int number_tiles = hweight8(vma->tile_present);
1642 	int err;
1643 	u8 id;
1644 
1645 	trace_xe_vma_unbind(vma);
1646 
1647 	if (vma->ufence) {
1648 		struct xe_user_fence * const f = vma->ufence;
1649 
1650 		if (!xe_sync_ufence_get_status(f))
1651 			return ERR_PTR(-EBUSY);
1652 
1653 		vma->ufence = NULL;
1654 		xe_sync_ufence_put(f);
1655 	}
1656 
1657 	if (number_tiles > 1) {
1658 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1659 				       GFP_KERNEL);
1660 		if (!fences)
1661 			return ERR_PTR(-ENOMEM);
1662 	}
1663 
1664 	for_each_tile(tile, vm->xe, id) {
1665 		if (!(vma->tile_present & BIT(id)))
1666 			goto next;
1667 
1668 		fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
1669 					   first_op ? syncs : NULL,
1670 					   first_op ? num_syncs : 0);
1671 		if (IS_ERR(fence)) {
1672 			err = PTR_ERR(fence);
1673 			goto err_fences;
1674 		}
1675 
1676 		if (fences)
1677 			fences[cur_fence++] = fence;
1678 
1679 next:
1680 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1681 			q = list_next_entry(q, multi_gt_list);
1682 	}
1683 
1684 	if (fences) {
1685 		cf = dma_fence_array_create(number_tiles, fences,
1686 					    vm->composite_fence_ctx,
1687 					    vm->composite_fence_seqno++,
1688 					    false);
1689 		if (!cf) {
1690 			--vm->composite_fence_seqno;
1691 			err = -ENOMEM;
1692 			goto err_fences;
1693 		}
1694 	}
1695 
1696 	fence = cf ? &cf->base : !fence ?
1697 		xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
1698 	if (last_op) {
1699 		for (i = 0; i < num_syncs; i++)
1700 			xe_sync_entry_signal(&syncs[i], NULL, fence);
1701 	}
1702 
1703 	return fence;
1704 
1705 err_fences:
1706 	if (fences) {
1707 		while (cur_fence)
1708 			dma_fence_put(fences[--cur_fence]);
1709 		kfree(fences);
1710 	}
1711 
1712 	return ERR_PTR(err);
1713 }
1714 
1715 static struct dma_fence *
1716 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1717 	       struct xe_sync_entry *syncs, u32 num_syncs,
1718 	       bool first_op, bool last_op)
1719 {
1720 	struct xe_tile *tile;
1721 	struct dma_fence *fence;
1722 	struct dma_fence **fences = NULL;
1723 	struct dma_fence_array *cf = NULL;
1724 	struct xe_vm *vm = xe_vma_vm(vma);
1725 	int cur_fence = 0, i;
1726 	int number_tiles = hweight8(vma->tile_mask);
1727 	int err;
1728 	u8 id;
1729 
1730 	trace_xe_vma_bind(vma);
1731 
1732 	if (number_tiles > 1) {
1733 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1734 				       GFP_KERNEL);
1735 		if (!fences)
1736 			return ERR_PTR(-ENOMEM);
1737 	}
1738 
1739 	for_each_tile(tile, vm->xe, id) {
1740 		if (!(vma->tile_mask & BIT(id)))
1741 			goto next;
1742 
1743 		fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
1744 					 first_op ? syncs : NULL,
1745 					 first_op ? num_syncs : 0,
1746 					 vma->tile_present & BIT(id));
1747 		if (IS_ERR(fence)) {
1748 			err = PTR_ERR(fence);
1749 			goto err_fences;
1750 		}
1751 
1752 		if (fences)
1753 			fences[cur_fence++] = fence;
1754 
1755 next:
1756 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1757 			q = list_next_entry(q, multi_gt_list);
1758 	}
1759 
1760 	if (fences) {
1761 		cf = dma_fence_array_create(number_tiles, fences,
1762 					    vm->composite_fence_ctx,
1763 					    vm->composite_fence_seqno++,
1764 					    false);
1765 		if (!cf) {
1766 			--vm->composite_fence_seqno;
1767 			err = -ENOMEM;
1768 			goto err_fences;
1769 		}
1770 	}
1771 
1772 	if (last_op) {
1773 		for (i = 0; i < num_syncs; i++)
1774 			xe_sync_entry_signal(&syncs[i], NULL,
1775 					     cf ? &cf->base : fence);
1776 	}
1777 
1778 	return cf ? &cf->base : fence;
1779 
1780 err_fences:
1781 	if (fences) {
1782 		while (cur_fence)
1783 			dma_fence_put(fences[--cur_fence]);
1784 		kfree(fences);
1785 	}
1786 
1787 	return ERR_PTR(err);
1788 }
1789 
1790 static struct xe_user_fence *
1791 find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
1792 {
1793 	unsigned int i;
1794 
1795 	for (i = 0; i < num_syncs; i++) {
1796 		struct xe_sync_entry *e = &syncs[i];
1797 
1798 		if (xe_sync_is_ufence(e))
1799 			return xe_sync_ufence_get(e);
1800 	}
1801 
1802 	return NULL;
1803 }
1804 
1805 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1806 			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1807 			u32 num_syncs, bool immediate, bool first_op,
1808 			bool last_op)
1809 {
1810 	struct dma_fence *fence;
1811 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1812 	struct xe_user_fence *ufence;
1813 
1814 	xe_vm_assert_held(vm);
1815 
1816 	ufence = find_ufence_get(syncs, num_syncs);
1817 	if (vma->ufence && ufence)
1818 		xe_sync_ufence_put(vma->ufence);
1819 
1820 	vma->ufence = ufence ?: vma->ufence;
1821 
1822 	if (immediate) {
1823 		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
1824 				       last_op);
1825 		if (IS_ERR(fence))
1826 			return PTR_ERR(fence);
1827 	} else {
1828 		int i;
1829 
1830 		xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1831 
1832 		fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1833 		if (last_op) {
1834 			for (i = 0; i < num_syncs; i++)
1835 				xe_sync_entry_signal(&syncs[i], NULL, fence);
1836 		}
1837 	}
1838 
1839 	if (last_op)
1840 		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1841 	dma_fence_put(fence);
1842 
1843 	return 0;
1844 }
1845 
1846 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
1847 		      struct xe_bo *bo, struct xe_sync_entry *syncs,
1848 		      u32 num_syncs, bool immediate, bool first_op,
1849 		      bool last_op)
1850 {
1851 	int err;
1852 
1853 	xe_vm_assert_held(vm);
1854 	xe_bo_assert_held(bo);
1855 
1856 	if (bo && immediate) {
1857 		err = xe_bo_validate(bo, vm, true);
1858 		if (err)
1859 			return err;
1860 	}
1861 
1862 	return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op,
1863 			    last_op);
1864 }
1865 
1866 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1867 			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1868 			u32 num_syncs, bool first_op, bool last_op)
1869 {
1870 	struct dma_fence *fence;
1871 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1872 
1873 	xe_vm_assert_held(vm);
1874 	xe_bo_assert_held(xe_vma_bo(vma));
1875 
1876 	fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
1877 	if (IS_ERR(fence))
1878 		return PTR_ERR(fence);
1879 
1880 	xe_vma_destroy(vma, fence);
1881 	if (last_op)
1882 		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1883 	dma_fence_put(fence);
1884 
1885 	return 0;
1886 }
1887 
1888 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
1889 				    DRM_XE_VM_CREATE_FLAG_LR_MODE | \
1890 				    DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1891 
1892 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1893 		       struct drm_file *file)
1894 {
1895 	struct xe_device *xe = to_xe_device(dev);
1896 	struct xe_file *xef = to_xe_file(file);
1897 	struct drm_xe_vm_create *args = data;
1898 	struct xe_tile *tile;
1899 	struct xe_vm *vm;
1900 	u32 id, asid;
1901 	int err;
1902 	u32 flags = 0;
1903 
1904 	if (XE_IOCTL_DBG(xe, args->extensions))
1905 		return -EINVAL;
1906 
1907 	if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1908 		args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
1909 
1910 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1911 			 !xe->info.has_usm))
1912 		return -EINVAL;
1913 
1914 	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1915 		return -EINVAL;
1916 
1917 	if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1918 		return -EINVAL;
1919 
1920 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
1921 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1922 		return -EINVAL;
1923 
1924 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
1925 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1926 		return -EINVAL;
1927 
1928 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1929 			 xe_device_in_non_fault_mode(xe)))
1930 		return -EINVAL;
1931 
1932 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
1933 			 xe_device_in_fault_mode(xe)))
1934 		return -EINVAL;
1935 
1936 	if (XE_IOCTL_DBG(xe, args->extensions))
1937 		return -EINVAL;
1938 
1939 	if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
1940 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
1941 	if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
1942 		flags |= XE_VM_FLAG_LR_MODE;
1943 	if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1944 		flags |= XE_VM_FLAG_FAULT_MODE;
1945 
1946 	vm = xe_vm_create(xe, flags);
1947 	if (IS_ERR(vm))
1948 		return PTR_ERR(vm);
1949 
1950 	mutex_lock(&xef->vm.lock);
1951 	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1952 	mutex_unlock(&xef->vm.lock);
1953 	if (err)
1954 		goto err_close_and_put;
1955 
1956 	if (xe->info.has_asid) {
1957 		mutex_lock(&xe->usm.lock);
1958 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1959 				      XA_LIMIT(1, XE_MAX_ASID - 1),
1960 				      &xe->usm.next_asid, GFP_KERNEL);
1961 		mutex_unlock(&xe->usm.lock);
1962 		if (err < 0)
1963 			goto err_free_id;
1964 
1965 		vm->usm.asid = asid;
1966 	}
1967 
1968 	args->vm_id = id;
1969 	vm->xef = xef;
1970 
1971 	/* Record BO memory for VM pagetable created against client */
1972 	for_each_tile(tile, xe, id)
1973 		if (vm->pt_root[id])
1974 			xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
1975 
1976 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1977 	/* Warning: Security issue - never enable by default */
1978 	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
1979 #endif
1980 
1981 	return 0;
1982 
1983 err_free_id:
1984 	mutex_lock(&xef->vm.lock);
1985 	xa_erase(&xef->vm.xa, id);
1986 	mutex_unlock(&xef->vm.lock);
1987 err_close_and_put:
1988 	xe_vm_close_and_put(vm);
1989 
1990 	return err;
1991 }
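
/*
 * Illustrative userspace sketch (editor's addition, not part of the
 * driver): creating a long-running VM with a scratch page via the uAPI
 * served by the ioctl above. This assumes libdrm's drmIoctl() and the
 * DRM_IOCTL_XE_VM_CREATE definition from xe_drm.h:
 *
 *	struct drm_xe_vm_create create = {
 *		.flags = DRM_XE_VM_CREATE_FLAG_LR_MODE |
 *			 DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
 *		return -errno;
 *	vm_id = create.vm_id;	// handle used by later bind/exec ioctls
 *
 * Note that SCRATCH_PAGE and FAULT_MODE are rejected together by the
 * checks above, and FAULT_MODE additionally requires LR_MODE.
 */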
1992 
1993 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1994 			struct drm_file *file)
1995 {
1996 	struct xe_device *xe = to_xe_device(dev);
1997 	struct xe_file *xef = to_xe_file(file);
1998 	struct drm_xe_vm_destroy *args = data;
1999 	struct xe_vm *vm;
2000 	int err = 0;
2001 
2002 	if (XE_IOCTL_DBG(xe, args->pad) ||
2003 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2004 		return -EINVAL;
2005 
2006 	mutex_lock(&xef->vm.lock);
2007 	vm = xa_load(&xef->vm.xa, args->vm_id);
2008 	if (XE_IOCTL_DBG(xe, !vm))
2009 		err = -ENOENT;
2010 	else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
2011 		err = -EBUSY;
2012 	else
2013 		xa_erase(&xef->vm.xa, args->vm_id);
2014 	mutex_unlock(&xef->vm.lock);
2015 
2016 	if (!err)
2017 		xe_vm_close_and_put(vm);
2018 
2019 	return err;
2020 }
2021 
2022 static const u32 region_to_mem_type[] = {
2023 	XE_PL_TT,
2024 	XE_PL_VRAM0,
2025 	XE_PL_VRAM1,
2026 };
2027 
2028 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
2029 			  struct xe_exec_queue *q, u32 region,
2030 			  struct xe_sync_entry *syncs, u32 num_syncs,
2031 			  bool first_op, bool last_op)
2032 {
2033 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
2034 	int err;
2035 
2036 	xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
2037 
2038 	if (!xe_vma_has_no_bo(vma)) {
2039 		err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
2040 		if (err)
2041 			return err;
2042 	}
2043 
2044 	if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
2045 		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
2046 				  true, first_op, last_op);
2047 	} else {
2048 		int i;
2049 
2050 		/* Nothing to do, signal fences now */
2051 		if (last_op) {
2052 			for (i = 0; i < num_syncs; i++) {
2053 				struct dma_fence *fence =
2054 					xe_exec_queue_last_fence_get(wait_exec_queue, vm);
2055 
2056 				xe_sync_entry_signal(&syncs[i], NULL, fence);
2057 				dma_fence_put(fence);
2058 			}
2059 		}
2060 
2061 		return 0;
2062 	}
2063 }
2064 
2065 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
2066 			     bool post_commit)
2067 {
2068 	down_read(&vm->userptr.notifier_lock);
2069 	vma->gpuva.flags |= XE_VMA_DESTROYED;
2070 	up_read(&vm->userptr.notifier_lock);
2071 	if (post_commit)
2072 		xe_vm_remove_vma(vm, vma);
2073 }
2074 
2075 #undef ULL
2076 #define ULL	unsigned long long
2077 
2078 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2079 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2080 {
2081 	struct xe_vma *vma;
2082 
2083 	switch (op->op) {
2084 	case DRM_GPUVA_OP_MAP:
2085 		vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2086 		       (ULL)op->map.va.addr, (ULL)op->map.va.range);
2087 		break;
2088 	case DRM_GPUVA_OP_REMAP:
2089 		vma = gpuva_to_vma(op->remap.unmap->va);
2090 		vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2091 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2092 		       op->remap.unmap->keep ? 1 : 0);
2093 		if (op->remap.prev)
2094 			vm_dbg(&xe->drm,
2095 			       "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2096 			       (ULL)op->remap.prev->va.addr,
2097 			       (ULL)op->remap.prev->va.range);
2098 		if (op->remap.next)
2099 			vm_dbg(&xe->drm,
2100 			       "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2101 			       (ULL)op->remap.next->va.addr,
2102 			       (ULL)op->remap.next->va.range);
2103 		break;
2104 	case DRM_GPUVA_OP_UNMAP:
2105 		vma = gpuva_to_vma(op->unmap.va);
2106 		vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2107 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2108 		       op->unmap.keep ? 1 : 0);
2109 		break;
2110 	case DRM_GPUVA_OP_PREFETCH:
2111 		vma = gpuva_to_vma(op->prefetch.va);
2112 		vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2113 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2114 		break;
2115 	default:
2116 		drm_warn(&xe->drm, "NOT POSSIBLE");
2117 	}
2118 }
2119 #else
2120 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2121 {
2122 }
2123 #endif
2124 
2125 /*
2126  * Create an operations list from the IOCTL arguments and set up operation fields
2127  * so the parse and commit steps are decoupled from the IOCTL arguments. This step can fail.
2128  */
2129 static struct drm_gpuva_ops *
2130 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2131 			 u64 bo_offset_or_userptr, u64 addr, u64 range,
2132 			 u32 operation, u32 flags,
2133 			 u32 prefetch_region, u16 pat_index)
2134 {
2135 	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2136 	struct drm_gpuva_ops *ops;
2137 	struct drm_gpuva_op *__op;
2138 	struct drm_gpuvm_bo *vm_bo;
2139 	int err;
2140 
2141 	lockdep_assert_held_write(&vm->lock);
2142 
2143 	vm_dbg(&vm->xe->drm,
2144 	       "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2145 	       operation, (ULL)addr, (ULL)range,
2146 	       (ULL)bo_offset_or_userptr);
2147 
2148 	switch (operation) {
2149 	case DRM_XE_VM_BIND_OP_MAP:
2150 	case DRM_XE_VM_BIND_OP_MAP_USERPTR:
2151 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2152 						  obj, bo_offset_or_userptr);
2153 		break;
2154 	case DRM_XE_VM_BIND_OP_UNMAP:
2155 		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2156 		break;
2157 	case DRM_XE_VM_BIND_OP_PREFETCH:
2158 		ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2159 		break;
2160 	case DRM_XE_VM_BIND_OP_UNMAP_ALL:
2161 		xe_assert(vm->xe, bo);
2162 
2163 		err = xe_bo_lock(bo, true);
2164 		if (err)
2165 			return ERR_PTR(err);
2166 
2167 		vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
2168 		if (IS_ERR(vm_bo)) {
2169 			xe_bo_unlock(bo);
2170 			return ERR_CAST(vm_bo);
2171 		}
2172 
2173 		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2174 		drm_gpuvm_bo_put(vm_bo);
2175 		xe_bo_unlock(bo);
2176 		break;
2177 	default:
2178 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2179 		ops = ERR_PTR(-EINVAL);
2180 	}
2181 	if (IS_ERR(ops))
2182 		return ops;
2183 
2184 	drm_gpuva_for_each_op(__op, ops) {
2185 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2186 
2187 		if (__op->op == DRM_GPUVA_OP_MAP) {
2188 			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2189 			op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
2190 			op->map.pat_index = pat_index;
2191 		} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
2192 			op->prefetch.region = prefetch_region;
2193 		}
2194 
2195 		print_op(vm->xe, __op);
2196 	}
2197 
2198 	return ops;
2199 }
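
/*
 * Illustrative sketch only, not wired into the driver: create the GPUVM
 * operations for a single hypothetical MAP bind and free them again. The
 * real caller, xe_vm_bind_ioctl() below, follows this create step with the
 * parse, commit and execute steps. The helper name and its parameters are
 * assumptions made for the example.
 */
static int __maybe_unused example_create_map_ops(struct xe_vm *vm,
						 struct xe_bo *bo,
						 u64 addr, u64 range,
						 u16 pat_index)
{
	struct drm_gpuva_ops *ops;

	lockdep_assert_held_write(&vm->lock);

	ops = vm_bind_ioctl_ops_create(vm, bo, 0 /* bo offset */, addr, range,
				       DRM_XE_VM_BIND_OP_MAP, 0 /* flags */,
				       0 /* prefetch_region */, pat_index);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	/* ... vm_bind_ioctl_ops_parse() and vm_bind_ioctl_ops_execute() ... */

	drm_gpuva_ops_free(&vm->gpuvm, ops);
	return 0;
}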
2200 
2201 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2202 			      u16 pat_index, unsigned int flags)
2203 {
2204 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2205 	struct drm_exec exec;
2206 	struct xe_vma *vma;
2207 	int err;
2208 
2209 	lockdep_assert_held_write(&vm->lock);
2210 
2211 	if (bo) {
2212 		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2213 		drm_exec_until_all_locked(&exec) {
2214 			err = 0;
2215 			if (!bo->vm) {
2216 				err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
2217 				drm_exec_retry_on_contention(&exec);
2218 			}
2219 			if (!err) {
2220 				err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2221 				drm_exec_retry_on_contention(&exec);
2222 			}
2223 			if (err) {
2224 				drm_exec_fini(&exec);
2225 				return ERR_PTR(err);
2226 			}
2227 		}
2228 	}
2229 	vma = xe_vma_create(vm, bo, op->gem.offset,
2230 			    op->va.addr, op->va.addr +
2231 			    op->va.range - 1, pat_index, flags);
2232 	if (bo)
2233 		drm_exec_fini(&exec);
2234 
2235 	if (xe_vma_is_userptr(vma)) {
2236 		err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2237 		if (err) {
2238 			prep_vma_destroy(vm, vma, false);
2239 			xe_vma_destroy_unlocked(vma);
2240 			return ERR_PTR(err);
2241 		}
2242 	} else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
2243 		err = add_preempt_fences(vm, bo);
2244 		if (err) {
2245 			prep_vma_destroy(vm, vma, false);
2246 			xe_vma_destroy_unlocked(vma);
2247 			return ERR_PTR(err);
2248 		}
2249 	}
2250 
2251 	return vma;
2252 }
2253 
2254 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2255 {
2256 	if (vma->gpuva.flags & XE_VMA_PTE_1G)
2257 		return SZ_1G;
2258 	else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
2259 		return SZ_2M;
2260 	else if (vma->gpuva.flags & XE_VMA_PTE_64K)
2261 		return SZ_64K;
2262 	else if (vma->gpuva.flags & XE_VMA_PTE_4K)
2263 		return SZ_4K;
2264 
2265 	return SZ_1G;	/* Uninitialized, use max size */
2266 }
2267 
2268 static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2269 {
2270 	switch (size) {
2271 	case SZ_1G:
2272 		vma->gpuva.flags |= XE_VMA_PTE_1G;
2273 		break;
2274 	case SZ_2M:
2275 		vma->gpuva.flags |= XE_VMA_PTE_2M;
2276 		break;
2277 	case SZ_64K:
2278 		vma->gpuva.flags |= XE_VMA_PTE_64K;
2279 		break;
2280 	case SZ_4K:
2281 		vma->gpuva.flags |= XE_VMA_PTE_4K;
2282 		break;
2283 	}
2284 }
2285 
2286 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2287 {
2288 	int err = 0;
2289 
2290 	lockdep_assert_held_write(&vm->lock);
2291 
2292 	switch (op->base.op) {
2293 	case DRM_GPUVA_OP_MAP:
2294 		err |= xe_vm_insert_vma(vm, op->map.vma);
2295 		if (!err)
2296 			op->flags |= XE_VMA_OP_COMMITTED;
2297 		break;
2298 	case DRM_GPUVA_OP_REMAP:
2299 	{
2300 		u8 tile_present =
2301 			gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2302 
2303 		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2304 				 true);
2305 		op->flags |= XE_VMA_OP_COMMITTED;
2306 
2307 		if (op->remap.prev) {
2308 			err |= xe_vm_insert_vma(vm, op->remap.prev);
2309 			if (!err)
2310 				op->flags |= XE_VMA_OP_PREV_COMMITTED;
2311 			if (!err && op->remap.skip_prev) {
2312 				op->remap.prev->tile_present =
2313 					tile_present;
2314 				op->remap.prev = NULL;
2315 			}
2316 		}
2317 		if (op->remap.next) {
2318 			err |= xe_vm_insert_vma(vm, op->remap.next);
2319 			if (!err)
2320 				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2321 			if (!err && op->remap.skip_next) {
2322 				op->remap.next->tile_present =
2323 					tile_present;
2324 				op->remap.next = NULL;
2325 			}
2326 		}
2327 
2328 		/* Adjust for partial unbind after removing VMA from VM */
2329 		if (!err) {
2330 			op->base.remap.unmap->va->va.addr = op->remap.start;
2331 			op->base.remap.unmap->va->va.range = op->remap.range;
2332 		}
2333 		break;
2334 	}
2335 	case DRM_GPUVA_OP_UNMAP:
2336 		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2337 		op->flags |= XE_VMA_OP_COMMITTED;
2338 		break;
2339 	case DRM_GPUVA_OP_PREFETCH:
2340 		op->flags |= XE_VMA_OP_COMMITTED;
2341 		break;
2342 	default:
2343 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2344 	}
2345 
2346 	return err;
2347 }
2348 
2349 
2350 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2351 				   struct drm_gpuva_ops *ops,
2352 				   struct xe_sync_entry *syncs, u32 num_syncs,
2353 				   struct list_head *ops_list, bool last)
2354 {
2355 	struct xe_device *xe = vm->xe;
2356 	struct xe_vma_op *last_op = NULL;
2357 	struct drm_gpuva_op *__op;
2358 	int err = 0;
2359 
2360 	lockdep_assert_held_write(&vm->lock);
2361 
2362 	drm_gpuva_for_each_op(__op, ops) {
2363 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2364 		struct xe_vma *vma;
2365 		bool first = list_empty(ops_list);
2366 		unsigned int flags = 0;
2367 
2368 		INIT_LIST_HEAD(&op->link);
2369 		list_add_tail(&op->link, ops_list);
2370 
2371 		if (first) {
2372 			op->flags |= XE_VMA_OP_FIRST;
2373 			op->num_syncs = num_syncs;
2374 			op->syncs = syncs;
2375 		}
2376 
2377 		op->q = q;
2378 
2379 		switch (op->base.op) {
2380 		case DRM_GPUVA_OP_MAP:
2381 		{
2382 			flags |= op->map.is_null ?
2383 				VMA_CREATE_FLAG_IS_NULL : 0;
2384 			flags |= op->map.dumpable ?
2385 				VMA_CREATE_FLAG_DUMPABLE : 0;
2386 
2387 			vma = new_vma(vm, &op->base.map, op->map.pat_index,
2388 				      flags);
2389 			if (IS_ERR(vma))
2390 				return PTR_ERR(vma);
2391 
2392 			op->map.vma = vma;
2393 			break;
2394 		}
2395 		case DRM_GPUVA_OP_REMAP:
2396 		{
2397 			struct xe_vma *old =
2398 				gpuva_to_vma(op->base.remap.unmap->va);
2399 
2400 			op->remap.start = xe_vma_start(old);
2401 			op->remap.range = xe_vma_size(old);
2402 
2403 			if (op->base.remap.prev) {
2404 				flags |= op->base.remap.unmap->va->flags &
2405 					XE_VMA_READ_ONLY ?
2406 					VMA_CREATE_FLAG_READ_ONLY : 0;
2407 				flags |= op->base.remap.unmap->va->flags &
2408 					DRM_GPUVA_SPARSE ?
2409 					VMA_CREATE_FLAG_IS_NULL : 0;
2410 				flags |= op->base.remap.unmap->va->flags &
2411 					XE_VMA_DUMPABLE ?
2412 					VMA_CREATE_FLAG_DUMPABLE : 0;
2413 
2414 				vma = new_vma(vm, op->base.remap.prev,
2415 					      old->pat_index, flags);
2416 				if (IS_ERR(vma))
2417 					return PTR_ERR(vma);
2418 
2419 				op->remap.prev = vma;
2420 
2421 				/*
2422 				 * Userptr creates a new SG mapping so
2423 				 * we must also rebind.
2424 				 */
2425 				op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2426 					IS_ALIGNED(xe_vma_end(vma),
2427 						   xe_vma_max_pte_size(old));
2428 				if (op->remap.skip_prev) {
2429 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2430 					op->remap.range -=
2431 						xe_vma_end(vma) -
2432 						xe_vma_start(old);
2433 					op->remap.start = xe_vma_end(vma);
2434 					vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
2435 					       (ULL)op->remap.start,
2436 					       (ULL)op->remap.range);
2437 				}
2438 			}
2439 
2440 			if (op->base.remap.next) {
2441 				flags |= op->base.remap.unmap->va->flags &
2442 					XE_VMA_READ_ONLY ?
2443 					VMA_CREATE_FLAG_READ_ONLY : 0;
2444 				flags |= op->base.remap.unmap->va->flags &
2445 					DRM_GPUVA_SPARSE ?
2446 					VMA_CREATE_FLAG_IS_NULL : 0;
2447 				flags |= op->base.remap.unmap->va->flags &
2448 					XE_VMA_DUMPABLE ?
2449 					VMA_CREATE_FLAG_DUMPABLE : 0;
2450 
2451 				vma = new_vma(vm, op->base.remap.next,
2452 					      old->pat_index, flags);
2453 				if (IS_ERR(vma))
2454 					return PTR_ERR(vma);
2455 
2456 				op->remap.next = vma;
2457 
2458 				/*
2459 				 * Userptr creates a new SG mapping so
2460 				 * we must also rebind.
2461 				 */
2462 				op->remap.skip_next = !xe_vma_is_userptr(old) &&
2463 					IS_ALIGNED(xe_vma_start(vma),
2464 						   xe_vma_max_pte_size(old));
2465 				if (op->remap.skip_next) {
2466 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2467 					op->remap.range -=
2468 						xe_vma_end(old) -
2469 						xe_vma_start(vma);
2470 					vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
2471 					       (ULL)op->remap.start,
2472 					       (ULL)op->remap.range);
2473 				}
2474 			}
2475 			break;
2476 		}
2477 		case DRM_GPUVA_OP_UNMAP:
2478 		case DRM_GPUVA_OP_PREFETCH:
2479 			/* Nothing to do */
2480 			break;
2481 		default:
2482 			drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2483 		}
2484 
2485 		last_op = op;
2486 
2487 		err = xe_vma_op_commit(vm, op);
2488 		if (err)
2489 			return err;
2490 	}
2491 
2492 	/* FIXME: Unhandled corner case */
2493 	XE_WARN_ON(!last_op && last && !list_empty(ops_list));
2494 
2495 	if (!last_op)
2496 		return 0;
2497 
2498 	last_op->ops = ops;
2499 	if (last) {
2500 		last_op->flags |= XE_VMA_OP_LAST;
2501 		last_op->num_syncs = num_syncs;
2502 		last_op->syncs = syncs;
2503 	}
2504 
2505 	return 0;
2506 }
2507 
2508 static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
2509 		      struct xe_vma *vma, struct xe_vma_op *op)
2510 {
2511 	int err;
2512 
2513 	lockdep_assert_held_write(&vm->lock);
2514 
2515 	err = xe_vm_prepare_vma(exec, vma, 1);
2516 	if (err)
2517 		return err;
2518 
2519 	xe_vm_assert_held(vm);
2520 	xe_bo_assert_held(xe_vma_bo(vma));
2521 
2522 	switch (op->base.op) {
2523 	case DRM_GPUVA_OP_MAP:
2524 		err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2525 				 op->syncs, op->num_syncs,
2526 				 !xe_vm_in_fault_mode(vm),
2527 				 op->flags & XE_VMA_OP_FIRST,
2528 				 op->flags & XE_VMA_OP_LAST);
2529 		break;
2530 	case DRM_GPUVA_OP_REMAP:
2531 	{
2532 		bool prev = !!op->remap.prev;
2533 		bool next = !!op->remap.next;
2534 
2535 		if (!op->remap.unmap_done) {
2536 			if (prev || next)
2537 				vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2538 			err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2539 					   op->num_syncs,
2540 					   op->flags & XE_VMA_OP_FIRST,
2541 					   op->flags & XE_VMA_OP_LAST &&
2542 					   !prev && !next);
2543 			if (err)
2544 				break;
2545 			op->remap.unmap_done = true;
2546 		}
2547 
2548 		if (prev) {
2549 			op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2550 			err = xe_vm_bind(vm, op->remap.prev, op->q,
2551 					 xe_vma_bo(op->remap.prev), op->syncs,
2552 					 op->num_syncs, true, false,
2553 					 op->flags & XE_VMA_OP_LAST && !next);
2554 			op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2555 			if (err)
2556 				break;
2557 			op->remap.prev = NULL;
2558 		}
2559 
2560 		if (next) {
2561 			op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2562 			err = xe_vm_bind(vm, op->remap.next, op->q,
2563 					 xe_vma_bo(op->remap.next),
2564 					 op->syncs, op->num_syncs,
2565 					 true, false,
2566 					 op->flags & XE_VMA_OP_LAST);
2567 			op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2568 			if (err)
2569 				break;
2570 			op->remap.next = NULL;
2571 		}
2572 
2573 		break;
2574 	}
2575 	case DRM_GPUVA_OP_UNMAP:
2576 		err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2577 				   op->num_syncs, op->flags & XE_VMA_OP_FIRST,
2578 				   op->flags & XE_VMA_OP_LAST);
2579 		break;
2580 	case DRM_GPUVA_OP_PREFETCH:
2581 		err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
2582 				     op->syncs, op->num_syncs,
2583 				     op->flags & XE_VMA_OP_FIRST,
2584 				     op->flags & XE_VMA_OP_LAST);
2585 		break;
2586 	default:
2587 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2588 	}
2589 
2590 	if (err)
2591 		trace_xe_vma_fail(vma);
2592 
2593 	return err;
2594 }
2595 
2596 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2597 			       struct xe_vma_op *op)
2598 {
2599 	struct drm_exec exec;
2600 	int err;
2601 
2602 retry_userptr:
2603 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2604 	drm_exec_until_all_locked(&exec) {
2605 		err = op_execute(&exec, vm, vma, op);
2606 		drm_exec_retry_on_contention(&exec);
2607 		if (err)
2608 			break;
2609 	}
2610 	drm_exec_fini(&exec);
2611 
2612 	if (err == -EAGAIN) {
2613 		lockdep_assert_held_write(&vm->lock);
2614 
2615 		if (op->base.op == DRM_GPUVA_OP_REMAP) {
2616 			if (!op->remap.unmap_done)
2617 				vma = gpuva_to_vma(op->base.remap.unmap->va);
2618 			else if (op->remap.prev)
2619 				vma = op->remap.prev;
2620 			else
2621 				vma = op->remap.next;
2622 		}
2623 
2624 		if (xe_vma_is_userptr(vma)) {
2625 			err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2626 			if (!err)
2627 				goto retry_userptr;
2628 
2629 			trace_xe_vma_fail(vma);
2630 		}
2631 	}
2632 
2633 	return err;
2634 }
2635 
2636 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2637 {
2638 	int ret = 0;
2639 
2640 	lockdep_assert_held_write(&vm->lock);
2641 
2642 	switch (op->base.op) {
2643 	case DRM_GPUVA_OP_MAP:
2644 		ret = __xe_vma_op_execute(vm, op->map.vma, op);
2645 		break;
2646 	case DRM_GPUVA_OP_REMAP:
2647 	{
2648 		struct xe_vma *vma;
2649 
2650 		if (!op->remap.unmap_done)
2651 			vma = gpuva_to_vma(op->base.remap.unmap->va);
2652 		else if (op->remap.prev)
2653 			vma = op->remap.prev;
2654 		else
2655 			vma = op->remap.next;
2656 
2657 		ret = __xe_vma_op_execute(vm, vma, op);
2658 		break;
2659 	}
2660 	case DRM_GPUVA_OP_UNMAP:
2661 		ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2662 					  op);
2663 		break;
2664 	case DRM_GPUVA_OP_PREFETCH:
2665 		ret = __xe_vma_op_execute(vm,
2666 					  gpuva_to_vma(op->base.prefetch.va),
2667 					  op);
2668 		break;
2669 	default:
2670 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2671 	}
2672 
2673 	return ret;
2674 }
2675 
2676 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2677 {
2678 	bool last = op->flags & XE_VMA_OP_LAST;
2679 
2680 	if (last) {
2681 		while (op->num_syncs--)
2682 			xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2683 		kfree(op->syncs);
2684 		if (op->q)
2685 			xe_exec_queue_put(op->q);
2686 	}
2687 	if (!list_empty(&op->link))
2688 		list_del(&op->link);
2689 	if (op->ops)
2690 		drm_gpuva_ops_free(&vm->gpuvm, op->ops);
2691 	if (last)
2692 		xe_vm_put(vm);
2693 }
2694 
2695 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2696 			     bool post_commit, bool prev_post_commit,
2697 			     bool next_post_commit)
2698 {
2699 	lockdep_assert_held_write(&vm->lock);
2700 
2701 	switch (op->base.op) {
2702 	case DRM_GPUVA_OP_MAP:
2703 		if (op->map.vma) {
2704 			prep_vma_destroy(vm, op->map.vma, post_commit);
2705 			xe_vma_destroy_unlocked(op->map.vma);
2706 		}
2707 		break;
2708 	case DRM_GPUVA_OP_UNMAP:
2709 	{
2710 		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2711 
2712 		if (vma) {
2713 			down_read(&vm->userptr.notifier_lock);
2714 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2715 			up_read(&vm->userptr.notifier_lock);
2716 			if (post_commit)
2717 				xe_vm_insert_vma(vm, vma);
2718 		}
2719 		break;
2720 	}
2721 	case DRM_GPUVA_OP_REMAP:
2722 	{
2723 		struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2724 
2725 		if (op->remap.prev) {
2726 			prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2727 			xe_vma_destroy_unlocked(op->remap.prev);
2728 		}
2729 		if (op->remap.next) {
2730 			prep_vma_destroy(vm, op->remap.next, next_post_commit);
2731 			xe_vma_destroy_unlocked(op->remap.next);
2732 		}
2733 		if (vma) {
2734 			down_read(&vm->userptr.notifier_lock);
2735 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2736 			up_read(&vm->userptr.notifier_lock);
2737 			if (post_commit)
2738 				xe_vm_insert_vma(vm, vma);
2739 		}
2740 		break;
2741 	}
2742 	case DRM_GPUVA_OP_PREFETCH:
2743 		/* Nothing to do */
2744 		break;
2745 	default:
2746 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2747 	}
2748 }
2749 
2750 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2751 				     struct drm_gpuva_ops **ops,
2752 				     int num_ops_list)
2753 {
2754 	int i;
2755 
2756 	for (i = num_ops_list - 1; i >= 0; --i) {
2757 		struct drm_gpuva_ops *__ops = ops[i];
2758 		struct drm_gpuva_op *__op;
2759 
2760 		if (!__ops)
2761 			continue;
2762 
2763 		drm_gpuva_for_each_op_reverse(__op, __ops) {
2764 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2765 
2766 			xe_vma_op_unwind(vm, op,
2767 					 op->flags & XE_VMA_OP_COMMITTED,
2768 					 op->flags & XE_VMA_OP_PREV_COMMITTED,
2769 					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2770 		}
2771 
2772 		drm_gpuva_ops_free(&vm->gpuvm, __ops);
2773 	}
2774 }
2775 
2776 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2777 				     struct list_head *ops_list)
2778 {
2779 	struct xe_vma_op *op, *next;
2780 	int err;
2781 
2782 	lockdep_assert_held_write(&vm->lock);
2783 
2784 	list_for_each_entry_safe(op, next, ops_list, link) {
2785 		err = xe_vma_op_execute(vm, op);
2786 		if (err) {
2787 			drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
2788 				 op->base.op, err);
2789 			/*
2790 			 * FIXME: Killing VM rather than proper error handling
2791 			 */
2792 			xe_vm_kill(vm);
2793 			return -ENOSPC;
2794 		}
2795 		xe_vma_op_cleanup(vm, op);
2796 	}
2797 
2798 	return 0;
2799 }
2800 
2801 #define SUPPORTED_FLAGS	(DRM_XE_VM_BIND_FLAG_NULL | \
2802 	 DRM_XE_VM_BIND_FLAG_DUMPABLE)
2803 #define XE_64K_PAGE_MASK 0xffffull
2804 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
2805 
2806 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2807 				    struct drm_xe_vm_bind *args,
2808 				    struct drm_xe_vm_bind_op **bind_ops)
2809 {
2810 	int err;
2811 	int i;
2812 
2813 	if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
2814 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2815 		return -EINVAL;
2816 
2817 	if (XE_IOCTL_DBG(xe, args->extensions))
2818 		return -EINVAL;
2819 
2820 	if (args->num_binds > 1) {
2821 		u64 __user *bind_user =
2822 			u64_to_user_ptr(args->vector_of_binds);
2823 
2824 		*bind_ops = kvmalloc_array(args->num_binds,
2825 					   sizeof(struct drm_xe_vm_bind_op),
2826 					   GFP_KERNEL | __GFP_ACCOUNT);
2827 		if (!*bind_ops)
2828 			return -ENOMEM;
2829 
2830 		err = __copy_from_user(*bind_ops, bind_user,
2831 				       sizeof(struct drm_xe_vm_bind_op) *
2832 				       args->num_binds);
2833 		if (XE_IOCTL_DBG(xe, err)) {
2834 			err = -EFAULT;
2835 			goto free_bind_ops;
2836 		}
2837 	} else {
2838 		*bind_ops = &args->bind;
2839 	}
2840 
2841 	for (i = 0; i < args->num_binds; ++i) {
2842 		u64 range = (*bind_ops)[i].range;
2843 		u64 addr = (*bind_ops)[i].addr;
2844 		u32 op = (*bind_ops)[i].op;
2845 		u32 flags = (*bind_ops)[i].flags;
2846 		u32 obj = (*bind_ops)[i].obj;
2847 		u64 obj_offset = (*bind_ops)[i].obj_offset;
2848 		u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
2849 		bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2850 		u16 pat_index = (*bind_ops)[i].pat_index;
2851 		u16 coh_mode;
2852 
2853 		if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
2854 			err = -EINVAL;
2855 			goto free_bind_ops;
2856 		}
2857 
2858 		pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
2859 		(*bind_ops)[i].pat_index = pat_index;
2860 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2861 		if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
2862 			err = -EINVAL;
2863 			goto free_bind_ops;
2864 		}
2865 
2866 		if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
2867 			err = -EINVAL;
2868 			goto free_bind_ops;
2869 		}
2870 
2871 		if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
2872 		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
2873 		    XE_IOCTL_DBG(xe, obj && is_null) ||
2874 		    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
2875 		    XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
2876 				 is_null) ||
2877 		    XE_IOCTL_DBG(xe, !obj &&
2878 				 op == DRM_XE_VM_BIND_OP_MAP &&
2879 				 !is_null) ||
2880 		    XE_IOCTL_DBG(xe, !obj &&
2881 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2882 		    XE_IOCTL_DBG(xe, addr &&
2883 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2884 		    XE_IOCTL_DBG(xe, range &&
2885 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2886 		    XE_IOCTL_DBG(xe, obj &&
2887 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2888 		    XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2889 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2890 		    XE_IOCTL_DBG(xe, obj &&
2891 				 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
2892 		    XE_IOCTL_DBG(xe, prefetch_region &&
2893 				 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
2894 		    XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
2895 				       xe->info.mem_region_mask)) ||
2896 		    XE_IOCTL_DBG(xe, obj &&
2897 				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
2898 			err = -EINVAL;
2899 			goto free_bind_ops;
2900 		}
2901 
2902 		if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
2903 		    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
2904 		    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
2905 		    XE_IOCTL_DBG(xe, !range &&
2906 				 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
2907 			err = -EINVAL;
2908 			goto free_bind_ops;
2909 		}
2910 	}
2911 
2912 	return 0;
2913 
2914 free_bind_ops:
2915 	if (args->num_binds > 1)
2916 		kvfree(*bind_ops);
2917 	return err;
2918 }
2919 
2920 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
2921 				       struct xe_exec_queue *q,
2922 				       struct xe_sync_entry *syncs,
2923 				       int num_syncs)
2924 {
2925 	struct dma_fence *fence;
2926 	int i, err = 0;
2927 
2928 	fence = xe_sync_in_fence_get(syncs, num_syncs,
2929 				     to_wait_exec_queue(vm, q), vm);
2930 	if (IS_ERR(fence))
2931 		return PTR_ERR(fence);
2932 
2933 	for (i = 0; i < num_syncs; i++)
2934 		xe_sync_entry_signal(&syncs[i], NULL, fence);
2935 
2936 	xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
2937 				     fence);
2938 	dma_fence_put(fence);
2939 
2940 	return err;
2941 }
2942 
2943 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2944 {
2945 	struct xe_device *xe = to_xe_device(dev);
2946 	struct xe_file *xef = to_xe_file(file);
2947 	struct drm_xe_vm_bind *args = data;
2948 	struct drm_xe_sync __user *syncs_user;
2949 	struct xe_bo **bos = NULL;
2950 	struct drm_gpuva_ops **ops = NULL;
2951 	struct xe_vm *vm;
2952 	struct xe_exec_queue *q = NULL;
2953 	u32 num_syncs, num_ufence = 0;
2954 	struct xe_sync_entry *syncs = NULL;
2955 	struct drm_xe_vm_bind_op *bind_ops;
2956 	LIST_HEAD(ops_list);
2957 	int err;
2958 	int i;
2959 
2960 	err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
2961 	if (err)
2962 		return err;
2963 
2964 	if (args->exec_queue_id) {
2965 		q = xe_exec_queue_lookup(xef, args->exec_queue_id);
2966 		if (XE_IOCTL_DBG(xe, !q)) {
2967 			err = -ENOENT;
2968 			goto free_objs;
2969 		}
2970 
2971 		if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
2972 			err = -EINVAL;
2973 			goto put_exec_queue;
2974 		}
2975 	}
2976 
2977 	vm = xe_vm_lookup(xef, args->vm_id);
2978 	if (XE_IOCTL_DBG(xe, !vm)) {
2979 		err = -EINVAL;
2980 		goto put_exec_queue;
2981 	}
2982 
2983 	err = down_write_killable(&vm->lock);
2984 	if (err)
2985 		goto put_vm;
2986 
2987 	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
2988 		err = -ENOENT;
2989 		goto release_vm_lock;
2990 	}
2991 
2992 	for (i = 0; i < args->num_binds; ++i) {
2993 		u64 range = bind_ops[i].range;
2994 		u64 addr = bind_ops[i].addr;
2995 
2996 		if (XE_IOCTL_DBG(xe, range > vm->size) ||
2997 		    XE_IOCTL_DBG(xe, addr > vm->size - range)) {
2998 			err = -EINVAL;
2999 			goto release_vm_lock;
3000 		}
3001 	}
3002 
3003 	if (args->num_binds) {
3004 		bos = kvcalloc(args->num_binds, sizeof(*bos),
3005 			       GFP_KERNEL | __GFP_ACCOUNT);
3006 		if (!bos) {
3007 			err = -ENOMEM;
3008 			goto release_vm_lock;
3009 		}
3010 
3011 		ops = kvcalloc(args->num_binds, sizeof(*ops),
3012 			       GFP_KERNEL | __GFP_ACCOUNT);
3013 		if (!ops) {
3014 			err = -ENOMEM;
3015 			goto release_vm_lock;
3016 		}
3017 	}
3018 
3019 	for (i = 0; i < args->num_binds; ++i) {
3020 		struct drm_gem_object *gem_obj;
3021 		u64 range = bind_ops[i].range;
3022 		u64 addr = bind_ops[i].addr;
3023 		u32 obj = bind_ops[i].obj;
3024 		u64 obj_offset = bind_ops[i].obj_offset;
3025 		u16 pat_index = bind_ops[i].pat_index;
3026 		u16 coh_mode;
3027 
3028 		if (!obj)
3029 			continue;
3030 
3031 		gem_obj = drm_gem_object_lookup(file, obj);
3032 		if (XE_IOCTL_DBG(xe, !gem_obj)) {
3033 			err = -ENOENT;
3034 			goto put_obj;
3035 		}
3036 		bos[i] = gem_to_xe_bo(gem_obj);
3037 
3038 		if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
3039 		    XE_IOCTL_DBG(xe, obj_offset >
3040 				 bos[i]->size - range)) {
3041 			err = -EINVAL;
3042 			goto put_obj;
3043 		}
3044 
3045 		if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3046 			if (XE_IOCTL_DBG(xe, obj_offset &
3047 					 XE_64K_PAGE_MASK) ||
3048 			    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3049 			    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3050 				err = -EINVAL;
3051 				goto put_obj;
3052 			}
3053 		}
3054 
3055 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
3056 		if (bos[i]->cpu_caching) {
3057 			if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
3058 					 bos[i]->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
3059 				err = -EINVAL;
3060 				goto put_obj;
3061 			}
3062 		} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
3063 			/*
3064 			 * Imported dma-buf from a different device should
3065 			 * require 1-way or 2-way coherency since we don't know
3066 			 * how it was mapped on the CPU. Just assume it is
3067 			 * potentially cached on the CPU side.
3068 			 */
3069 			err = -EINVAL;
3070 			goto put_obj;
3071 		}
3072 	}
3073 
3074 	if (args->num_syncs) {
3075 		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3076 		if (!syncs) {
3077 			err = -ENOMEM;
3078 			goto put_obj;
3079 		}
3080 	}
3081 
3082 	syncs_user = u64_to_user_ptr(args->syncs);
3083 	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3084 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3085 					  &syncs_user[num_syncs],
3086 					  (xe_vm_in_lr_mode(vm) ?
3087 					   SYNC_PARSE_FLAG_LR_MODE : 0) |
3088 					  (!args->num_binds ?
3089 					   SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
3090 		if (err)
3091 			goto free_syncs;
3092 
3093 		if (xe_sync_is_ufence(&syncs[num_syncs]))
3094 			num_ufence++;
3095 	}
3096 
3097 	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
3098 		err = -EINVAL;
3099 		goto free_syncs;
3100 	}
3101 
3102 	if (!args->num_binds) {
3103 		err = -ENODATA;
3104 		goto free_syncs;
3105 	}
3106 
3107 	for (i = 0; i < args->num_binds; ++i) {
3108 		u64 range = bind_ops[i].range;
3109 		u64 addr = bind_ops[i].addr;
3110 		u32 op = bind_ops[i].op;
3111 		u32 flags = bind_ops[i].flags;
3112 		u64 obj_offset = bind_ops[i].obj_offset;
3113 		u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
3114 		u16 pat_index = bind_ops[i].pat_index;
3115 
3116 		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3117 						  addr, range, op, flags,
3118 						  prefetch_region, pat_index);
3119 		if (IS_ERR(ops[i])) {
3120 			err = PTR_ERR(ops[i]);
3121 			ops[i] = NULL;
3122 			goto unwind_ops;
3123 		}
3124 
3125 		err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3126 					      &ops_list,
3127 					      i == args->num_binds - 1);
3128 		if (err)
3129 			goto unwind_ops;
3130 	}
3131 
3132 	/* Nothing to do */
3133 	if (list_empty(&ops_list)) {
3134 		err = -ENODATA;
3135 		goto unwind_ops;
3136 	}
3137 
3138 	xe_vm_get(vm);
3139 	if (q)
3140 		xe_exec_queue_get(q);
3141 
3142 	err = vm_bind_ioctl_ops_execute(vm, &ops_list);
3143 
3144 	up_write(&vm->lock);
3145 
3146 	if (q)
3147 		xe_exec_queue_put(q);
3148 	xe_vm_put(vm);
3149 
3150 	for (i = 0; bos && i < args->num_binds; ++i)
3151 		xe_bo_put(bos[i]);
3152 
3153 	kvfree(bos);
3154 	kvfree(ops);
3155 	if (args->num_binds > 1)
3156 		kvfree(bind_ops);
3157 
3158 	return err;
3159 
3160 unwind_ops:
3161 	vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3162 free_syncs:
3163 	if (err == -ENODATA)
3164 		err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3165 	while (num_syncs--)
3166 		xe_sync_entry_cleanup(&syncs[num_syncs]);
3167 
3168 	kfree(syncs);
3169 put_obj:
3170 	for (i = 0; i < args->num_binds; ++i)
3171 		xe_bo_put(bos[i]);
3172 release_vm_lock:
3173 	up_write(&vm->lock);
3174 put_vm:
3175 	xe_vm_put(vm);
3176 put_exec_queue:
3177 	if (q)
3178 		xe_exec_queue_put(q);
3179 free_objs:
3180 	kvfree(bos);
3181 	kvfree(ops);
3182 	if (args->num_binds > 1)
3183 		kvfree(bind_ops);
3184 	return err;
3185 }
3186 
3187 /**
3188  * xe_vm_lock() - Lock the vm's dma_resv object
3189  * @vm: The struct xe_vm whose lock is to be locked
3190  * @intr: Whether to wait interruptibly for a contended lock
3191  *
3192  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3193  * contended lock was interrupted. If @intr is false, the function
3194  * always returns 0.
3195  */
3196 int xe_vm_lock(struct xe_vm *vm, bool intr)
3197 {
3198 	if (intr)
3199 		return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3200 
3201 	return dma_resv_lock(xe_vm_resv(vm), NULL);
3202 }
3203 
3204 /**
3205  * xe_vm_unlock() - Unlock the vm's dma_resv object
3206  * @vm: The struct xe_vm whose lock is to be released.
3207  *
3208  * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
3209  */
3210 void xe_vm_unlock(struct xe_vm *vm)
3211 {
3212 	dma_resv_unlock(xe_vm_resv(vm));
3213 }
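
/*
 * Minimal usage sketch, not part of the driver: take the VM's dma_resv lock
 * interruptibly around work that requires it. The helper name is
 * hypothetical.
 */
static int __maybe_unused example_with_vm_locked(struct xe_vm *vm)
{
	int err;

	err = xe_vm_lock(vm, true);
	if (err)
		return err;	/* -EINTR if the wait was interrupted */

	/* ... access state protected by the VM's dma_resv here ... */

	xe_vm_unlock(vm);

	return 0;
}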
3214 
3215 /**
3216  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3217  * @vma: VMA to invalidate
3218  *
3219  * Walks the list of page table leaves, zeroes the entries owned by this
3220  * VMA, invalidates the TLBs, and blocks until the TLB invalidation is
3221  * complete.
3222  *
3223  * Returns 0 for success, negative error code otherwise.
3224  */
3225 int xe_vm_invalidate_vma(struct xe_vma *vma)
3226 {
3227 	struct xe_device *xe = xe_vma_vm(vma)->xe;
3228 	struct xe_tile *tile;
3229 	u32 tile_needs_invalidate = 0;
3230 	int seqno[XE_MAX_TILES_PER_DEVICE];
3231 	u8 id;
3232 	int ret;
3233 
3234 	xe_assert(xe, !xe_vma_is_null(vma));
3235 	trace_xe_vma_invalidate(vma);
3236 
3237 	/* Check that we don't race with page-table updates */
3238 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3239 		if (xe_vma_is_userptr(vma)) {
3240 			WARN_ON_ONCE(!mmu_interval_check_retry
3241 				     (&to_userptr_vma(vma)->userptr.notifier,
3242 				      to_userptr_vma(vma)->userptr.notifier_seq));
3243 			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3244 							     DMA_RESV_USAGE_BOOKKEEP));
3245 
3246 		} else {
3247 			xe_bo_assert_held(xe_vma_bo(vma));
3248 		}
3249 	}
3250 
3251 	for_each_tile(tile, xe, id) {
3252 		if (xe_pt_zap_ptes(tile, vma)) {
3253 			tile_needs_invalidate |= BIT(id);
3254 			xe_device_wmb(xe);
3255 			/*
3256 			 * FIXME: We potentially need to invalidate multiple
3257 			 * GTs within the tile
3258 			 */
3259 			seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3260 			if (seqno[id] < 0)
3261 				return seqno[id];
3262 		}
3263 	}
3264 
3265 	for_each_tile(tile, xe, id) {
3266 		if (tile_needs_invalidate & BIT(id)) {
3267 			ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3268 			if (ret < 0)
3269 				return ret;
3270 		}
3271 	}
3272 
3273 	vma->tile_invalidated = vma->tile_mask;
3274 
3275 	return 0;
3276 }
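
/*
 * Illustrative sketch only, not called by the driver: invalidate the GPU
 * mappings of every non-NULL VMA in a VM. Assumes the caller satisfies the
 * locking checked by xe_vm_invalidate_vma() above; the helper name is
 * hypothetical.
 */
static int __maybe_unused example_invalidate_all_vmas(struct xe_vm *vm)
{
	struct drm_gpuva *gpuva;
	int err;

	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
		struct xe_vma *vma = gpuva_to_vma(gpuva);

		if (xe_vma_is_null(vma))
			continue;

		err = xe_vm_invalidate_vma(vma);
		if (err)
			return err;
	}

	return 0;
}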
3277 
3278 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3279 {
3280 	struct drm_gpuva *gpuva;
3281 	bool is_vram;
3282 	uint64_t addr;
3283 
3284 	if (!down_read_trylock(&vm->lock)) {
3285 		drm_printf(p, " Failed to acquire VM lock to dump capture");
3286 		return 0;
3287 	}
3288 	if (vm->pt_root[gt_id]) {
3289 		addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
3290 		is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
3291 		drm_printf(p, " VM root: A:0x%llx %s\n", addr,
3292 			   is_vram ? "VRAM" : "SYS");
3293 	}
3294 
3295 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3296 		struct xe_vma *vma = gpuva_to_vma(gpuva);
3297 		bool is_userptr = xe_vma_is_userptr(vma);
3298 		bool is_null = xe_vma_is_null(vma);
3299 
3300 		if (is_null) {
3301 			addr = 0;
3302 		} else if (is_userptr) {
3303 			struct sg_table *sg = to_userptr_vma(vma)->userptr.sg;
3304 			struct xe_res_cursor cur;
3305 
3306 			if (sg) {
3307 				xe_res_first_sg(sg, 0, XE_PAGE_SIZE, &cur);
3308 				addr = xe_res_dma(&cur);
3309 			} else {
3310 				addr = 0;
3311 			}
3312 		} else {
3313 			addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
3314 			is_vram = xe_bo_is_vram(xe_vma_bo(vma));
3315 		}
3316 		drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3317 			   xe_vma_start(vma), xe_vma_end(vma) - 1,
3318 			   xe_vma_size(vma),
3319 			   addr, is_null ? "NULL" : is_userptr ? "USR" :
3320 			   is_vram ? "VRAM" : "SYS");
3321 	}
3322 	up_read(&vm->lock);
3323 
3324 	return 0;
3325 }
3326 
3327 struct xe_vm_snapshot {
3328 	unsigned long num_snaps;
3329 	struct {
3330 		u64 ofs, bo_ofs;
3331 		unsigned long len;
3332 		struct xe_bo *bo;
3333 		void *data;
3334 		struct mm_struct *mm;
3335 	} snap[];
3336 };
3337 
3338 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
3339 {
3340 	unsigned long num_snaps = 0, i;
3341 	struct xe_vm_snapshot *snap = NULL;
3342 	struct drm_gpuva *gpuva;
3343 
3344 	if (!vm)
3345 		return NULL;
3346 
3347 	mutex_lock(&vm->snap_mutex);
3348 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3349 		if (gpuva->flags & XE_VMA_DUMPABLE)
3350 			num_snaps++;
3351 	}
3352 
3353 	if (num_snaps)
3354 		snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
3355 	if (!snap)
3356 		goto out_unlock;
3357 
3358 	snap->num_snaps = num_snaps;
3359 	i = 0;
3360 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3361 		struct xe_vma *vma = gpuva_to_vma(gpuva);
3362 		struct xe_bo *bo = vma->gpuva.gem.obj ?
3363 			gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
3364 
3365 		if (!(gpuva->flags & XE_VMA_DUMPABLE))
3366 			continue;
3367 
3368 		snap->snap[i].ofs = xe_vma_start(vma);
3369 		snap->snap[i].len = xe_vma_size(vma);
3370 		if (bo) {
3371 			snap->snap[i].bo = xe_bo_get(bo);
3372 			snap->snap[i].bo_ofs = xe_vma_bo_offset(vma);
3373 		} else if (xe_vma_is_userptr(vma)) {
3374 			struct mm_struct *mm =
3375 				to_userptr_vma(vma)->userptr.notifier.mm;
3376 
3377 			if (mmget_not_zero(mm))
3378 				snap->snap[i].mm = mm;
3379 			else
3380 				snap->snap[i].data = ERR_PTR(-EFAULT);
3381 
3382 			snap->snap[i].bo_ofs = xe_vma_userptr(vma);
3383 		} else {
3384 			snap->snap[i].data = ERR_PTR(-ENOENT);
3385 		}
3386 		i++;
3387 	}
3388 
3389 out_unlock:
3390 	mutex_unlock(&vm->snap_mutex);
3391 	return snap;
3392 }
3393 
3394 void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
3395 {
3396 	for (int i = 0; i < snap->num_snaps; i++) {
3397 		struct xe_bo *bo = snap->snap[i].bo;
3398 		struct iosys_map src;
3399 		int err;
3400 
3401 		if (IS_ERR(snap->snap[i].data))
3402 			continue;
3403 
3404 		snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER);
3405 		if (!snap->snap[i].data) {
3406 			snap->snap[i].data = ERR_PTR(-ENOMEM);
3407 			goto cleanup_bo;
3408 		}
3409 
3410 		if (bo) {
3411 			dma_resv_lock(bo->ttm.base.resv, NULL);
3412 			err = ttm_bo_vmap(&bo->ttm, &src);
3413 			if (!err) {
3414 				xe_map_memcpy_from(xe_bo_device(bo),
3415 						   snap->snap[i].data,
3416 						   &src, snap->snap[i].bo_ofs,
3417 						   snap->snap[i].len);
3418 				ttm_bo_vunmap(&bo->ttm, &src);
3419 			}
3420 			dma_resv_unlock(bo->ttm.base.resv);
3421 		} else {
3422 			void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
3423 
3424 			kthread_use_mm(snap->snap[i].mm);
3425 			if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len))
3426 				err = 0;
3427 			else
3428 				err = -EFAULT;
3429 			kthread_unuse_mm(snap->snap[i].mm);
3430 
3431 			mmput(snap->snap[i].mm);
3432 			snap->snap[i].mm = NULL;
3433 		}
3434 
3435 		if (err) {
3436 			kvfree(snap->snap[i].data);
3437 			snap->snap[i].data = ERR_PTR(err);
3438 		}
3439 
3440 cleanup_bo:
3441 		xe_bo_put(bo);
3442 		snap->snap[i].bo = NULL;
3443 	}
3444 }
3445 
3446 void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
3447 {
3448 	unsigned long i, j;
3449 
3450 	for (i = 0; i < snap->num_snaps; i++) {
3451 		if (IS_ERR(snap->snap[i].data))
3452 			goto uncaptured;
3453 
3454 		drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
3455 		drm_printf(p, "[%llx].data: ",
3456 			   snap->snap[i].ofs);
3457 
3458 		for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
3459 			u32 *val = snap->snap[i].data + j;
3460 			char dumped[ASCII85_BUFSZ];
3461 
3462 			drm_puts(p, ascii85_encode(*val, dumped));
3463 		}
3464 
3465 		drm_puts(p, "\n");
3466 		continue;
3467 
3468 uncaptured:
3469 		drm_printf(p, "Unable to capture range [%llx-%llx]: %li\n",
3470 			   snap->snap[i].ofs, snap->snap[i].ofs + snap->snap[i].len - 1,
3471 			   PTR_ERR(snap->snap[i].data));
3472 	}
3473 }
3474 
3475 void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
3476 {
3477 	unsigned long i;
3478 
3479 	if (!snap)
3480 		return;
3481 
3482 	for (i = 0; i < snap->num_snaps; i++) {
3483 		if (!IS_ERR(snap->snap[i].data))
3484 			kvfree(snap->snap[i].data);
3485 		xe_bo_put(snap->snap[i].bo);
3486 		if (snap->snap[i].mm)
3487 			mmput(snap->snap[i].mm);
3488 	}
3489 	kvfree(snap);
3490 }
3491