xref: /linux/drivers/gpu/drm/xe/xe_vm.c (revision ee15c8bf5d77a306614bdefe33828310662dee05)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_vm.h"
7 
8 #include <linux/dma-fence-array.h>
9 #include <linux/nospec.h>
10 
11 #include <drm/drm_exec.h>
12 #include <drm/drm_print.h>
13 #include <drm/ttm/ttm_execbuf_util.h>
14 #include <drm/ttm/ttm_tt.h>
15 #include <drm/xe_drm.h>
16 #include <linux/ascii85.h>
17 #include <linux/delay.h>
18 #include <linux/kthread.h>
19 #include <linux/mm.h>
20 #include <linux/swap.h>
21 
22 #include <generated/xe_wa_oob.h>
23 
24 #include "xe_assert.h"
25 #include "xe_bo.h"
26 #include "xe_device.h"
27 #include "xe_drm_client.h"
28 #include "xe_exec_queue.h"
29 #include "xe_gt.h"
30 #include "xe_gt_pagefault.h"
31 #include "xe_gt_tlb_invalidation.h"
32 #include "xe_migrate.h"
33 #include "xe_pat.h"
34 #include "xe_pm.h"
35 #include "xe_preempt_fence.h"
36 #include "xe_pt.h"
37 #include "xe_res_cursor.h"
38 #include "xe_sync.h"
39 #include "xe_trace.h"
40 #include "xe_wa.h"
41 
42 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
43 {
44 	return vm->gpuvm.r_obj;
45 }
46 
47 /**
48  * xe_vma_userptr_check_repin() - Advisory check for repin needed
49  * @uvma: The userptr vma
50  *
51  * Check if the userptr vma has been invalidated since last successful
52  * repin. The check is advisory only and the function can be called
53  * without the vm->userptr.notifier_lock held. There is no guarantee that the
54  * vma userptr will remain valid after a lockless check, so typically
55  * the call needs to be followed by a proper check under the notifier_lock.
56  *
57  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
58  */
59 int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
60 {
61 	return mmu_interval_check_retry(&uvma->userptr.notifier,
62 					uvma->userptr.notifier_seq) ?
63 		-EAGAIN : 0;
64 }
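
/*
 * Illustrative use of the advisory check (a sketch only, mirroring the rebind
 * worker further down): do the cheap lockless check first, repin on failure,
 * and only trust the re-check done under the notifier lock, e.g.
 *
 *	if (xe_vm_userptr_check_repin(vm))
 *		err = xe_vm_userptr_pin(vm);
 *	...
 *	down_read(&vm->userptr.notifier_lock);
 *	if (__xe_vm_userptr_needs_repin(vm))
 *		err = -EAGAIN;	(back off, repin and retry)
 *	up_read(&vm->userptr.notifier_lock);
 */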
65 
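/*
 * xe_vma_userptr_pin_pages() - (Re)pin the CPU pages backing a userptr VMA
 * and rebuild its DMA mapping.
 *
 * Tears down any previous sg-table mapping, grabs references on the pages
 * with get_user_pages_fast() (temporarily adopting the notifier's mm when
 * called from a kthread), builds and DMA-maps a fresh sg-table, marks the
 * pages accessed (and dirty for writable mappings), and drops the page
 * references again; coherency is maintained through the MMU interval
 * notifier rather than by keeping the pages pinned. Restarts from the top if
 * the range was invalidated while pinning was in progress.
 */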
66 int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
67 {
68 	struct xe_userptr *userptr = &uvma->userptr;
69 	struct xe_vma *vma = &uvma->vma;
70 	struct xe_vm *vm = xe_vma_vm(vma);
71 	struct xe_device *xe = vm->xe;
72 	const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
73 	struct page **pages;
74 	bool in_kthread = !current->mm;
75 	unsigned long notifier_seq;
76 	int pinned, ret, i;
77 	bool read_only = xe_vma_read_only(vma);
78 
79 	lockdep_assert_held(&vm->lock);
80 	xe_assert(xe, xe_vma_is_userptr(vma));
81 retry:
82 	if (vma->gpuva.flags & XE_VMA_DESTROYED)
83 		return 0;
84 
85 	notifier_seq = mmu_interval_read_begin(&userptr->notifier);
86 	if (notifier_seq == userptr->notifier_seq)
87 		return 0;
88 
89 	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
90 	if (!pages)
91 		return -ENOMEM;
92 
93 	if (userptr->sg) {
94 		dma_unmap_sgtable(xe->drm.dev,
95 				  userptr->sg,
96 				  read_only ? DMA_TO_DEVICE :
97 				  DMA_BIDIRECTIONAL, 0);
98 		sg_free_table(userptr->sg);
99 		userptr->sg = NULL;
100 	}
101 
102 	pinned = ret = 0;
103 	if (in_kthread) {
104 		if (!mmget_not_zero(userptr->notifier.mm)) {
105 			ret = -EFAULT;
106 			goto mm_closed;
107 		}
108 		kthread_use_mm(userptr->notifier.mm);
109 	}
110 
111 	while (pinned < num_pages) {
112 		ret = get_user_pages_fast(xe_vma_userptr(vma) +
113 					  pinned * PAGE_SIZE,
114 					  num_pages - pinned,
115 					  read_only ? 0 : FOLL_WRITE,
116 					  &pages[pinned]);
117 		if (ret < 0)
118 			break;
119 
120 		pinned += ret;
121 		ret = 0;
122 	}
123 
124 	if (in_kthread) {
125 		kthread_unuse_mm(userptr->notifier.mm);
126 		mmput(userptr->notifier.mm);
127 	}
128 mm_closed:
129 	if (ret)
130 		goto out;
131 
132 	ret = sg_alloc_table_from_pages_segment(&userptr->sgt, pages,
133 						pinned, 0,
134 						(u64)pinned << PAGE_SHIFT,
135 						xe_sg_segment_size(xe->drm.dev),
136 						GFP_KERNEL);
137 	if (ret) {
138 		userptr->sg = NULL;
139 		goto out;
140 	}
141 	userptr->sg = &userptr->sgt;
142 
143 	ret = dma_map_sgtable(xe->drm.dev, userptr->sg,
144 			      read_only ? DMA_TO_DEVICE :
145 			      DMA_BIDIRECTIONAL,
146 			      DMA_ATTR_SKIP_CPU_SYNC |
147 			      DMA_ATTR_NO_KERNEL_MAPPING);
148 	if (ret) {
149 		sg_free_table(userptr->sg);
150 		userptr->sg = NULL;
151 		goto out;
152 	}
153 
154 	for (i = 0; i < pinned; ++i) {
155 		if (!read_only) {
156 			lock_page(pages[i]);
157 			set_page_dirty(pages[i]);
158 			unlock_page(pages[i]);
159 		}
160 
161 		mark_page_accessed(pages[i]);
162 	}
163 
164 out:
165 	release_pages(pages, pinned);
166 	kvfree(pages);
167 
168 	if (!(ret < 0)) {
169 		userptr->notifier_seq = notifier_seq;
170 		if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
171 			goto retry;
172 	}
173 
174 	return ret < 0 ? ret : 0;
175 }
176 
177 static bool preempt_fences_waiting(struct xe_vm *vm)
178 {
179 	struct xe_exec_queue *q;
180 
181 	lockdep_assert_held(&vm->lock);
182 	xe_vm_assert_held(vm);
183 
184 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
185 		if (!q->compute.pfence ||
186 		    (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
187 						   &q->compute.pfence->flags))) {
188 			return true;
189 		}
190 	}
191 
192 	return false;
193 }
194 
195 static void free_preempt_fences(struct list_head *list)
196 {
197 	struct list_head *link, *next;
198 
199 	list_for_each_safe(link, next, list)
200 		xe_preempt_fence_free(to_preempt_fence_from_link(link));
201 }
202 
203 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
204 				unsigned int *count)
205 {
206 	lockdep_assert_held(&vm->lock);
207 	xe_vm_assert_held(vm);
208 
209 	if (*count >= vm->preempt.num_exec_queues)
210 		return 0;
211 
212 	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
213 		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
214 
215 		if (IS_ERR(pfence))
216 			return PTR_ERR(pfence);
217 
218 		list_move_tail(xe_preempt_fence_link(pfence), list);
219 	}
220 
221 	return 0;
222 }
223 
224 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
225 {
226 	struct xe_exec_queue *q;
227 
228 	xe_vm_assert_held(vm);
229 
230 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
231 		if (q->compute.pfence) {
232 			long timeout = dma_fence_wait(q->compute.pfence, false);
233 
234 			if (timeout < 0)
235 				return -ETIME;
236 			dma_fence_put(q->compute.pfence);
237 			q->compute.pfence = NULL;
238 		}
239 	}
240 
241 	return 0;
242 }
243 
244 static bool xe_vm_is_idle(struct xe_vm *vm)
245 {
246 	struct xe_exec_queue *q;
247 
248 	xe_vm_assert_held(vm);
249 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
250 		if (!xe_exec_queue_is_idle(q))
251 			return false;
252 	}
253 
254 	return true;
255 }
256 
257 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
258 {
259 	struct list_head *link;
260 	struct xe_exec_queue *q;
261 
262 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
263 		struct dma_fence *fence;
264 
265 		link = list->next;
266 		xe_assert(vm->xe, link != list);
267 
268 		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
269 					     q, q->compute.context,
270 					     ++q->compute.seqno);
271 		dma_fence_put(q->compute.pfence);
272 		q->compute.pfence = fence;
273 	}
274 }
275 
276 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
277 {
278 	struct xe_exec_queue *q;
279 	int err;
280 
281 	if (!vm->preempt.num_exec_queues)
282 		return 0;
283 
284 	err = xe_bo_lock(bo, true);
285 	if (err)
286 		return err;
287 
288 	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
289 	if (err)
290 		goto out_unlock;
291 
292 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
293 		if (q->compute.pfence) {
294 			dma_resv_add_fence(bo->ttm.base.resv,
295 					   q->compute.pfence,
296 					   DMA_RESV_USAGE_BOOKKEEP);
297 		}
298 
299 out_unlock:
300 	xe_bo_unlock(bo);
301 	return err;
302 }
303 
304 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
305 						struct drm_exec *exec)
306 {
307 	struct xe_exec_queue *q;
308 
309 	lockdep_assert_held(&vm->lock);
310 	xe_vm_assert_held(vm);
311 
312 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
313 		q->ops->resume(q);
314 
315 		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->compute.pfence,
316 					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
317 	}
318 }
319 
320 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
321 {
322 	struct drm_gpuvm_exec vm_exec = {
323 		.vm = &vm->gpuvm,
324 		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
325 		.num_fences = 1,
326 	};
327 	struct drm_exec *exec = &vm_exec.exec;
328 	struct dma_fence *pfence;
329 	int err;
330 	bool wait;
331 
332 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
333 
334 	down_write(&vm->lock);
335 	err = drm_gpuvm_exec_lock(&vm_exec);
336 	if (err)
337 		goto out_up_write;
338 
339 	pfence = xe_preempt_fence_create(q, q->compute.context,
340 					 ++q->compute.seqno);
341 	if (!pfence) {
342 		err = -ENOMEM;
343 		goto out_fini;
344 	}
345 
346 	list_add(&q->compute.link, &vm->preempt.exec_queues);
347 	++vm->preempt.num_exec_queues;
348 	q->compute.pfence = pfence;
349 
350 	down_read(&vm->userptr.notifier_lock);
351 
352 	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
353 				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
354 
355 	/*
356 	 * Check to see if a preemption on the VM or a userptr invalidation is
357 	 * in flight; if so, trigger this preempt fence to sync state with the
358 	 * other preempt fences on the VM.
359 	 */
360 	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
361 	if (wait)
362 		dma_fence_enable_sw_signaling(pfence);
363 
364 	up_read(&vm->userptr.notifier_lock);
365 
366 out_fini:
367 	drm_exec_fini(exec);
368 out_up_write:
369 	up_write(&vm->lock);
370 
371 	return err;
372 }
373 
374 /**
375  * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
376  * @vm: The VM.
377  * @q: The exec_queue
378  */
379 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
380 {
381 	if (!xe_vm_in_preempt_fence_mode(vm))
382 		return;
383 
384 	down_write(&vm->lock);
385 	list_del(&q->compute.link);
386 	--vm->preempt.num_exec_queues;
387 	if (q->compute.pfence) {
388 		dma_fence_enable_sw_signaling(q->compute.pfence);
389 		dma_fence_put(q->compute.pfence);
390 		q->compute.pfence = NULL;
391 	}
392 	up_write(&vm->lock);
393 }
394 
395 /**
396  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
397  * that need repinning.
398  * @vm: The VM.
399  *
400  * This function checks for whether the VM has userptrs that need repinning,
401  * and provides a release-type barrier on the userptr.notifier_lock after
402  * checking.
403  *
404  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
405  */
406 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
407 {
408 	lockdep_assert_held_read(&vm->userptr.notifier_lock);
409 
410 	return (list_empty(&vm->userptr.repin_list) &&
411 		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
412 }
413 
414 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
415 
416 static void xe_vm_kill(struct xe_vm *vm)
417 {
418 	struct xe_exec_queue *q;
419 
420 	lockdep_assert_held(&vm->lock);
421 
422 	xe_vm_lock(vm, false);
423 	vm->flags |= XE_VM_FLAG_BANNED;
424 	trace_xe_vm_kill(vm);
425 
426 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
427 		q->ops->kill(q);
428 	xe_vm_unlock(vm);
429 
430 	/* TODO: Inform user the VM is banned */
431 }
432 
433 /**
434  * xe_vm_validate_should_retry() - Whether to retry after a validate error.
435  * @exec: The drm_exec object used for locking before validation.
436  * @err: The error returned from ttm_bo_validate().
437  * @end: A ktime_t cookie that should be set to 0 before first use and
438  * that should be reused on subsequent calls.
439  *
440  * With multiple active VMs, under memory pressure, it is possible that
441  * ttm_bo_validate() runs into -EDEADLK and in that case returns -ENOMEM.
442  * Until ttm properly handles locking in such scenarios, the best thing the
443  * driver can do is retry with a timeout. Check if that is necessary, and
444  * if so unlock the drm_exec's objects while keeping the ticket to prepare
445  * for a rerun.
446  *
447  * Return: true if a retry after drm_exec_init() is recommended;
448  * false otherwise.
449  */
450 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
451 {
452 	ktime_t cur;
453 
454 	if (err != -ENOMEM)
455 		return false;
456 
457 	cur = ktime_get();
458 	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
459 	if (!ktime_before(cur, *end))
460 		return false;
461 
462 	msleep(20);
463 	return true;
464 }
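
/*
 * Illustrative call pattern (a sketch only; lock_and_validate() stands in
 * for whatever locking/validation the caller does inside the drm_exec loop):
 *
 *	ktime_t end = 0;
 *	int err;
 *
 * retry:
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		err = lock_and_validate(&exec, vm);
 *		drm_exec_retry_on_contention(&exec);
 *	}
 *	drm_exec_fini(&exec);
 *	if (err && xe_vm_validate_should_retry(&exec, err, &end))
 *		goto retry;
 *
 * This is the same shape of retry loop used by preempt_rebind_work_func()
 * below, where the retry is expressed as -EAGAIN.
 */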
465 
466 static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
467 {
468 	struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
469 	struct drm_gpuva *gpuva;
470 	int ret;
471 
472 	lockdep_assert_held(&vm->lock);
473 	drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
474 		list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
475 			       &vm->rebind_list);
476 
477 	ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
478 	if (ret)
479 		return ret;
480 
481 	vm_bo->evicted = false;
482 	return 0;
483 }
484 
485 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
486 				 bool *done)
487 {
488 	int err;
489 
490 	/*
491 	 * Reserve one fence slot for each preempt fence plus one for each
492 	 * tile for a possible rebind.
493 	 */
494 	err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, vm->preempt.num_exec_queues +
495 				   vm->xe->info.tile_count);
496 	if (err)
497 		return err;
498 
499 	if (xe_vm_is_idle(vm)) {
500 		vm->preempt.rebind_deactivated = true;
501 		*done = true;
502 		return 0;
503 	}
504 
505 	if (!preempt_fences_waiting(vm)) {
506 		*done = true;
507 		return 0;
508 	}
509 
510 	err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, vm->preempt.num_exec_queues);
511 	if (err)
512 		return err;
513 
514 	err = wait_for_existing_preempt_fences(vm);
515 	if (err)
516 		return err;
517 
518 	return drm_gpuvm_validate(&vm->gpuvm, exec);
519 }
520 
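/*
 * preempt_rebind_work_func() - Rebind worker for preempt-fence (LR mode) VMs.
 *
 * Repins any invalidated userptrs, locks the VM and its external BOs,
 * validates whatever was evicted, rebinds the rebind_list, waits for the
 * rebind fence and for munmap-style unbinds, then arms fresh preempt fences
 * and re-installs them while resuming the exec queues. Retries on -EAGAIN
 * (for example when a userptr was invalidated again in the meantime); any
 * other error bans the VM.
 */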
521 static void preempt_rebind_work_func(struct work_struct *w)
522 {
523 	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
524 	struct drm_exec exec;
525 	struct dma_fence *rebind_fence;
526 	unsigned int fence_count = 0;
527 	LIST_HEAD(preempt_fences);
528 	ktime_t end = 0;
529 	int err = 0;
530 	long wait;
531 	int __maybe_unused tries = 0;
532 
533 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
534 	trace_xe_vm_rebind_worker_enter(vm);
535 
536 	down_write(&vm->lock);
537 
538 	if (xe_vm_is_closed_or_banned(vm)) {
539 		up_write(&vm->lock);
540 		trace_xe_vm_rebind_worker_exit(vm);
541 		return;
542 	}
543 
544 retry:
545 	if (xe_vm_userptr_check_repin(vm)) {
546 		err = xe_vm_userptr_pin(vm);
547 		if (err)
548 			goto out_unlock_outer;
549 	}
550 
551 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
552 
553 	drm_exec_until_all_locked(&exec) {
554 		bool done = false;
555 
556 		err = xe_preempt_work_begin(&exec, vm, &done);
557 		drm_exec_retry_on_contention(&exec);
558 		if (err || done) {
559 			drm_exec_fini(&exec);
560 			if (err && xe_vm_validate_should_retry(&exec, err, &end))
561 				err = -EAGAIN;
562 
563 			goto out_unlock_outer;
564 		}
565 	}
566 
567 	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
568 	if (err)
569 		goto out_unlock;
570 
571 	rebind_fence = xe_vm_rebind(vm, true);
572 	if (IS_ERR(rebind_fence)) {
573 		err = PTR_ERR(rebind_fence);
574 		goto out_unlock;
575 	}
576 
577 	if (rebind_fence) {
578 		dma_fence_wait(rebind_fence, false);
579 		dma_fence_put(rebind_fence);
580 	}
581 
582 	/* Wait on munmap style VM unbinds */
583 	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
584 				     DMA_RESV_USAGE_KERNEL,
585 				     false, MAX_SCHEDULE_TIMEOUT);
586 	if (wait <= 0) {
587 		err = -ETIME;
588 		goto out_unlock;
589 	}
590 
591 #define retry_required(__tries, __vm) \
592 	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
593 	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
594 	__xe_vm_userptr_needs_repin(__vm))
595 
596 	down_read(&vm->userptr.notifier_lock);
597 	if (retry_required(tries, vm)) {
598 		up_read(&vm->userptr.notifier_lock);
599 		err = -EAGAIN;
600 		goto out_unlock;
601 	}
602 
603 #undef retry_required
604 
605 	spin_lock(&vm->xe->ttm.lru_lock);
606 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
607 	spin_unlock(&vm->xe->ttm.lru_lock);
608 
609 	/* Point of no return. */
610 	arm_preempt_fences(vm, &preempt_fences);
611 	resume_and_reinstall_preempt_fences(vm, &exec);
612 	up_read(&vm->userptr.notifier_lock);
613 
614 out_unlock:
615 	drm_exec_fini(&exec);
616 out_unlock_outer:
617 	if (err == -EAGAIN) {
618 		trace_xe_vm_rebind_worker_retry(vm);
619 		goto retry;
620 	}
621 
622 	if (err) {
623 		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
624 		xe_vm_kill(vm);
625 	}
626 	up_write(&vm->lock);
627 
628 	free_preempt_fences(&preempt_fences);
629 
630 	trace_xe_vm_rebind_worker_exit(vm);
631 }
632 
633 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
634 				   const struct mmu_notifier_range *range,
635 				   unsigned long cur_seq)
636 {
637 	struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
638 	struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
639 	struct xe_vma *vma = &uvma->vma;
640 	struct xe_vm *vm = xe_vma_vm(vma);
641 	struct dma_resv_iter cursor;
642 	struct dma_fence *fence;
643 	long err;
644 
645 	xe_assert(vm->xe, xe_vma_is_userptr(vma));
646 	trace_xe_vma_userptr_invalidate(vma);
647 
648 	if (!mmu_notifier_range_blockable(range))
649 		return false;
650 
651 	down_write(&vm->userptr.notifier_lock);
652 	mmu_interval_set_seq(mni, cur_seq);
653 
654 	/* No need to stop gpu access if the userptr is not yet bound. */
655 	if (!userptr->initial_bind) {
656 		up_write(&vm->userptr.notifier_lock);
657 		return true;
658 	}
659 
660 	/*
661 	 * Tell exec and rebind worker they need to repin and rebind this
662 	 * userptr.
663 	 */
664 	if (!xe_vm_in_fault_mode(vm) &&
665 	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
666 		spin_lock(&vm->userptr.invalidated_lock);
667 		list_move_tail(&userptr->invalidate_link,
668 			       &vm->userptr.invalidated);
669 		spin_unlock(&vm->userptr.invalidated_lock);
670 	}
671 
672 	up_write(&vm->userptr.notifier_lock);
673 
674 	/*
675 	 * Preempt fences turn into schedule disables, pipeline these.
676 	 * Note that even in fault mode, we need to wait for binds and
677 	 * unbinds to complete, and those are attached as BOOKKEEP fences
678 	 * to the vm.
679 	 */
680 	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
681 			    DMA_RESV_USAGE_BOOKKEEP);
682 	dma_resv_for_each_fence_unlocked(&cursor, fence)
683 		dma_fence_enable_sw_signaling(fence);
684 	dma_resv_iter_end(&cursor);
685 
686 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
687 				    DMA_RESV_USAGE_BOOKKEEP,
688 				    false, MAX_SCHEDULE_TIMEOUT);
689 	XE_WARN_ON(err <= 0);
690 
691 	if (xe_vm_in_fault_mode(vm)) {
692 		err = xe_vm_invalidate_vma(vma);
693 		XE_WARN_ON(err);
694 	}
695 
696 	trace_xe_vma_userptr_invalidate_complete(vma);
697 
698 	return true;
699 }
700 
701 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
702 	.invalidate = vma_userptr_invalidate,
703 };
704 
705 int xe_vm_userptr_pin(struct xe_vm *vm)
706 {
707 	struct xe_userptr_vma *uvma, *next;
708 	int err = 0;
709 	LIST_HEAD(tmp_evict);
710 
711 	lockdep_assert_held_write(&vm->lock);
712 
713 	/* Collect invalidated userptrs */
714 	spin_lock(&vm->userptr.invalidated_lock);
715 	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
716 				 userptr.invalidate_link) {
717 		list_del_init(&uvma->userptr.invalidate_link);
718 		list_move_tail(&uvma->userptr.repin_link,
719 			       &vm->userptr.repin_list);
720 	}
721 	spin_unlock(&vm->userptr.invalidated_lock);
722 
723 	/* Pin and move to the rebind list */
724 	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
725 				 userptr.repin_link) {
726 		err = xe_vma_userptr_pin_pages(uvma);
727 		if (err < 0)
728 			return err;
729 
730 		list_del_init(&uvma->userptr.repin_link);
731 		list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
732 	}
733 
734 	return 0;
735 }
736 
737 /**
738  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
739  * that need repinning.
740  * @vm: The VM.
741  *
742  * This function does an advisory check for whether the VM has userptrs that
743  * need repinning.
744  *
745  * Return: 0 if there are no indications of userptrs needing repinning,
746  * -EAGAIN if there are.
747  */
748 int xe_vm_userptr_check_repin(struct xe_vm *vm)
749 {
750 	return (list_empty_careful(&vm->userptr.repin_list) &&
751 		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
752 }
753 
754 static struct dma_fence *
755 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
756 	       struct xe_sync_entry *syncs, u32 num_syncs,
757 	       bool first_op, bool last_op);
758 
759 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
760 {
761 	struct dma_fence *fence = NULL;
762 	struct xe_vma *vma, *next;
763 
764 	lockdep_assert_held(&vm->lock);
765 	if (xe_vm_in_lr_mode(vm) && !rebind_worker)
766 		return NULL;
767 
768 	xe_vm_assert_held(vm);
769 	list_for_each_entry_safe(vma, next, &vm->rebind_list,
770 				 combined_links.rebind) {
771 		xe_assert(vm->xe, vma->tile_present);
772 
773 		list_del_init(&vma->combined_links.rebind);
774 		dma_fence_put(fence);
775 		if (rebind_worker)
776 			trace_xe_vma_rebind_worker(vma);
777 		else
778 			trace_xe_vma_rebind_exec(vma);
779 		fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
780 		if (IS_ERR(fence))
781 			return fence;
782 	}
783 
784 	return fence;
785 }
786 
787 static void xe_vma_free(struct xe_vma *vma)
788 {
789 	if (xe_vma_is_userptr(vma))
790 		kfree(to_userptr_vma(vma));
791 	else
792 		kfree(vma);
793 }
794 
795 #define VMA_CREATE_FLAG_READ_ONLY	BIT(0)
796 #define VMA_CREATE_FLAG_IS_NULL		BIT(1)
797 #define VMA_CREATE_FLAG_DUMPABLE	BIT(2)
798 
799 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
800 				    struct xe_bo *bo,
801 				    u64 bo_offset_or_userptr,
802 				    u64 start, u64 end,
803 				    u16 pat_index, unsigned int flags)
804 {
805 	struct xe_vma *vma;
806 	struct xe_tile *tile;
807 	u8 id;
808 	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
809 	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
810 	bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
811 
812 	xe_assert(vm->xe, start < end);
813 	xe_assert(vm->xe, end < vm->size);
814 
815 	/*
816 	 * Allocate and ensure that the xe_vma_is_userptr() return
817 	 * matches what was allocated.
818 	 */
819 	if (!bo && !is_null) {
820 		struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
821 
822 		if (!uvma)
823 			return ERR_PTR(-ENOMEM);
824 
825 		vma = &uvma->vma;
826 	} else {
827 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
828 		if (!vma)
829 			return ERR_PTR(-ENOMEM);
830 
831 		if (is_null)
832 			vma->gpuva.flags |= DRM_GPUVA_SPARSE;
833 		if (bo)
834 			vma->gpuva.gem.obj = &bo->ttm.base;
835 	}
836 
837 	INIT_LIST_HEAD(&vma->combined_links.rebind);
838 
839 	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
840 	vma->gpuva.vm = &vm->gpuvm;
841 	vma->gpuva.va.addr = start;
842 	vma->gpuva.va.range = end - start + 1;
843 	if (read_only)
844 		vma->gpuva.flags |= XE_VMA_READ_ONLY;
845 	if (dumpable)
846 		vma->gpuva.flags |= XE_VMA_DUMPABLE;
847 
848 	for_each_tile(tile, vm->xe, id)
849 		vma->tile_mask |= 0x1 << id;
850 
851 	if (GRAPHICS_VER(vm->xe) >= 20 || vm->xe->info.platform == XE_PVC)
852 		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
853 
854 	vma->pat_index = pat_index;
855 
856 	if (bo) {
857 		struct drm_gpuvm_bo *vm_bo;
858 
859 		xe_bo_assert_held(bo);
860 
861 		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
862 		if (IS_ERR(vm_bo)) {
863 			xe_vma_free(vma);
864 			return ERR_CAST(vm_bo);
865 		}
866 
867 		drm_gpuvm_bo_extobj_add(vm_bo);
868 		drm_gem_object_get(&bo->ttm.base);
869 		vma->gpuva.gem.offset = bo_offset_or_userptr;
870 		drm_gpuva_link(&vma->gpuva, vm_bo);
871 		drm_gpuvm_bo_put(vm_bo);
872 	} else /* userptr or null */ {
873 		if (!is_null) {
874 			struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
875 			u64 size = end - start + 1;
876 			int err;
877 
878 			INIT_LIST_HEAD(&userptr->invalidate_link);
879 			INIT_LIST_HEAD(&userptr->repin_link);
880 			vma->gpuva.gem.offset = bo_offset_or_userptr;
881 
882 			err = mmu_interval_notifier_insert(&userptr->notifier,
883 							   current->mm,
884 							   xe_vma_userptr(vma), size,
885 							   &vma_userptr_notifier_ops);
886 			if (err) {
887 				xe_vma_free(vma);
888 				return ERR_PTR(err);
889 			}
890 
891 			userptr->notifier_seq = LONG_MAX;
892 		}
893 
894 		xe_vm_get(vm);
895 	}
896 
897 	return vma;
898 }
899 
900 static void xe_vma_destroy_late(struct xe_vma *vma)
901 {
902 	struct xe_vm *vm = xe_vma_vm(vma);
903 	struct xe_device *xe = vm->xe;
904 	bool read_only = xe_vma_read_only(vma);
905 
906 	if (xe_vma_is_userptr(vma)) {
907 		struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
908 
909 		if (userptr->sg) {
910 			dma_unmap_sgtable(xe->drm.dev,
911 					  userptr->sg,
912 					  read_only ? DMA_TO_DEVICE :
913 					  DMA_BIDIRECTIONAL, 0);
914 			sg_free_table(userptr->sg);
915 			userptr->sg = NULL;
916 		}
917 
918 		/*
919 		 * Since userptr pages are not pinned, we can't remove
920 		 * the notifier until we're sure the GPU is not accessing
921 		 * them anymore
922 		 */
923 		mmu_interval_notifier_remove(&userptr->notifier);
924 		xe_vm_put(vm);
925 	} else if (xe_vma_is_null(vma)) {
926 		xe_vm_put(vm);
927 	} else {
928 		xe_bo_put(xe_vma_bo(vma));
929 	}
930 
931 	xe_vma_free(vma);
932 }
933 
934 static void vma_destroy_work_func(struct work_struct *w)
935 {
936 	struct xe_vma *vma =
937 		container_of(w, struct xe_vma, destroy_work);
938 
939 	xe_vma_destroy_late(vma);
940 }
941 
942 static void vma_destroy_cb(struct dma_fence *fence,
943 			   struct dma_fence_cb *cb)
944 {
945 	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
946 
947 	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
948 	queue_work(system_unbound_wq, &vma->destroy_work);
949 }
950 
951 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
952 {
953 	struct xe_vm *vm = xe_vma_vm(vma);
954 
955 	lockdep_assert_held_write(&vm->lock);
956 	xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
957 
958 	if (xe_vma_is_userptr(vma)) {
959 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
960 
961 		spin_lock(&vm->userptr.invalidated_lock);
962 		list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
963 		spin_unlock(&vm->userptr.invalidated_lock);
964 	} else if (!xe_vma_is_null(vma)) {
965 		xe_bo_assert_held(xe_vma_bo(vma));
966 
967 		drm_gpuva_unlink(&vma->gpuva);
968 	}
969 
970 	xe_vm_assert_held(vm);
971 	if (fence) {
972 		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
973 						 vma_destroy_cb);
974 
975 		if (ret) {
976 			XE_WARN_ON(ret != -ENOENT);
977 			xe_vma_destroy_late(vma);
978 		}
979 	} else {
980 		xe_vma_destroy_late(vma);
981 	}
982 }
983 
984 /**
985  * xe_vm_prepare_vma() - drm_exec utility to lock a vma
986  * @exec: The drm_exec object we're currently locking for.
987  * @vma: The vma for which we want to lock the vm resv and any attached
988  * object's resv.
989  * @num_shared: The number of dma-fence slots to pre-allocate in the
990  * objects' reservation objects.
991  *
992  * Return: 0 on success, negative error code on error. In particular
993  * may return -EDEADLK on WW transaction contention and -EINTR if
994  * an interruptible wait is terminated by a signal.
995  */
996 int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
997 		      unsigned int num_shared)
998 {
999 	struct xe_vm *vm = xe_vma_vm(vma);
1000 	struct xe_bo *bo = xe_vma_bo(vma);
1001 	int err;
1002 
1003 	XE_WARN_ON(!vm);
1004 	if (num_shared)
1005 		err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
1006 	else
1007 		err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
1008 	if (!err && bo && !bo->vm) {
1009 		if (num_shared)
1010 			err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
1011 		else
1012 			err = drm_exec_lock_obj(exec, &bo->ttm.base);
1013 	}
1014 
1015 	return err;
1016 }
1017 
1018 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1019 {
1020 	struct drm_exec exec;
1021 	int err;
1022 
1023 	drm_exec_init(&exec, 0, 0);
1024 	drm_exec_until_all_locked(&exec) {
1025 		err = xe_vm_prepare_vma(&exec, vma, 0);
1026 		drm_exec_retry_on_contention(&exec);
1027 		if (XE_WARN_ON(err))
1028 			break;
1029 	}
1030 
1031 	xe_vma_destroy(vma, NULL);
1032 
1033 	drm_exec_fini(&exec);
1034 }
1035 
1036 struct xe_vma *
1037 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1038 {
1039 	struct drm_gpuva *gpuva;
1040 
1041 	lockdep_assert_held(&vm->lock);
1042 
1043 	if (xe_vm_is_closed_or_banned(vm))
1044 		return NULL;
1045 
1046 	xe_assert(vm->xe, start + range <= vm->size);
1047 
1048 	gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1049 
1050 	return gpuva ? gpuva_to_vma(gpuva) : NULL;
1051 }
1052 
1053 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1054 {
1055 	int err;
1056 
1057 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1058 	lockdep_assert_held(&vm->lock);
1059 
1060 	mutex_lock(&vm->snap_mutex);
1061 	err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1062 	mutex_unlock(&vm->snap_mutex);
1063 	XE_WARN_ON(err);	/* Shouldn't be possible */
1064 
1065 	return err;
1066 }
1067 
1068 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1069 {
1070 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1071 	lockdep_assert_held(&vm->lock);
1072 
1073 	mutex_lock(&vm->snap_mutex);
1074 	drm_gpuva_remove(&vma->gpuva);
1075 	mutex_unlock(&vm->snap_mutex);
1076 	if (vm->usm.last_fault_vma == vma)
1077 		vm->usm.last_fault_vma = NULL;
1078 }
1079 
1080 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1081 {
1082 	struct xe_vma_op *op;
1083 
1084 	op = kzalloc(sizeof(*op), GFP_KERNEL);
1085 
1086 	if (unlikely(!op))
1087 		return NULL;
1088 
1089 	return &op->base;
1090 }
1091 
1092 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1093 
1094 static const struct drm_gpuvm_ops gpuvm_ops = {
1095 	.op_alloc = xe_vm_op_alloc,
1096 	.vm_bo_validate = xe_gpuvm_validate,
1097 	.vm_free = xe_vm_free,
1098 };
1099 
1100 static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
1101 {
1102 	u64 pte = 0;
1103 
1104 	if (pat_index & BIT(0))
1105 		pte |= XE_PPGTT_PTE_PAT0;
1106 
1107 	if (pat_index & BIT(1))
1108 		pte |= XE_PPGTT_PTE_PAT1;
1109 
1110 	return pte;
1111 }
1112 
1113 static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
1114 				u32 pt_level)
1115 {
1116 	u64 pte = 0;
1117 
1118 	if (pat_index & BIT(0))
1119 		pte |= XE_PPGTT_PTE_PAT0;
1120 
1121 	if (pat_index & BIT(1))
1122 		pte |= XE_PPGTT_PTE_PAT1;
1123 
1124 	if (pat_index & BIT(2)) {
1125 		if (pt_level)
1126 			pte |= XE_PPGTT_PDE_PDPE_PAT2;
1127 		else
1128 			pte |= XE_PPGTT_PTE_PAT2;
1129 	}
1130 
1131 	if (pat_index & BIT(3))
1132 		pte |= XELPG_PPGTT_PTE_PAT3;
1133 
1134 	if (pat_index & (BIT(4)))
1135 		pte |= XE2_PPGTT_PTE_PAT4;
1136 
1137 	return pte;
1138 }
1139 
1140 static u64 pte_encode_ps(u32 pt_level)
1141 {
1142 	XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
1143 
1144 	if (pt_level == 1)
1145 		return XE_PDE_PS_2M;
1146 	else if (pt_level == 2)
1147 		return XE_PDPE_PS_1G;
1148 
1149 	return 0;
1150 }
1151 
1152 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
1153 			      const u16 pat_index)
1154 {
1155 	struct xe_device *xe = xe_bo_device(bo);
1156 	u64 pde;
1157 
1158 	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1159 	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1160 	pde |= pde_encode_pat_index(xe, pat_index);
1161 
1162 	return pde;
1163 }
1164 
1165 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1166 			      u16 pat_index, u32 pt_level)
1167 {
1168 	struct xe_device *xe = xe_bo_device(bo);
1169 	u64 pte;
1170 
1171 	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1172 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1173 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1174 	pte |= pte_encode_ps(pt_level);
1175 
1176 	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1177 		pte |= XE_PPGTT_PTE_DM;
1178 
1179 	return pte;
1180 }
1181 
1182 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1183 			       u16 pat_index, u32 pt_level)
1184 {
1185 	struct xe_device *xe = xe_vma_vm(vma)->xe;
1186 
1187 	pte |= XE_PAGE_PRESENT;
1188 
1189 	if (likely(!xe_vma_read_only(vma)))
1190 		pte |= XE_PAGE_RW;
1191 
1192 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1193 	pte |= pte_encode_ps(pt_level);
1194 
1195 	if (unlikely(xe_vma_is_null(vma)))
1196 		pte |= XE_PTE_NULL;
1197 
1198 	return pte;
1199 }
1200 
1201 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1202 				u16 pat_index,
1203 				u32 pt_level, bool devmem, u64 flags)
1204 {
1205 	u64 pte;
1206 
1207 	/* Avoid passing random bits directly as flags */
1208 	xe_assert(xe, !(flags & ~XE_PTE_PS64));
1209 
1210 	pte = addr;
1211 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1212 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1213 	pte |= pte_encode_ps(pt_level);
1214 
1215 	if (devmem)
1216 		pte |= XE_PPGTT_PTE_DM;
1217 
1218 	pte |= flags;
1219 
1220 	return pte;
1221 }
1222 
1223 static const struct xe_pt_ops xelp_pt_ops = {
1224 	.pte_encode_bo = xelp_pte_encode_bo,
1225 	.pte_encode_vma = xelp_pte_encode_vma,
1226 	.pte_encode_addr = xelp_pte_encode_addr,
1227 	.pde_encode_bo = xelp_pde_encode_bo,
1228 };
1229 
1230 static void vm_destroy_work_func(struct work_struct *w);
1231 
1232 /**
1233  * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
1234  * given tile and vm.
1235  * @xe: xe device.
1236  * @tile: tile to set up for.
1237  * @vm: vm to set up for.
1238  *
1239  * Sets up a pagetable tree with one page-table per level and a single
1240  * leaf PTE. All pagetable entries point to the single page-table or,
1241  * for MAX_HUGEPTE_LEVEL, a NULL huge PTE returning 0 on read and
1242  * writes become NOPs.
1243  *
1244  * Return: 0 on success, negative error code on error.
1245  */
1246 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1247 				struct xe_vm *vm)
1248 {
1249 	u8 id = tile->id;
1250 	int i;
1251 
1252 	for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
1253 		vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
1254 		if (IS_ERR(vm->scratch_pt[id][i]))
1255 			return PTR_ERR(vm->scratch_pt[id][i]);
1256 
1257 		xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
1258 	}
1259 
1260 	return 0;
1261 }
1262 
1263 static void xe_vm_free_scratch(struct xe_vm *vm)
1264 {
1265 	struct xe_tile *tile;
1266 	u8 id;
1267 
1268 	if (!xe_vm_has_scratch(vm))
1269 		return;
1270 
1271 	for_each_tile(tile, vm->xe, id) {
1272 		u32 i;
1273 
1274 		if (!vm->pt_root[id])
1275 			continue;
1276 
1277 		for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
1278 			if (vm->scratch_pt[id][i])
1279 				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
1280 	}
1281 }
1282 
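/*
 * xe_vm_create() - Allocate and initialize a VM.
 *
 * Sets up the drm_gpuvm with a shared reservation object, creates a
 * page-table root per tile (plus scratch tables when requested), and, for
 * non-migration VMs, a per-tile bind exec queue on the copy engine. LR-mode
 * VMs additionally get the preempt rebind worker initialized.
 */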
1283 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1284 {
1285 	struct drm_gem_object *vm_resv_obj;
1286 	struct xe_vm *vm;
1287 	int err, number_tiles = 0;
1288 	struct xe_tile *tile;
1289 	u8 id;
1290 
1291 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1292 	if (!vm)
1293 		return ERR_PTR(-ENOMEM);
1294 
1295 	vm->xe = xe;
1296 
1297 	vm->size = 1ull << xe->info.va_bits;
1298 
1299 	vm->flags = flags;
1300 
1301 	init_rwsem(&vm->lock);
1302 	mutex_init(&vm->snap_mutex);
1303 
1304 	INIT_LIST_HEAD(&vm->rebind_list);
1305 
1306 	INIT_LIST_HEAD(&vm->userptr.repin_list);
1307 	INIT_LIST_HEAD(&vm->userptr.invalidated);
1308 	init_rwsem(&vm->userptr.notifier_lock);
1309 	spin_lock_init(&vm->userptr.invalidated_lock);
1310 
1311 	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1312 
1313 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
1314 	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
1315 
1316 	for_each_tile(tile, xe, id)
1317 		xe_range_fence_tree_init(&vm->rftree[id]);
1318 
1319 	vm->pt_ops = &xelp_pt_ops;
1320 
1321 	if (!(flags & XE_VM_FLAG_MIGRATION))
1322 		xe_device_mem_access_get(xe);
1323 
1324 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1325 	if (!vm_resv_obj) {
1326 		err = -ENOMEM;
1327 		goto err_no_resv;
1328 	}
1329 
1330 	drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1331 		       vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
1332 
1333 	drm_gem_object_put(vm_resv_obj);
1334 
1335 	err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
1336 	if (err)
1337 		goto err_close;
1338 
1339 	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1340 		vm->flags |= XE_VM_FLAG_64K;
1341 
1342 	for_each_tile(tile, xe, id) {
1343 		if (flags & XE_VM_FLAG_MIGRATION &&
1344 		    tile->id != XE_VM_FLAG_TILE_ID(flags))
1345 			continue;
1346 
1347 		vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1348 		if (IS_ERR(vm->pt_root[id])) {
1349 			err = PTR_ERR(vm->pt_root[id]);
1350 			vm->pt_root[id] = NULL;
1351 			goto err_unlock_close;
1352 		}
1353 	}
1354 
1355 	if (xe_vm_has_scratch(vm)) {
1356 		for_each_tile(tile, xe, id) {
1357 			if (!vm->pt_root[id])
1358 				continue;
1359 
1360 			err = xe_vm_create_scratch(xe, tile, vm);
1361 			if (err)
1362 				goto err_unlock_close;
1363 		}
1364 		vm->batch_invalidate_tlb = true;
1365 	}
1366 
1367 	if (flags & XE_VM_FLAG_LR_MODE) {
1368 		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1369 		vm->flags |= XE_VM_FLAG_LR_MODE;
1370 		vm->batch_invalidate_tlb = false;
1371 	}
1372 
1373 	/* Fill pt_root after allocating scratch tables */
1374 	for_each_tile(tile, xe, id) {
1375 		if (!vm->pt_root[id])
1376 			continue;
1377 
1378 		xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1379 	}
1380 	dma_resv_unlock(xe_vm_resv(vm));
1381 
1382 	/* Kernel migration VM shouldn't have a circular loop. */
1383 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
1384 		for_each_tile(tile, xe, id) {
1385 			struct xe_gt *gt = tile->primary_gt;
1386 			struct xe_vm *migrate_vm;
1387 			struct xe_exec_queue *q;
1388 			u32 create_flags = EXEC_QUEUE_FLAG_VM;
1389 
1390 			if (!vm->pt_root[id])
1391 				continue;
1392 
1393 			migrate_vm = xe_migrate_get_vm(tile->migrate);
1394 			q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1395 						       XE_ENGINE_CLASS_COPY,
1396 						       create_flags);
1397 			xe_vm_put(migrate_vm);
1398 			if (IS_ERR(q)) {
1399 				err = PTR_ERR(q);
1400 				goto err_close;
1401 			}
1402 			vm->q[id] = q;
1403 			number_tiles++;
1404 		}
1405 	}
1406 
1407 	if (number_tiles > 1)
1408 		vm->composite_fence_ctx = dma_fence_context_alloc(1);
1409 
1410 	mutex_lock(&xe->usm.lock);
1411 	if (flags & XE_VM_FLAG_FAULT_MODE)
1412 		xe->usm.num_vm_in_fault_mode++;
1413 	else if (!(flags & XE_VM_FLAG_MIGRATION))
1414 		xe->usm.num_vm_in_non_fault_mode++;
1415 	mutex_unlock(&xe->usm.lock);
1416 
1417 	trace_xe_vm_create(vm);
1418 
1419 	return vm;
1420 
1421 err_unlock_close:
1422 	dma_resv_unlock(xe_vm_resv(vm));
1423 err_close:
1424 	xe_vm_close_and_put(vm);
1425 	return ERR_PTR(err);
1426 
1427 err_no_resv:
1428 	mutex_destroy(&vm->snap_mutex);
1429 	for_each_tile(tile, xe, id)
1430 		xe_range_fence_tree_fini(&vm->rftree[id]);
1431 	kfree(vm);
1432 	if (!(flags & XE_VM_FLAG_MIGRATION))
1433 		xe_device_mem_access_put(xe);
1434 	return ERR_PTR(err);
1435 }
1436 
1437 static void xe_vm_close(struct xe_vm *vm)
1438 {
1439 	down_write(&vm->lock);
1440 	vm->size = 0;
1441 	up_write(&vm->lock);
1442 }
1443 
1444 void xe_vm_close_and_put(struct xe_vm *vm)
1445 {
1446 	LIST_HEAD(contested);
1447 	struct xe_device *xe = vm->xe;
1448 	struct xe_tile *tile;
1449 	struct xe_vma *vma, *next_vma;
1450 	struct drm_gpuva *gpuva, *next;
1451 	u8 id;
1452 
1453 	xe_assert(xe, !vm->preempt.num_exec_queues);
1454 
1455 	xe_vm_close(vm);
1456 	if (xe_vm_in_preempt_fence_mode(vm))
1457 		flush_work(&vm->preempt.rebind_work);
1458 
1459 	down_write(&vm->lock);
1460 	for_each_tile(tile, xe, id) {
1461 		if (vm->q[id])
1462 			xe_exec_queue_last_fence_put(vm->q[id], vm);
1463 	}
1464 	up_write(&vm->lock);
1465 
1466 	for_each_tile(tile, xe, id) {
1467 		if (vm->q[id]) {
1468 			xe_exec_queue_kill(vm->q[id]);
1469 			xe_exec_queue_put(vm->q[id]);
1470 			vm->q[id] = NULL;
1471 		}
1472 	}
1473 
1474 	down_write(&vm->lock);
1475 	xe_vm_lock(vm, false);
1476 	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1477 		vma = gpuva_to_vma(gpuva);
1478 
1479 		if (xe_vma_has_no_bo(vma)) {
1480 			down_read(&vm->userptr.notifier_lock);
1481 			vma->gpuva.flags |= XE_VMA_DESTROYED;
1482 			up_read(&vm->userptr.notifier_lock);
1483 		}
1484 
1485 		xe_vm_remove_vma(vm, vma);
1486 
1487 		/* Easy case: userptr, NULL or VM-private BO; destroy immediately */
1488 		if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1489 			list_del_init(&vma->combined_links.rebind);
1490 			xe_vma_destroy(vma, NULL);
1491 			continue;
1492 		}
1493 
1494 		list_move_tail(&vma->combined_links.destroy, &contested);
1495 		vma->gpuva.flags |= XE_VMA_DESTROYED;
1496 	}
1497 
1498 	/*
1499 	 * All vm operations will add shared fences to resv.
1500 	 * The only exception is eviction for a shared object,
1501 	 * but even so, the unbind when evicted would still
1502 	 * install a fence to resv. Hence it's safe to
1503 	 * destroy the pagetables immediately.
1504 	 */
1505 	xe_vm_free_scratch(vm);
1506 
1507 	for_each_tile(tile, xe, id) {
1508 		if (vm->pt_root[id]) {
1509 			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1510 			vm->pt_root[id] = NULL;
1511 		}
1512 	}
1513 	xe_vm_unlock(vm);
1514 
1515 	/*
1516 	 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1517 	 * Since we hold a refcount to the bo, we can remove and free
1518 	 * the members safely without locking.
1519 	 */
1520 	list_for_each_entry_safe(vma, next_vma, &contested,
1521 				 combined_links.destroy) {
1522 		list_del_init(&vma->combined_links.destroy);
1523 		xe_vma_destroy_unlocked(vma);
1524 	}
1525 
1526 	up_write(&vm->lock);
1527 
1528 	mutex_lock(&xe->usm.lock);
1529 	if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1530 		xe->usm.num_vm_in_fault_mode--;
1531 	else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1532 		xe->usm.num_vm_in_non_fault_mode--;
1533 	mutex_unlock(&xe->usm.lock);
1534 
1535 	for_each_tile(tile, xe, id)
1536 		xe_range_fence_tree_fini(&vm->rftree[id]);
1537 
1538 	xe_vm_put(vm);
1539 }
1540 
1541 static void vm_destroy_work_func(struct work_struct *w)
1542 {
1543 	struct xe_vm *vm =
1544 		container_of(w, struct xe_vm, destroy_work);
1545 	struct xe_device *xe = vm->xe;
1546 	struct xe_tile *tile;
1547 	u8 id;
1548 	void *lookup;
1549 
1550 	/* xe_vm_close_and_put was not called? */
1551 	xe_assert(xe, !vm->size);
1552 
1553 	mutex_destroy(&vm->snap_mutex);
1554 
1555 	if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1556 		xe_device_mem_access_put(xe);
1557 
1558 		if (xe->info.has_asid && vm->usm.asid) {
1559 			mutex_lock(&xe->usm.lock);
1560 			lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1561 			xe_assert(xe, lookup == vm);
1562 			mutex_unlock(&xe->usm.lock);
1563 		}
1564 	}
1565 
1566 	for_each_tile(tile, xe, id)
1567 		XE_WARN_ON(vm->pt_root[id]);
1568 
1569 	trace_xe_vm_free(vm);
1570 	dma_fence_put(vm->rebind_fence);
1571 	kfree(vm);
1572 }
1573 
1574 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1575 {
1576 	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1577 
1578 	/* To destroy the VM we need to be able to sleep */
1579 	queue_work(system_unbound_wq, &vm->destroy_work);
1580 }
1581 
1582 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1583 {
1584 	struct xe_vm *vm;
1585 
1586 	mutex_lock(&xef->vm.lock);
1587 	vm = xa_load(&xef->vm.xa, id);
1588 	if (vm)
1589 		xe_vm_get(vm);
1590 	mutex_unlock(&xef->vm.lock);
1591 
1592 	return vm;
1593 }
1594 
1595 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1596 {
1597 	return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
1598 					 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
1599 }
1600 
1601 static struct xe_exec_queue *
1602 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
1603 {
1604 	return q ? q : vm->q[0];
1605 }
1606 
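/*
 * xe_vm_unbind_vma() - Unbind @vma on every tile it is currently bound on.
 *
 * Issues one page-table unbind per tile with the VMA present; when more than
 * one tile is involved the per-tile fences are combined into a
 * dma_fence_array so that callers (and the syncs signalled on the last
 * operation) see a single fence.
 */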
1607 static struct dma_fence *
1608 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1609 		 struct xe_sync_entry *syncs, u32 num_syncs,
1610 		 bool first_op, bool last_op)
1611 {
1612 	struct xe_vm *vm = xe_vma_vm(vma);
1613 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1614 	struct xe_tile *tile;
1615 	struct dma_fence *fence = NULL;
1616 	struct dma_fence **fences = NULL;
1617 	struct dma_fence_array *cf = NULL;
1618 	int cur_fence = 0, i;
1619 	int number_tiles = hweight8(vma->tile_present);
1620 	int err;
1621 	u8 id;
1622 
1623 	trace_xe_vma_unbind(vma);
1624 
1625 	if (number_tiles > 1) {
1626 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1627 				       GFP_KERNEL);
1628 		if (!fences)
1629 			return ERR_PTR(-ENOMEM);
1630 	}
1631 
1632 	for_each_tile(tile, vm->xe, id) {
1633 		if (!(vma->tile_present & BIT(id)))
1634 			goto next;
1635 
1636 		fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
1637 					   first_op ? syncs : NULL,
1638 					   first_op ? num_syncs : 0);
1639 		if (IS_ERR(fence)) {
1640 			err = PTR_ERR(fence);
1641 			goto err_fences;
1642 		}
1643 
1644 		if (fences)
1645 			fences[cur_fence++] = fence;
1646 
1647 next:
1648 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1649 			q = list_next_entry(q, multi_gt_list);
1650 	}
1651 
1652 	if (fences) {
1653 		cf = dma_fence_array_create(number_tiles, fences,
1654 					    vm->composite_fence_ctx,
1655 					    vm->composite_fence_seqno++,
1656 					    false);
1657 		if (!cf) {
1658 			--vm->composite_fence_seqno;
1659 			err = -ENOMEM;
1660 			goto err_fences;
1661 		}
1662 	}
1663 
1664 	fence = cf ? &cf->base : !fence ?
1665 		xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
1666 	if (last_op) {
1667 		for (i = 0; i < num_syncs; i++)
1668 			xe_sync_entry_signal(&syncs[i], NULL, fence);
1669 	}
1670 
1671 	return fence;
1672 
1673 err_fences:
1674 	if (fences) {
1675 		while (cur_fence)
1676 			dma_fence_put(fences[--cur_fence]);
1677 		kfree(fences);
1678 	}
1679 
1680 	return ERR_PTR(err);
1681 }
1682 
1683 static struct dma_fence *
1684 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1685 	       struct xe_sync_entry *syncs, u32 num_syncs,
1686 	       bool first_op, bool last_op)
1687 {
1688 	struct xe_tile *tile;
1689 	struct dma_fence *fence;
1690 	struct dma_fence **fences = NULL;
1691 	struct dma_fence_array *cf = NULL;
1692 	struct xe_vm *vm = xe_vma_vm(vma);
1693 	int cur_fence = 0, i;
1694 	int number_tiles = hweight8(vma->tile_mask);
1695 	int err;
1696 	u8 id;
1697 
1698 	trace_xe_vma_bind(vma);
1699 
1700 	if (number_tiles > 1) {
1701 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1702 				       GFP_KERNEL);
1703 		if (!fences)
1704 			return ERR_PTR(-ENOMEM);
1705 	}
1706 
1707 	for_each_tile(tile, vm->xe, id) {
1708 		if (!(vma->tile_mask & BIT(id)))
1709 			goto next;
1710 
1711 		fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
1712 					 first_op ? syncs : NULL,
1713 					 first_op ? num_syncs : 0,
1714 					 vma->tile_present & BIT(id));
1715 		if (IS_ERR(fence)) {
1716 			err = PTR_ERR(fence);
1717 			goto err_fences;
1718 		}
1719 
1720 		if (fences)
1721 			fences[cur_fence++] = fence;
1722 
1723 next:
1724 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1725 			q = list_next_entry(q, multi_gt_list);
1726 	}
1727 
1728 	if (fences) {
1729 		cf = dma_fence_array_create(number_tiles, fences,
1730 					    vm->composite_fence_ctx,
1731 					    vm->composite_fence_seqno++,
1732 					    false);
1733 		if (!cf) {
1734 			--vm->composite_fence_seqno;
1735 			err = -ENOMEM;
1736 			goto err_fences;
1737 		}
1738 	}
1739 
1740 	if (last_op) {
1741 		for (i = 0; i < num_syncs; i++)
1742 			xe_sync_entry_signal(&syncs[i], NULL,
1743 					     cf ? &cf->base : fence);
1744 	}
1745 
1746 	return cf ? &cf->base : fence;
1747 
1748 err_fences:
1749 	if (fences) {
1750 		while (cur_fence)
1751 			dma_fence_put(fences[--cur_fence]);
1752 		kfree(fences);
1753 	}
1754 
1755 	return ERR_PTR(err);
1756 }
1757 
1758 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1759 			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1760 			u32 num_syncs, bool immediate, bool first_op,
1761 			bool last_op)
1762 {
1763 	struct dma_fence *fence;
1764 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1765 
1766 	xe_vm_assert_held(vm);
1767 
1768 	if (immediate) {
1769 		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
1770 				       last_op);
1771 		if (IS_ERR(fence))
1772 			return PTR_ERR(fence);
1773 	} else {
1774 		int i;
1775 
1776 		xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1777 
1778 		fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1779 		if (last_op) {
1780 			for (i = 0; i < num_syncs; i++)
1781 				xe_sync_entry_signal(&syncs[i], NULL, fence);
1782 		}
1783 	}
1784 
1785 	if (last_op)
1786 		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1787 	dma_fence_put(fence);
1788 
1789 	return 0;
1790 }
1791 
1792 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
1793 		      struct xe_bo *bo, struct xe_sync_entry *syncs,
1794 		      u32 num_syncs, bool immediate, bool first_op,
1795 		      bool last_op)
1796 {
1797 	int err;
1798 
1799 	xe_vm_assert_held(vm);
1800 	xe_bo_assert_held(bo);
1801 
1802 	if (bo && immediate) {
1803 		err = xe_bo_validate(bo, vm, true);
1804 		if (err)
1805 			return err;
1806 	}
1807 
1808 	return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op,
1809 			    last_op);
1810 }
1811 
1812 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1813 			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1814 			u32 num_syncs, bool first_op, bool last_op)
1815 {
1816 	struct dma_fence *fence;
1817 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1818 
1819 	xe_vm_assert_held(vm);
1820 	xe_bo_assert_held(xe_vma_bo(vma));
1821 
1822 	fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
1823 	if (IS_ERR(fence))
1824 		return PTR_ERR(fence);
1825 
1826 	xe_vma_destroy(vma, fence);
1827 	if (last_op)
1828 		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1829 	dma_fence_put(fence);
1830 
1831 	return 0;
1832 }
1833 
1834 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
1835 				    DRM_XE_VM_CREATE_FLAG_LR_MODE | \
1836 				    DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1837 
1838 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1839 		       struct drm_file *file)
1840 {
1841 	struct xe_device *xe = to_xe_device(dev);
1842 	struct xe_file *xef = to_xe_file(file);
1843 	struct drm_xe_vm_create *args = data;
1844 	struct xe_tile *tile;
1845 	struct xe_vm *vm;
1846 	u32 id, asid;
1847 	int err;
1848 	u32 flags = 0;
1849 
1850 	if (XE_IOCTL_DBG(xe, args->extensions))
1851 		return -EINVAL;
1852 
1853 	if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1854 		args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
1855 
1856 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1857 			 !xe->info.has_usm))
1858 		return -EINVAL;
1859 
1860 	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1861 		return -EINVAL;
1862 
1863 	if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1864 		return -EINVAL;
1865 
1866 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
1867 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1868 		return -EINVAL;
1869 
1870 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
1871 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1872 		return -EINVAL;
1873 
1874 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1875 			 xe_device_in_non_fault_mode(xe)))
1876 		return -EINVAL;
1877 
1878 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
1879 			 xe_device_in_fault_mode(xe)))
1880 		return -EINVAL;
1881 
1882 	if (XE_IOCTL_DBG(xe, args->extensions))
1883 		return -EINVAL;
1884 
1885 	if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
1886 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
1887 	if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
1888 		flags |= XE_VM_FLAG_LR_MODE;
1889 	if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1890 		flags |= XE_VM_FLAG_FAULT_MODE;
1891 
1892 	vm = xe_vm_create(xe, flags);
1893 	if (IS_ERR(vm))
1894 		return PTR_ERR(vm);
1895 
1896 	mutex_lock(&xef->vm.lock);
1897 	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1898 	mutex_unlock(&xef->vm.lock);
1899 	if (err)
1900 		goto err_close_and_put;
1901 
1902 	if (xe->info.has_asid) {
1903 		mutex_lock(&xe->usm.lock);
1904 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1905 				      XA_LIMIT(1, XE_MAX_ASID - 1),
1906 				      &xe->usm.next_asid, GFP_KERNEL);
1907 		mutex_unlock(&xe->usm.lock);
1908 		if (err < 0)
1909 			goto err_free_id;
1910 
1911 		vm->usm.asid = asid;
1912 	}
1913 
1914 	args->vm_id = id;
1915 	vm->xef = xef;
1916 
1917 	/* Record BO memory for the VM pagetables created against the client */
1918 	for_each_tile(tile, xe, id)
1919 		if (vm->pt_root[id])
1920 			xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
1921 
1922 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1923 	/* Warning: Security issue - never enable by default */
1924 	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
1925 #endif
1926 
1927 	return 0;
1928 
1929 err_free_id:
1930 	mutex_lock(&xef->vm.lock);
1931 	xa_erase(&xef->vm.xa, id);
1932 	mutex_unlock(&xef->vm.lock);
1933 err_close_and_put:
1934 	xe_vm_close_and_put(vm);
1935 
1936 	return err;
1937 }
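
/*
 * Illustrative userspace usage (a sketch only, not part of this file;
 * assumes libdrm's drmIoctl() and the uAPI definitions from xe_drm.h):
 *
 *	struct drm_xe_vm_create create = {
 *		.flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
 *	};
 *
 *	if (!drmIoctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
 *		vm_id = create.vm_id;
 */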
1938 
1939 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1940 			struct drm_file *file)
1941 {
1942 	struct xe_device *xe = to_xe_device(dev);
1943 	struct xe_file *xef = to_xe_file(file);
1944 	struct drm_xe_vm_destroy *args = data;
1945 	struct xe_vm *vm;
1946 	int err = 0;
1947 
1948 	if (XE_IOCTL_DBG(xe, args->pad) ||
1949 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1950 		return -EINVAL;
1951 
1952 	mutex_lock(&xef->vm.lock);
1953 	vm = xa_load(&xef->vm.xa, args->vm_id);
1954 	if (XE_IOCTL_DBG(xe, !vm))
1955 		err = -ENOENT;
1956 	else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
1957 		err = -EBUSY;
1958 	else
1959 		xa_erase(&xef->vm.xa, args->vm_id);
1960 	mutex_unlock(&xef->vm.lock);
1961 
1962 	if (!err)
1963 		xe_vm_close_and_put(vm);
1964 
1965 	return err;
1966 }
1967 
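/*
 * Maps the prefetch region index supplied through the VM bind uAPI to a TTM
 * placement: 0 selects system memory (XE_PL_TT), 1 and 2 select the
 * corresponding VRAM instance.
 */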
1968 static const u32 region_to_mem_type[] = {
1969 	XE_PL_TT,
1970 	XE_PL_VRAM0,
1971 	XE_PL_VRAM1,
1972 };
1973 
1974 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
1975 			  struct xe_exec_queue *q, u32 region,
1976 			  struct xe_sync_entry *syncs, u32 num_syncs,
1977 			  bool first_op, bool last_op)
1978 {
1979 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1980 	int err;
1981 
1982 	xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
1983 
1984 	if (!xe_vma_has_no_bo(vma)) {
1985 		err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
1986 		if (err)
1987 			return err;
1988 	}
1989 
1990 	if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
1991 		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
1992 				  true, first_op, last_op);
1993 	} else {
1994 		int i;
1995 
1996 		/* Nothing to do, signal fences now */
1997 		if (last_op) {
1998 			for (i = 0; i < num_syncs; i++) {
1999 				struct dma_fence *fence =
2000 					xe_exec_queue_last_fence_get(wait_exec_queue, vm);
2001 
2002 				xe_sync_entry_signal(&syncs[i], NULL, fence);
2003 				dma_fence_put(fence);
2004 			}
2005 		}
2006 
2007 		return 0;
2008 	}
2009 }
2010 
2011 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
2012 			     bool post_commit)
2013 {
2014 	down_read(&vm->userptr.notifier_lock);
2015 	vma->gpuva.flags |= XE_VMA_DESTROYED;
2016 	up_read(&vm->userptr.notifier_lock);
2017 	if (post_commit)
2018 		xe_vm_remove_vma(vm, vma);
2019 }
2020 
2021 #undef ULL
2022 #define ULL	unsigned long long
2023 
2024 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2025 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2026 {
2027 	struct xe_vma *vma;
2028 
2029 	switch (op->op) {
2030 	case DRM_GPUVA_OP_MAP:
2031 		vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2032 		       (ULL)op->map.va.addr, (ULL)op->map.va.range);
2033 		break;
2034 	case DRM_GPUVA_OP_REMAP:
2035 		vma = gpuva_to_vma(op->remap.unmap->va);
2036 		vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2037 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2038 		       op->remap.unmap->keep ? 1 : 0);
2039 		if (op->remap.prev)
2040 			vm_dbg(&xe->drm,
2041 			       "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2042 			       (ULL)op->remap.prev->va.addr,
2043 			       (ULL)op->remap.prev->va.range);
2044 		if (op->remap.next)
2045 			vm_dbg(&xe->drm,
2046 			       "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2047 			       (ULL)op->remap.next->va.addr,
2048 			       (ULL)op->remap.next->va.range);
2049 		break;
2050 	case DRM_GPUVA_OP_UNMAP:
2051 		vma = gpuva_to_vma(op->unmap.va);
2052 		vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2053 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2054 		       op->unmap.keep ? 1 : 0);
2055 		break;
2056 	case DRM_GPUVA_OP_PREFETCH:
2057 		vma = gpuva_to_vma(op->prefetch.va);
2058 		vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2059 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2060 		break;
2061 	default:
2062 		drm_warn(&xe->drm, "NOT POSSIBLE");
2063 	}
2064 }
2065 #else
2066 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2067 {
2068 }
2069 #endif
2070 
2071 /*
 * Create the operations list from the IOCTL arguments and set up the
 * operation fields so that the parse and commit steps are decoupled from
 * the IOCTL arguments. This step can fail.
2074  */
2075 static struct drm_gpuva_ops *
2076 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2077 			 u64 bo_offset_or_userptr, u64 addr, u64 range,
2078 			 u32 operation, u32 flags,
2079 			 u32 prefetch_region, u16 pat_index)
2080 {
2081 	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2082 	struct drm_gpuva_ops *ops;
2083 	struct drm_gpuva_op *__op;
2084 	struct drm_gpuvm_bo *vm_bo;
2085 	int err;
2086 
2087 	lockdep_assert_held_write(&vm->lock);
2088 
2089 	vm_dbg(&vm->xe->drm,
2090 	       "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2091 	       operation, (ULL)addr, (ULL)range,
2092 	       (ULL)bo_offset_or_userptr);
2093 
2094 	switch (operation) {
2095 	case DRM_XE_VM_BIND_OP_MAP:
2096 	case DRM_XE_VM_BIND_OP_MAP_USERPTR:
2097 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2098 						  obj, bo_offset_or_userptr);
2099 		break;
2100 	case DRM_XE_VM_BIND_OP_UNMAP:
2101 		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2102 		break;
2103 	case DRM_XE_VM_BIND_OP_PREFETCH:
2104 		ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2105 		break;
2106 	case DRM_XE_VM_BIND_OP_UNMAP_ALL:
2107 		xe_assert(vm->xe, bo);
2108 
2109 		err = xe_bo_lock(bo, true);
2110 		if (err)
2111 			return ERR_PTR(err);
2112 
2113 		vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
2114 		if (IS_ERR(vm_bo)) {
2115 			xe_bo_unlock(bo);
2116 			return ERR_CAST(vm_bo);
2117 		}
2118 
2119 		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2120 		drm_gpuvm_bo_put(vm_bo);
2121 		xe_bo_unlock(bo);
2122 		break;
2123 	default:
2124 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2125 		ops = ERR_PTR(-EINVAL);
2126 	}
2127 	if (IS_ERR(ops))
2128 		return ops;
2129 
2130 	drm_gpuva_for_each_op(__op, ops) {
2131 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2132 
2133 		if (__op->op == DRM_GPUVA_OP_MAP) {
2134 			op->map.immediate =
2135 				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
2136 			op->map.read_only =
2137 				flags & DRM_XE_VM_BIND_FLAG_READONLY;
2138 			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2139 			op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
2140 			op->map.pat_index = pat_index;
2141 		} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
2142 			op->prefetch.region = prefetch_region;
2143 		}
2144 
2145 		print_op(vm->xe, __op);
2146 	}
2147 
2148 	return ops;
2149 }
2150 
2151 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2152 			      u16 pat_index, unsigned int flags)
2153 {
2154 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2155 	struct drm_exec exec;
2156 	struct xe_vma *vma;
2157 	int err;
2158 
2159 	lockdep_assert_held_write(&vm->lock);
2160 
2161 	if (bo) {
2162 		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2163 		drm_exec_until_all_locked(&exec) {
2164 			err = 0;
2165 			if (!bo->vm) {
2166 				err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
2167 				drm_exec_retry_on_contention(&exec);
2168 			}
2169 			if (!err) {
2170 				err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2171 				drm_exec_retry_on_contention(&exec);
2172 			}
2173 			if (err) {
2174 				drm_exec_fini(&exec);
2175 				return ERR_PTR(err);
2176 			}
2177 		}
2178 	}
2179 	vma = xe_vma_create(vm, bo, op->gem.offset,
2180 			    op->va.addr, op->va.addr +
2181 			    op->va.range - 1, pat_index, flags);
2182 	if (bo)
2183 		drm_exec_fini(&exec);
2184 
2185 	if (xe_vma_is_userptr(vma)) {
2186 		err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2187 		if (err) {
2188 			prep_vma_destroy(vm, vma, false);
2189 			xe_vma_destroy_unlocked(vma);
2190 			return ERR_PTR(err);
2191 		}
2192 	} else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
2193 		err = add_preempt_fences(vm, bo);
2194 		if (err) {
2195 			prep_vma_destroy(vm, vma, false);
2196 			xe_vma_destroy_unlocked(vma);
2197 			return ERR_PTR(err);
2198 		}
2199 	}
2200 
2201 	return vma;
2202 }
2203 
2204 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2205 {
2206 	if (vma->gpuva.flags & XE_VMA_PTE_1G)
2207 		return SZ_1G;
2208 	else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
2209 		return SZ_2M;
2210 	else if (vma->gpuva.flags & XE_VMA_PTE_64K)
2211 		return SZ_64K;
2212 	else if (vma->gpuva.flags & XE_VMA_PTE_4K)
2213 		return SZ_4K;
2214 
	return SZ_1G;	/* Uninitialized, use max size */
2216 }
2217 
2218 static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2219 {
2220 	switch (size) {
2221 	case SZ_1G:
2222 		vma->gpuva.flags |= XE_VMA_PTE_1G;
2223 		break;
2224 	case SZ_2M:
2225 		vma->gpuva.flags |= XE_VMA_PTE_2M;
2226 		break;
2227 	case SZ_64K:
2228 		vma->gpuva.flags |= XE_VMA_PTE_64K;
2229 		break;
2230 	case SZ_4K:
2231 		vma->gpuva.flags |= XE_VMA_PTE_4K;
2232 		break;
2233 	}
2234 }
2235 
2236 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2237 {
2238 	int err = 0;
2239 
2240 	lockdep_assert_held_write(&vm->lock);
2241 
2242 	switch (op->base.op) {
2243 	case DRM_GPUVA_OP_MAP:
2244 		err |= xe_vm_insert_vma(vm, op->map.vma);
2245 		if (!err)
2246 			op->flags |= XE_VMA_OP_COMMITTED;
2247 		break;
2248 	case DRM_GPUVA_OP_REMAP:
2249 	{
2250 		u8 tile_present =
2251 			gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2252 
2253 		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2254 				 true);
2255 		op->flags |= XE_VMA_OP_COMMITTED;
2256 
2257 		if (op->remap.prev) {
2258 			err |= xe_vm_insert_vma(vm, op->remap.prev);
2259 			if (!err)
2260 				op->flags |= XE_VMA_OP_PREV_COMMITTED;
2261 			if (!err && op->remap.skip_prev) {
2262 				op->remap.prev->tile_present =
2263 					tile_present;
2264 				op->remap.prev = NULL;
2265 			}
2266 		}
2267 		if (op->remap.next) {
2268 			err |= xe_vm_insert_vma(vm, op->remap.next);
2269 			if (!err)
2270 				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2271 			if (!err && op->remap.skip_next) {
2272 				op->remap.next->tile_present =
2273 					tile_present;
2274 				op->remap.next = NULL;
2275 			}
2276 		}
2277 
		/* Adjust for partial unbind after removing VMA from VM */
2279 		if (!err) {
2280 			op->base.remap.unmap->va->va.addr = op->remap.start;
2281 			op->base.remap.unmap->va->va.range = op->remap.range;
2282 		}
2283 		break;
2284 	}
2285 	case DRM_GPUVA_OP_UNMAP:
2286 		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2287 		op->flags |= XE_VMA_OP_COMMITTED;
2288 		break;
2289 	case DRM_GPUVA_OP_PREFETCH:
2290 		op->flags |= XE_VMA_OP_COMMITTED;
2291 		break;
2292 	default:
2293 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2294 	}
2295 
2296 	return err;
2297 }
2298 
2300 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2301 				   struct drm_gpuva_ops *ops,
2302 				   struct xe_sync_entry *syncs, u32 num_syncs,
2303 				   struct list_head *ops_list, bool last)
2304 {
2305 	struct xe_device *xe = vm->xe;
2306 	struct xe_vma_op *last_op = NULL;
2307 	struct drm_gpuva_op *__op;
2308 	int err = 0;
2309 
2310 	lockdep_assert_held_write(&vm->lock);
2311 
2312 	drm_gpuva_for_each_op(__op, ops) {
2313 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2314 		struct xe_vma *vma;
2315 		bool first = list_empty(ops_list);
2316 		unsigned int flags = 0;
2317 
2318 		INIT_LIST_HEAD(&op->link);
2319 		list_add_tail(&op->link, ops_list);
2320 
2321 		if (first) {
2322 			op->flags |= XE_VMA_OP_FIRST;
2323 			op->num_syncs = num_syncs;
2324 			op->syncs = syncs;
2325 		}
2326 
2327 		op->q = q;
2328 
2329 		switch (op->base.op) {
2330 		case DRM_GPUVA_OP_MAP:
2331 		{
2332 			flags |= op->map.read_only ?
2333 				VMA_CREATE_FLAG_READ_ONLY : 0;
2334 			flags |= op->map.is_null ?
2335 				VMA_CREATE_FLAG_IS_NULL : 0;
2336 			flags |= op->map.dumpable ?
2337 				VMA_CREATE_FLAG_DUMPABLE : 0;
2338 
2339 			vma = new_vma(vm, &op->base.map, op->map.pat_index,
2340 				      flags);
2341 			if (IS_ERR(vma))
2342 				return PTR_ERR(vma);
2343 
2344 			op->map.vma = vma;
2345 			break;
2346 		}
2347 		case DRM_GPUVA_OP_REMAP:
2348 		{
2349 			struct xe_vma *old =
2350 				gpuva_to_vma(op->base.remap.unmap->va);
2351 
2352 			op->remap.start = xe_vma_start(old);
2353 			op->remap.range = xe_vma_size(old);
2354 
2355 			if (op->base.remap.prev) {
2356 				flags |= op->base.remap.unmap->va->flags &
2357 					XE_VMA_READ_ONLY ?
2358 					VMA_CREATE_FLAG_READ_ONLY : 0;
2359 				flags |= op->base.remap.unmap->va->flags &
2360 					DRM_GPUVA_SPARSE ?
2361 					VMA_CREATE_FLAG_IS_NULL : 0;
2362 				flags |= op->base.remap.unmap->va->flags &
2363 					XE_VMA_DUMPABLE ?
2364 					VMA_CREATE_FLAG_DUMPABLE : 0;
2365 
2366 				vma = new_vma(vm, op->base.remap.prev,
2367 					      old->pat_index, flags);
2368 				if (IS_ERR(vma))
2369 					return PTR_ERR(vma);
2370 
2371 				op->remap.prev = vma;
2372 
2373 				/*
2374 				 * Userptr creates a new SG mapping so
2375 				 * we must also rebind.
2376 				 */
2377 				op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2378 					IS_ALIGNED(xe_vma_end(vma),
2379 						   xe_vma_max_pte_size(old));
2380 				if (op->remap.skip_prev) {
2381 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2382 					op->remap.range -=
2383 						xe_vma_end(vma) -
2384 						xe_vma_start(old);
2385 					op->remap.start = xe_vma_end(vma);
2386 					vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
2387 					       (ULL)op->remap.start,
2388 					       (ULL)op->remap.range);
2389 				}
2390 			}
2391 
2392 			if (op->base.remap.next) {
2393 				flags |= op->base.remap.unmap->va->flags &
2394 					XE_VMA_READ_ONLY ?
2395 					VMA_CREATE_FLAG_READ_ONLY : 0;
2396 				flags |= op->base.remap.unmap->va->flags &
2397 					DRM_GPUVA_SPARSE ?
2398 					VMA_CREATE_FLAG_IS_NULL : 0;
2399 				flags |= op->base.remap.unmap->va->flags &
2400 					XE_VMA_DUMPABLE ?
2401 					VMA_CREATE_FLAG_DUMPABLE : 0;
2402 
2403 				vma = new_vma(vm, op->base.remap.next,
2404 					      old->pat_index, flags);
2405 				if (IS_ERR(vma))
2406 					return PTR_ERR(vma);
2407 
2408 				op->remap.next = vma;
2409 
2410 				/*
2411 				 * Userptr creates a new SG mapping so
2412 				 * we must also rebind.
2413 				 */
2414 				op->remap.skip_next = !xe_vma_is_userptr(old) &&
2415 					IS_ALIGNED(xe_vma_start(vma),
2416 						   xe_vma_max_pte_size(old));
2417 				if (op->remap.skip_next) {
2418 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2419 					op->remap.range -=
2420 						xe_vma_end(old) -
2421 						xe_vma_start(vma);
2422 					vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
2423 					       (ULL)op->remap.start,
2424 					       (ULL)op->remap.range);
2425 				}
2426 			}
2427 			break;
2428 		}
2429 		case DRM_GPUVA_OP_UNMAP:
2430 		case DRM_GPUVA_OP_PREFETCH:
2431 			/* Nothing to do */
2432 			break;
2433 		default:
2434 			drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2435 		}
2436 
2437 		last_op = op;
2438 
2439 		err = xe_vma_op_commit(vm, op);
2440 		if (err)
2441 			return err;
2442 	}
2443 
2444 	/* FIXME: Unhandled corner case */
2445 	XE_WARN_ON(!last_op && last && !list_empty(ops_list));
2446 
2447 	if (!last_op)
2448 		return 0;
2449 
2450 	last_op->ops = ops;
2451 	if (last) {
2452 		last_op->flags |= XE_VMA_OP_LAST;
2453 		last_op->num_syncs = num_syncs;
2454 		last_op->syncs = syncs;
2455 	}
2456 
2457 	return 0;
2458 }
2459 
2460 static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
2461 		      struct xe_vma *vma, struct xe_vma_op *op)
2462 {
2463 	int err;
2464 
2465 	lockdep_assert_held_write(&vm->lock);
2466 
2467 	err = xe_vm_prepare_vma(exec, vma, 1);
2468 	if (err)
2469 		return err;
2470 
2471 	xe_vm_assert_held(vm);
2472 	xe_bo_assert_held(xe_vma_bo(vma));
2473 
2474 	switch (op->base.op) {
2475 	case DRM_GPUVA_OP_MAP:
2476 		err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2477 				 op->syncs, op->num_syncs,
2478 				 op->map.immediate || !xe_vm_in_fault_mode(vm),
2479 				 op->flags & XE_VMA_OP_FIRST,
2480 				 op->flags & XE_VMA_OP_LAST);
2481 		break;
2482 	case DRM_GPUVA_OP_REMAP:
2483 	{
2484 		bool prev = !!op->remap.prev;
2485 		bool next = !!op->remap.next;
2486 
2487 		if (!op->remap.unmap_done) {
2488 			if (prev || next)
2489 				vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2490 			err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2491 					   op->num_syncs,
2492 					   op->flags & XE_VMA_OP_FIRST,
2493 					   op->flags & XE_VMA_OP_LAST &&
2494 					   !prev && !next);
2495 			if (err)
2496 				break;
2497 			op->remap.unmap_done = true;
2498 		}
2499 
2500 		if (prev) {
2501 			op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2502 			err = xe_vm_bind(vm, op->remap.prev, op->q,
2503 					 xe_vma_bo(op->remap.prev), op->syncs,
2504 					 op->num_syncs, true, false,
2505 					 op->flags & XE_VMA_OP_LAST && !next);
2506 			op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2507 			if (err)
2508 				break;
2509 			op->remap.prev = NULL;
2510 		}
2511 
2512 		if (next) {
2513 			op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2514 			err = xe_vm_bind(vm, op->remap.next, op->q,
2515 					 xe_vma_bo(op->remap.next),
2516 					 op->syncs, op->num_syncs,
2517 					 true, false,
2518 					 op->flags & XE_VMA_OP_LAST);
2519 			op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2520 			if (err)
2521 				break;
2522 			op->remap.next = NULL;
2523 		}
2524 
2525 		break;
2526 	}
2527 	case DRM_GPUVA_OP_UNMAP:
2528 		err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2529 				   op->num_syncs, op->flags & XE_VMA_OP_FIRST,
2530 				   op->flags & XE_VMA_OP_LAST);
2531 		break;
2532 	case DRM_GPUVA_OP_PREFETCH:
2533 		err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
2534 				     op->syncs, op->num_syncs,
2535 				     op->flags & XE_VMA_OP_FIRST,
2536 				     op->flags & XE_VMA_OP_LAST);
2537 		break;
2538 	default:
2539 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2540 	}
2541 
2542 	if (err)
2543 		trace_xe_vma_fail(vma);
2544 
2545 	return err;
2546 }
2547 
2548 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2549 			       struct xe_vma_op *op)
2550 {
2551 	struct drm_exec exec;
2552 	int err;
2553 
2554 retry_userptr:
2555 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2556 	drm_exec_until_all_locked(&exec) {
2557 		err = op_execute(&exec, vm, vma, op);
2558 		drm_exec_retry_on_contention(&exec);
2559 		if (err)
2560 			break;
2561 	}
2562 	drm_exec_fini(&exec);
2563 
2564 	if (err == -EAGAIN) {
2565 		lockdep_assert_held_write(&vm->lock);
2566 
2567 		if (op->base.op == DRM_GPUVA_OP_REMAP) {
2568 			if (!op->remap.unmap_done)
2569 				vma = gpuva_to_vma(op->base.remap.unmap->va);
2570 			else if (op->remap.prev)
2571 				vma = op->remap.prev;
2572 			else
2573 				vma = op->remap.next;
2574 		}
2575 
2576 		if (xe_vma_is_userptr(vma)) {
2577 			err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2578 			if (!err)
2579 				goto retry_userptr;
2580 
2581 			trace_xe_vma_fail(vma);
2582 		}
2583 	}
2584 
2585 	return err;
2586 }
2587 
2588 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2589 {
2590 	int ret = 0;
2591 
2592 	lockdep_assert_held_write(&vm->lock);
2593 
2594 	switch (op->base.op) {
2595 	case DRM_GPUVA_OP_MAP:
2596 		ret = __xe_vma_op_execute(vm, op->map.vma, op);
2597 		break;
2598 	case DRM_GPUVA_OP_REMAP:
2599 	{
2600 		struct xe_vma *vma;
2601 
2602 		if (!op->remap.unmap_done)
2603 			vma = gpuva_to_vma(op->base.remap.unmap->va);
2604 		else if (op->remap.prev)
2605 			vma = op->remap.prev;
2606 		else
2607 			vma = op->remap.next;
2608 
2609 		ret = __xe_vma_op_execute(vm, vma, op);
2610 		break;
2611 	}
2612 	case DRM_GPUVA_OP_UNMAP:
2613 		ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2614 					  op);
2615 		break;
2616 	case DRM_GPUVA_OP_PREFETCH:
2617 		ret = __xe_vma_op_execute(vm,
2618 					  gpuva_to_vma(op->base.prefetch.va),
2619 					  op);
2620 		break;
2621 	default:
2622 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2623 	}
2624 
2625 	return ret;
2626 }
2627 
2628 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2629 {
2630 	bool last = op->flags & XE_VMA_OP_LAST;
2631 
2632 	if (last) {
2633 		while (op->num_syncs--)
2634 			xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2635 		kfree(op->syncs);
2636 		if (op->q)
2637 			xe_exec_queue_put(op->q);
2638 	}
2639 	if (!list_empty(&op->link))
2640 		list_del(&op->link);
2641 	if (op->ops)
2642 		drm_gpuva_ops_free(&vm->gpuvm, op->ops);
2643 	if (last)
2644 		xe_vm_put(vm);
2645 }
2646 
2647 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2648 			     bool post_commit, bool prev_post_commit,
2649 			     bool next_post_commit)
2650 {
2651 	lockdep_assert_held_write(&vm->lock);
2652 
2653 	switch (op->base.op) {
2654 	case DRM_GPUVA_OP_MAP:
2655 		if (op->map.vma) {
2656 			prep_vma_destroy(vm, op->map.vma, post_commit);
2657 			xe_vma_destroy_unlocked(op->map.vma);
2658 		}
2659 		break;
2660 	case DRM_GPUVA_OP_UNMAP:
2661 	{
2662 		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2663 
2664 		if (vma) {
2665 			down_read(&vm->userptr.notifier_lock);
2666 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2667 			up_read(&vm->userptr.notifier_lock);
2668 			if (post_commit)
2669 				xe_vm_insert_vma(vm, vma);
2670 		}
2671 		break;
2672 	}
2673 	case DRM_GPUVA_OP_REMAP:
2674 	{
2675 		struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2676 
2677 		if (op->remap.prev) {
2678 			prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2679 			xe_vma_destroy_unlocked(op->remap.prev);
2680 		}
2681 		if (op->remap.next) {
2682 			prep_vma_destroy(vm, op->remap.next, next_post_commit);
2683 			xe_vma_destroy_unlocked(op->remap.next);
2684 		}
2685 		if (vma) {
2686 			down_read(&vm->userptr.notifier_lock);
2687 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2688 			up_read(&vm->userptr.notifier_lock);
2689 			if (post_commit)
2690 				xe_vm_insert_vma(vm, vma);
2691 		}
2692 		break;
2693 	}
2694 	case DRM_GPUVA_OP_PREFETCH:
2695 		/* Nothing to do */
2696 		break;
2697 	default:
2698 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2699 	}
2700 }
2701 
2702 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2703 				     struct drm_gpuva_ops **ops,
2704 				     int num_ops_list)
2705 {
2706 	int i;
2707 
2708 	for (i = num_ops_list - 1; i >= 0; --i) {
2709 		struct drm_gpuva_ops *__ops = ops[i];
2710 		struct drm_gpuva_op *__op;
2711 
2712 		if (!__ops)
2713 			continue;
2714 
2715 		drm_gpuva_for_each_op_reverse(__op, __ops) {
2716 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2717 
2718 			xe_vma_op_unwind(vm, op,
2719 					 op->flags & XE_VMA_OP_COMMITTED,
2720 					 op->flags & XE_VMA_OP_PREV_COMMITTED,
2721 					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2722 		}
2723 
2724 		drm_gpuva_ops_free(&vm->gpuvm, __ops);
2725 	}
2726 }
2727 
2728 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2729 				     struct list_head *ops_list)
2730 {
2731 	struct xe_vma_op *op, *next;
2732 	int err;
2733 
2734 	lockdep_assert_held_write(&vm->lock);
2735 
2736 	list_for_each_entry_safe(op, next, ops_list, link) {
2737 		err = xe_vma_op_execute(vm, op);
2738 		if (err) {
2739 			drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
2740 				 op->base.op, err);
2741 			/*
2742 			 * FIXME: Killing VM rather than proper error handling
2743 			 */
2744 			xe_vm_kill(vm);
2745 			return -ENOSPC;
2746 		}
2747 		xe_vma_op_cleanup(vm, op);
2748 	}
2749 
2750 	return 0;
2751 }
2752 
2753 #define SUPPORTED_FLAGS	\
2754 	(DRM_XE_VM_BIND_FLAG_READONLY | \
2755 	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
2756 	 DRM_XE_VM_BIND_FLAG_DUMPABLE)
2757 #define XE_64K_PAGE_MASK 0xffffull
2758 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
2759 
2760 #define MAX_BINDS	512	/* FIXME: Picking random upper limit */
2761 
2762 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2763 				    struct drm_xe_vm_bind *args,
2764 				    struct drm_xe_vm_bind_op **bind_ops)
2765 {
2766 	int err;
2767 	int i;
2768 
2769 	if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
2770 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2771 		return -EINVAL;
2772 
2773 	if (XE_IOCTL_DBG(xe, args->extensions) ||
2774 	    XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
2775 		return -EINVAL;
2776 
2777 	if (args->num_binds > 1) {
2778 		u64 __user *bind_user =
2779 			u64_to_user_ptr(args->vector_of_binds);
2780 
		*bind_ops = kmalloc_array(args->num_binds,
					  sizeof(struct drm_xe_vm_bind_op),
					  GFP_KERNEL);
2783 		if (!*bind_ops)
2784 			return -ENOMEM;
2785 
		err = copy_from_user(*bind_ops, bind_user,
				     sizeof(struct drm_xe_vm_bind_op) *
				     args->num_binds);
2789 		if (XE_IOCTL_DBG(xe, err)) {
2790 			err = -EFAULT;
2791 			goto free_bind_ops;
2792 		}
2793 	} else {
2794 		*bind_ops = &args->bind;
2795 	}
2796 
2797 	for (i = 0; i < args->num_binds; ++i) {
2798 		u64 range = (*bind_ops)[i].range;
2799 		u64 addr = (*bind_ops)[i].addr;
2800 		u32 op = (*bind_ops)[i].op;
2801 		u32 flags = (*bind_ops)[i].flags;
2802 		u32 obj = (*bind_ops)[i].obj;
2803 		u64 obj_offset = (*bind_ops)[i].obj_offset;
2804 		u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
2805 		bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2806 		u16 pat_index = (*bind_ops)[i].pat_index;
2807 		u16 coh_mode;
2808 
2809 		if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
2810 			err = -EINVAL;
2811 			goto free_bind_ops;
2812 		}
2813 
2814 		pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
2815 		(*bind_ops)[i].pat_index = pat_index;
2816 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2817 		if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
2818 			err = -EINVAL;
2819 			goto free_bind_ops;
2820 		}
2821 
2822 		if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
2823 			err = -EINVAL;
2824 			goto free_bind_ops;
2825 		}
2826 
2827 		if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
2828 		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
2829 		    XE_IOCTL_DBG(xe, obj && is_null) ||
2830 		    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
2831 		    XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
2832 				 is_null) ||
2833 		    XE_IOCTL_DBG(xe, !obj &&
2834 				 op == DRM_XE_VM_BIND_OP_MAP &&
2835 				 !is_null) ||
2836 		    XE_IOCTL_DBG(xe, !obj &&
2837 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2838 		    XE_IOCTL_DBG(xe, addr &&
2839 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2840 		    XE_IOCTL_DBG(xe, range &&
2841 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2842 		    XE_IOCTL_DBG(xe, obj &&
2843 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2844 		    XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2845 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2846 		    XE_IOCTL_DBG(xe, obj &&
2847 				 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
2848 		    XE_IOCTL_DBG(xe, prefetch_region &&
2849 				 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
2850 		    XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
2851 				       xe->info.mem_region_mask)) ||
2852 		    XE_IOCTL_DBG(xe, obj &&
2853 				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
2854 			err = -EINVAL;
2855 			goto free_bind_ops;
2856 		}
2857 
2858 		if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
2859 		    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
2860 		    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
2861 		    XE_IOCTL_DBG(xe, !range &&
2862 				 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
2863 			err = -EINVAL;
2864 			goto free_bind_ops;
2865 		}
2866 	}
2867 
2868 	return 0;
2869 
2870 free_bind_ops:
2871 	if (args->num_binds > 1)
2872 		kfree(*bind_ops);
2873 	return err;
2874 }
2875 
2876 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
2877 				       struct xe_exec_queue *q,
2878 				       struct xe_sync_entry *syncs,
2879 				       int num_syncs)
2880 {
2881 	struct dma_fence *fence;
2882 	int i, err = 0;
2883 
2884 	fence = xe_sync_in_fence_get(syncs, num_syncs,
2885 				     to_wait_exec_queue(vm, q), vm);
2886 	if (IS_ERR(fence))
2887 		return PTR_ERR(fence);
2888 
2889 	for (i = 0; i < num_syncs; i++)
2890 		xe_sync_entry_signal(&syncs[i], NULL, fence);
2891 
2892 	xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
2893 				     fence);
2894 	dma_fence_put(fence);
2895 
2896 	return err;
2897 }
2898 
2899 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2900 {
2901 	struct xe_device *xe = to_xe_device(dev);
2902 	struct xe_file *xef = to_xe_file(file);
2903 	struct drm_xe_vm_bind *args = data;
2904 	struct drm_xe_sync __user *syncs_user;
2905 	struct xe_bo **bos = NULL;
2906 	struct drm_gpuva_ops **ops = NULL;
2907 	struct xe_vm *vm;
2908 	struct xe_exec_queue *q = NULL;
2909 	u32 num_syncs, num_ufence = 0;
2910 	struct xe_sync_entry *syncs = NULL;
2911 	struct drm_xe_vm_bind_op *bind_ops;
2912 	LIST_HEAD(ops_list);
2913 	int err;
2914 	int i;
2915 
2916 	err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
2917 	if (err)
2918 		return err;
2919 
2920 	if (args->exec_queue_id) {
2921 		q = xe_exec_queue_lookup(xef, args->exec_queue_id);
2922 		if (XE_IOCTL_DBG(xe, !q)) {
2923 			err = -ENOENT;
2924 			goto free_objs;
2925 		}
2926 
2927 		if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
2928 			err = -EINVAL;
2929 			goto put_exec_queue;
2930 		}
2931 	}
2932 
2933 	vm = xe_vm_lookup(xef, args->vm_id);
2934 	if (XE_IOCTL_DBG(xe, !vm)) {
2935 		err = -EINVAL;
2936 		goto put_exec_queue;
2937 	}
2938 
2939 	err = down_write_killable(&vm->lock);
2940 	if (err)
2941 		goto put_vm;
2942 
2943 	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
2944 		err = -ENOENT;
2945 		goto release_vm_lock;
2946 	}
2947 
2948 	for (i = 0; i < args->num_binds; ++i) {
2949 		u64 range = bind_ops[i].range;
2950 		u64 addr = bind_ops[i].addr;
2951 
2952 		if (XE_IOCTL_DBG(xe, range > vm->size) ||
2953 		    XE_IOCTL_DBG(xe, addr > vm->size - range)) {
2954 			err = -EINVAL;
2955 			goto release_vm_lock;
2956 		}
2957 	}
2958 
2959 	if (args->num_binds) {
2960 		bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
2961 		if (!bos) {
2962 			err = -ENOMEM;
2963 			goto release_vm_lock;
2964 		}
2965 
2966 		ops = kcalloc(args->num_binds, sizeof(*ops), GFP_KERNEL);
2967 		if (!ops) {
2968 			err = -ENOMEM;
2969 			goto release_vm_lock;
2970 		}
2971 	}
2972 
2973 	for (i = 0; i < args->num_binds; ++i) {
2974 		struct drm_gem_object *gem_obj;
2975 		u64 range = bind_ops[i].range;
2976 		u64 addr = bind_ops[i].addr;
2977 		u32 obj = bind_ops[i].obj;
2978 		u64 obj_offset = bind_ops[i].obj_offset;
2979 		u16 pat_index = bind_ops[i].pat_index;
2980 		u16 coh_mode;
2981 
2982 		if (!obj)
2983 			continue;
2984 
2985 		gem_obj = drm_gem_object_lookup(file, obj);
2986 		if (XE_IOCTL_DBG(xe, !gem_obj)) {
2987 			err = -ENOENT;
2988 			goto put_obj;
2989 		}
2990 		bos[i] = gem_to_xe_bo(gem_obj);
2991 
2992 		if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
2993 		    XE_IOCTL_DBG(xe, obj_offset >
2994 				 bos[i]->size - range)) {
2995 			err = -EINVAL;
2996 			goto put_obj;
2997 		}
2998 
2999 		if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3000 			if (XE_IOCTL_DBG(xe, obj_offset &
3001 					 XE_64K_PAGE_MASK) ||
3002 			    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3003 			    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3004 				err = -EINVAL;
3005 				goto put_obj;
3006 			}
3007 		}
3008 
3009 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
3010 		if (bos[i]->cpu_caching) {
3011 			if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
3012 					 bos[i]->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
3013 				err = -EINVAL;
3014 				goto put_obj;
3015 			}
3016 		} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
3017 			/*
3018 			 * Imported dma-buf from a different device should
3019 			 * require 1way or 2way coherency since we don't know
			 * how it was mapped on the CPU. Just assume it is
			 * potentially cached on the CPU side.
3022 			 */
3023 			err = -EINVAL;
3024 			goto put_obj;
3025 		}
3026 	}
3027 
3028 	if (args->num_syncs) {
3029 		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3030 		if (!syncs) {
3031 			err = -ENOMEM;
3032 			goto put_obj;
3033 		}
3034 	}
3035 
3036 	syncs_user = u64_to_user_ptr(args->syncs);
3037 	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3038 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3039 					  &syncs_user[num_syncs],
3040 					  (xe_vm_in_lr_mode(vm) ?
3041 					   SYNC_PARSE_FLAG_LR_MODE : 0) |
3042 					  (!args->num_binds ?
3043 					   SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
3044 		if (err)
3045 			goto free_syncs;
3046 
3047 		if (xe_sync_is_ufence(&syncs[num_syncs]))
3048 			num_ufence++;
3049 	}
3050 
3051 	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
3052 		err = -EINVAL;
3053 		goto free_syncs;
3054 	}
3055 
3056 	if (!args->num_binds) {
3057 		err = -ENODATA;
3058 		goto free_syncs;
3059 	}
3060 
3061 	for (i = 0; i < args->num_binds; ++i) {
3062 		u64 range = bind_ops[i].range;
3063 		u64 addr = bind_ops[i].addr;
3064 		u32 op = bind_ops[i].op;
3065 		u32 flags = bind_ops[i].flags;
3066 		u64 obj_offset = bind_ops[i].obj_offset;
3067 		u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
3068 		u16 pat_index = bind_ops[i].pat_index;
3069 
3070 		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3071 						  addr, range, op, flags,
3072 						  prefetch_region, pat_index);
3073 		if (IS_ERR(ops[i])) {
3074 			err = PTR_ERR(ops[i]);
3075 			ops[i] = NULL;
3076 			goto unwind_ops;
3077 		}
3078 
3079 		err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3080 					      &ops_list,
3081 					      i == args->num_binds - 1);
3082 		if (err)
3083 			goto unwind_ops;
3084 	}
3085 
3086 	/* Nothing to do */
3087 	if (list_empty(&ops_list)) {
3088 		err = -ENODATA;
3089 		goto unwind_ops;
3090 	}
3091 
3092 	xe_vm_get(vm);
3093 	if (q)
3094 		xe_exec_queue_get(q);
3095 
3096 	err = vm_bind_ioctl_ops_execute(vm, &ops_list);
3097 
3098 	up_write(&vm->lock);
3099 
3100 	if (q)
3101 		xe_exec_queue_put(q);
3102 	xe_vm_put(vm);
3103 
3104 	for (i = 0; bos && i < args->num_binds; ++i)
3105 		xe_bo_put(bos[i]);
3106 
3107 	kfree(bos);
3108 	kfree(ops);
3109 	if (args->num_binds > 1)
3110 		kfree(bind_ops);
3111 
3112 	return err;
3113 
3114 unwind_ops:
3115 	vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3116 free_syncs:
3117 	if (err == -ENODATA)
3118 		err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3119 	while (num_syncs--)
3120 		xe_sync_entry_cleanup(&syncs[num_syncs]);
3121 
3122 	kfree(syncs);
3123 put_obj:
3124 	for (i = 0; i < args->num_binds; ++i)
3125 		xe_bo_put(bos[i]);
3126 release_vm_lock:
3127 	up_write(&vm->lock);
3128 put_vm:
3129 	xe_vm_put(vm);
3130 put_exec_queue:
3131 	if (q)
3132 		xe_exec_queue_put(q);
3133 free_objs:
3134 	kfree(bos);
3135 	kfree(ops);
3136 	if (args->num_binds > 1)
3137 		kfree(bind_ops);
3138 	return err;
3139 }
3140 
3141 /**
3142  * xe_vm_lock() - Lock the vm's dma_resv object
3143  * @vm: The struct xe_vm whose lock is to be locked
 * @intr: Whether to perform any waits interruptibly
3145  *
3146  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3147  * contended lock was interrupted. If @intr is false, the function
3148  * always returns 0.
3149  */
3150 int xe_vm_lock(struct xe_vm *vm, bool intr)
3151 {
3152 	if (intr)
3153 		return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3154 
3155 	return dma_resv_lock(xe_vm_resv(vm), NULL);
3156 }
3157 
3158 /**
3159  * xe_vm_unlock() - Unlock the vm's dma_resv object
3160  * @vm: The struct xe_vm whose lock is to be released.
3161  *
 * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
3163  */
3164 void xe_vm_unlock(struct xe_vm *vm)
3165 {
3166 	dma_resv_unlock(xe_vm_resv(vm));
3167 }
3168 
3169 /**
3170  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3171  * @vma: VMA to invalidate
3172  *
 * Walks the page-table leaves for this VMA, zeroing the entries it owns,
 * invalidates the TLBs, and blocks until the TLB invalidation is complete.
 *
 * Return: 0 on success, negative error code otherwise.
3178  */
3179 int xe_vm_invalidate_vma(struct xe_vma *vma)
3180 {
3181 	struct xe_device *xe = xe_vma_vm(vma)->xe;
3182 	struct xe_tile *tile;
3183 	u32 tile_needs_invalidate = 0;
3184 	int seqno[XE_MAX_TILES_PER_DEVICE];
3185 	u8 id;
3186 	int ret;
3187 
3188 	xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
3189 	xe_assert(xe, !xe_vma_is_null(vma));
3190 	trace_xe_vma_usm_invalidate(vma);
3191 
3192 	/* Check that we don't race with page-table updates */
3193 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3194 		if (xe_vma_is_userptr(vma)) {
3195 			WARN_ON_ONCE(!mmu_interval_check_retry
3196 				     (&to_userptr_vma(vma)->userptr.notifier,
3197 				      to_userptr_vma(vma)->userptr.notifier_seq));
3198 			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3199 							     DMA_RESV_USAGE_BOOKKEEP));
3200 
3201 		} else {
3202 			xe_bo_assert_held(xe_vma_bo(vma));
3203 		}
3204 	}
3205 
3206 	for_each_tile(tile, xe, id) {
3207 		if (xe_pt_zap_ptes(tile, vma)) {
3208 			tile_needs_invalidate |= BIT(id);
3209 			xe_device_wmb(xe);
3210 			/*
3211 			 * FIXME: We potentially need to invalidate multiple
3212 			 * GTs within the tile
3213 			 */
3214 			seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3215 			if (seqno[id] < 0)
3216 				return seqno[id];
3217 		}
3218 	}
3219 
3220 	for_each_tile(tile, xe, id) {
3221 		if (tile_needs_invalidate & BIT(id)) {
3222 			ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3223 			if (ret < 0)
3224 				return ret;
3225 		}
3226 	}
3227 
3228 	vma->usm.tile_invalidated = vma->tile_mask;
3229 
3230 	return 0;
3231 }
3232 
3233 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3234 {
3235 	struct drm_gpuva *gpuva;
3236 	bool is_vram;
	u64 addr;
3238 
3239 	if (!down_read_trylock(&vm->lock)) {
		drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3241 		return 0;
3242 	}
3243 	if (vm->pt_root[gt_id]) {
3244 		addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
3245 		is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
3246 		drm_printf(p, " VM root: A:0x%llx %s\n", addr,
3247 			   is_vram ? "VRAM" : "SYS");
3248 	}
3249 
3250 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3251 		struct xe_vma *vma = gpuva_to_vma(gpuva);
3252 		bool is_userptr = xe_vma_is_userptr(vma);
3253 		bool is_null = xe_vma_is_null(vma);
3254 
3255 		if (is_null) {
3256 			addr = 0;
3257 		} else if (is_userptr) {
3258 			struct sg_table *sg = to_userptr_vma(vma)->userptr.sg;
3259 			struct xe_res_cursor cur;
3260 
3261 			if (sg) {
3262 				xe_res_first_sg(sg, 0, XE_PAGE_SIZE, &cur);
3263 				addr = xe_res_dma(&cur);
3264 			} else {
3265 				addr = 0;
3266 			}
3267 		} else {
3268 			addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
3269 			is_vram = xe_bo_is_vram(xe_vma_bo(vma));
3270 		}
3271 		drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3272 			   xe_vma_start(vma), xe_vma_end(vma) - 1,
3273 			   xe_vma_size(vma),
3274 			   addr, is_null ? "NULL" : is_userptr ? "USR" :
3275 			   is_vram ? "VRAM" : "SYS");
3276 	}
3277 	up_read(&vm->lock);
3278 
3279 	return 0;
3280 }
3281 
3282 struct xe_vm_snapshot {
3283 	unsigned long num_snaps;
3284 	struct {
3285 		u64 ofs, bo_ofs;
3286 		unsigned long len;
3287 		struct xe_bo *bo;
3288 		void *data;
3289 		struct mm_struct *mm;
3290 	} snap[];
3291 };
3292 
3293 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
3294 {
3295 	unsigned long num_snaps = 0, i;
3296 	struct xe_vm_snapshot *snap = NULL;
3297 	struct drm_gpuva *gpuva;
3298 
3299 	if (!vm)
3300 		return NULL;
3301 
3302 	mutex_lock(&vm->snap_mutex);
3303 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3304 		if (gpuva->flags & XE_VMA_DUMPABLE)
3305 			num_snaps++;
3306 	}
3307 
3308 	if (num_snaps)
3309 		snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
3310 	if (!snap)
3311 		goto out_unlock;
3312 
3313 	snap->num_snaps = num_snaps;
3314 	i = 0;
3315 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3316 		struct xe_vma *vma = gpuva_to_vma(gpuva);
3317 		struct xe_bo *bo = vma->gpuva.gem.obj ?
3318 			gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
3319 
3320 		if (!(gpuva->flags & XE_VMA_DUMPABLE))
3321 			continue;
3322 
3323 		snap->snap[i].ofs = xe_vma_start(vma);
3324 		snap->snap[i].len = xe_vma_size(vma);
3325 		if (bo) {
3326 			snap->snap[i].bo = xe_bo_get(bo);
3327 			snap->snap[i].bo_ofs = xe_vma_bo_offset(vma);
3328 		} else if (xe_vma_is_userptr(vma)) {
3329 			struct mm_struct *mm =
3330 				to_userptr_vma(vma)->userptr.notifier.mm;
3331 
3332 			if (mmget_not_zero(mm))
3333 				snap->snap[i].mm = mm;
3334 			else
3335 				snap->snap[i].data = ERR_PTR(-EFAULT);
3336 
3337 			snap->snap[i].bo_ofs = xe_vma_userptr(vma);
3338 		} else {
3339 			snap->snap[i].data = ERR_PTR(-ENOENT);
3340 		}
3341 		i++;
3342 	}
3343 
3344 out_unlock:
3345 	mutex_unlock(&vm->snap_mutex);
3346 	return snap;
3347 }
3348 
3349 void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
3350 {
3351 	for (int i = 0; i < snap->num_snaps; i++) {
3352 		struct xe_bo *bo = snap->snap[i].bo;
3353 		struct iosys_map src;
3354 		int err;
3355 
3356 		if (IS_ERR(snap->snap[i].data))
3357 			continue;
3358 
3359 		snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER);
3360 		if (!snap->snap[i].data) {
3361 			snap->snap[i].data = ERR_PTR(-ENOMEM);
3362 			goto cleanup_bo;
3363 		}
3364 
3365 		if (bo) {
3366 			dma_resv_lock(bo->ttm.base.resv, NULL);
3367 			err = ttm_bo_vmap(&bo->ttm, &src);
3368 			if (!err) {
3369 				xe_map_memcpy_from(xe_bo_device(bo),
3370 						   snap->snap[i].data,
3371 						   &src, snap->snap[i].bo_ofs,
3372 						   snap->snap[i].len);
3373 				ttm_bo_vunmap(&bo->ttm, &src);
3374 			}
3375 			dma_resv_unlock(bo->ttm.base.resv);
3376 		} else {
			void __user *userptr = u64_to_user_ptr(snap->snap[i].bo_ofs);
3378 
3379 			kthread_use_mm(snap->snap[i].mm);
3380 			if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len))
3381 				err = 0;
3382 			else
3383 				err = -EFAULT;
3384 			kthread_unuse_mm(snap->snap[i].mm);
3385 
3386 			mmput(snap->snap[i].mm);
3387 			snap->snap[i].mm = NULL;
3388 		}
3389 
3390 		if (err) {
3391 			kvfree(snap->snap[i].data);
3392 			snap->snap[i].data = ERR_PTR(err);
3393 		}
3394 
3395 cleanup_bo:
3396 		xe_bo_put(bo);
3397 		snap->snap[i].bo = NULL;
3398 	}
3399 }
3400 
3401 void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
3402 {
3403 	unsigned long i, j;
3404 
3405 	for (i = 0; i < snap->num_snaps; i++) {
3406 		if (IS_ERR(snap->snap[i].data))
3407 			goto uncaptured;
3408 
3409 		drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
3410 		drm_printf(p, "[%llx].data: ",
3411 			   snap->snap[i].ofs);
3412 
3413 		for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
3414 			u32 *val = snap->snap[i].data + j;
3415 			char dumped[ASCII85_BUFSZ];
3416 
3417 			drm_puts(p, ascii85_encode(*val, dumped));
3418 		}
3419 
3420 		drm_puts(p, "\n");
3421 		continue;
3422 
3423 uncaptured:
3424 		drm_printf(p, "Unable to capture range [%llx-%llx]: %li\n",
3425 			   snap->snap[i].ofs, snap->snap[i].ofs + snap->snap[i].len - 1,
3426 			   PTR_ERR(snap->snap[i].data));
3427 	}
3428 }
3429 
3430 void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
3431 {
3432 	unsigned long i;
3433 
3434 	if (!snap)
3435 		return;
3436 
3437 	for (i = 0; i < snap->num_snaps; i++) {
3438 		if (!IS_ERR(snap->snap[i].data))
3439 			kvfree(snap->snap[i].data);
3440 		xe_bo_put(snap->snap[i].bo);
3441 		if (snap->snap[i].mm)
3442 			mmput(snap->snap[i].mm);
3443 	}
3444 	kvfree(snap);
3445 }
3446