xref: /linux/drivers/gpu/drm/xe/xe_vm.c (revision a9a95523c84957b7863796b5d1df2f3f5dca4519)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_vm.h"
7 
8 #include <linux/dma-fence-array.h>
9 
10 #include <drm/drm_exec.h>
11 #include <drm/drm_print.h>
12 #include <drm/ttm/ttm_execbuf_util.h>
13 #include <drm/ttm/ttm_tt.h>
14 #include <drm/xe_drm.h>
15 #include <linux/delay.h>
16 #include <linux/kthread.h>
17 #include <linux/mm.h>
18 #include <linux/swap.h>
19 
20 #include "xe_assert.h"
21 #include "xe_bo.h"
22 #include "xe_device.h"
23 #include "xe_drm_client.h"
24 #include "xe_exec_queue.h"
25 #include "xe_gt.h"
26 #include "xe_gt_pagefault.h"
27 #include "xe_gt_tlb_invalidation.h"
28 #include "xe_migrate.h"
29 #include "xe_pm.h"
30 #include "xe_preempt_fence.h"
31 #include "xe_pt.h"
32 #include "xe_res_cursor.h"
33 #include "xe_sync.h"
34 #include "xe_trace.h"
35 #include "generated/xe_wa_oob.h"
36 #include "xe_wa.h"
37 
38 #define TEST_VM_ASYNC_OPS_ERROR
39 
40 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
41 {
42 	return vm->gpuvm.r_obj;
43 }
44 
45 /**
46  * xe_vma_userptr_check_repin() - Advisory check for repin needed
47  * @vma: The userptr vma
48  *
49  * Check if the userptr vma has been invalidated since last successful
50  * repin. The check is advisory only and the function can be called
51  * without the vm->userptr.notifier_lock held. There is no guarantee that the
52  * vma userptr will remain valid after a lockless check, so typically
53  * the call needs to be followed by a proper check under the notifier_lock.
54  *
55  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
56  */
57 int xe_vma_userptr_check_repin(struct xe_vma *vma)
58 {
59 	return mmu_interval_check_retry(&vma->userptr.notifier,
60 					vma->userptr.notifier_seq) ?
61 		-EAGAIN : 0;
62 }
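
/*
 * Illustrative sketch (not part of the driver): the lockless advisory check
 * above is typically followed by a repin and a re-check under the notifier
 * lock, roughly:
 *
 *	if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
 *		err = xe_vma_userptr_pin_pages(vma);
 *	...
 *	down_read(&vm->userptr.notifier_lock);
 *	if (__xe_vm_userptr_needs_repin(vm))
 *		err = -EAGAIN;	(the repin must be redone)
 *	up_read(&vm->userptr.notifier_lock);
 */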
63 
64 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
65 {
66 	struct xe_vm *vm = xe_vma_vm(vma);
67 	struct xe_device *xe = vm->xe;
68 	const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
69 	struct page **pages;
70 	bool in_kthread = !current->mm;
71 	unsigned long notifier_seq;
72 	int pinned, ret, i;
73 	bool read_only = xe_vma_read_only(vma);
74 
75 	lockdep_assert_held(&vm->lock);
76 	xe_assert(xe, xe_vma_is_userptr(vma));
77 retry:
78 	if (vma->gpuva.flags & XE_VMA_DESTROYED)
79 		return 0;
80 
81 	notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
82 	if (notifier_seq == vma->userptr.notifier_seq)
83 		return 0;
84 
85 	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
86 	if (!pages)
87 		return -ENOMEM;
88 
89 	if (vma->userptr.sg) {
90 		dma_unmap_sgtable(xe->drm.dev,
91 				  vma->userptr.sg,
92 				  read_only ? DMA_TO_DEVICE :
93 				  DMA_BIDIRECTIONAL, 0);
94 		sg_free_table(vma->userptr.sg);
95 		vma->userptr.sg = NULL;
96 	}
97 
98 	pinned = ret = 0;
99 	if (in_kthread) {
100 		if (!mmget_not_zero(vma->userptr.notifier.mm)) {
101 			ret = -EFAULT;
102 			goto mm_closed;
103 		}
104 		kthread_use_mm(vma->userptr.notifier.mm);
105 	}
106 
107 	while (pinned < num_pages) {
108 		ret = get_user_pages_fast(xe_vma_userptr(vma) +
109 					  pinned * PAGE_SIZE,
110 					  num_pages - pinned,
111 					  read_only ? 0 : FOLL_WRITE,
112 					  &pages[pinned]);
113 		if (ret < 0) {
114 			if (in_kthread)
115 				ret = 0;
116 			break;
117 		}
118 
119 		pinned += ret;
120 		ret = 0;
121 	}
122 
123 	if (in_kthread) {
124 		kthread_unuse_mm(vma->userptr.notifier.mm);
125 		mmput(vma->userptr.notifier.mm);
126 	}
127 mm_closed:
128 	if (ret)
129 		goto out;
130 
131 	ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
132 						pinned, 0,
133 						(u64)pinned << PAGE_SHIFT,
134 						xe_sg_segment_size(xe->drm.dev),
135 						GFP_KERNEL);
136 	if (ret) {
137 		vma->userptr.sg = NULL;
138 		goto out;
139 	}
140 	vma->userptr.sg = &vma->userptr.sgt;
141 
142 	ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
143 			      read_only ? DMA_TO_DEVICE :
144 			      DMA_BIDIRECTIONAL,
145 			      DMA_ATTR_SKIP_CPU_SYNC |
146 			      DMA_ATTR_NO_KERNEL_MAPPING);
147 	if (ret) {
148 		sg_free_table(vma->userptr.sg);
149 		vma->userptr.sg = NULL;
150 		goto out;
151 	}
152 
153 	for (i = 0; i < pinned; ++i) {
154 		if (!read_only) {
155 			lock_page(pages[i]);
156 			set_page_dirty(pages[i]);
157 			unlock_page(pages[i]);
158 		}
159 
160 		mark_page_accessed(pages[i]);
161 	}
162 
163 out:
164 	release_pages(pages, pinned);
165 	kvfree(pages);
166 
167 	if (!(ret < 0)) {
168 		vma->userptr.notifier_seq = notifier_seq;
169 		if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
170 			goto retry;
171 	}
172 
173 	return ret < 0 ? ret : 0;
174 }
175 
176 static bool preempt_fences_waiting(struct xe_vm *vm)
177 {
178 	struct xe_exec_queue *q;
179 
180 	lockdep_assert_held(&vm->lock);
181 	xe_vm_assert_held(vm);
182 
183 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
184 		if (!q->compute.pfence ||
185 		    (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
186 						   &q->compute.pfence->flags))) {
187 			return true;
188 		}
189 	}
190 
191 	return false;
192 }
193 
194 static void free_preempt_fences(struct list_head *list)
195 {
196 	struct list_head *link, *next;
197 
198 	list_for_each_safe(link, next, list)
199 		xe_preempt_fence_free(to_preempt_fence_from_link(link));
200 }
201 
202 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
203 				unsigned int *count)
204 {
205 	lockdep_assert_held(&vm->lock);
206 	xe_vm_assert_held(vm);
207 
208 	if (*count >= vm->preempt.num_exec_queues)
209 		return 0;
210 
211 	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
212 		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
213 
214 		if (IS_ERR(pfence))
215 			return PTR_ERR(pfence);
216 
217 		list_move_tail(xe_preempt_fence_link(pfence), list);
218 	}
219 
220 	return 0;
221 }
222 
223 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
224 {
225 	struct xe_exec_queue *q;
226 
227 	xe_vm_assert_held(vm);
228 
229 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
230 		if (q->compute.pfence) {
231 			long timeout = dma_fence_wait(q->compute.pfence, false);
232 
233 			if (timeout < 0)
234 				return -ETIME;
235 			dma_fence_put(q->compute.pfence);
236 			q->compute.pfence = NULL;
237 		}
238 	}
239 
240 	return 0;
241 }
242 
243 static bool xe_vm_is_idle(struct xe_vm *vm)
244 {
245 	struct xe_exec_queue *q;
246 
247 	xe_vm_assert_held(vm);
248 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
249 		if (!xe_exec_queue_is_idle(q))
250 			return false;
251 	}
252 
253 	return true;
254 }
255 
256 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
257 {
258 	struct list_head *link;
259 	struct xe_exec_queue *q;
260 
261 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
262 		struct dma_fence *fence;
263 
264 		link = list->next;
265 		xe_assert(vm->xe, link != list);
266 
267 		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
268 					     q, q->compute.context,
269 					     ++q->compute.seqno);
270 		dma_fence_put(q->compute.pfence);
271 		q->compute.pfence = fence;
272 	}
273 }
274 
275 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
276 {
277 	struct xe_exec_queue *q;
278 	int err;
279 
280 	err = xe_bo_lock(bo, true);
281 	if (err)
282 		return err;
283 
284 	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
285 	if (err)
286 		goto out_unlock;
287 
288 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
289 		if (q->compute.pfence) {
290 			dma_resv_add_fence(bo->ttm.base.resv,
291 					   q->compute.pfence,
292 					   DMA_RESV_USAGE_BOOKKEEP);
293 		}
294 
295 out_unlock:
296 	xe_bo_unlock(bo);
297 	return err;
298 }
299 
300 /**
301  * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
302  * @vm: The vm.
303  * @fence: The fence to add.
304  * @usage: The resv usage for the fence.
305  *
306  * Loops over all of the vm's external object bindings and adds a @fence
307  * with the given @usage to all of the external object's reservation
308  * objects.
309  */
310 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
311 			     enum dma_resv_usage usage)
312 {
313 	struct xe_vma *vma;
314 
315 	list_for_each_entry(vma, &vm->extobj.list, extobj.link)
316 		dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence, usage);
317 }
318 
319 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
320 {
321 	struct xe_exec_queue *q;
322 
323 	lockdep_assert_held(&vm->lock);
324 	xe_vm_assert_held(vm);
325 
326 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
327 		q->ops->resume(q);
328 
329 		dma_resv_add_fence(xe_vm_resv(vm), q->compute.pfence,
330 				   DMA_RESV_USAGE_BOOKKEEP);
331 		xe_vm_fence_all_extobjs(vm, q->compute.pfence,
332 					DMA_RESV_USAGE_BOOKKEEP);
333 	}
334 }
335 
336 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
337 {
338 	struct drm_exec exec;
339 	struct dma_fence *pfence;
340 	int err;
341 	bool wait;
342 
343 	xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
344 
345 	down_write(&vm->lock);
346 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
347 	drm_exec_until_all_locked(&exec) {
348 		err = xe_vm_lock_dma_resv(vm, &exec, 1, true);
349 		drm_exec_retry_on_contention(&exec);
350 		if (err)
351 			goto out_unlock;
352 	}
353 
354 	pfence = xe_preempt_fence_create(q, q->compute.context,
355 					 ++q->compute.seqno);
356 	if (!pfence) {
357 		err = -ENOMEM;
358 		goto out_unlock;
359 	}
360 
361 	list_add(&q->compute.link, &vm->preempt.exec_queues);
362 	++vm->preempt.num_exec_queues;
363 	q->compute.pfence = pfence;
364 
365 	down_read(&vm->userptr.notifier_lock);
366 
367 	dma_resv_add_fence(xe_vm_resv(vm), pfence,
368 			   DMA_RESV_USAGE_BOOKKEEP);
369 
370 	xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
371 
372 	/*
373 	 * Check to see if a preemption on the VM or a userptr invalidation
374 	 * is in flight; if so, trigger this preempt fence to sync state with
375 	 * the other preempt fences on the VM.
376 	 */
377 	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
378 	if (wait)
379 		dma_fence_enable_sw_signaling(pfence);
380 
381 	up_read(&vm->userptr.notifier_lock);
382 
383 out_unlock:
384 	drm_exec_fini(&exec);
385 	up_write(&vm->lock);
386 
387 	return err;
388 }
389 
390 /**
391  * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
392  * @vm: The VM.
393  * @q: The exec_queue
394  */
395 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
396 {
397 	if (!xe_vm_in_compute_mode(vm))
398 		return;
399 
400 	down_write(&vm->lock);
401 	list_del(&q->compute.link);
402 	--vm->preempt.num_exec_queues;
403 	if (q->compute.pfence) {
404 		dma_fence_enable_sw_signaling(q->compute.pfence);
405 		dma_fence_put(q->compute.pfence);
406 		q->compute.pfence = NULL;
407 	}
408 	up_write(&vm->lock);
409 }
410 
411 /**
412  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
413  * that need repinning.
414  * @vm: The VM.
415  *
416  * This function checks for whether the VM has userptrs that need repinning,
417  * and provides a release-type barrier on the userptr.notifier_lock after
418  * checking.
419  *
420  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
421  */
422 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
423 {
424 	lockdep_assert_held_read(&vm->userptr.notifier_lock);
425 
426 	return (list_empty(&vm->userptr.repin_list) &&
427 		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
428 }
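
/*
 * Illustrative sketch (not part of the driver): callers sample this under the
 * notifier read lock right before committing, as the rebind worker does:
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	if (__xe_vm_userptr_needs_repin(vm)) {
 *		up_read(&vm->userptr.notifier_lock);
 *		goto retry;
 *	}
 *	... point of no return: arm and reinstall preempt fences ...
 *	up_read(&vm->userptr.notifier_lock);
 */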
429 
430 /**
431  * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
432  * objects of the vm's external buffer objects.
433  * @vm: The vm.
434  * @exec: Pointer to a struct drm_exec locking context.
435  * @num_shared: Number of dma-fence slots to reserve in the locked objects.
436  * @lock_vm: Lock also the vm's dma_resv.
437  *
438  * Locks the vm dma-resv objects and all the dma-resv objects of the
439  * buffer objects on the vm external object list.
440  *
441  * Return: 0 on success, negative error code on error. In particular,
442  * -EINTR or -ERESTARTSYS may be returned if @exec uses interruptible waits.
443  */
444 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct drm_exec *exec,
445 			unsigned int num_shared, bool lock_vm)
446 {
447 	struct xe_vma *vma, *next;
448 	int err = 0;
449 
450 	lockdep_assert_held(&vm->lock);
451 
452 	if (lock_vm) {
453 		err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
454 		if (err)
455 			return err;
456 	}
457 
458 	list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
459 		err = drm_exec_prepare_obj(exec, &xe_vma_bo(vma)->ttm.base, num_shared);
460 		if (err)
461 			return err;
462 	}
463 
464 	spin_lock(&vm->notifier.list_lock);
465 	list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
466 				 notifier.rebind_link) {
467 		xe_bo_assert_held(xe_vma_bo(vma));
468 
469 		list_del_init(&vma->notifier.rebind_link);
470 		if (vma->tile_present && !(vma->gpuva.flags & XE_VMA_DESTROYED))
471 			list_move_tail(&vma->combined_links.rebind,
472 				       &vm->rebind_list);
473 	}
474 	spin_unlock(&vm->notifier.list_lock);
475 
476 	return 0;
477 }
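
/*
 * Illustrative sketch (not part of the driver): xe_vm_lock_dma_resv() is
 * meant to be called from inside a drm_exec transaction, e.g.:
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
 *	drm_exec_until_all_locked(&exec) {
 *		err = xe_vm_lock_dma_resv(vm, &exec, 1, true);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err)
 *			goto out_unlock;
 *	}
 */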
478 
479 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
480 
481 static void xe_vm_kill(struct xe_vm *vm)
482 {
483 	struct xe_exec_queue *q;
484 
485 	lockdep_assert_held(&vm->lock);
486 
487 	xe_vm_lock(vm, false);
488 	vm->flags |= XE_VM_FLAG_BANNED;
489 	trace_xe_vm_kill(vm);
490 
491 	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
492 		q->ops->kill(q);
493 	xe_vm_unlock(vm);
494 
495 	/* TODO: Inform user the VM is banned */
496 }
497 
498 /**
499  * xe_vm_validate_should_retry() - Whether to retry after a validate error.
500  * @exec: The drm_exec object used for locking before validation.
501  * @err: The error returned from ttm_bo_validate().
502  * @end: A ktime_t cookie that should be set to 0 before first use and
503  * that should be reused on subsequent calls.
504  *
505  * With multiple active VMs, under memory pressure, it is possible that
506  * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
507  * Until ttm properly handles locking in such scenarios, the best thing the
508  * driver can do is retry with a timeout. Check if that is necessary, and
509  * if so unlock the drm_exec's objects while keeping the ticket to prepare
510  * for a rerun.
511  *
512  * Return: true if a retry after drm_exec_init() is recommended;
513  * false otherwise.
514  */
515 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
516 {
517 	ktime_t cur;
518 
519 	if (err != -ENOMEM)
520 		return false;
521 
522 	cur = ktime_get();
523 	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
524 	if (!ktime_before(cur, *end))
525 		return false;
526 
527 	/*
528 	 * We would like to keep the ticket here with
529 	 * drm_exec_unlock_all(), but WW mutex asserts currently
530 	 * stop us from that. In any case this function could go away
531 	 * with proper TTM -EDEADLK handling.
532 	 */
533 	drm_exec_fini(exec);
534 
535 	msleep(20);
536 	return true;
537 }
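
/*
 * Illustrative sketch (not part of the driver): typical retry loop around a
 * drm_exec transaction, mirroring the rebind worker below:
 *
 *	ktime_t end = 0;
 * retry:
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
 *	drm_exec_until_all_locked(&exec) {
 *		err = ...lock and validate...;
 *		drm_exec_retry_on_contention(&exec);
 *		if (err && xe_vm_validate_should_retry(&exec, err, &end))
 *			goto retry;	(the helper already called drm_exec_fini())
 *	}
 */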
538 
539 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
540 				 bool *done)
541 {
542 	struct xe_vma *vma;
543 	int err;
544 
545 	/*
546 	 * 1 fence for each preempt fence plus a fence for each tile from a
547 	 * possible rebind
548 	 */
549 	err = drm_exec_prepare_obj(exec, xe_vm_obj(vm),
550 				   vm->preempt.num_exec_queues +
551 				   vm->xe->info.tile_count);
552 	if (err)
553 		return err;
554 
555 	if (xe_vm_is_idle(vm)) {
556 		vm->preempt.rebind_deactivated = true;
557 		*done = true;
558 		return 0;
559 	}
560 
561 	if (!preempt_fences_waiting(vm)) {
562 		*done = true;
563 		return 0;
564 	}
565 
566 	err = xe_vm_lock_dma_resv(vm, exec, vm->preempt.num_exec_queues, false);
567 	if (err)
568 		return err;
569 
570 	err = wait_for_existing_preempt_fences(vm);
571 	if (err)
572 		return err;
573 
574 	list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
575 		if (xe_vma_has_no_bo(vma) ||
576 		    vma->gpuva.flags & XE_VMA_DESTROYED)
577 			continue;
578 
579 		err = xe_bo_validate(xe_vma_bo(vma), vm, false);
580 		if (err)
581 			break;
582 	}
583 
584 	return err;
585 }
586 
587 static void preempt_rebind_work_func(struct work_struct *w)
588 {
589 	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
590 	struct drm_exec exec;
591 	struct dma_fence *rebind_fence;
592 	unsigned int fence_count = 0;
593 	LIST_HEAD(preempt_fences);
594 	ktime_t end = 0;
595 	int err = 0;
596 	long wait;
597 	int __maybe_unused tries = 0;
598 
599 	xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
600 	trace_xe_vm_rebind_worker_enter(vm);
601 
602 	down_write(&vm->lock);
603 
604 	if (xe_vm_is_closed_or_banned(vm)) {
605 		up_write(&vm->lock);
606 		trace_xe_vm_rebind_worker_exit(vm);
607 		return;
608 	}
609 
610 retry:
611 	if (xe_vm_userptr_check_repin(vm)) {
612 		err = xe_vm_userptr_pin(vm);
613 		if (err)
614 			goto out_unlock_outer;
615 	}
616 
617 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
618 
619 	drm_exec_until_all_locked(&exec) {
620 		bool done = false;
621 
622 		err = xe_preempt_work_begin(&exec, vm, &done);
623 		drm_exec_retry_on_contention(&exec);
624 		if (err && xe_vm_validate_should_retry(&exec, err, &end)) {
625 			err = -EAGAIN;
626 			goto out_unlock_outer;
627 		}
628 		if (err || done)
629 			goto out_unlock;
630 	}
631 
632 	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
633 	if (err)
634 		goto out_unlock;
635 
636 	rebind_fence = xe_vm_rebind(vm, true);
637 	if (IS_ERR(rebind_fence)) {
638 		err = PTR_ERR(rebind_fence);
639 		goto out_unlock;
640 	}
641 
642 	if (rebind_fence) {
643 		dma_fence_wait(rebind_fence, false);
644 		dma_fence_put(rebind_fence);
645 	}
646 
647 	/* Wait on munmap style VM unbinds */
648 	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
649 				     DMA_RESV_USAGE_KERNEL,
650 				     false, MAX_SCHEDULE_TIMEOUT);
651 	if (wait <= 0) {
652 		err = -ETIME;
653 		goto out_unlock;
654 	}
655 
656 #define retry_required(__tries, __vm) \
657 	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
658 	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
659 	__xe_vm_userptr_needs_repin(__vm))
660 
661 	down_read(&vm->userptr.notifier_lock);
662 	if (retry_required(tries, vm)) {
663 		up_read(&vm->userptr.notifier_lock);
664 		err = -EAGAIN;
665 		goto out_unlock;
666 	}
667 
668 #undef retry_required
669 
670 	spin_lock(&vm->xe->ttm.lru_lock);
671 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
672 	spin_unlock(&vm->xe->ttm.lru_lock);
673 
674 	/* Point of no return. */
675 	arm_preempt_fences(vm, &preempt_fences);
676 	resume_and_reinstall_preempt_fences(vm);
677 	up_read(&vm->userptr.notifier_lock);
678 
679 out_unlock:
680 	drm_exec_fini(&exec);
681 out_unlock_outer:
682 	if (err == -EAGAIN) {
683 		trace_xe_vm_rebind_worker_retry(vm);
684 		goto retry;
685 	}
686 
687 	if (err) {
688 		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
689 		xe_vm_kill(vm);
690 	}
691 	up_write(&vm->lock);
692 
693 	free_preempt_fences(&preempt_fences);
694 
695 	trace_xe_vm_rebind_worker_exit(vm);
696 }
697 
698 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
699 				   const struct mmu_notifier_range *range,
700 				   unsigned long cur_seq)
701 {
702 	struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
703 	struct xe_vm *vm = xe_vma_vm(vma);
704 	struct dma_resv_iter cursor;
705 	struct dma_fence *fence;
706 	long err;
707 
708 	xe_assert(vm->xe, xe_vma_is_userptr(vma));
709 	trace_xe_vma_userptr_invalidate(vma);
710 
711 	if (!mmu_notifier_range_blockable(range))
712 		return false;
713 
714 	down_write(&vm->userptr.notifier_lock);
715 	mmu_interval_set_seq(mni, cur_seq);
716 
717 	/* No need to stop gpu access if the userptr is not yet bound. */
718 	if (!vma->userptr.initial_bind) {
719 		up_write(&vm->userptr.notifier_lock);
720 		return true;
721 	}
722 
723 	/*
724 	 * Tell exec and rebind worker they need to repin and rebind this
725 	 * userptr.
726 	 */
727 	if (!xe_vm_in_fault_mode(vm) &&
728 	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
729 		spin_lock(&vm->userptr.invalidated_lock);
730 		list_move_tail(&vma->userptr.invalidate_link,
731 			       &vm->userptr.invalidated);
732 		spin_unlock(&vm->userptr.invalidated_lock);
733 	}
734 
735 	up_write(&vm->userptr.notifier_lock);
736 
737 	/*
738 	 * Preempt fences turn into schedule disables, pipeline these.
739 	 * Note that even in fault mode, we need to wait for binds and
740 	 * unbinds to complete, and those are attached as BOOKKEEP fences
741 	 * to the vm.
742 	 */
743 	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
744 			    DMA_RESV_USAGE_BOOKKEEP);
745 	dma_resv_for_each_fence_unlocked(&cursor, fence)
746 		dma_fence_enable_sw_signaling(fence);
747 	dma_resv_iter_end(&cursor);
748 
749 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
750 				    DMA_RESV_USAGE_BOOKKEEP,
751 				    false, MAX_SCHEDULE_TIMEOUT);
752 	XE_WARN_ON(err <= 0);
753 
754 	if (xe_vm_in_fault_mode(vm)) {
755 		err = xe_vm_invalidate_vma(vma);
756 		XE_WARN_ON(err);
757 	}
758 
759 	trace_xe_vma_userptr_invalidate_complete(vma);
760 
761 	return true;
762 }
763 
764 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
765 	.invalidate = vma_userptr_invalidate,
766 };
767 
768 int xe_vm_userptr_pin(struct xe_vm *vm)
769 {
770 	struct xe_vma *vma, *next;
771 	int err = 0;
772 	LIST_HEAD(tmp_evict);
773 
774 	lockdep_assert_held_write(&vm->lock);
775 
776 	/* Collect invalidated userptrs */
777 	spin_lock(&vm->userptr.invalidated_lock);
778 	list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
779 				 userptr.invalidate_link) {
780 		list_del_init(&vma->userptr.invalidate_link);
781 		if (list_empty(&vma->combined_links.userptr))
782 			list_move_tail(&vma->combined_links.userptr,
783 				       &vm->userptr.repin_list);
784 	}
785 	spin_unlock(&vm->userptr.invalidated_lock);
786 
787 	/* Pin and move to temporary list */
788 	list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
789 				 combined_links.userptr) {
790 		err = xe_vma_userptr_pin_pages(vma);
791 		if (err < 0)
792 			goto out_err;
793 
794 		list_move_tail(&vma->combined_links.userptr, &tmp_evict);
795 	}
796 
797 	/* Take lock and move to rebind_list for rebinding. */
798 	err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
799 	if (err)
800 		goto out_err;
801 
802 	list_for_each_entry_safe(vma, next, &tmp_evict, combined_links.userptr)
803 		list_move_tail(&vma->combined_links.rebind, &vm->rebind_list);
804 
805 	dma_resv_unlock(xe_vm_resv(vm));
806 
807 	return 0;
808 
809 out_err:
810 	list_splice_tail(&tmp_evict, &vm->userptr.repin_list);
811 
812 	return err;
813 }
814 
815 /**
816  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
817  * that need repinning.
818  * @vm: The VM.
819  *
820  * This function does an advisory check for whether the VM has userptrs that
821  * need repinning.
822  *
823  * Return: 0 if there are no indications of userptrs needing repinning,
824  * -EAGAIN if there are.
825  */
826 int xe_vm_userptr_check_repin(struct xe_vm *vm)
827 {
828 	return (list_empty_careful(&vm->userptr.repin_list) &&
829 		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
830 }
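
/*
 * Illustrative sketch (not part of the driver): the advisory VM-level check
 * gates the more expensive pin, as in the rebind worker:
 *
 *	if (xe_vm_userptr_check_repin(vm)) {
 *		err = xe_vm_userptr_pin(vm);
 *		if (err)
 *			goto out;
 *	}
 */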
831 
832 static struct dma_fence *
833 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
834 	       struct xe_sync_entry *syncs, u32 num_syncs,
835 	       bool first_op, bool last_op);
836 
837 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
838 {
839 	struct dma_fence *fence = NULL;
840 	struct xe_vma *vma, *next;
841 
842 	lockdep_assert_held(&vm->lock);
843 	if (xe_vm_no_dma_fences(vm) && !rebind_worker)
844 		return NULL;
845 
846 	xe_vm_assert_held(vm);
847 	list_for_each_entry_safe(vma, next, &vm->rebind_list,
848 				 combined_links.rebind) {
849 		xe_assert(vm->xe, vma->tile_present);
850 
851 		list_del_init(&vma->combined_links.rebind);
852 		dma_fence_put(fence);
853 		if (rebind_worker)
854 			trace_xe_vma_rebind_worker(vma);
855 		else
856 			trace_xe_vma_rebind_exec(vma);
857 		fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
858 		if (IS_ERR(fence))
859 			return fence;
860 	}
861 
862 	return fence;
863 }
864 
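/*
 * Create a VMA backed either by a BO (@bo set), by a userptr (@bo NULL and
 * !@is_null, with @bo_offset_or_userptr holding the CPU address and an MMU
 * interval notifier registered on it), or by nothing at all (@is_null, a
 * sparse mapping flagged DRM_GPUVA_SPARSE).
 */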
865 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
866 				    struct xe_bo *bo,
867 				    u64 bo_offset_or_userptr,
868 				    u64 start, u64 end,
869 				    bool read_only,
870 				    bool is_null,
871 				    u8 tile_mask)
872 {
873 	struct xe_vma *vma;
874 	struct xe_tile *tile;
875 	u8 id;
876 
877 	xe_assert(vm->xe, start < end);
878 	xe_assert(vm->xe, end < vm->size);
879 
880 	if (!bo && !is_null)	/* userptr */
881 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
882 	else
883 		vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
884 			      GFP_KERNEL);
885 	if (!vma) {
886 		vma = ERR_PTR(-ENOMEM);
887 		return vma;
888 	}
889 
890 	INIT_LIST_HEAD(&vma->combined_links.rebind);
891 	INIT_LIST_HEAD(&vma->notifier.rebind_link);
892 	INIT_LIST_HEAD(&vma->extobj.link);
893 
894 	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
895 	vma->gpuva.vm = &vm->gpuvm;
896 	vma->gpuva.va.addr = start;
897 	vma->gpuva.va.range = end - start + 1;
898 	if (read_only)
899 		vma->gpuva.flags |= XE_VMA_READ_ONLY;
900 	if (is_null)
901 		vma->gpuva.flags |= DRM_GPUVA_SPARSE;
902 
903 	if (tile_mask) {
904 		vma->tile_mask = tile_mask;
905 	} else {
906 		for_each_tile(tile, vm->xe, id)
907 			vma->tile_mask |= 0x1 << id;
908 	}
909 
910 	if (GRAPHICS_VER(vm->xe) >= 20 || vm->xe->info.platform == XE_PVC)
911 		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
912 
913 	if (bo) {
914 		struct drm_gpuvm_bo *vm_bo;
915 
916 		xe_bo_assert_held(bo);
917 
918 		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
919 		if (IS_ERR(vm_bo)) {
920 			kfree(vma);
921 			return ERR_CAST(vm_bo);
922 		}
923 
924 		drm_gem_object_get(&bo->ttm.base);
925 		vma->gpuva.gem.obj = &bo->ttm.base;
926 		vma->gpuva.gem.offset = bo_offset_or_userptr;
927 		drm_gpuva_link(&vma->gpuva, vm_bo);
928 		drm_gpuvm_bo_put(vm_bo);
929 	} else /* userptr or null */ {
930 		if (!is_null) {
931 			u64 size = end - start + 1;
932 			int err;
933 
934 			INIT_LIST_HEAD(&vma->userptr.invalidate_link);
935 			vma->gpuva.gem.offset = bo_offset_or_userptr;
936 
937 			err = mmu_interval_notifier_insert(&vma->userptr.notifier,
938 							   current->mm,
939 							   xe_vma_userptr(vma), size,
940 							   &vma_userptr_notifier_ops);
941 			if (err) {
942 				kfree(vma);
943 				vma = ERR_PTR(err);
944 				return vma;
945 			}
946 
947 			vma->userptr.notifier_seq = LONG_MAX;
948 		}
949 
950 		xe_vm_get(vm);
951 	}
952 
953 	return vma;
954 }
955 
956 static bool vm_remove_extobj(struct xe_vma *vma)
957 {
958 	if (!list_empty(&vma->extobj.link)) {
959 		xe_vma_vm(vma)->extobj.entries--;
960 		list_del_init(&vma->extobj.link);
961 		return true;
962 	}
963 	return false;
964 }
965 
966 static void xe_vma_destroy_late(struct xe_vma *vma)
967 {
968 	struct xe_vm *vm = xe_vma_vm(vma);
969 	struct xe_device *xe = vm->xe;
970 	bool read_only = xe_vma_read_only(vma);
971 
972 	if (xe_vma_is_userptr(vma)) {
973 		if (vma->userptr.sg) {
974 			dma_unmap_sgtable(xe->drm.dev,
975 					  vma->userptr.sg,
976 					  read_only ? DMA_TO_DEVICE :
977 					  DMA_BIDIRECTIONAL, 0);
978 			sg_free_table(vma->userptr.sg);
979 			vma->userptr.sg = NULL;
980 		}
981 
982 		/*
983 		 * Since userptr pages are not pinned, we can't remove
984 		 * the notifier until we're sure the GPU is not accessing
985 		 * them anymore
986 		 */
987 		mmu_interval_notifier_remove(&vma->userptr.notifier);
988 		xe_vm_put(vm);
989 	} else if (xe_vma_is_null(vma)) {
990 		xe_vm_put(vm);
991 	} else {
992 		xe_bo_put(xe_vma_bo(vma));
993 	}
994 
995 	kfree(vma);
996 }
997 
998 static void vma_destroy_work_func(struct work_struct *w)
999 {
1000 	struct xe_vma *vma =
1001 		container_of(w, struct xe_vma, destroy_work);
1002 
1003 	xe_vma_destroy_late(vma);
1004 }
1005 
1006 static struct xe_vma *
1007 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
1008 			    struct xe_vma *ignore)
1009 {
1010 	struct drm_gpuvm_bo *vm_bo;
1011 	struct drm_gpuva *va;
1012 	struct drm_gem_object *obj = &bo->ttm.base;
1013 
1014 	xe_bo_assert_held(bo);
1015 
1016 	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
1017 		drm_gpuvm_bo_for_each_va(va, vm_bo) {
1018 			struct xe_vma *vma = gpuva_to_vma(va);
1019 
1020 			if (vma != ignore && xe_vma_vm(vma) == vm)
1021 				return vma;
1022 		}
1023 	}
1024 
1025 	return NULL;
1026 }
1027 
1028 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
1029 				 struct xe_vma *ignore)
1030 {
1031 	bool ret;
1032 
1033 	xe_bo_lock(bo, false);
1034 	ret = !!bo_has_vm_references_locked(bo, vm, ignore);
1035 	xe_bo_unlock(bo);
1036 
1037 	return ret;
1038 }
1039 
1040 static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1041 {
1042 	lockdep_assert_held_write(&vm->lock);
1043 
1044 	list_add(&vma->extobj.link, &vm->extobj.list);
1045 	vm->extobj.entries++;
1046 }
1047 
1048 static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
1049 {
1050 	struct xe_bo *bo = xe_vma_bo(vma);
1051 
1052 	lockdep_assert_held_write(&vm->lock);
1053 
1054 	if (bo_has_vm_references(bo, vm, vma))
1055 		return;
1056 
1057 	__vm_insert_extobj(vm, vma);
1058 }
1059 
1060 static void vma_destroy_cb(struct dma_fence *fence,
1061 			   struct dma_fence_cb *cb)
1062 {
1063 	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1064 
1065 	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1066 	queue_work(system_unbound_wq, &vma->destroy_work);
1067 }
1068 
1069 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1070 {
1071 	struct xe_vm *vm = xe_vma_vm(vma);
1072 
1073 	lockdep_assert_held_write(&vm->lock);
1074 	xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
1075 
1076 	if (xe_vma_is_userptr(vma)) {
1077 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
1078 
1079 		spin_lock(&vm->userptr.invalidated_lock);
1080 		list_del(&vma->userptr.invalidate_link);
1081 		spin_unlock(&vm->userptr.invalidated_lock);
1082 	} else if (!xe_vma_is_null(vma)) {
1083 		xe_bo_assert_held(xe_vma_bo(vma));
1084 
1085 		spin_lock(&vm->notifier.list_lock);
1086 		list_del(&vma->notifier.rebind_link);
1087 		spin_unlock(&vm->notifier.list_lock);
1088 
1089 		drm_gpuva_unlink(&vma->gpuva);
1090 
1091 		if (!xe_vma_bo(vma)->vm && vm_remove_extobj(vma)) {
1092 			struct xe_vma *other;
1093 
1094 			other = bo_has_vm_references_locked(xe_vma_bo(vma), vm, NULL);
1095 
1096 			if (other)
1097 				__vm_insert_extobj(vm, other);
1098 		}
1099 	}
1100 
1101 	xe_vm_assert_held(vm);
1102 	if (fence) {
1103 		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1104 						 vma_destroy_cb);
1105 
1106 		if (ret) {
1107 			XE_WARN_ON(ret != -ENOENT);
1108 			xe_vma_destroy_late(vma);
1109 		}
1110 	} else {
1111 		xe_vma_destroy_late(vma);
1112 	}
1113 }
1114 
1115 /**
1116  * xe_vm_prepare_vma() - drm_exec utility to lock a vma
1117  * @exec: The drm_exec object we're currently locking for.
1118  * @vma: The vma for which we want to lock the vm resv and any attached
1119  * object's resv.
1120  * @num_shared: The number of dma-fence slots to pre-allocate in the
1121  * objects' reservation objects.
1122  *
1123  * Return: 0 on success, negative error code on error. In particular
1124  * may return -EDEADLK on WW transaction contention and -EINTR if
1125  * an interruptible wait is terminated by a signal.
1126  */
1127 int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
1128 		      unsigned int num_shared)
1129 {
1130 	struct xe_vm *vm = xe_vma_vm(vma);
1131 	struct xe_bo *bo = xe_vma_bo(vma);
1132 	int err;
1133 
1134 	XE_WARN_ON(!vm);
1135 	err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
1136 	if (!err && bo && !bo->vm)
1137 		err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
1138 
1139 	return err;
1140 }
1141 
1142 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1143 {
1144 	struct drm_exec exec;
1145 	int err;
1146 
1147 	drm_exec_init(&exec, 0);
1148 	drm_exec_until_all_locked(&exec) {
1149 		err = xe_vm_prepare_vma(&exec, vma, 0);
1150 		drm_exec_retry_on_contention(&exec);
1151 		if (XE_WARN_ON(err))
1152 			break;
1153 	}
1154 
1155 	xe_vma_destroy(vma, NULL);
1156 
1157 	drm_exec_fini(&exec);
1158 }
1159 
1160 struct xe_vma *
1161 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1162 {
1163 	struct drm_gpuva *gpuva;
1164 
1165 	lockdep_assert_held(&vm->lock);
1166 
1167 	if (xe_vm_is_closed_or_banned(vm))
1168 		return NULL;
1169 
1170 	xe_assert(vm->xe, start + range <= vm->size);
1171 
1172 	gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1173 
1174 	return gpuva ? gpuva_to_vma(gpuva) : NULL;
1175 }
1176 
1177 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1178 {
1179 	int err;
1180 
1181 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1182 	lockdep_assert_held(&vm->lock);
1183 
1184 	err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1185 	XE_WARN_ON(err);	/* Shouldn't be possible */
1186 
1187 	return err;
1188 }
1189 
1190 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1191 {
1192 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1193 	lockdep_assert_held(&vm->lock);
1194 
1195 	drm_gpuva_remove(&vma->gpuva);
1196 	if (vm->usm.last_fault_vma == vma)
1197 		vm->usm.last_fault_vma = NULL;
1198 }
1199 
1200 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1201 {
1202 	struct xe_vma_op *op;
1203 
1204 	op = kzalloc(sizeof(*op), GFP_KERNEL);
1205 
1206 	if (unlikely(!op))
1207 		return NULL;
1208 
1209 	return &op->base;
1210 }
1211 
1212 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1213 
1214 static struct drm_gpuvm_ops gpuvm_ops = {
1215 	.op_alloc = xe_vm_op_alloc,
1216 	.vm_free = xe_vm_free,
1217 };
1218 
1219 static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
1220 {
1221 	u64 pte = 0;
1222 
1223 	if (pat_index & BIT(0))
1224 		pte |= XE_PPGTT_PTE_PAT0;
1225 
1226 	if (pat_index & BIT(1))
1227 		pte |= XE_PPGTT_PTE_PAT1;
1228 
1229 	return pte;
1230 }
1231 
1232 static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
1233 				u32 pt_level)
1234 {
1235 	u64 pte = 0;
1236 
1237 	if (pat_index & BIT(0))
1238 		pte |= XE_PPGTT_PTE_PAT0;
1239 
1240 	if (pat_index & BIT(1))
1241 		pte |= XE_PPGTT_PTE_PAT1;
1242 
1243 	if (pat_index & BIT(2)) {
1244 		if (pt_level)
1245 			pte |= XE_PPGTT_PDE_PDPE_PAT2;
1246 		else
1247 			pte |= XE_PPGTT_PTE_PAT2;
1248 	}
1249 
1250 	if (pat_index & BIT(3))
1251 		pte |= XELPG_PPGTT_PTE_PAT3;
1252 
1253 	if (pat_index & (BIT(4)))
1254 		pte |= XE2_PPGTT_PTE_PAT4;
1255 
1256 	return pte;
1257 }
1258 
1259 static u64 pte_encode_ps(u32 pt_level)
1260 {
1261 	XE_WARN_ON(pt_level > 2);
1262 
1263 	if (pt_level == 1)
1264 		return XE_PDE_PS_2M;
1265 	else if (pt_level == 2)
1266 		return XE_PDPE_PS_1G;
1267 
1268 	return 0;
1269 }
1270 
1271 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
1272 			      const u16 pat_index)
1273 {
1274 	struct xe_device *xe = xe_bo_device(bo);
1275 	u64 pde;
1276 
1277 	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1278 	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1279 	pde |= pde_encode_pat_index(xe, pat_index);
1280 
1281 	return pde;
1282 }
1283 
1284 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1285 			      u16 pat_index, u32 pt_level)
1286 {
1287 	struct xe_device *xe = xe_bo_device(bo);
1288 	u64 pte;
1289 
1290 	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1291 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1292 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1293 	pte |= pte_encode_ps(pt_level);
1294 
1295 	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1296 		pte |= XE_PPGTT_PTE_DM;
1297 
1298 	return pte;
1299 }
1300 
1301 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1302 			       u16 pat_index, u32 pt_level)
1303 {
1304 	struct xe_device *xe = xe_vma_vm(vma)->xe;
1305 
1306 	pte |= XE_PAGE_PRESENT;
1307 
1308 	if (likely(!xe_vma_read_only(vma)))
1309 		pte |= XE_PAGE_RW;
1310 
1311 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1312 	pte |= pte_encode_ps(pt_level);
1313 
1314 	if (unlikely(xe_vma_is_null(vma)))
1315 		pte |= XE_PTE_NULL;
1316 
1317 	return pte;
1318 }
1319 
1320 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1321 				u16 pat_index,
1322 				u32 pt_level, bool devmem, u64 flags)
1323 {
1324 	u64 pte;
1325 
1326 	/* Avoid passing random bits directly as flags */
1327 	xe_assert(xe, !(flags & ~XE_PTE_PS64));
1328 
1329 	pte = addr;
1330 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1331 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1332 	pte |= pte_encode_ps(pt_level);
1333 
1334 	if (devmem)
1335 		pte |= XE_PPGTT_PTE_DM;
1336 
1337 	pte |= flags;
1338 
1339 	return pte;
1340 }
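
/*
 * Worked example (illustrative only): encoding a 2M device-memory PTE for
 * address 0x180000000 with pat_index 3 (BIT(0) | BIT(1)) at pt_level 1 ORs
 * together the address, XE_PAGE_PRESENT | XE_PAGE_RW,
 * XE_PPGTT_PTE_PAT0 | XE_PPGTT_PTE_PAT1, XE_PDE_PS_2M and XE_PPGTT_PTE_DM.
 */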
1341 
1342 static const struct xe_pt_ops xelp_pt_ops = {
1343 	.pte_encode_bo = xelp_pte_encode_bo,
1344 	.pte_encode_vma = xelp_pte_encode_vma,
1345 	.pte_encode_addr = xelp_pte_encode_addr,
1346 	.pde_encode_bo = xelp_pde_encode_bo,
1347 };
1348 
1349 static void vm_destroy_work_func(struct work_struct *w);
1350 
1351 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1352 {
1353 	struct drm_gem_object *vm_resv_obj;
1354 	struct xe_vm *vm;
1355 	int err, number_tiles = 0;
1356 	struct xe_tile *tile;
1357 	u8 id;
1358 
1359 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1360 	if (!vm)
1361 		return ERR_PTR(-ENOMEM);
1362 
1363 	vm->xe = xe;
1364 
1365 	vm->size = 1ull << xe->info.va_bits;
1366 
1367 	vm->flags = flags;
1368 
1369 	init_rwsem(&vm->lock);
1370 
1371 	INIT_LIST_HEAD(&vm->rebind_list);
1372 
1373 	INIT_LIST_HEAD(&vm->userptr.repin_list);
1374 	INIT_LIST_HEAD(&vm->userptr.invalidated);
1375 	init_rwsem(&vm->userptr.notifier_lock);
1376 	spin_lock_init(&vm->userptr.invalidated_lock);
1377 
1378 	INIT_LIST_HEAD(&vm->notifier.rebind_list);
1379 	spin_lock_init(&vm->notifier.list_lock);
1380 
1381 	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1382 
1383 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
1384 	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
1385 
1386 	for_each_tile(tile, xe, id)
1387 		xe_range_fence_tree_init(&vm->rftree[id]);
1388 
1389 	INIT_LIST_HEAD(&vm->extobj.list);
1390 
1391 	vm->pt_ops = &xelp_pt_ops;
1392 
1393 	if (!(flags & XE_VM_FLAG_MIGRATION))
1394 		xe_device_mem_access_get(xe);
1395 
1396 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1397 	if (!vm_resv_obj) {
1398 		err = -ENOMEM;
1399 		goto err_no_resv;
1400 	}
1401 
1402 	drm_gpuvm_init(&vm->gpuvm, "Xe VM", 0, &xe->drm, vm_resv_obj,
1403 		       0, vm->size, 0, 0, &gpuvm_ops);
1404 
1405 	drm_gem_object_put(vm_resv_obj);
1406 
1407 	err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
1408 	if (err)
1409 		goto err_close;
1410 
1411 	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1412 		vm->flags |= XE_VM_FLAG_64K;
1413 
1414 	for_each_tile(tile, xe, id) {
1415 		if (flags & XE_VM_FLAG_MIGRATION &&
1416 		    tile->id != XE_VM_FLAG_TILE_ID(flags))
1417 			continue;
1418 
1419 		vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1420 		if (IS_ERR(vm->pt_root[id])) {
1421 			err = PTR_ERR(vm->pt_root[id]);
1422 			vm->pt_root[id] = NULL;
1423 			goto err_unlock_close;
1424 		}
1425 	}
1426 
1427 	if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
1428 		for_each_tile(tile, xe, id) {
1429 			if (!vm->pt_root[id])
1430 				continue;
1431 
1432 			err = xe_pt_create_scratch(xe, tile, vm);
1433 			if (err)
1434 				goto err_unlock_close;
1435 		}
1436 		vm->batch_invalidate_tlb = true;
1437 	}
1438 
1439 	if (flags & XE_VM_FLAG_COMPUTE_MODE) {
1440 		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1441 		vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
1442 		vm->batch_invalidate_tlb = false;
1443 	}
1444 
1445 	/* Fill pt_root after allocating scratch tables */
1446 	for_each_tile(tile, xe, id) {
1447 		if (!vm->pt_root[id])
1448 			continue;
1449 
1450 		xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1451 	}
1452 	dma_resv_unlock(xe_vm_resv(vm));
1453 
1454 	/* Kernel migration VM shouldn't have a circular loop.. */
1455 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
1456 		for_each_tile(tile, xe, id) {
1457 			struct xe_gt *gt = tile->primary_gt;
1458 			struct xe_vm *migrate_vm;
1459 			struct xe_exec_queue *q;
1460 			u32 create_flags = EXEC_QUEUE_FLAG_VM |
1461 				((flags & XE_VM_FLAG_ASYNC_DEFAULT) ?
1462 				EXEC_QUEUE_FLAG_VM_ASYNC : 0);
1463 
1464 			if (!vm->pt_root[id])
1465 				continue;
1466 
1467 			migrate_vm = xe_migrate_get_vm(tile->migrate);
1468 			q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1469 						       XE_ENGINE_CLASS_COPY,
1470 						       create_flags);
1471 			xe_vm_put(migrate_vm);
1472 			if (IS_ERR(q)) {
1473 				err = PTR_ERR(q);
1474 				goto err_close;
1475 			}
1476 			vm->q[id] = q;
1477 			number_tiles++;
1478 		}
1479 	}
1480 
1481 	if (number_tiles > 1)
1482 		vm->composite_fence_ctx = dma_fence_context_alloc(1);
1483 
1484 	mutex_lock(&xe->usm.lock);
1485 	if (flags & XE_VM_FLAG_FAULT_MODE)
1486 		xe->usm.num_vm_in_fault_mode++;
1487 	else if (!(flags & XE_VM_FLAG_MIGRATION))
1488 		xe->usm.num_vm_in_non_fault_mode++;
1489 	mutex_unlock(&xe->usm.lock);
1490 
1491 	trace_xe_vm_create(vm);
1492 
1493 	return vm;
1494 
1495 err_unlock_close:
1496 	dma_resv_unlock(xe_vm_resv(vm));
1497 err_close:
1498 	xe_vm_close_and_put(vm);
1499 	return ERR_PTR(err);
1500 
1501 err_no_resv:
1502 	for_each_tile(tile, xe, id)
1503 		xe_range_fence_tree_fini(&vm->rftree[id]);
1504 	kfree(vm);
1505 	if (!(flags & XE_VM_FLAG_MIGRATION))
1506 		xe_device_mem_access_put(xe);
1507 	return ERR_PTR(err);
1508 }
1509 
1510 static void xe_vm_close(struct xe_vm *vm)
1511 {
1512 	down_write(&vm->lock);
1513 	vm->size = 0;
1514 	up_write(&vm->lock);
1515 }
1516 
1517 void xe_vm_close_and_put(struct xe_vm *vm)
1518 {
1519 	LIST_HEAD(contested);
1520 	struct xe_device *xe = vm->xe;
1521 	struct xe_tile *tile;
1522 	struct xe_vma *vma, *next_vma;
1523 	struct drm_gpuva *gpuva, *next;
1524 	u8 id;
1525 
1526 	xe_assert(xe, !vm->preempt.num_exec_queues);
1527 
1528 	xe_vm_close(vm);
1529 	if (xe_vm_in_compute_mode(vm))
1530 		flush_work(&vm->preempt.rebind_work);
1531 
1532 	down_write(&vm->lock);
1533 	for_each_tile(tile, xe, id) {
1534 		if (vm->q[id])
1535 			xe_exec_queue_last_fence_put(vm->q[id], vm);
1536 	}
1537 	up_write(&vm->lock);
1538 
1539 	for_each_tile(tile, xe, id) {
1540 		if (vm->q[id]) {
1541 			xe_exec_queue_kill(vm->q[id]);
1542 			xe_exec_queue_put(vm->q[id]);
1543 			vm->q[id] = NULL;
1544 		}
1545 	}
1546 
1547 	down_write(&vm->lock);
1548 	xe_vm_lock(vm, false);
1549 	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1550 		vma = gpuva_to_vma(gpuva);
1551 
1552 		if (xe_vma_has_no_bo(vma)) {
1553 			down_read(&vm->userptr.notifier_lock);
1554 			vma->gpuva.flags |= XE_VMA_DESTROYED;
1555 			up_read(&vm->userptr.notifier_lock);
1556 		}
1557 
1558 		xe_vm_remove_vma(vm, vma);
1559 
1560 		/* easy case, remove from VMA? */
1561 		if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1562 			list_del_init(&vma->combined_links.rebind);
1563 			xe_vma_destroy(vma, NULL);
1564 			continue;
1565 		}
1566 
1567 		list_move_tail(&vma->combined_links.destroy, &contested);
1568 		vma->gpuva.flags |= XE_VMA_DESTROYED;
1569 	}
1570 
1571 	/*
1572 	 * All vm operations will add shared fences to resv.
1573 	 * The only exception is eviction for a shared object,
1574 	 * but even so, the unbind when evicted would still
1575 	 * install a fence to resv. Hence it's safe to
1576 	 * destroy the pagetables immediately.
1577 	 */
1578 	for_each_tile(tile, xe, id) {
1579 		if (vm->scratch_bo[id]) {
1580 			u32 i;
1581 
1582 			xe_bo_unpin(vm->scratch_bo[id]);
1583 			xe_bo_put(vm->scratch_bo[id]);
1584 			for (i = 0; i < vm->pt_root[id]->level; i++)
1585 				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
1586 					      NULL);
1587 		}
1588 		if (vm->pt_root[id]) {
1589 			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1590 			vm->pt_root[id] = NULL;
1591 		}
1592 	}
1593 	xe_vm_unlock(vm);
1594 
1595 	/*
1596 	 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1597 	 * Since we hold a refcount to the bo, we can remove and free
1598 	 * the members safely without locking.
1599 	 */
1600 	list_for_each_entry_safe(vma, next_vma, &contested,
1601 				 combined_links.destroy) {
1602 		list_del_init(&vma->combined_links.destroy);
1603 		xe_vma_destroy_unlocked(vma);
1604 	}
1605 
1606 	xe_assert(xe, list_empty(&vm->extobj.list));
1607 	up_write(&vm->lock);
1608 
1609 	mutex_lock(&xe->usm.lock);
1610 	if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1611 		xe->usm.num_vm_in_fault_mode--;
1612 	else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1613 		xe->usm.num_vm_in_non_fault_mode--;
1614 	mutex_unlock(&xe->usm.lock);
1615 
1616 	for_each_tile(tile, xe, id)
1617 		xe_range_fence_tree_fini(&vm->rftree[id]);
1618 
1619 	xe_vm_put(vm);
1620 }
1621 
1622 static void vm_destroy_work_func(struct work_struct *w)
1623 {
1624 	struct xe_vm *vm =
1625 		container_of(w, struct xe_vm, destroy_work);
1626 	struct xe_device *xe = vm->xe;
1627 	struct xe_tile *tile;
1628 	u8 id;
1629 	void *lookup;
1630 
1631 	/* xe_vm_close_and_put was not called? */
1632 	xe_assert(xe, !vm->size);
1633 
1634 	if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1635 		xe_device_mem_access_put(xe);
1636 
1637 		if (xe->info.has_asid) {
1638 			mutex_lock(&xe->usm.lock);
1639 			lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1640 			xe_assert(xe, lookup == vm);
1641 			mutex_unlock(&xe->usm.lock);
1642 		}
1643 	}
1644 
1645 	for_each_tile(tile, xe, id)
1646 		XE_WARN_ON(vm->pt_root[id]);
1647 
1648 	trace_xe_vm_free(vm);
1649 	dma_fence_put(vm->rebind_fence);
1650 	kfree(vm);
1651 }
1652 
1653 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1654 {
1655 	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1656 
1657 	/* To destroy the VM we need to be able to sleep */
1658 	queue_work(system_unbound_wq, &vm->destroy_work);
1659 }
1660 
1661 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1662 {
1663 	struct xe_vm *vm;
1664 
1665 	mutex_lock(&xef->vm.lock);
1666 	vm = xa_load(&xef->vm.xa, id);
1667 	if (vm)
1668 		xe_vm_get(vm);
1669 	mutex_unlock(&xef->vm.lock);
1670 
1671 	return vm;
1672 }
1673 
1674 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1675 {
1676 	return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
1677 					 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
1678 }
1679 
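/*
 * Fence tracking falls back to the VM's default exec queue (vm->q[0]) when
 * no queue is given.
 */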
1680 static struct xe_exec_queue *
1681 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
1682 {
1683 	return q ? q : vm->q[0];
1684 }
1685 
1686 static struct dma_fence *
1687 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1688 		 struct xe_sync_entry *syncs, u32 num_syncs,
1689 		 bool first_op, bool last_op)
1690 {
1691 	struct xe_vm *vm = xe_vma_vm(vma);
1692 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1693 	struct xe_tile *tile;
1694 	struct dma_fence *fence = NULL;
1695 	struct dma_fence **fences = NULL;
1696 	struct dma_fence_array *cf = NULL;
1697 	int cur_fence = 0, i;
1698 	int number_tiles = hweight8(vma->tile_present);
1699 	int err;
1700 	u8 id;
1701 
1702 	trace_xe_vma_unbind(vma);
1703 
1704 	if (number_tiles > 1) {
1705 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1706 				       GFP_KERNEL);
1707 		if (!fences)
1708 			return ERR_PTR(-ENOMEM);
1709 	}
1710 
1711 	for_each_tile(tile, vm->xe, id) {
1712 		if (!(vma->tile_present & BIT(id)))
1713 			goto next;
1714 
1715 		fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
1716 					   first_op ? syncs : NULL,
1717 					   first_op ? num_syncs : 0);
1718 		if (IS_ERR(fence)) {
1719 			err = PTR_ERR(fence);
1720 			goto err_fences;
1721 		}
1722 
1723 		if (fences)
1724 			fences[cur_fence++] = fence;
1725 
1726 next:
1727 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1728 			q = list_next_entry(q, multi_gt_list);
1729 	}
1730 
1731 	if (fences) {
1732 		cf = dma_fence_array_create(number_tiles, fences,
1733 					    vm->composite_fence_ctx,
1734 					    vm->composite_fence_seqno++,
1735 					    false);
1736 		if (!cf) {
1737 			--vm->composite_fence_seqno;
1738 			err = -ENOMEM;
1739 			goto err_fences;
1740 		}
1741 	}
1742 
1743 	if (last_op) {
1744 		for (i = 0; i < num_syncs; i++)
1745 			xe_sync_entry_signal(&syncs[i], NULL,
1746 					     cf ? &cf->base : fence);
1747 	}
1748 
1749 	return cf ? &cf->base : !fence ?
1750 		xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
1751 
1752 err_fences:
1753 	if (fences) {
1754 		while (cur_fence)
1755 			dma_fence_put(fences[--cur_fence]);
1756 		kfree(fences);
1757 	}
1758 
1759 	return ERR_PTR(err);
1760 }
1761 
1762 static struct dma_fence *
1763 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1764 	       struct xe_sync_entry *syncs, u32 num_syncs,
1765 	       bool first_op, bool last_op)
1766 {
1767 	struct xe_tile *tile;
1768 	struct dma_fence *fence;
1769 	struct dma_fence **fences = NULL;
1770 	struct dma_fence_array *cf = NULL;
1771 	struct xe_vm *vm = xe_vma_vm(vma);
1772 	int cur_fence = 0, i;
1773 	int number_tiles = hweight8(vma->tile_mask);
1774 	int err;
1775 	u8 id;
1776 
1777 	trace_xe_vma_bind(vma);
1778 
1779 	if (number_tiles > 1) {
1780 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1781 				       GFP_KERNEL);
1782 		if (!fences)
1783 			return ERR_PTR(-ENOMEM);
1784 	}
1785 
1786 	for_each_tile(tile, vm->xe, id) {
1787 		if (!(vma->tile_mask & BIT(id)))
1788 			goto next;
1789 
1790 		fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
1791 					 first_op ? syncs : NULL,
1792 					 first_op ? num_syncs : 0,
1793 					 vma->tile_present & BIT(id));
1794 		if (IS_ERR(fence)) {
1795 			err = PTR_ERR(fence);
1796 			goto err_fences;
1797 		}
1798 
1799 		if (fences)
1800 			fences[cur_fence++] = fence;
1801 
1802 next:
1803 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1804 			q = list_next_entry(q, multi_gt_list);
1805 	}
1806 
1807 	if (fences) {
1808 		cf = dma_fence_array_create(number_tiles, fences,
1809 					    vm->composite_fence_ctx,
1810 					    vm->composite_fence_seqno++,
1811 					    false);
1812 		if (!cf) {
1813 			--vm->composite_fence_seqno;
1814 			err = -ENOMEM;
1815 			goto err_fences;
1816 		}
1817 	}
1818 
1819 	if (last_op) {
1820 		for (i = 0; i < num_syncs; i++)
1821 			xe_sync_entry_signal(&syncs[i], NULL,
1822 					     cf ? &cf->base : fence);
1823 	}
1824 
1825 	return cf ? &cf->base : fence;
1826 
1827 err_fences:
1828 	if (fences) {
1829 		while (cur_fence)
1830 			dma_fence_put(fences[--cur_fence]);
1831 		kfree(fences);
1832 	}
1833 
1834 	return ERR_PTR(err);
1835 }
1836 
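/*
 * A VM bind/unbind completes synchronously (the operation waits on its fence
 * before returning) unless the queue, or by default the VM, was created with
 * the corresponding async flag.
 */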
1837 static bool xe_vm_sync_mode(struct xe_vm *vm, struct xe_exec_queue *q)
1838 {
1839 	return q ? !(q->flags & EXEC_QUEUE_FLAG_VM_ASYNC) :
1840 		!(vm->flags & XE_VM_FLAG_ASYNC_DEFAULT);
1841 }
1842 
1843 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1844 			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1845 			u32 num_syncs, bool immediate, bool first_op,
1846 			bool last_op)
1847 {
1848 	struct dma_fence *fence;
1849 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1850 
1851 	xe_vm_assert_held(vm);
1852 
1853 	if (immediate) {
1854 		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
1855 				       last_op);
1856 		if (IS_ERR(fence))
1857 			return PTR_ERR(fence);
1858 	} else {
1859 		int i;
1860 
1861 		xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1862 
1863 		fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1864 		if (last_op) {
1865 			for (i = 0; i < num_syncs; i++)
1866 				xe_sync_entry_signal(&syncs[i], NULL, fence);
1867 		}
1868 	}
1869 
1870 	if (last_op)
1871 		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1872 	if (last_op && xe_vm_sync_mode(vm, q))
1873 		dma_fence_wait(fence, true);
1874 	dma_fence_put(fence);
1875 
1876 	return 0;
1877 }
1878 
1879 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
1880 		      struct xe_bo *bo, struct xe_sync_entry *syncs,
1881 		      u32 num_syncs, bool immediate, bool first_op,
1882 		      bool last_op)
1883 {
1884 	int err;
1885 
1886 	xe_vm_assert_held(vm);
1887 	xe_bo_assert_held(bo);
1888 
1889 	if (bo && immediate) {
1890 		err = xe_bo_validate(bo, vm, true);
1891 		if (err)
1892 			return err;
1893 	}
1894 
1895 	return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op,
1896 			    last_op);
1897 }
1898 
1899 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1900 			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1901 			u32 num_syncs, bool first_op, bool last_op)
1902 {
1903 	struct dma_fence *fence;
1904 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1905 
1906 	xe_vm_assert_held(vm);
1907 	xe_bo_assert_held(xe_vma_bo(vma));
1908 
1909 	fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
1910 	if (IS_ERR(fence))
1911 		return PTR_ERR(fence);
1912 
1913 	xe_vma_destroy(vma, fence);
1914 	if (last_op)
1915 		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
1916 	if (last_op && xe_vm_sync_mode(vm, q))
1917 		dma_fence_wait(fence, true);
1918 	dma_fence_put(fence);
1919 
1920 	return 0;
1921 }
1922 
1923 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
1924 				    DRM_XE_VM_CREATE_COMPUTE_MODE | \
1925 				    DRM_XE_VM_CREATE_ASYNC_DEFAULT | \
1926 				    DRM_XE_VM_CREATE_FAULT_MODE)
1927 
1928 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1929 		       struct drm_file *file)
1930 {
1931 	struct xe_device *xe = to_xe_device(dev);
1932 	struct xe_file *xef = to_xe_file(file);
1933 	struct drm_xe_vm_create *args = data;
1934 	struct xe_tile *tile;
1935 	struct xe_vm *vm;
1936 	u32 id, asid;
1937 	int err;
1938 	u32 flags = 0;
1939 
1940 	if (XE_IOCTL_DBG(xe, args->extensions))
1941 		return -EINVAL;
1942 
1943 	if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1944 		args->flags |= DRM_XE_VM_CREATE_SCRATCH_PAGE;
1945 
1946 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1947 			 !xe->info.supports_usm))
1948 		return -EINVAL;
1949 
1950 	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1951 		return -EINVAL;
1952 
1953 	if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1954 		return -EINVAL;
1955 
1956 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
1957 			 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1958 		return -EINVAL;
1959 
1960 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
1961 			 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1962 		return -EINVAL;
1963 
1964 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1965 			 xe_device_in_non_fault_mode(xe)))
1966 		return -EINVAL;
1967 
1968 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
1969 			 xe_device_in_fault_mode(xe)))
1970 		return -EINVAL;
1971 
1972 	if (XE_IOCTL_DBG(xe, args->extensions))
1973 		return -EINVAL;
1974 
1975 	if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
1976 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
1977 	if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
1978 		flags |= XE_VM_FLAG_COMPUTE_MODE;
1979 	if (args->flags & DRM_XE_VM_CREATE_ASYNC_DEFAULT)
1980 		flags |= XE_VM_FLAG_ASYNC_DEFAULT;
1981 	if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
1982 		flags |= XE_VM_FLAG_FAULT_MODE;
1983 
1984 	vm = xe_vm_create(xe, flags);
1985 	if (IS_ERR(vm))
1986 		return PTR_ERR(vm);
1987 
1988 	mutex_lock(&xef->vm.lock);
1989 	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1990 	mutex_unlock(&xef->vm.lock);
1991 	if (err) {
1992 		xe_vm_close_and_put(vm);
1993 		return err;
1994 	}
1995 
1996 	if (xe->info.has_asid) {
1997 		mutex_lock(&xe->usm.lock);
1998 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1999 				      XA_LIMIT(0, XE_MAX_ASID - 1),
2000 				      &xe->usm.next_asid, GFP_KERNEL);
2001 		mutex_unlock(&xe->usm.lock);
2002 		if (err) {
2003 			xe_vm_close_and_put(vm);
2004 			return err;
2005 		}
2006 		vm->usm.asid = asid;
2007 	}
2008 
2009 	args->vm_id = id;
2010 	vm->xef = xef;
2011 
2012 	/* Record BO memory for VM pagetable created against client */
2013 	for_each_tile(tile, xe, id)
2014 		if (vm->pt_root[id])
2015 			xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
2016 
2017 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
2018 	/* Warning: Security issue - never enable by default */
2019 	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
2020 #endif
2021 
2022 	return 0;
2023 }
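
/*
 * Illustrative userspace sketch (not part of the driver): creating a VM
 * through this ioctl with a scratch page. Assumes a libdrm-style drmIoctl()
 * wrapper and an open xe render-node fd; struct and flag names follow the
 * uAPI used above, everything else is a placeholder.
 *
 *	struct drm_xe_vm_create create = {
 *		.flags = DRM_XE_VM_CREATE_SCRATCH_PAGE,
 *	};
 *	uint32_t vm_id;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
 *		return -errno;
 *	vm_id = create.vm_id;
 */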
2024 
2025 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
2026 			struct drm_file *file)
2027 {
2028 	struct xe_device *xe = to_xe_device(dev);
2029 	struct xe_file *xef = to_xe_file(file);
2030 	struct drm_xe_vm_destroy *args = data;
2031 	struct xe_vm *vm;
2032 	int err = 0;
2033 
2034 	if (XE_IOCTL_DBG(xe, args->pad) ||
2035 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2036 		return -EINVAL;
2037 
2038 	mutex_lock(&xef->vm.lock);
2039 	vm = xa_load(&xef->vm.xa, args->vm_id);
2040 	if (XE_IOCTL_DBG(xe, !vm))
2041 		err = -ENOENT;
2042 	else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
2043 		err = -EBUSY;
2044 	else
2045 		xa_erase(&xef->vm.xa, args->vm_id);
2046 	mutex_unlock(&xef->vm.lock);
2047 
2048 	if (!err)
2049 		xe_vm_close_and_put(vm);
2050 
2051 	return err;
2052 }
2053 
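/*
 * Prefetch regions index this table: region 0 selects system memory
 * (XE_PL_TT) and regions 1 and 2 select the two VRAM placements
 * (XE_PL_VRAM0/XE_PL_VRAM1).
 */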
2054 static const u32 region_to_mem_type[] = {
2055 	XE_PL_TT,
2056 	XE_PL_VRAM0,
2057 	XE_PL_VRAM1,
2058 };
2059 
2060 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
2061 			  struct xe_exec_queue *q, u32 region,
2062 			  struct xe_sync_entry *syncs, u32 num_syncs,
2063 			  bool first_op, bool last_op)
2064 {
2065 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
2066 	int err;
2067 
2068 	xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
2069 
2070 	if (!xe_vma_has_no_bo(vma)) {
2071 		err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
2072 		if (err)
2073 			return err;
2074 	}
2075 
2076 	if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
2077 		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
2078 				  true, first_op, last_op);
2079 	} else {
2080 		int i;
2081 
2082 		/* Nothing to do, signal fences now */
2083 		if (last_op) {
2084 			for (i = 0; i < num_syncs; i++) {
2085 				struct dma_fence *fence =
2086 					xe_exec_queue_last_fence_get(wait_exec_queue, vm);
2087 
2088 				xe_sync_entry_signal(&syncs[i], NULL, fence);
2089 			}
2090 		}
2091 
2092 		return 0;
2093 	}
2094 }
2095 
2096 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
2097 			     bool post_commit)
2098 {
2099 	down_read(&vm->userptr.notifier_lock);
2100 	vma->gpuva.flags |= XE_VMA_DESTROYED;
2101 	up_read(&vm->userptr.notifier_lock);
2102 	if (post_commit)
2103 		xe_vm_remove_vma(vm, vma);
2104 }
2105 
2106 #undef ULL
2107 #define ULL	unsigned long long
2108 
2109 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2110 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2111 {
2112 	struct xe_vma *vma;
2113 
2114 	switch (op->op) {
2115 	case DRM_GPUVA_OP_MAP:
2116 		vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2117 		       (ULL)op->map.va.addr, (ULL)op->map.va.range);
2118 		break;
2119 	case DRM_GPUVA_OP_REMAP:
2120 		vma = gpuva_to_vma(op->remap.unmap->va);
2121 		vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2122 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2123 		       op->remap.unmap->keep ? 1 : 0);
2124 		if (op->remap.prev)
2125 			vm_dbg(&xe->drm,
2126 			       "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2127 			       (ULL)op->remap.prev->va.addr,
2128 			       (ULL)op->remap.prev->va.range);
2129 		if (op->remap.next)
2130 			vm_dbg(&xe->drm,
2131 			       "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2132 			       (ULL)op->remap.next->va.addr,
2133 			       (ULL)op->remap.next->va.range);
2134 		break;
2135 	case DRM_GPUVA_OP_UNMAP:
2136 		vma = gpuva_to_vma(op->unmap.va);
2137 		vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2138 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2139 		       op->unmap.keep ? 1 : 0);
2140 		break;
2141 	case DRM_GPUVA_OP_PREFETCH:
2142 		vma = gpuva_to_vma(op->prefetch.va);
2143 		vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2144 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2145 		break;
2146 	default:
2147 		drm_warn(&xe->drm, "NOT POSSIBLE");
2148 	}
2149 }
2150 #else
2151 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2152 {
2153 }
2154 #endif
2155 
2156 /*
2157  * Create the operations list from the IOCTL arguments and set up the operation
2158  * fields so the parse and commit steps are decoupled from those arguments. This step can fail.
2159  */
2160 static struct drm_gpuva_ops *
2161 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2162 			 u64 bo_offset_or_userptr, u64 addr, u64 range,
2163 			 u32 operation, u32 flags, u8 tile_mask, u32 region)
2164 {
2165 	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2166 	struct drm_gpuva_ops *ops;
2167 	struct drm_gpuva_op *__op;
2168 	struct xe_vma_op *op;
2169 	struct drm_gpuvm_bo *vm_bo;
2170 	int err;
2171 
2172 	lockdep_assert_held_write(&vm->lock);
2173 
2174 	vm_dbg(&vm->xe->drm,
2175 	       "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2176 	       operation, (ULL)addr, (ULL)range,
2177 	       (ULL)bo_offset_or_userptr);
2178 
2179 	switch (operation) {
2180 	case XE_VM_BIND_OP_MAP:
2181 	case XE_VM_BIND_OP_MAP_USERPTR:
2182 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2183 						  obj, bo_offset_or_userptr);
2184 		if (IS_ERR(ops))
2185 			return ops;
2186 
2187 		drm_gpuva_for_each_op(__op, ops) {
2188 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2189 
2190 			op->tile_mask = tile_mask;
2191 			op->map.immediate =
2192 				flags & XE_VM_BIND_FLAG_IMMEDIATE;
2193 			op->map.read_only =
2194 				flags & XE_VM_BIND_FLAG_READONLY;
2195 			op->map.is_null = flags & XE_VM_BIND_FLAG_NULL;
2196 		}
2197 		break;
2198 	case XE_VM_BIND_OP_UNMAP:
2199 		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2200 		if (IS_ERR(ops))
2201 			return ops;
2202 
2203 		drm_gpuva_for_each_op(__op, ops) {
2204 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2205 
2206 			op->tile_mask = tile_mask;
2207 		}
2208 		break;
2209 	case XE_VM_BIND_OP_PREFETCH:
2210 		ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2211 		if (IS_ERR(ops))
2212 			return ops;
2213 
2214 		drm_gpuva_for_each_op(__op, ops) {
2215 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2216 
2217 			op->tile_mask = tile_mask;
2218 			op->prefetch.region = region;
2219 		}
2220 		break;
2221 	case XE_VM_BIND_OP_UNMAP_ALL:
2222 		xe_assert(vm->xe, bo);
2223 
2224 		err = xe_bo_lock(bo, true);
2225 		if (err)
2226 			return ERR_PTR(err);
2227 
2228 		vm_bo = drm_gpuvm_bo_find(&vm->gpuvm, obj);
2229 		if (!vm_bo)
2230 			break;
2231 
2232 		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2233 		drm_gpuvm_bo_put(vm_bo);
2234 		xe_bo_unlock(bo);
2235 		if (IS_ERR(ops))
2236 			return ops;
2237 
2238 		drm_gpuva_for_each_op(__op, ops) {
2239 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2240 
2241 			op->tile_mask = tile_mask;
2242 		}
2243 		break;
2244 	default:
2245 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2246 		ops = ERR_PTR(-EINVAL);
2247 	}
2248 
2249 #ifdef TEST_VM_ASYNC_OPS_ERROR
2250 	if (operation & FORCE_ASYNC_OP_ERROR) {
2251 		op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
2252 					      base.entry);
2253 		if (op)
2254 			op->inject_error = true;
2255 	}
2256 #endif
2257 
2258 	if (!IS_ERR(ops))
2259 		drm_gpuva_for_each_op(__op, ops)
2260 			print_op(vm->xe, __op);
2261 
2262 	return ops;
2263 }
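
/*
 * Illustrative example of the op stream this produces: a XE_VM_BIND_OP_MAP
 * landing in the middle of an existing mapping makes
 * drm_gpuvm_sm_map_ops_create() emit a REMAP op (with prev/next pieces for
 * the surviving ends of the old VMA) followed by a MAP op for the new range;
 * the parse, commit and execute steps below then turn those into new xe_vma
 * objects plus the corresponding unbind and (re)bind operations.
 */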
2264 
2265 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2266 			      u8 tile_mask, bool read_only, bool is_null)
2267 {
2268 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2269 	struct xe_vma *vma;
2270 	int err;
2271 
2272 	lockdep_assert_held_write(&vm->lock);
2273 
2274 	if (bo) {
2275 		err = xe_bo_lock(bo, true);
2276 		if (err)
2277 			return ERR_PTR(err);
2278 	}
2279 	vma = xe_vma_create(vm, bo, op->gem.offset,
2280 			    op->va.addr, op->va.addr +
2281 			    op->va.range - 1, read_only, is_null,
2282 			    tile_mask);
2283 	if (bo)
2284 		xe_bo_unlock(bo);
2285 
2286 	if (xe_vma_is_userptr(vma)) {
2287 		err = xe_vma_userptr_pin_pages(vma);
2288 		if (err) {
2289 			prep_vma_destroy(vm, vma, false);
2290 			xe_vma_destroy_unlocked(vma);
2291 			return ERR_PTR(err);
2292 		}
2293 	} else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
2294 		vm_insert_extobj(vm, vma);
2295 		err = add_preempt_fences(vm, bo);
2296 		if (err) {
2297 			prep_vma_destroy(vm, vma, false);
2298 			xe_vma_destroy_unlocked(vma);
2299 			return ERR_PTR(err);
2300 		}
2301 	}
2302 
2303 	return vma;
2304 }
2305 
2306 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2307 {
2308 	if (vma->gpuva.flags & XE_VMA_PTE_1G)
2309 		return SZ_1G;
2310 	else if (vma->gpuva.flags & XE_VMA_PTE_2M)
2311 		return SZ_2M;
2312 
2313 	return SZ_4K;
2314 }
2315 
2316 static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2317 {
2318 	switch (size) {
2319 	case SZ_1G:
2320 		vma->gpuva.flags |= XE_VMA_PTE_1G;
2321 		break;
2322 	case SZ_2M:
2323 		vma->gpuva.flags |= XE_VMA_PTE_2M;
2324 		break;
2325 	}
2326 
2327 	return SZ_4K;
2328 }
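
/*
 * Worked example for the remap skip logic below (illustrative): if the old
 * VMA was bound with 2M PTEs and a REMAP leaves a prev chunk whose end is
 * 2M-aligned (and the VMA is not a userptr), skip_prev is set: the prev
 * chunk keeps its existing page-table entries and the unbind range is
 * trimmed to start at the end of that chunk. If the boundary is not
 * 2M-aligned, the prev chunk is instead rebound at the smaller page size.
 */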
2329 
2330 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2331 {
2332 	int err = 0;
2333 
2334 	lockdep_assert_held_write(&vm->lock);
2335 
2336 	switch (op->base.op) {
2337 	case DRM_GPUVA_OP_MAP:
2338 		err |= xe_vm_insert_vma(vm, op->map.vma);
2339 		if (!err)
2340 			op->flags |= XE_VMA_OP_COMMITTED;
2341 		break;
2342 	case DRM_GPUVA_OP_REMAP:
2343 		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2344 				 true);
2345 		op->flags |= XE_VMA_OP_COMMITTED;
2346 
2347 		if (op->remap.prev) {
2348 			err |= xe_vm_insert_vma(vm, op->remap.prev);
2349 			if (!err)
2350 				op->flags |= XE_VMA_OP_PREV_COMMITTED;
2351 			if (!err && op->remap.skip_prev)
2352 				op->remap.prev = NULL;
2353 		}
2354 		if (op->remap.next) {
2355 			err |= xe_vm_insert_vma(vm, op->remap.next);
2356 			if (!err)
2357 				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2358 			if (!err && op->remap.skip_next)
2359 				op->remap.next = NULL;
2360 		}
2361 
2362 		/* Adjust for partial unbind after removing VMA from VM */
2363 		if (!err) {
2364 			op->base.remap.unmap->va->va.addr = op->remap.start;
2365 			op->base.remap.unmap->va->va.range = op->remap.range;
2366 		}
2367 		break;
2368 	case DRM_GPUVA_OP_UNMAP:
2369 		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2370 		op->flags |= XE_VMA_OP_COMMITTED;
2371 		break;
2372 	case DRM_GPUVA_OP_PREFETCH:
2373 		op->flags |= XE_VMA_OP_COMMITTED;
2374 		break;
2375 	default:
2376 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2377 	}
2378 
2379 	return err;
2380 }
2381 
2383 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2384 				   struct drm_gpuva_ops *ops,
2385 				   struct xe_sync_entry *syncs, u32 num_syncs,
2386 				   struct list_head *ops_list, bool last,
2387 				   bool async)
2388 {
2389 	struct xe_vma_op *last_op = NULL;
2390 	struct drm_gpuva_op *__op;
2391 	int err = 0;
2392 
2393 	lockdep_assert_held_write(&vm->lock);
2394 
2395 	drm_gpuva_for_each_op(__op, ops) {
2396 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2397 		bool first = list_empty(ops_list);
2398 
2399 		INIT_LIST_HEAD(&op->link);
2400 		list_add_tail(&op->link, ops_list);
2401 
2402 		if (first) {
2403 			op->flags |= XE_VMA_OP_FIRST;
2404 			op->num_syncs = num_syncs;
2405 			op->syncs = syncs;
2406 		}
2407 
2408 		op->q = q;
2409 
2410 		switch (op->base.op) {
2411 		case DRM_GPUVA_OP_MAP:
2412 		{
2413 			struct xe_vma *vma;
2414 
2415 			vma = new_vma(vm, &op->base.map,
2416 				      op->tile_mask, op->map.read_only,
2417 				      op->map.is_null);
2418 			if (IS_ERR(vma))
2419 				return PTR_ERR(vma);
2420 
2421 			op->map.vma = vma;
2422 			break;
2423 		}
2424 		case DRM_GPUVA_OP_REMAP:
2425 		{
2426 			struct xe_vma *old =
2427 				gpuva_to_vma(op->base.remap.unmap->va);
2428 
2429 			op->remap.start = xe_vma_start(old);
2430 			op->remap.range = xe_vma_size(old);
2431 
2432 			if (op->base.remap.prev) {
2433 				struct xe_vma *vma;
2434 				bool read_only =
2435 					op->base.remap.unmap->va->flags &
2436 					XE_VMA_READ_ONLY;
2437 				bool is_null =
2438 					op->base.remap.unmap->va->flags &
2439 					DRM_GPUVA_SPARSE;
2440 
2441 				vma = new_vma(vm, op->base.remap.prev,
2442 					      op->tile_mask, read_only,
2443 					      is_null);
2444 				if (IS_ERR(vma))
2445 					return PTR_ERR(vma);
2446 
2447 				op->remap.prev = vma;
2448 
2449 				/*
2450 				 * Userptr creates a new SG mapping so
2451 				 * we must also rebind.
2452 				 */
2453 				op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2454 					IS_ALIGNED(xe_vma_end(vma),
2455 						   xe_vma_max_pte_size(old));
2456 				if (op->remap.skip_prev) {
2457 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2458 					op->remap.range -=
2459 						xe_vma_end(vma) -
2460 						xe_vma_start(old);
2461 					op->remap.start = xe_vma_end(vma);
2462 				}
2463 			}
2464 
2465 			if (op->base.remap.next) {
2466 				struct xe_vma *vma;
2467 				bool read_only =
2468 					op->base.remap.unmap->va->flags &
2469 					XE_VMA_READ_ONLY;
2471 				bool is_null =
2472 					op->base.remap.unmap->va->flags &
2473 					DRM_GPUVA_SPARSE;
2474 
2475 				vma = new_vma(vm, op->base.remap.next,
2476 					      op->tile_mask, read_only,
2477 					      is_null);
2478 				if (IS_ERR(vma))
2479 					return PTR_ERR(vma);
2480 
2481 				op->remap.next = vma;
2482 
2483 				/*
2484 				 * Userptr creates a new SG mapping so
2485 				 * we must also rebind.
2486 				 */
2487 				op->remap.skip_next = !xe_vma_is_userptr(old) &&
2488 					IS_ALIGNED(xe_vma_start(vma),
2489 						   xe_vma_max_pte_size(old));
2490 				if (op->remap.skip_next) {
2491 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2492 					op->remap.range -=
2493 						xe_vma_end(old) -
2494 						xe_vma_start(vma);
2495 				}
2496 			}
2497 			break;
2498 		}
2499 		case DRM_GPUVA_OP_UNMAP:
2500 		case DRM_GPUVA_OP_PREFETCH:
2501 			/* Nothing to do */
2502 			break;
2503 		default:
2504 			drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2505 		}
2506 
2507 		last_op = op;
2508 
2509 		err = xe_vma_op_commit(vm, op);
2510 		if (err)
2511 			return err;
2512 	}
2513 
2514 	/* FIXME: Unhandled corner case */
2515 	XE_WARN_ON(!last_op && last && !list_empty(ops_list));
2516 
2517 	if (!last_op)
2518 		return 0;
2519 
2520 	last_op->ops = ops;
2521 	if (last) {
2522 		last_op->flags |= XE_VMA_OP_LAST;
2523 		last_op->num_syncs = num_syncs;
2524 		last_op->syncs = syncs;
2525 	}
2526 
2527 	return 0;
2528 }
2529 
2530 static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
2531 		      struct xe_vma *vma, struct xe_vma_op *op)
2532 {
2533 	int err;
2534 
2535 	lockdep_assert_held_write(&vm->lock);
2536 
2537 	err = xe_vm_prepare_vma(exec, vma, 1);
2538 	if (err)
2539 		return err;
2540 
2541 	xe_vm_assert_held(vm);
2542 	xe_bo_assert_held(xe_vma_bo(vma));
2543 
2544 	switch (op->base.op) {
2545 	case DRM_GPUVA_OP_MAP:
2546 		err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2547 				 op->syncs, op->num_syncs,
2548 				 op->map.immediate || !xe_vm_in_fault_mode(vm),
2549 				 op->flags & XE_VMA_OP_FIRST,
2550 				 op->flags & XE_VMA_OP_LAST);
2551 		break;
2552 	case DRM_GPUVA_OP_REMAP:
2553 	{
2554 		bool prev = !!op->remap.prev;
2555 		bool next = !!op->remap.next;
2556 
2557 		if (!op->remap.unmap_done) {
2558 			if (prev || next)
2559 				vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2560 			err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2561 					   op->num_syncs,
2562 					   op->flags & XE_VMA_OP_FIRST,
2563 					   op->flags & XE_VMA_OP_LAST &&
2564 					   !prev && !next);
2565 			if (err)
2566 				break;
2567 			op->remap.unmap_done = true;
2568 		}
2569 
2570 		if (prev) {
2571 			op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2572 			err = xe_vm_bind(vm, op->remap.prev, op->q,
2573 					 xe_vma_bo(op->remap.prev), op->syncs,
2574 					 op->num_syncs, true, false,
2575 					 op->flags & XE_VMA_OP_LAST && !next);
2576 			op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2577 			if (err)
2578 				break;
2579 			op->remap.prev = NULL;
2580 		}
2581 
2582 		if (next) {
2583 			op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2584 			err = xe_vm_bind(vm, op->remap.next, op->q,
2585 					 xe_vma_bo(op->remap.next),
2586 					 op->syncs, op->num_syncs,
2587 					 true, false,
2588 					 op->flags & XE_VMA_OP_LAST);
2589 			op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2590 			if (err)
2591 				break;
2592 			op->remap.next = NULL;
2593 		}
2594 
2595 		break;
2596 	}
2597 	case DRM_GPUVA_OP_UNMAP:
2598 		err = xe_vm_unbind(vm, vma, op->q, op->syncs,
2599 				   op->num_syncs, op->flags & XE_VMA_OP_FIRST,
2600 				   op->flags & XE_VMA_OP_LAST);
2601 		break;
2602 	case DRM_GPUVA_OP_PREFETCH:
2603 		err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
2604 				     op->syncs, op->num_syncs,
2605 				     op->flags & XE_VMA_OP_FIRST,
2606 				     op->flags & XE_VMA_OP_LAST);
2607 		break;
2608 	default:
2609 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2610 	}
2611 
2612 	if (err)
2613 		trace_xe_vma_fail(vma);
2614 
2615 	return err;
2616 }
2617 
2618 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2619 			       struct xe_vma_op *op)
2620 {
2621 	struct drm_exec exec;
2622 	int err;
2623 
2624 retry_userptr:
2625 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
2626 	drm_exec_until_all_locked(&exec) {
2627 		err = op_execute(&exec, vm, vma, op);
2628 		drm_exec_retry_on_contention(&exec);
2629 		if (err)
2630 			break;
2631 	}
2632 	drm_exec_fini(&exec);
2633 
2634 	if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2635 		lockdep_assert_held_write(&vm->lock);
2636 		err = xe_vma_userptr_pin_pages(vma);
2637 		if (!err)
2638 			goto retry_userptr;
2639 
2640 		trace_xe_vma_fail(vma);
2641 	}
2642 
2643 	return err;
2644 }
2645 
2646 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2647 {
2648 	int ret = 0;
2649 
2650 	lockdep_assert_held_write(&vm->lock);
2651 
2652 #ifdef TEST_VM_ASYNC_OPS_ERROR
2653 	if (op->inject_error) {
2654 		op->inject_error = false;
2655 		return -ENOMEM;
2656 	}
2657 #endif
2658 
2659 	switch (op->base.op) {
2660 	case DRM_GPUVA_OP_MAP:
2661 		ret = __xe_vma_op_execute(vm, op->map.vma, op);
2662 		break;
2663 	case DRM_GPUVA_OP_REMAP:
2664 	{
2665 		struct xe_vma *vma;
2666 
2667 		if (!op->remap.unmap_done)
2668 			vma = gpuva_to_vma(op->base.remap.unmap->va);
2669 		else if (op->remap.prev)
2670 			vma = op->remap.prev;
2671 		else
2672 			vma = op->remap.next;
2673 
2674 		ret = __xe_vma_op_execute(vm, vma, op);
2675 		break;
2676 	}
2677 	case DRM_GPUVA_OP_UNMAP:
2678 		ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2679 					  op);
2680 		break;
2681 	case DRM_GPUVA_OP_PREFETCH:
2682 		ret = __xe_vma_op_execute(vm,
2683 					  gpuva_to_vma(op->base.prefetch.va),
2684 					  op);
2685 		break;
2686 	default:
2687 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2688 	}
2689 
2690 	return ret;
2691 }
2692 
2693 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
2694 {
2695 	bool last = op->flags & XE_VMA_OP_LAST;
2696 
2697 	if (last) {
2698 		while (op->num_syncs--)
2699 			xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2700 		kfree(op->syncs);
2701 		if (op->q)
2702 			xe_exec_queue_put(op->q);
2703 	}
2704 	if (!list_empty(&op->link))
2705 		list_del(&op->link);
2706 	if (op->ops)
2707 		drm_gpuva_ops_free(&vm->gpuvm, op->ops);
2708 	if (last)
2709 		xe_vm_put(vm);
2710 }
2711 
2712 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2713 			     bool post_commit, bool prev_post_commit,
2714 			     bool next_post_commit)
2715 {
2716 	lockdep_assert_held_write(&vm->lock);
2717 
2718 	switch (op->base.op) {
2719 	case DRM_GPUVA_OP_MAP:
2720 		if (op->map.vma) {
2721 			prep_vma_destroy(vm, op->map.vma, post_commit);
2722 			xe_vma_destroy_unlocked(op->map.vma);
2723 		}
2724 		break;
2725 	case DRM_GPUVA_OP_UNMAP:
2726 	{
2727 		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2728 
2729 		if (vma) {
2730 			down_read(&vm->userptr.notifier_lock);
2731 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2732 			up_read(&vm->userptr.notifier_lock);
2733 			if (post_commit)
2734 				xe_vm_insert_vma(vm, vma);
2735 		}
2736 		break;
2737 	}
2738 	case DRM_GPUVA_OP_REMAP:
2739 	{
2740 		struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2741 
2742 		if (op->remap.prev) {
2743 			prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2744 			xe_vma_destroy_unlocked(op->remap.prev);
2745 		}
2746 		if (op->remap.next) {
2747 			prep_vma_destroy(vm, op->remap.next, next_post_commit);
2748 			xe_vma_destroy_unlocked(op->remap.next);
2749 		}
2750 		if (vma) {
2751 			down_read(&vm->userptr.notifier_lock);
2752 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2753 			up_read(&vm->userptr.notifier_lock);
2754 			if (post_commit)
2755 				xe_vm_insert_vma(vm, vma);
2756 		}
2757 		break;
2758 	}
2759 	case DRM_GPUVA_OP_PREFETCH:
2760 		/* Nothing to do */
2761 		break;
2762 	default:
2763 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2764 	}
2765 }
2766 
2767 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2768 				     struct drm_gpuva_ops **ops,
2769 				     int num_ops_list)
2770 {
2771 	int i;
2772 
2773 	for (i = num_ops_list - 1; i >= 0; --i) {
2774 		struct drm_gpuva_ops *__ops = ops[i];
2775 		struct drm_gpuva_op *__op;
2776 
2777 		if (!__ops)
2778 			continue;
2779 
2780 		drm_gpuva_for_each_op_reverse(__op, __ops) {
2781 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2782 
2783 			xe_vma_op_unwind(vm, op,
2784 					 op->flags & XE_VMA_OP_COMMITTED,
2785 					 op->flags & XE_VMA_OP_PREV_COMMITTED,
2786 					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2787 		}
2788 
2789 		drm_gpuva_ops_free(&vm->gpuvm, __ops);
2790 	}
2791 }
2792 
2793 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2794 				     struct list_head *ops_list)
2795 {
2796 	struct xe_vma_op *op, *next;
2797 	int err;
2798 
2799 	lockdep_assert_held_write(&vm->lock);
2800 
2801 	list_for_each_entry_safe(op, next, ops_list, link) {
2802 		err = xe_vma_op_execute(vm, op);
2803 		if (err) {
2804 			drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
2805 				 op->base.op, err);
2806 			/*
2807 			 * FIXME: Killing VM rather than proper error handling
2808 			 */
2809 			xe_vm_kill(vm);
2810 			return -ENOSPC;
2811 		}
2812 		xe_vma_op_cleanup(vm, op);
2813 	}
2814 
2815 	return 0;
2816 }
2817 
2818 #ifdef TEST_VM_ASYNC_OPS_ERROR
2819 #define SUPPORTED_FLAGS	\
2820 	(FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
2821 	 XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \
2822 	 XE_VM_BIND_FLAG_NULL | 0xffff)
2823 #else
2824 #define SUPPORTED_FLAGS	\
2825 	(XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
2826 	 XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | \
2827 	 0xffff)
2828 #endif
2829 #define XE_64K_PAGE_MASK 0xffffull
2830 
2831 #define MAX_BINDS	512	/* FIXME: Picking random upper limit */
2832 
2833 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2834 				    struct drm_xe_vm_bind *args,
2835 				    struct drm_xe_vm_bind_op **bind_ops,
2836 				    bool *async)
2837 {
2838 	int err;
2839 	int i;
2840 
2841 	if (XE_IOCTL_DBG(xe, args->extensions) ||
2842 	    XE_IOCTL_DBG(xe, !args->num_binds) ||
2843 	    XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
2844 		return -EINVAL;
2845 
2846 	if (args->num_binds > 1) {
2847 		u64 __user *bind_user =
2848 			u64_to_user_ptr(args->vector_of_binds);
2849 
2850 		*bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
2851 				    args->num_binds, GFP_KERNEL);
2852 		if (!*bind_ops)
2853 			return -ENOMEM;
2854 
2855 		err = __copy_from_user(*bind_ops, bind_user,
2856 				       sizeof(struct drm_xe_vm_bind_op) *
2857 				       args->num_binds);
2858 		if (XE_IOCTL_DBG(xe, err)) {
2859 			err = -EFAULT;
2860 			goto free_bind_ops;
2861 		}
2862 	} else {
2863 		*bind_ops = &args->bind;
2864 	}
2865 
2866 	for (i = 0; i < args->num_binds; ++i) {
2867 		u64 range = (*bind_ops)[i].range;
2868 		u64 addr = (*bind_ops)[i].addr;
2869 		u32 op = (*bind_ops)[i].op;
2870 		u32 flags = (*bind_ops)[i].flags;
2871 		u32 obj = (*bind_ops)[i].obj;
2872 		u64 obj_offset = (*bind_ops)[i].obj_offset;
2873 		u32 region = (*bind_ops)[i].region;
2874 		bool is_null = flags & XE_VM_BIND_FLAG_NULL;
2875 
2876 		if (i == 0) {
2877 			*async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
2878 			if (XE_IOCTL_DBG(xe, !*async && args->num_syncs)) {
2879 				err = -EINVAL;
2880 				goto free_bind_ops;
2881 			}
2882 		} else if (XE_IOCTL_DBG(xe, *async !=
2883 					!!(flags & XE_VM_BIND_FLAG_ASYNC))) {
2884 			err = -EINVAL;
2885 			goto free_bind_ops;
2886 		}
2887 
2888 		if (XE_IOCTL_DBG(xe, op > XE_VM_BIND_OP_PREFETCH) ||
2889 		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
2890 		    XE_IOCTL_DBG(xe, obj && is_null) ||
2891 		    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
2892 		    XE_IOCTL_DBG(xe, op != XE_VM_BIND_OP_MAP &&
2893 				 is_null) ||
2894 		    XE_IOCTL_DBG(xe, !obj &&
2895 				 op == XE_VM_BIND_OP_MAP &&
2896 				 !is_null) ||
2897 		    XE_IOCTL_DBG(xe, !obj &&
2898 				 op == XE_VM_BIND_OP_UNMAP_ALL) ||
2899 		    XE_IOCTL_DBG(xe, addr &&
2900 				 op == XE_VM_BIND_OP_UNMAP_ALL) ||
2901 		    XE_IOCTL_DBG(xe, range &&
2902 				 op == XE_VM_BIND_OP_UNMAP_ALL) ||
2903 		    XE_IOCTL_DBG(xe, obj &&
2904 				 op == XE_VM_BIND_OP_MAP_USERPTR) ||
2905 		    XE_IOCTL_DBG(xe, obj &&
2906 				 op == XE_VM_BIND_OP_PREFETCH) ||
2907 		    XE_IOCTL_DBG(xe, region &&
2908 				 op != XE_VM_BIND_OP_PREFETCH) ||
2909 		    XE_IOCTL_DBG(xe, !(BIT(region) &
2910 				       xe->info.mem_region_mask)) ||
2911 		    XE_IOCTL_DBG(xe, obj &&
2912 				 op == XE_VM_BIND_OP_UNMAP)) {
2913 			err = -EINVAL;
2914 			goto free_bind_ops;
2915 		}
2916 
2917 		if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
2918 		    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
2919 		    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
2920 		    XE_IOCTL_DBG(xe, !range &&
2921 				 op != XE_VM_BIND_OP_UNMAP_ALL)) {
2922 			err = -EINVAL;
2923 			goto free_bind_ops;
2924 		}
2925 	}
2926 
2927 	return 0;
2928 
2929 free_bind_ops:
2930 	if (args->num_binds > 1)
2931 		kfree(*bind_ops);
2932 	return err;
2933 }
2934 
2935 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2936 {
2937 	struct xe_device *xe = to_xe_device(dev);
2938 	struct xe_file *xef = to_xe_file(file);
2939 	struct drm_xe_vm_bind *args = data;
2940 	struct drm_xe_sync __user *syncs_user;
2941 	struct xe_bo **bos = NULL;
2942 	struct drm_gpuva_ops **ops = NULL;
2943 	struct xe_vm *vm;
2944 	struct xe_exec_queue *q = NULL;
2945 	u32 num_syncs;
2946 	struct xe_sync_entry *syncs = NULL;
2947 	struct drm_xe_vm_bind_op *bind_ops;
2948 	LIST_HEAD(ops_list);
2949 	bool async;
2950 	int err;
2951 	int i;
2952 
2953 	err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
2954 	if (err)
2955 		return err;
2956 
2957 	if (args->exec_queue_id) {
2958 		q = xe_exec_queue_lookup(xef, args->exec_queue_id);
2959 		if (XE_IOCTL_DBG(xe, !q)) {
2960 			err = -ENOENT;
2961 			goto free_objs;
2962 		}
2963 
2964 		if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
2965 			err = -EINVAL;
2966 			goto put_exec_queue;
2967 		}
2968 
2969 		if (XE_IOCTL_DBG(xe, async !=
2970 				 !!(q->flags & EXEC_QUEUE_FLAG_VM_ASYNC))) {
2971 			err = -EINVAL;
2972 			goto put_exec_queue;
2973 		}
2974 	}
2975 
2976 	vm = xe_vm_lookup(xef, args->vm_id);
2977 	if (XE_IOCTL_DBG(xe, !vm)) {
2978 		err = -EINVAL;
2979 		goto put_exec_queue;
2980 	}
2981 
2982 	if (!args->exec_queue_id) {
2983 		if (XE_IOCTL_DBG(xe, async !=
2984 				 !!(vm->flags & XE_VM_FLAG_ASYNC_DEFAULT))) {
2985 			err = -EINVAL;
2986 			goto put_vm;
2987 		}
2988 	}
2989 
2990 	err = down_write_killable(&vm->lock);
2991 	if (err)
2992 		goto put_vm;
2993 
2994 	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
2995 		err = -ENOENT;
2996 		goto release_vm_lock;
2997 	}
2998 
2999 	for (i = 0; i < args->num_binds; ++i) {
3000 		u64 range = bind_ops[i].range;
3001 		u64 addr = bind_ops[i].addr;
3002 
3003 		if (XE_IOCTL_DBG(xe, range > vm->size) ||
3004 		    XE_IOCTL_DBG(xe, addr > vm->size - range)) {
3005 			err = -EINVAL;
3006 			goto release_vm_lock;
3007 		}
3008 
3009 		if (bind_ops[i].tile_mask) {
3010 			u64 valid_tiles = BIT(xe->info.tile_count) - 1;
3011 
3012 			if (XE_IOCTL_DBG(xe, bind_ops[i].tile_mask &
3013 					 ~valid_tiles)) {
3014 				err = -EINVAL;
3015 				goto release_vm_lock;
3016 			}
3017 		}
3018 	}
3019 
3020 	bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
3021 	if (!bos) {
3022 		err = -ENOMEM;
3023 		goto release_vm_lock;
3024 	}
3025 
3026 	ops = kzalloc(sizeof(*ops) * args->num_binds, GFP_KERNEL);
3027 	if (!ops) {
3028 		err = -ENOMEM;
3029 		goto release_vm_lock;
3030 	}
3031 
3032 	for (i = 0; i < args->num_binds; ++i) {
3033 		struct drm_gem_object *gem_obj;
3034 		u64 range = bind_ops[i].range;
3035 		u64 addr = bind_ops[i].addr;
3036 		u32 obj = bind_ops[i].obj;
3037 		u64 obj_offset = bind_ops[i].obj_offset;
3038 
3039 		if (!obj)
3040 			continue;
3041 
3042 		gem_obj = drm_gem_object_lookup(file, obj);
3043 		if (XE_IOCTL_DBG(xe, !gem_obj)) {
3044 			err = -ENOENT;
3045 			goto put_obj;
3046 		}
3047 		bos[i] = gem_to_xe_bo(gem_obj);
3048 
3049 		if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
3050 		    XE_IOCTL_DBG(xe, obj_offset >
3051 				 bos[i]->size - range)) {
3052 			err = -EINVAL;
3053 			goto put_obj;
3054 		}
3055 
3056 		if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3057 			if (XE_IOCTL_DBG(xe, obj_offset &
3058 					 XE_64K_PAGE_MASK) ||
3059 			    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3060 			    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3061 				err = -EINVAL;
3062 				goto put_obj;
3063 			}
3064 		}
3065 	}
3066 
3067 	if (args->num_syncs) {
3068 		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3069 		if (!syncs) {
3070 			err = -ENOMEM;
3071 			goto put_obj;
3072 		}
3073 	}
3074 
3075 	syncs_user = u64_to_user_ptr(args->syncs);
3076 	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3077 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3078 					  &syncs_user[num_syncs], false,
3079 					  xe_vm_no_dma_fences(vm));
3080 		if (err)
3081 			goto free_syncs;
3082 	}
3083 
3084 	for (i = 0; i < args->num_binds; ++i) {
3085 		u64 range = bind_ops[i].range;
3086 		u64 addr = bind_ops[i].addr;
3087 		u32 op = bind_ops[i].op;
3088 		u32 flags = bind_ops[i].flags;
3089 		u64 obj_offset = bind_ops[i].obj_offset;
3090 		u8 tile_mask = bind_ops[i].tile_mask;
3091 		u32 region = bind_ops[i].region;
3092 
3093 		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3094 						  addr, range, op, flags,
3095 						  tile_mask, region);
3096 		if (IS_ERR(ops[i])) {
3097 			err = PTR_ERR(ops[i]);
3098 			ops[i] = NULL;
3099 			goto unwind_ops;
3100 		}
3101 
3102 		err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3103 					      &ops_list,
3104 					      i == args->num_binds - 1,
3105 					      async);
3106 		if (err)
3107 			goto unwind_ops;
3108 	}
3109 
3110 	/* Nothing to do */
3111 	if (list_empty(&ops_list)) {
3112 		err = -ENODATA;
3113 		goto unwind_ops;
3114 	}
3115 
3116 	xe_vm_get(vm);
3117 	if (q)
3118 		xe_exec_queue_get(q);
3119 
3120 	err = vm_bind_ioctl_ops_execute(vm, &ops_list);
3121 
3122 	up_write(&vm->lock);
3123 
3124 	if (q)
3125 		xe_exec_queue_put(q);
3126 	xe_vm_put(vm);
3127 
3128 	for (i = 0; bos && i < args->num_binds; ++i)
3129 		xe_bo_put(bos[i]);
3130 
3131 	kfree(bos);
3132 	kfree(ops);
3133 	if (args->num_binds > 1)
3134 		kfree(bind_ops);
3135 
3136 	return err;
3137 
3138 unwind_ops:
3139 	vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3140 free_syncs:
3141 	for (i = 0; err == -ENODATA && i < num_syncs; i++) {
3142 		struct dma_fence *fence =
3143 			xe_exec_queue_last_fence_get(to_wait_exec_queue(vm, q), vm);
3144 
3145 		xe_sync_entry_signal(&syncs[i], NULL, fence);
3146 	}
3147 	while (num_syncs--)
3148 		xe_sync_entry_cleanup(&syncs[num_syncs]);
3149 
3150 	kfree(syncs);
3151 put_obj:
3152 	for (i = 0; i < args->num_binds; ++i)
3153 		xe_bo_put(bos[i]);
3154 release_vm_lock:
3155 	up_write(&vm->lock);
3156 put_vm:
3157 	xe_vm_put(vm);
3158 put_exec_queue:
3159 	if (q)
3160 		xe_exec_queue_put(q);
3161 free_objs:
3162 	kfree(bos);
3163 	kfree(ops);
3164 	if (args->num_binds > 1)
3165 		kfree(bind_ops);
3166 	return err == -ENODATA ? 0 : err;
3167 }
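
/*
 * Illustrative userspace sketch (not part of the driver): a single,
 * synchronous MAP bind submitted without an exec queue. Assumes the VM was
 * created without DRM_XE_VM_CREATE_ASYNC_DEFAULT and that fd, vm_id,
 * bo_handle and bo_size are provided by the caller; struct, op and flag
 * names follow the uAPI used above.
 *
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm_id,
 *		.num_binds = 1,
 *		.bind = {
 *			.obj = bo_handle,
 *			.obj_offset = 0,
 *			.addr = 0x1a0000,
 *			.range = bo_size,
 *			.op = XE_VM_BIND_OP_MAP,
 *		},
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, &bind))
 *		return -errno;
 */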
3168 
3169 /**
3170  * xe_vm_lock() - Lock the vm's dma_resv object
3171  * @vm: The struct xe_vm whose lock is to be locked
3172  * @intr: Whether to perform any waits interruptibly
3173  *
3174  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3175  * contended lock was interrupted. If @intr is false, the function
3176  * always returns 0.
3177  */
3178 int xe_vm_lock(struct xe_vm *vm, bool intr)
3179 {
3180 	if (intr)
3181 		return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3182 
3183 	return dma_resv_lock(xe_vm_resv(vm), NULL);
3184 }
3185 
3186 /**
3187  * xe_vm_unlock() - Unlock the vm's dma_resv object
3188  * @vm: The struct xe_vm whose lock is to be released.
3189  *
3190  * Unlock the vm's dma_resv object previously locked by xe_vm_lock().
3191  */
3192 void xe_vm_unlock(struct xe_vm *vm)
3193 {
3194 	dma_resv_unlock(xe_vm_resv(vm));
3195 }
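
/*
 * Typical usage of the pair above (illustrative):
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *	... touch state protected by the VM's dma_resv ...
 *	xe_vm_unlock(vm);
 */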
3196 
3197 /**
3198  * xe_vm_invalidate_vma() - invalidate GPU mappings for VMA without a lock
3199  * @vma: VMA to invalidate
3200  *
3201  * Walks the VMA's page-table leaves, zeroing the entries owned by this
3202  * VMA, invalidates the TLBs, and blocks until the TLB invalidation is
3203  * complete.
3204  *
3205  * Return: 0 on success, negative error code otherwise.
3206  */
3207 int xe_vm_invalidate_vma(struct xe_vma *vma)
3208 {
3209 	struct xe_device *xe = xe_vma_vm(vma)->xe;
3210 	struct xe_tile *tile;
3211 	u32 tile_needs_invalidate = 0;
3212 	int seqno[XE_MAX_TILES_PER_DEVICE];
3213 	u8 id;
3214 	int ret;
3215 
3216 	xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
3217 	xe_assert(xe, !xe_vma_is_null(vma));
3218 	trace_xe_vma_usm_invalidate(vma);
3219 
3220 	/* Check that we don't race with page-table updates */
3221 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3222 		if (xe_vma_is_userptr(vma)) {
3223 			WARN_ON_ONCE(!mmu_interval_check_retry
3224 				     (&vma->userptr.notifier,
3225 				      vma->userptr.notifier_seq));
3226 			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3227 							     DMA_RESV_USAGE_BOOKKEEP));
3228 
3229 		} else {
3230 			xe_bo_assert_held(xe_vma_bo(vma));
3231 		}
3232 	}
3233 
3234 	for_each_tile(tile, xe, id) {
3235 		if (xe_pt_zap_ptes(tile, vma)) {
3236 			tile_needs_invalidate |= BIT(id);
3237 			xe_device_wmb(xe);
3238 			/*
3239 			 * FIXME: We potentially need to invalidate multiple
3240 			 * GTs within the tile
3241 			 */
3242 			seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3243 			if (seqno[id] < 0)
3244 				return seqno[id];
3245 		}
3246 	}
3247 
3248 	for_each_tile(tile, xe, id) {
3249 		if (tile_needs_invalidate & BIT(id)) {
3250 			ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3251 			if (ret < 0)
3252 				return ret;
3253 		}
3254 	}
3255 
3256 	vma->usm.tile_invalidated = vma->tile_mask;
3257 
3258 	return 0;
3259 }
3260 
3261 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3262 {
3263 	struct drm_gpuva *gpuva;
3264 	bool is_vram;
3265 	u64 addr;
3266 
3267 	if (!down_read_trylock(&vm->lock)) {
3268 		drm_printf(p, " Failed to acquire VM lock to dump capture");
3269 		return 0;
3270 	}
3271 	if (vm->pt_root[gt_id]) {
3272 		addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
3273 		is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
3274 		drm_printf(p, " VM root: A:0x%llx %s\n", addr,
3275 			   is_vram ? "VRAM" : "SYS");
3276 	}
3277 
3278 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3279 		struct xe_vma *vma = gpuva_to_vma(gpuva);
3280 		bool is_userptr = xe_vma_is_userptr(vma);
3281 		bool is_null = xe_vma_is_null(vma);
3282 
3283 		if (is_null) {
3284 			addr = 0;
3285 		} else if (is_userptr) {
3286 			struct xe_res_cursor cur;
3287 
3288 			if (vma->userptr.sg) {
3289 				xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
3290 						&cur);
3291 				addr = xe_res_dma(&cur);
3292 			} else {
3293 				addr = 0;
3294 			}
3295 		} else {
3296 			addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
3297 			is_vram = xe_bo_is_vram(xe_vma_bo(vma));
3298 		}
3299 		drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3300 			   xe_vma_start(vma), xe_vma_end(vma) - 1,
3301 			   xe_vma_size(vma),
3302 			   addr, is_null ? "NULL" : is_userptr ? "USR" :
3303 			   is_vram ? "VRAM" : "SYS");
3304 	}
3305 	up_read(&vm->lock);
3306 
3307 	return 0;
3308 }
3309