xref: /linux/drivers/gpu/drm/xe/xe_vm.c (revision dd08ebf6c3525a7ea2186e636df064ea47281987)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_vm.h"
7 
8 #include <linux/dma-fence-array.h>
9 
10 #include <drm/ttm/ttm_execbuf_util.h>
11 #include <drm/ttm/ttm_tt.h>
12 #include <drm/xe_drm.h>
13 #include <linux/kthread.h>
14 #include <linux/mm.h>
15 #include <linux/swap.h>
16 
17 #include "xe_bo.h"
18 #include "xe_device.h"
19 #include "xe_engine.h"
20 #include "xe_gt.h"
21 #include "xe_gt_pagefault.h"
22 #include "xe_migrate.h"
23 #include "xe_pm.h"
24 #include "xe_preempt_fence.h"
25 #include "xe_pt.h"
26 #include "xe_res_cursor.h"
27 #include "xe_trace.h"
28 #include "xe_sync.h"
29 
30 #define TEST_VM_ASYNC_OPS_ERROR
31 
32 /**
33  * xe_vma_userptr_check_repin() - Advisory check for repin needed
34  * @vma: The userptr vma
35  *
36  * Check if the userptr vma has been invalidated since last successful
37  * repin. The check is advisory only and the function can be called
38  * without the vm->userptr.notifier_lock held. There is no guarantee that the
39  * vma userptr will remain valid after a lockless check, so typically
40  * the call needs to be followed by a proper check under the notifier_lock.
41  *
42  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
43  */
44 int xe_vma_userptr_check_repin(struct xe_vma *vma)
45 {
46 	return mmu_interval_check_retry(&vma->userptr.notifier,
47 					vma->userptr.notifier_seq) ?
48 		-EAGAIN : 0;
49 }
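
/*
 * Illustrative sketch (editor's example, not part of the driver): callers
 * typically pair this lockless advisory check with a repin attempt and then
 * an authoritative check under the notifier lock. "vma", "vm" and "err" are
 * assumed to be in scope and vm->lock is assumed held:
 *
 *	if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
 *		err = xe_vma_userptr_pin_pages(vma);	// repin without notifier_lock held
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	err = __xe_vm_userptr_needs_repin(vm);		// authoritative, under the lock
 *	up_read(&vm->userptr.notifier_lock);
 */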
50 
51 int xe_vma_userptr_pin_pages(struct xe_vma *vma)
52 {
53 	struct xe_vm *vm = vma->vm;
54 	struct xe_device *xe = vm->xe;
55 	const unsigned long num_pages =
56 		(vma->end - vma->start + 1) >> PAGE_SHIFT;
57 	struct page **pages;
58 	bool in_kthread = !current->mm;
59 	unsigned long notifier_seq;
60 	int pinned, ret, i;
61 	bool read_only = vma->pte_flags & PTE_READ_ONLY;
62 
63 	lockdep_assert_held(&vm->lock);
64 	XE_BUG_ON(!xe_vma_is_userptr(vma));
65 retry:
66 	if (vma->destroyed)
67 		return 0;
68 
69 	notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
70 	if (notifier_seq == vma->userptr.notifier_seq)
71 		return 0;
72 
73 	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
74 	if (!pages)
75 		return -ENOMEM;
76 
77 	if (vma->userptr.sg) {
78 		dma_unmap_sgtable(xe->drm.dev,
79 				  vma->userptr.sg,
80 				  read_only ? DMA_TO_DEVICE :
81 				  DMA_BIDIRECTIONAL, 0);
82 		sg_free_table(vma->userptr.sg);
83 		vma->userptr.sg = NULL;
84 	}
85 
86 	pinned = ret = 0;
87 	if (in_kthread) {
88 		if (!mmget_not_zero(vma->userptr.notifier.mm)) {
89 			ret = -EFAULT;
90 			goto mm_closed;
91 		}
92 		kthread_use_mm(vma->userptr.notifier.mm);
93 	}
94 
95 	while (pinned < num_pages) {
96 		ret = get_user_pages_fast(vma->userptr.ptr + pinned * PAGE_SIZE,
97 					  num_pages - pinned,
98 					  read_only ? 0 : FOLL_WRITE,
99 					  &pages[pinned]);
100 		if (ret < 0) {
101 			if (in_kthread)
102 				ret = 0;
103 			break;
104 		}
105 
106 		pinned += ret;
107 		ret = 0;
108 	}
109 
110 	if (in_kthread) {
111 		kthread_unuse_mm(vma->userptr.notifier.mm);
112 		mmput(vma->userptr.notifier.mm);
113 	}
114 mm_closed:
115 	if (ret)
116 		goto out;
117 
118 	ret = sg_alloc_table_from_pages(&vma->userptr.sgt, pages, pinned,
119 					0, (u64)pinned << PAGE_SHIFT,
120 					GFP_KERNEL);
121 	if (ret) {
122 		vma->userptr.sg = NULL;
123 		goto out;
124 	}
125 	vma->userptr.sg = &vma->userptr.sgt;
126 
127 	ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
128 			      read_only ? DMA_TO_DEVICE :
129 			      DMA_BIDIRECTIONAL,
130 			      DMA_ATTR_SKIP_CPU_SYNC |
131 			      DMA_ATTR_NO_KERNEL_MAPPING);
132 	if (ret) {
133 		sg_free_table(vma->userptr.sg);
134 		vma->userptr.sg = NULL;
135 		goto out;
136 	}
137 
138 	for (i = 0; i < pinned; ++i) {
139 		if (!read_only) {
140 			lock_page(pages[i]);
141 			set_page_dirty(pages[i]);
142 			unlock_page(pages[i]);
143 		}
144 
145 		mark_page_accessed(pages[i]);
146 	}
147 
148 out:
149 	release_pages(pages, pinned);
150 	kvfree(pages);
151 
152 	if (!(ret < 0)) {
153 		vma->userptr.notifier_seq = notifier_seq;
154 		if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
155 			goto retry;
156 	}
157 
158 	return ret < 0 ? ret : 0;
159 }
160 
161 static bool preempt_fences_waiting(struct xe_vm *vm)
162 {
163 	struct xe_engine *e;
164 
165 	lockdep_assert_held(&vm->lock);
166 	xe_vm_assert_held(vm);
167 
168 	list_for_each_entry(e, &vm->preempt.engines, compute.link) {
169 		if (!e->compute.pfence ||
170 		    test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
171 			     &e->compute.pfence->flags)) {
172 			return true;
173 		}
174 	}
175 
176 	return false;
177 }
178 
179 static void free_preempt_fences(struct list_head *list)
180 {
181 	struct list_head *link, *next;
182 
183 	list_for_each_safe(link, next, list)
184 		xe_preempt_fence_free(to_preempt_fence_from_link(link));
185 }
186 
187 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
188 				unsigned int *count)
189 {
190 	lockdep_assert_held(&vm->lock);
191 	xe_vm_assert_held(vm);
192 
193 	if (*count >= vm->preempt.num_engines)
194 		return 0;
195 
196 	for (; *count < vm->preempt.num_engines; ++(*count)) {
197 		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
198 
199 		if (IS_ERR(pfence))
200 			return PTR_ERR(pfence);
201 
202 		list_move_tail(xe_preempt_fence_link(pfence), list);
203 	}
204 
205 	return 0;
206 }
207 
208 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
209 {
210 	struct xe_engine *e;
211 
212 	xe_vm_assert_held(vm);
213 
214 	list_for_each_entry(e, &vm->preempt.engines, compute.link) {
215 		if (e->compute.pfence) {
216 			long timeout = dma_fence_wait(e->compute.pfence, false);
217 
218 			if (timeout < 0)
219 				return -ETIME;
220 			dma_fence_put(e->compute.pfence);
221 			e->compute.pfence = NULL;
222 		}
223 	}
224 
225 	return 0;
226 }
227 
228 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
229 {
230 	struct list_head *link;
231 	struct xe_engine *e;
232 
233 	list_for_each_entry(e, &vm->preempt.engines, compute.link) {
234 		struct dma_fence *fence;
235 
236 		link = list->next;
237 		XE_BUG_ON(link == list);
238 
239 		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
240 					     e, e->compute.context,
241 					     ++e->compute.seqno);
242 		dma_fence_put(e->compute.pfence);
243 		e->compute.pfence = fence;
244 	}
245 }
246 
247 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
248 {
249 	struct xe_engine *e;
250 	struct ww_acquire_ctx ww;
251 	int err;
252 
253 	err = xe_bo_lock(bo, &ww, vm->preempt.num_engines, true);
254 	if (err)
255 		return err;
256 
257 	list_for_each_entry(e, &vm->preempt.engines, compute.link)
258 		if (e->compute.pfence) {
259 			dma_resv_add_fence(bo->ttm.base.resv,
260 					   e->compute.pfence,
261 					   DMA_RESV_USAGE_BOOKKEEP);
262 		}
263 
264 	xe_bo_unlock(bo, &ww);
265 	return 0;
266 }
267 
268 /**
269  * xe_vm_fence_all_extobjs() - Add a fence to vm's external objects' resv
270  * @vm: The vm.
271  * @fence: The fence to add.
272  * @usage: The resv usage for the fence.
273  *
274  * Loops over all of the vm's external object bindings and adds a @fence
275  * with the given @usage to all of the external objects' reservation
276  * objects.
277  */
278 void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
279 			     enum dma_resv_usage usage)
280 {
281 	struct xe_vma *vma;
282 
283 	list_for_each_entry(vma, &vm->extobj.list, extobj.link)
284 		dma_resv_add_fence(vma->bo->ttm.base.resv, fence, usage);
285 }
286 
287 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
288 {
289 	struct xe_engine *e;
290 
291 	lockdep_assert_held(&vm->lock);
292 	xe_vm_assert_held(vm);
293 
294 	list_for_each_entry(e, &vm->preempt.engines, compute.link) {
295 		e->ops->resume(e);
296 
297 		dma_resv_add_fence(&vm->resv, e->compute.pfence,
298 				   DMA_RESV_USAGE_BOOKKEEP);
299 		xe_vm_fence_all_extobjs(vm, e->compute.pfence,
300 					DMA_RESV_USAGE_BOOKKEEP);
301 	}
302 }
303 
304 int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
305 {
306 	struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
307 	struct ttm_validate_buffer *tv;
308 	struct ww_acquire_ctx ww;
309 	struct list_head objs;
310 	struct dma_fence *pfence;
311 	int err;
312 	bool wait;
313 
314 	XE_BUG_ON(!xe_vm_in_compute_mode(vm));
315 
316 	down_write(&vm->lock);
317 
318 	err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
319 	if (err)
320 		goto out_unlock_outer;
321 
322 	pfence = xe_preempt_fence_create(e, e->compute.context,
323 					 ++e->compute.seqno);
324 	if (!pfence) {
325 		err = -ENOMEM;
326 		goto out_unlock;
327 	}
328 
329 	list_add(&e->compute.link, &vm->preempt.engines);
330 	++vm->preempt.num_engines;
331 	e->compute.pfence = pfence;
332 
333 	down_read(&vm->userptr.notifier_lock);
334 
335 	dma_resv_add_fence(&vm->resv, pfence,
336 			   DMA_RESV_USAGE_BOOKKEEP);
337 
338 	xe_vm_fence_all_extobjs(vm, pfence, DMA_RESV_USAGE_BOOKKEEP);
339 
340 	/*
341 	 * Check to see if a preemption on the VM or a userptr invalidation
342 	 * is in flight; if so, trigger this preempt fence to sync state with
343 	 * the other preempt fences on the VM.
344 	 */
345 	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
346 	if (wait)
347 		dma_fence_enable_sw_signaling(pfence);
348 
349 	up_read(&vm->userptr.notifier_lock);
350 
351 out_unlock:
352 	xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
353 out_unlock_outer:
354 	up_write(&vm->lock);
355 
356 	return err;
357 }
358 
359 /**
360  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
361  * that need repinning.
362  * @vm: The VM.
363  *
364  * This function checks for whether the VM has userptrs that need repinning,
365  * and provides a release-type barrier on the userptr.notifier_lock after
366  * checking.
367  *
368  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
369  */
370 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
371 {
372 	lockdep_assert_held_read(&vm->userptr.notifier_lock);
373 
374 	return (list_empty(&vm->userptr.repin_list) &&
375 		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
376 }
377 
378 /**
379  * xe_vm_lock_dma_resv() - Lock the vm dma_resv object and the dma_resv
380  * objects of the vm's external buffer objects.
381  * @vm: The vm.
382  * @ww: Pointer to a struct ww_acquire_ctx locking context.
383  * @tv_onstack: Array size XE_ONSTACK_TV of storage for the struct
384  * ttm_validate_buffers used for locking.
385  * @tv: Pointer to a pointer that on output contains the actual storage used.
386  * @objs: List head for the buffer objects locked.
387  * @intr: Whether to lock interruptible.
388  * @num_shared: Number of dma-fence slots to reserve in the locked objects.
389  *
390  * Locks the vm dma-resv objects and all the dma-resv objects of the
391  * buffer objects on the vm external object list. The TTM utilities require
392  * a list of struct ttm_validate_buffers pointing to the actual buffer
393  * objects to lock. Storage for those struct ttm_validate_buffers should
394  * be provided in @tv_onstack, and is typically reserved on the stack
395  * of the caller. If the size of @tv_onstack isn't sufficient, then
396  * storage will be allocated internally using kvmalloc().
397  *
398  * The function performs deadlock handling internally, and after a
399  * successful return the ww locking transaction should be considered
400  * sealed.
401  *
402  * Return: 0 on success, Negative error code on error. In particular if
403  * @intr is set to true, -EINTR or -ERESTARTSYS may be returned. In case
404  * of error, any locking performed has been reverted.
405  */
406 int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
407 			struct ttm_validate_buffer *tv_onstack,
408 			struct ttm_validate_buffer **tv,
409 			struct list_head *objs,
410 			bool intr,
411 			unsigned int num_shared)
412 {
413 	struct ttm_validate_buffer *tv_vm, *tv_bo;
414 	struct xe_vma *vma, *next;
415 	LIST_HEAD(dups);
416 	int err;
417 
418 	lockdep_assert_held(&vm->lock);
419 
420 	if (vm->extobj.entries < XE_ONSTACK_TV) {
421 		tv_vm = tv_onstack;
422 	} else {
423 		tv_vm = kvmalloc_array(vm->extobj.entries + 1, sizeof(*tv_vm),
424 				       GFP_KERNEL);
425 		if (!tv_vm)
426 			return -ENOMEM;
427 	}
428 	tv_bo = tv_vm + 1;
429 
430 	INIT_LIST_HEAD(objs);
431 	list_for_each_entry(vma, &vm->extobj.list, extobj.link) {
432 		tv_bo->num_shared = num_shared;
433 		tv_bo->bo = &vma->bo->ttm;
434 
435 		list_add_tail(&tv_bo->head, objs);
436 		tv_bo++;
437 	}
438 	tv_vm->num_shared = num_shared;
439 	tv_vm->bo = xe_vm_ttm_bo(vm);
440 	list_add_tail(&tv_vm->head, objs);
441 	err = ttm_eu_reserve_buffers(ww, objs, intr, &dups);
442 	if (err)
443 		goto out_err;
444 
445 	spin_lock(&vm->notifier.list_lock);
446 	list_for_each_entry_safe(vma, next, &vm->notifier.rebind_list,
447 				 notifier.rebind_link) {
448 		xe_bo_assert_held(vma->bo);
449 
450 		list_del_init(&vma->notifier.rebind_link);
451 		if (vma->gt_present && !vma->destroyed)
452 			list_move_tail(&vma->rebind_link, &vm->rebind_list);
453 	}
454 	spin_unlock(&vm->notifier.list_lock);
455 
456 	*tv = tv_vm;
457 	return 0;
458 
459 out_err:
460 	if (tv_vm != tv_onstack)
461 		kvfree(tv_vm);
462 
463 	return err;
464 }
465 
466 /**
467  * xe_vm_unlock_dma_resv() - Unlock reservation objects locked by
468  * xe_vm_lock_dma_resv()
469  * @vm: The vm.
470  * @tv_onstack: The @tv_onstack array given to xe_vm_lock_dma_resv().
471  * @tv: The value of *@tv given by xe_vm_lock_dma_resv().
472  * @ww: The ww_acquire_context used for locking.
473  * @objs: The list returned from xe_vm_lock_dma_resv().
474  *
475  * Unlocks the reservation objects and frees any memory allocated by
476  * xe_vm_lock_dma_resv().
477  */
478 void xe_vm_unlock_dma_resv(struct xe_vm *vm,
479 			   struct ttm_validate_buffer *tv_onstack,
480 			   struct ttm_validate_buffer *tv,
481 			   struct ww_acquire_ctx *ww,
482 			   struct list_head *objs)
483 {
484 	/*
485 	 * Nothing should've been able to enter the list while we were locked,
486 	 * since we've held the dma-resvs of all the vm's external objects,
487 	 * and holding the dma_resv of an object is required for list
488 	 * addition, and we shouldn't add ourselves.
489 	 */
490 	XE_WARN_ON(!list_empty(&vm->notifier.rebind_list));
491 
492 	ttm_eu_backoff_reservation(ww, objs);
493 	if (tv && tv != tv_onstack)
494 		kvfree(tv);
495 }
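
/*
 * Illustrative usage sketch (editor's example, not part of the driver),
 * mirroring the pattern used by xe_vm_add_compute_engine() and the rebind
 * worker; "vm" and do_work() are placeholders assumed by this sketch:
 *
 *	struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
 *	struct ttm_validate_buffer *tv;
 *	struct ww_acquire_ctx ww;
 *	struct list_head objs;
 *	int err;
 *
 *	lockdep_assert_held(&vm->lock);
 *	err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs, true, 1);
 *	if (err)
 *		return err;
 *
 *	do_work(vm);	// operate while the vm and extobj dma-resvs are held
 *
 *	xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
 */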
496 
497 static void preempt_rebind_work_func(struct work_struct *w)
498 {
499 	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
500 	struct xe_vma *vma;
501 	struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
502 	struct ttm_validate_buffer *tv;
503 	struct ww_acquire_ctx ww;
504 	struct list_head objs;
505 	struct dma_fence *rebind_fence;
506 	unsigned int fence_count = 0;
507 	LIST_HEAD(preempt_fences);
508 	int err;
509 	long wait;
510 	int __maybe_unused tries = 0;
511 
512 	XE_BUG_ON(!xe_vm_in_compute_mode(vm));
513 	trace_xe_vm_rebind_worker_enter(vm);
514 
515 	if (xe_vm_is_closed(vm)) {
516 		trace_xe_vm_rebind_worker_exit(vm);
517 		return;
518 	}
519 
520 	down_write(&vm->lock);
521 
522 retry:
523 	if (vm->async_ops.error)
524 		goto out_unlock_outer;
525 
526 	/*
527 	 * Extreme corner case where we exit a VM error state with a munmap
528 	 * style VM unbind in flight which requires a rebind. In this case the
529 	 * rebind needs to install some fences into the dma-resv slots. The
530 	 * worker to do this is already queued; let that worker make progress
531 	 * by dropping vm->lock and trying this again.
532 	 */
533 	if (vm->async_ops.munmap_rebind_inflight) {
534 		up_write(&vm->lock);
535 		flush_work(&vm->async_ops.work);
536 		goto retry;
537 	}
538 
539 	if (xe_vm_userptr_check_repin(vm)) {
540 		err = xe_vm_userptr_pin(vm);
541 		if (err)
542 			goto out_unlock_outer;
543 	}
544 
545 	err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
546 				  false, vm->preempt.num_engines);
547 	if (err)
548 		goto out_unlock_outer;
549 
550 	/* Fresh preempt fences already installed. Everything is running. */
551 	if (!preempt_fences_waiting(vm))
552 		goto out_unlock;
553 
554 	/*
555 	 * This makes sure vm is completely suspended and also balances
556 	 * xe_engine suspend- and resume; we resume *all* vm engines below.
557 	 */
558 	err = wait_for_existing_preempt_fences(vm);
559 	if (err)
560 		goto out_unlock;
561 
562 	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
563 	if (err)
564 		goto out_unlock;
565 
566 	list_for_each_entry(vma, &vm->rebind_list, rebind_link) {
567 		if (xe_vma_is_userptr(vma) || vma->destroyed)
568 			continue;
569 
570 		err = xe_bo_validate(vma->bo, vm, false);
571 		if (err)
572 			goto out_unlock;
573 	}
574 
575 	rebind_fence = xe_vm_rebind(vm, true);
576 	if (IS_ERR(rebind_fence)) {
577 		err = PTR_ERR(rebind_fence);
578 		goto out_unlock;
579 	}
580 
581 	if (rebind_fence) {
582 		dma_fence_wait(rebind_fence, false);
583 		dma_fence_put(rebind_fence);
584 	}
585 
586 	/* Wait on munmap style VM unbinds */
587 	wait = dma_resv_wait_timeout(&vm->resv,
588 				     DMA_RESV_USAGE_KERNEL,
589 				     false, MAX_SCHEDULE_TIMEOUT);
590 	if (wait <= 0) {
591 		err = -ETIME;
592 		goto out_unlock;
593 	}
594 
595 #define retry_required(__tries, __vm) \
596 	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
597 	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
598 	__xe_vm_userptr_needs_repin(__vm))
599 
600 	down_read(&vm->userptr.notifier_lock);
601 	if (retry_required(tries, vm)) {
602 		up_read(&vm->userptr.notifier_lock);
603 		err = -EAGAIN;
604 		goto out_unlock;
605 	}
606 
607 #undef retry_required
608 
609 	/* Point of no return. */
610 	arm_preempt_fences(vm, &preempt_fences);
611 	resume_and_reinstall_preempt_fences(vm);
612 	up_read(&vm->userptr.notifier_lock);
613 
614 out_unlock:
615 	xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
616 out_unlock_outer:
617 	if (err == -EAGAIN) {
618 		trace_xe_vm_rebind_worker_retry(vm);
619 		goto retry;
620 	}
621 	up_write(&vm->lock);
622 
623 	free_preempt_fences(&preempt_fences);
624 
625 	XE_WARN_ON(err < 0);	/* TODO: Kill VM or put in error state */
626 	trace_xe_vm_rebind_worker_exit(vm);
627 }
628 
629 struct async_op_fence;
630 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
631 			struct xe_engine *e, struct xe_sync_entry *syncs,
632 			u32 num_syncs, struct async_op_fence *afence);
633 
634 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
635 				   const struct mmu_notifier_range *range,
636 				   unsigned long cur_seq)
637 {
638 	struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
639 	struct xe_vm *vm = vma->vm;
640 	struct dma_resv_iter cursor;
641 	struct dma_fence *fence;
642 	long err;
643 
644 	XE_BUG_ON(!xe_vma_is_userptr(vma));
645 	trace_xe_vma_userptr_invalidate(vma);
646 
647 	if (!mmu_notifier_range_blockable(range))
648 		return false;
649 
650 	down_write(&vm->userptr.notifier_lock);
651 	mmu_interval_set_seq(mni, cur_seq);
652 
653 	/* No need to stop gpu access if the userptr is not yet bound. */
654 	if (!vma->userptr.initial_bind) {
655 		up_write(&vm->userptr.notifier_lock);
656 		return true;
657 	}
658 
659 	/*
660 	 * Tell exec and rebind worker they need to repin and rebind this
661 	 * userptr.
662 	 */
663 	if (!xe_vm_in_fault_mode(vm) && !vma->destroyed && vma->gt_present) {
664 		spin_lock(&vm->userptr.invalidated_lock);
665 		list_move_tail(&vma->userptr.invalidate_link,
666 			       &vm->userptr.invalidated);
667 		spin_unlock(&vm->userptr.invalidated_lock);
668 	}
669 
670 	up_write(&vm->userptr.notifier_lock);
671 
672 	/*
673 	 * Preempt fences turn into schedule disables, pipeline these.
674 	 * Note that even in fault mode, we need to wait for binds and
675 	 * unbinds to complete, and those are attached as BOOKKEEP fences
676 	 * to the vm.
677 	 */
678 	dma_resv_iter_begin(&cursor, &vm->resv,
679 			    DMA_RESV_USAGE_BOOKKEEP);
680 	dma_resv_for_each_fence_unlocked(&cursor, fence)
681 		dma_fence_enable_sw_signaling(fence);
682 	dma_resv_iter_end(&cursor);
683 
684 	err = dma_resv_wait_timeout(&vm->resv,
685 				    DMA_RESV_USAGE_BOOKKEEP,
686 				    false, MAX_SCHEDULE_TIMEOUT);
687 	XE_WARN_ON(err <= 0);
688 
689 	if (xe_vm_in_fault_mode(vm)) {
690 		err = xe_vm_invalidate_vma(vma);
691 		XE_WARN_ON(err);
692 	}
693 
694 	trace_xe_vma_userptr_invalidate_complete(vma);
695 
696 	return true;
697 }
698 
699 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
700 	.invalidate = vma_userptr_invalidate,
701 };
702 
703 int xe_vm_userptr_pin(struct xe_vm *vm)
704 {
705 	struct xe_vma *vma, *next;
706 	int err = 0;
707 	LIST_HEAD(tmp_evict);
708 
709 	lockdep_assert_held_write(&vm->lock);
710 
711 	/* Collect invalidated userptrs */
712 	spin_lock(&vm->userptr.invalidated_lock);
713 	list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
714 				 userptr.invalidate_link) {
715 		list_del_init(&vma->userptr.invalidate_link);
716 		list_move_tail(&vma->userptr_link, &vm->userptr.repin_list);
717 	}
718 	spin_unlock(&vm->userptr.invalidated_lock);
719 
720 	/* Pin and move to temporary list */
721 	list_for_each_entry_safe(vma, next, &vm->userptr.repin_list, userptr_link) {
722 		err = xe_vma_userptr_pin_pages(vma);
723 		if (err < 0)
724 			goto out_err;
725 
726 		list_move_tail(&vma->userptr_link, &tmp_evict);
727 	}
728 
729 	/* Take lock and move to rebind_list for rebinding. */
730 	err = dma_resv_lock_interruptible(&vm->resv, NULL);
731 	if (err)
732 		goto out_err;
733 
734 	list_for_each_entry_safe(vma, next, &tmp_evict, userptr_link) {
735 		list_del_init(&vma->userptr_link);
736 		list_move_tail(&vma->rebind_link, &vm->rebind_list);
737 	}
738 
739 	dma_resv_unlock(&vm->resv);
740 
741 	return 0;
742 
743 out_err:
744 	list_splice_tail(&tmp_evict, &vm->userptr.repin_list);
745 
746 	return err;
747 }
748 
749 /**
750  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
751  * that need repinning.
752  * @vm: The VM.
753  *
754  * This function does an advisory check for whether the VM has userptrs that
755  * need repinning.
756  *
757  * Return: 0 if there are no indications of userptrs needing repinning,
758  * -EAGAIN if there are.
759  */
760 int xe_vm_userptr_check_repin(struct xe_vm *vm)
761 {
762 	return (list_empty_careful(&vm->userptr.repin_list) &&
763 		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
764 }
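
/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * rebind worker pairs this advisory check with an actual repin, roughly:
 *
 *	if (xe_vm_userptr_check_repin(vm)) {
 *		err = xe_vm_userptr_pin(vm);	// requires vm->lock held for write
 *		if (err)
 *			return err;
 *	}
 */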
765 
766 static struct dma_fence *
767 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
768 	       struct xe_sync_entry *syncs, u32 num_syncs);
769 
770 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
771 {
772 	struct dma_fence *fence = NULL;
773 	struct xe_vma *vma, *next;
774 
775 	lockdep_assert_held(&vm->lock);
776 	if (xe_vm_no_dma_fences(vm) && !rebind_worker)
777 		return NULL;
778 
779 	xe_vm_assert_held(vm);
780 	list_for_each_entry_safe(vma, next, &vm->rebind_list, rebind_link) {
781 		XE_WARN_ON(!vma->gt_present);
782 
783 		list_del_init(&vma->rebind_link);
784 		dma_fence_put(fence);
785 		if (rebind_worker)
786 			trace_xe_vma_rebind_worker(vma);
787 		else
788 			trace_xe_vma_rebind_exec(vma);
789 		fence = xe_vm_bind_vma(vma, NULL, NULL, 0);
790 		if (IS_ERR(fence))
791 			return fence;
792 	}
793 
794 	return fence;
795 }
796 
797 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
798 				    struct xe_bo *bo,
799 				    u64 bo_offset_or_userptr,
800 				    u64 start, u64 end,
801 				    bool read_only,
802 				    u64 gt_mask)
803 {
804 	struct xe_vma *vma;
805 	struct xe_gt *gt;
806 	u8 id;
807 
808 	XE_BUG_ON(start >= end);
809 	XE_BUG_ON(end >= vm->size);
810 
811 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
812 	if (!vma) {
813 		vma = ERR_PTR(-ENOMEM);
814 		return vma;
815 	}
816 
817 	INIT_LIST_HEAD(&vma->rebind_link);
818 	INIT_LIST_HEAD(&vma->unbind_link);
819 	INIT_LIST_HEAD(&vma->userptr_link);
820 	INIT_LIST_HEAD(&vma->userptr.invalidate_link);
821 	INIT_LIST_HEAD(&vma->notifier.rebind_link);
822 	INIT_LIST_HEAD(&vma->extobj.link);
823 
824 	vma->vm = vm;
825 	vma->start = start;
826 	vma->end = end;
827 	if (read_only)
828 		vma->pte_flags = PTE_READ_ONLY;
829 
830 	if (gt_mask) {
831 		vma->gt_mask = gt_mask;
832 	} else {
833 		for_each_gt(gt, vm->xe, id)
834 			if (!xe_gt_is_media_type(gt))
835 				vma->gt_mask |= 0x1 << id;
836 	}
837 
838 	if (vm->xe->info.platform == XE_PVC)
839 		vma->use_atomic_access_pte_bit = true;
840 
841 	if (bo) {
842 		xe_bo_assert_held(bo);
843 		vma->bo_offset = bo_offset_or_userptr;
844 		vma->bo = xe_bo_get(bo);
845 		list_add_tail(&vma->bo_link, &bo->vmas);
846 	} else /* userptr */ {
847 		u64 size = end - start + 1;
848 		int err;
849 
850 		vma->userptr.ptr = bo_offset_or_userptr;
851 
852 		err = mmu_interval_notifier_insert(&vma->userptr.notifier,
853 						   current->mm,
854 						   vma->userptr.ptr, size,
855 						   &vma_userptr_notifier_ops);
856 		if (err) {
857 			kfree(vma);
858 			vma = ERR_PTR(err);
859 			return vma;
860 		}
861 
862 		vma->userptr.notifier_seq = LONG_MAX;
863 		xe_vm_get(vm);
864 	}
865 
866 	return vma;
867 }
868 
869 static bool vm_remove_extobj(struct xe_vma *vma)
870 {
871 	if (!list_empty(&vma->extobj.link)) {
872 		vma->vm->extobj.entries--;
873 		list_del_init(&vma->extobj.link);
874 		return true;
875 	}
876 	return false;
877 }
878 
879 static void xe_vma_destroy_late(struct xe_vma *vma)
880 {
881 	struct xe_vm *vm = vma->vm;
882 	struct xe_device *xe = vm->xe;
883 	bool read_only = vma->pte_flags & PTE_READ_ONLY;
884 
885 	if (xe_vma_is_userptr(vma)) {
886 		if (vma->userptr.sg) {
887 			dma_unmap_sgtable(xe->drm.dev,
888 					  vma->userptr.sg,
889 					  read_only ? DMA_TO_DEVICE :
890 					  DMA_BIDIRECTIONAL, 0);
891 			sg_free_table(vma->userptr.sg);
892 			vma->userptr.sg = NULL;
893 		}
894 
895 		/*
896 		 * Since userptr pages are not pinned, we can't remove
897 		 * the notifier until we're sure the GPU is not accessing
898 		 * them anymore
899 		 */
900 		mmu_interval_notifier_remove(&vma->userptr.notifier);
901 		xe_vm_put(vm);
902 	} else {
903 		xe_bo_put(vma->bo);
904 	}
905 
906 	kfree(vma);
907 }
908 
909 static void vma_destroy_work_func(struct work_struct *w)
910 {
911 	struct xe_vma *vma =
912 		container_of(w, struct xe_vma, destroy_work);
913 
914 	xe_vma_destroy_late(vma);
915 }
916 
917 static struct xe_vma *
918 bo_has_vm_references_locked(struct xe_bo *bo, struct xe_vm *vm,
919 			    struct xe_vma *ignore)
920 {
921 	struct xe_vma *vma;
922 
923 	list_for_each_entry(vma, &bo->vmas, bo_link) {
924 		if (vma != ignore && vma->vm == vm && !vma->destroyed)
925 			return vma;
926 	}
927 
928 	return NULL;
929 }
930 
931 static bool bo_has_vm_references(struct xe_bo *bo, struct xe_vm *vm,
932 				 struct xe_vma *ignore)
933 {
934 	struct ww_acquire_ctx ww;
935 	bool ret;
936 
937 	xe_bo_lock(bo, &ww, 0, false);
938 	ret = !!bo_has_vm_references_locked(bo, vm, ignore);
939 	xe_bo_unlock(bo, &ww);
940 
941 	return ret;
942 }
943 
944 static void __vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
945 {
946 	list_add(&vma->extobj.link, &vm->extobj.list);
947 	vm->extobj.entries++;
948 }
949 
950 static void vm_insert_extobj(struct xe_vm *vm, struct xe_vma *vma)
951 {
952 	struct xe_bo *bo = vma->bo;
953 
954 	lockdep_assert_held_write(&vm->lock);
955 
956 	if (bo_has_vm_references(bo, vm, vma))
957 		return;
958 
959 	__vm_insert_extobj(vm, vma);
960 }
961 
962 static void vma_destroy_cb(struct dma_fence *fence,
963 			   struct dma_fence_cb *cb)
964 {
965 	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
966 
967 	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
968 	queue_work(system_unbound_wq, &vma->destroy_work);
969 }
970 
971 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
972 {
973 	struct xe_vm *vm = vma->vm;
974 
975 	lockdep_assert_held_write(&vm->lock);
976 	XE_BUG_ON(!list_empty(&vma->unbind_link));
977 
978 	if (xe_vma_is_userptr(vma)) {
979 		XE_WARN_ON(!vma->destroyed);
980 		spin_lock(&vm->userptr.invalidated_lock);
981 		list_del_init(&vma->userptr.invalidate_link);
982 		spin_unlock(&vm->userptr.invalidated_lock);
983 		list_del(&vma->userptr_link);
984 	} else {
985 		xe_bo_assert_held(vma->bo);
986 		list_del(&vma->bo_link);
987 
988 		spin_lock(&vm->notifier.list_lock);
989 		list_del(&vma->notifier.rebind_link);
990 		spin_unlock(&vm->notifier.list_lock);
991 
992 		if (!vma->bo->vm && vm_remove_extobj(vma)) {
993 			struct xe_vma *other;
994 
995 			other = bo_has_vm_references_locked(vma->bo, vm, NULL);
996 
997 			if (other)
998 				__vm_insert_extobj(vm, other);
999 		}
1000 	}
1001 
1002 	xe_vm_assert_held(vm);
1003 	if (!list_empty(&vma->rebind_link))
1004 		list_del(&vma->rebind_link);
1005 
1006 	if (fence) {
1007 		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1008 						 vma_destroy_cb);
1009 
1010 		if (ret) {
1011 			XE_WARN_ON(ret != -ENOENT);
1012 			xe_vma_destroy_late(vma);
1013 		}
1014 	} else {
1015 		xe_vma_destroy_late(vma);
1016 	}
1017 }
1018 
1019 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1020 {
1021 	struct ttm_validate_buffer tv[2];
1022 	struct ww_acquire_ctx ww;
1023 	struct xe_bo *bo = vma->bo;
1024 	LIST_HEAD(objs);
1025 	LIST_HEAD(dups);
1026 	int err;
1027 
1028 	memset(tv, 0, sizeof(tv));
1029 	tv[0].bo = xe_vm_ttm_bo(vma->vm);
1030 	list_add(&tv[0].head, &objs);
1031 
1032 	if (bo) {
1033 		tv[1].bo = &xe_bo_get(bo)->ttm;
1034 		list_add(&tv[1].head, &objs);
1035 	}
1036 	err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
1037 	XE_WARN_ON(err);
1038 
1039 	xe_vma_destroy(vma, NULL);
1040 
1041 	ttm_eu_backoff_reservation(&ww, &objs);
1042 	if (bo)
1043 		xe_bo_put(bo);
1044 }
1045 
1046 static struct xe_vma *to_xe_vma(const struct rb_node *node)
1047 {
1048 	BUILD_BUG_ON(offsetof(struct xe_vma, vm_node) != 0);
1049 	return (struct xe_vma *)node;
1050 }
1051 
1052 static int xe_vma_cmp(const struct xe_vma *a, const struct xe_vma *b)
1053 {
1054 	if (a->end < b->start) {
1055 		return -1;
1056 	} else if (b->end < a->start) {
1057 		return 1;
1058 	} else {
1059 		return 0;
1060 	}
1061 }
1062 
1063 static bool xe_vma_less_cb(struct rb_node *a, const struct rb_node *b)
1064 {
1065 	return xe_vma_cmp(to_xe_vma(a), to_xe_vma(b)) < 0;
1066 }
1067 
1068 int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node)
1069 {
1070 	struct xe_vma *cmp = to_xe_vma(node);
1071 	const struct xe_vma *own = key;
1072 
1073 	if (own->start > cmp->end)
1074 		return 1;
1075 
1076 	if (own->end < cmp->start)
1077 		return -1;
1078 
1079 	return 0;
1080 }
1081 
1082 struct xe_vma *
1083 xe_vm_find_overlapping_vma(struct xe_vm *vm, const struct xe_vma *vma)
1084 {
1085 	struct rb_node *node;
1086 
1087 	if (xe_vm_is_closed(vm))
1088 		return NULL;
1089 
1090 	XE_BUG_ON(vma->end >= vm->size);
1091 	lockdep_assert_held(&vm->lock);
1092 
1093 	node = rb_find(vma, &vm->vmas, xe_vma_cmp_vma_cb);
1094 
1095 	return node ? to_xe_vma(node) : NULL;
1096 }
1097 
1098 static void xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1099 {
1100 	XE_BUG_ON(vma->vm != vm);
1101 	lockdep_assert_held(&vm->lock);
1102 
1103 	rb_add(&vma->vm_node, &vm->vmas, xe_vma_less_cb);
1104 }
1105 
1106 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1107 {
1108 	XE_BUG_ON(vma->vm != vm);
1109 	lockdep_assert_held(&vm->lock);
1110 
1111 	rb_erase(&vma->vm_node, &vm->vmas);
1112 	if (vm->usm.last_fault_vma == vma)
1113 		vm->usm.last_fault_vma = NULL;
1114 }
1115 
1116 static void async_op_work_func(struct work_struct *w);
1117 static void vm_destroy_work_func(struct work_struct *w);
1118 
1119 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1120 {
1121 	struct xe_vm *vm;
1122 	int err, i = 0, number_gts = 0;
1123 	struct xe_gt *gt;
1124 	u8 id;
1125 
1126 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1127 	if (!vm)
1128 		return ERR_PTR(-ENOMEM);
1129 
1130 	vm->xe = xe;
1131 	kref_init(&vm->refcount);
1132 	dma_resv_init(&vm->resv);
1133 
1134 	vm->size = 1ull << xe_pt_shift(xe->info.vm_max_level + 1);
1135 
1136 	vm->vmas = RB_ROOT;
1137 	vm->flags = flags;
1138 
1139 	init_rwsem(&vm->lock);
1140 
1141 	INIT_LIST_HEAD(&vm->rebind_list);
1142 
1143 	INIT_LIST_HEAD(&vm->userptr.repin_list);
1144 	INIT_LIST_HEAD(&vm->userptr.invalidated);
1145 	init_rwsem(&vm->userptr.notifier_lock);
1146 	spin_lock_init(&vm->userptr.invalidated_lock);
1147 
1148 	INIT_LIST_HEAD(&vm->notifier.rebind_list);
1149 	spin_lock_init(&vm->notifier.list_lock);
1150 
1151 	INIT_LIST_HEAD(&vm->async_ops.pending);
1152 	INIT_WORK(&vm->async_ops.work, async_op_work_func);
1153 	spin_lock_init(&vm->async_ops.lock);
1154 
1155 	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1156 
1157 	INIT_LIST_HEAD(&vm->preempt.engines);
1158 	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
1159 
1160 	INIT_LIST_HEAD(&vm->extobj.list);
1161 
1162 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
1163 		/* We need to immediately exit from any D3 state */
1164 		xe_pm_runtime_get(xe);
1165 		xe_device_mem_access_get(xe);
1166 	}
1167 
1168 	err = dma_resv_lock_interruptible(&vm->resv, NULL);
1169 	if (err)
1170 		goto err_put;
1171 
1172 	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1173 		vm->flags |= XE_VM_FLAGS_64K;
1174 
1175 	for_each_gt(gt, xe, id) {
1176 		if (xe_gt_is_media_type(gt))
1177 			continue;
1178 
1179 		if (flags & XE_VM_FLAG_MIGRATION &&
1180 		    gt->info.id != XE_VM_FLAG_GT_ID(flags))
1181 			continue;
1182 
1183 		vm->pt_root[id] = xe_pt_create(vm, gt, xe->info.vm_max_level);
1184 		if (IS_ERR(vm->pt_root[id])) {
1185 			err = PTR_ERR(vm->pt_root[id]);
1186 			vm->pt_root[id] = NULL;
1187 			goto err_destroy_root;
1188 		}
1189 	}
1190 
1191 	if (flags & XE_VM_FLAG_SCRATCH_PAGE) {
1192 		for_each_gt(gt, xe, id) {
1193 			if (!vm->pt_root[id])
1194 				continue;
1195 
1196 			err = xe_pt_create_scratch(xe, gt, vm);
1197 			if (err)
1198 				goto err_scratch_pt;
1199 		}
1200 	}
1201 
1202 	if (flags & DRM_XE_VM_CREATE_COMPUTE_MODE) {
1203 		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1204 		vm->flags |= XE_VM_FLAG_COMPUTE_MODE;
1205 	}
1206 
1207 	if (flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS) {
1208 		vm->async_ops.fence.context = dma_fence_context_alloc(1);
1209 		vm->flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1210 	}
1211 
1212 	/* Fill pt_root after allocating scratch tables */
1213 	for_each_gt(gt, xe, id) {
1214 		if (!vm->pt_root[id])
1215 			continue;
1216 
1217 		xe_pt_populate_empty(gt, vm, vm->pt_root[id]);
1218 	}
1219 	dma_resv_unlock(&vm->resv);
1220 
1221 	/* Kernel migration VM shouldn't have a circular loop.. */
1222 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
1223 		for_each_gt(gt, xe, id) {
1224 			struct xe_vm *migrate_vm;
1225 			struct xe_engine *eng;
1226 
1227 			if (!vm->pt_root[id])
1228 				continue;
1229 
1230 			migrate_vm = xe_migrate_get_vm(gt->migrate);
1231 			eng = xe_engine_create_class(xe, gt, migrate_vm,
1232 						     XE_ENGINE_CLASS_COPY,
1233 						     ENGINE_FLAG_VM);
1234 			xe_vm_put(migrate_vm);
1235 			if (IS_ERR(eng)) {
1236 				xe_vm_close_and_put(vm);
1237 				return ERR_CAST(eng);
1238 			}
1239 			vm->eng[id] = eng;
1240 			number_gts++;
1241 		}
1242 	}
1243 
1244 	if (number_gts > 1)
1245 		vm->composite_fence_ctx = dma_fence_context_alloc(1);
1246 
1247 	mutex_lock(&xe->usm.lock);
1248 	if (flags & XE_VM_FLAG_FAULT_MODE)
1249 		xe->usm.num_vm_in_fault_mode++;
1250 	else if (!(flags & XE_VM_FLAG_MIGRATION))
1251 		xe->usm.num_vm_in_non_fault_mode++;
1252 	mutex_unlock(&xe->usm.lock);
1253 
1254 	trace_xe_vm_create(vm);
1255 
1256 	return vm;
1257 
1258 err_scratch_pt:
1259 	for_each_gt(gt, xe, id) {
1260 		if (!vm->pt_root[id])
1261 			continue;
1262 
1263 		i = vm->pt_root[id]->level;
1264 		while (i)
1265 			if (vm->scratch_pt[id][--i])
1266 				xe_pt_destroy(vm->scratch_pt[id][i],
1267 					      vm->flags, NULL);
1268 		xe_bo_unpin(vm->scratch_bo[id]);
1269 		xe_bo_put(vm->scratch_bo[id]);
1270 	}
1271 err_destroy_root:
1272 	for_each_gt(gt, xe, id) {
1273 		if (vm->pt_root[id])
1274 			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1275 	}
1276 	dma_resv_unlock(&vm->resv);
1277 err_put:
1278 	dma_resv_fini(&vm->resv);
1279 	kfree(vm);
1280 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
1281 		xe_device_mem_access_put(xe);
1282 		xe_pm_runtime_put(xe);
1283 	}
1284 	return ERR_PTR(err);
1285 }
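
/*
 * Illustrative sketch (editor's example, not part of the driver): in-kernel
 * creation and teardown of a VM; the flag shown is just one possible choice:
 *
 *	struct xe_vm *vm = xe_vm_create(xe, XE_VM_FLAG_SCRATCH_PAGE);
 *
 *	if (IS_ERR(vm))
 *		return PTR_ERR(vm);
 *	...
 *	xe_vm_close_and_put(vm);	// tears down bindings and drops the reference
 */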
1286 
1287 static void flush_async_ops(struct xe_vm *vm)
1288 {
1289 	queue_work(system_unbound_wq, &vm->async_ops.work);
1290 	flush_work(&vm->async_ops.work);
1291 }
1292 
1293 static void vm_error_capture(struct xe_vm *vm, int err,
1294 			     u32 op, u64 addr, u64 size)
1295 {
1296 	struct drm_xe_vm_bind_op_error_capture capture;
1297 	u64 __user *address =
1298 		u64_to_user_ptr(vm->async_ops.error_capture.addr);
1299 	bool in_kthread = !current->mm;
1300 
1301 	capture.error = err;
1302 	capture.op = op;
1303 	capture.addr = addr;
1304 	capture.size = size;
1305 
1306 	if (in_kthread) {
1307 		if (!mmget_not_zero(vm->async_ops.error_capture.mm))
1308 			goto mm_closed;
1309 		kthread_use_mm(vm->async_ops.error_capture.mm);
1310 	}
1311 
1312 	if (copy_to_user(address, &capture, sizeof(capture)))
1313 		XE_WARN_ON("Copy to user failed");
1314 
1315 	if (in_kthread) {
1316 		kthread_unuse_mm(vm->async_ops.error_capture.mm);
1317 		mmput(vm->async_ops.error_capture.mm);
1318 	}
1319 
1320 mm_closed:
1321 	wake_up_all(&vm->async_ops.error_capture.wq);
1322 }
1323 
1324 void xe_vm_close_and_put(struct xe_vm *vm)
1325 {
1326 	struct rb_root contested = RB_ROOT;
1327 	struct ww_acquire_ctx ww;
1328 	struct xe_device *xe = vm->xe;
1329 	struct xe_gt *gt;
1330 	u8 id;
1331 
1332 	XE_BUG_ON(vm->preempt.num_engines);
1333 
1334 	vm->size = 0;
1335 	smp_mb();
1336 	flush_async_ops(vm);
1337 	if (xe_vm_in_compute_mode(vm))
1338 		flush_work(&vm->preempt.rebind_work);
1339 
1340 	for_each_gt(gt, xe, id) {
1341 		if (vm->eng[id]) {
1342 			xe_engine_kill(vm->eng[id]);
1343 			xe_engine_put(vm->eng[id]);
1344 			vm->eng[id] = NULL;
1345 		}
1346 	}
1347 
1348 	down_write(&vm->lock);
1349 	xe_vm_lock(vm, &ww, 0, false);
1350 	while (vm->vmas.rb_node) {
1351 		struct xe_vma *vma = to_xe_vma(vm->vmas.rb_node);
1352 
1353 		if (xe_vma_is_userptr(vma)) {
1354 			down_read(&vm->userptr.notifier_lock);
1355 			vma->destroyed = true;
1356 			up_read(&vm->userptr.notifier_lock);
1357 		}
1358 
1359 		rb_erase(&vma->vm_node, &vm->vmas);
1360 
1361 		/* easy case, remove from VMA? */
1362 		if (xe_vma_is_userptr(vma) || vma->bo->vm) {
1363 			xe_vma_destroy(vma, NULL);
1364 			continue;
1365 		}
1366 
1367 		rb_add(&vma->vm_node, &contested, xe_vma_less_cb);
1368 	}
1369 
1370 	/*
1371 	 * All vm operations will add shared fences to resv.
1372 	 * The only exception is eviction for a shared object,
1373 	 * but even so, the unbind when evicted would still
1374 	 * install a fence to resv. Hence it's safe to
1375 	 * destroy the pagetables immediately.
1376 	 */
1377 	for_each_gt(gt, xe, id) {
1378 		if (vm->scratch_bo[id]) {
1379 			u32 i;
1380 
1381 			xe_bo_unpin(vm->scratch_bo[id]);
1382 			xe_bo_put(vm->scratch_bo[id]);
1383 			for (i = 0; i < vm->pt_root[id]->level; i++)
1384 				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags,
1385 					      NULL);
1386 		}
1387 	}
1388 	xe_vm_unlock(vm, &ww);
1389 
1390 	if (contested.rb_node) {
1391 
1392 		/*
1393 		 * VM is now dead; we cannot re-add nodes to vm->vmas.
1394 		 * Since we hold a refcount to the bo, we can remove and free
1395 		 * the members safely without locking.
1396 		 */
1397 		while (contested.rb_node) {
1398 			struct xe_vma *vma = to_xe_vma(contested.rb_node);
1399 
1400 			rb_erase(&vma->vm_node, &contested);
1401 			xe_vma_destroy_unlocked(vma);
1402 		}
1403 	}
1404 
1405 	if (vm->async_ops.error_capture.addr)
1406 		wake_up_all(&vm->async_ops.error_capture.wq);
1407 
1408 	XE_WARN_ON(!list_empty(&vm->extobj.list));
1409 	up_write(&vm->lock);
1410 
1411 	xe_vm_put(vm);
1412 }
1413 
1414 static void vm_destroy_work_func(struct work_struct *w)
1415 {
1416 	struct xe_vm *vm =
1417 		container_of(w, struct xe_vm, destroy_work);
1418 	struct ww_acquire_ctx ww;
1419 	struct xe_device *xe = vm->xe;
1420 	struct xe_gt *gt;
1421 	u8 id;
1422 	void *lookup;
1423 
1424 	/* xe_vm_close_and_put was not called? */
1425 	XE_WARN_ON(vm->size);
1426 
1427 	if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
1428 		xe_device_mem_access_put(xe);
1429 		xe_pm_runtime_put(xe);
1430 
1431 		mutex_lock(&xe->usm.lock);
1432 		lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1433 		XE_WARN_ON(lookup != vm);
1434 		mutex_unlock(&xe->usm.lock);
1435 	}
1436 
1437 	/*
1438 	 * XXX: We delay destroying the PT root until the VM is freed, as the PT root
1439 	 * is needed for xe_vm_lock to work. If we remove that dependency this
1440 	 * can be moved to xe_vm_close_and_put.
1441 	 */
1442 	xe_vm_lock(vm, &ww, 0, false);
1443 	for_each_gt(gt, xe, id) {
1444 		if (vm->pt_root[id]) {
1445 			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1446 			vm->pt_root[id] = NULL;
1447 		}
1448 	}
1449 	xe_vm_unlock(vm, &ww);
1450 
1451 	mutex_lock(&xe->usm.lock);
1452 	if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1453 		xe->usm.num_vm_in_fault_mode--;
1454 	else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1455 		xe->usm.num_vm_in_non_fault_mode--;
1456 	mutex_unlock(&xe->usm.lock);
1457 
1458 	trace_xe_vm_free(vm);
1459 	dma_fence_put(vm->rebind_fence);
1460 	dma_resv_fini(&vm->resv);
1461 	kfree(vm);
1462 
1463 }
1464 
1465 void xe_vm_free(struct kref *ref)
1466 {
1467 	struct xe_vm *vm = container_of(ref, struct xe_vm, refcount);
1468 
1469 	/* To destroy the VM we need to be able to sleep */
1470 	queue_work(system_unbound_wq, &vm->destroy_work);
1471 }
1472 
1473 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1474 {
1475 	struct xe_vm *vm;
1476 
1477 	mutex_lock(&xef->vm.lock);
1478 	vm = xa_load(&xef->vm.xa, id);
1479 	mutex_unlock(&xef->vm.lock);
1480 
1481 	if (vm)
1482 		xe_vm_get(vm);
1483 
1484 	return vm;
1485 }
1486 
1487 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_gt *full_gt)
1488 {
1489 	XE_BUG_ON(xe_gt_is_media_type(full_gt));
1490 
1491 	return gen8_pde_encode(vm->pt_root[full_gt->info.id]->bo, 0,
1492 			       XE_CACHE_WB);
1493 }
1494 
1495 static struct dma_fence *
1496 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
1497 		 struct xe_sync_entry *syncs, u32 num_syncs)
1498 {
1499 	struct xe_gt *gt;
1500 	struct dma_fence *fence = NULL;
1501 	struct dma_fence **fences = NULL;
1502 	struct dma_fence_array *cf = NULL;
1503 	struct xe_vm *vm = vma->vm;
1504 	int cur_fence = 0, i;
1505 	int number_gts = hweight_long(vma->gt_present);
1506 	int err;
1507 	u8 id;
1508 
1509 	trace_xe_vma_unbind(vma);
1510 
1511 	if (number_gts > 1) {
1512 		fences = kmalloc_array(number_gts, sizeof(*fences),
1513 				       GFP_KERNEL);
1514 		if (!fences)
1515 			return ERR_PTR(-ENOMEM);
1516 	}
1517 
1518 	for_each_gt(gt, vm->xe, id) {
1519 		if (!(vma->gt_present & BIT(id)))
1520 			goto next;
1521 
1522 		XE_BUG_ON(xe_gt_is_media_type(gt));
1523 
1524 		fence = __xe_pt_unbind_vma(gt, vma, e, syncs, num_syncs);
1525 		if (IS_ERR(fence)) {
1526 			err = PTR_ERR(fence);
1527 			goto err_fences;
1528 		}
1529 
1530 		if (fences)
1531 			fences[cur_fence++] = fence;
1532 
1533 next:
1534 		if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1535 			e = list_next_entry(e, multi_gt_list);
1536 	}
1537 
1538 	if (fences) {
1539 		cf = dma_fence_array_create(number_gts, fences,
1540 					    vm->composite_fence_ctx,
1541 					    vm->composite_fence_seqno++,
1542 					    false);
1543 		if (!cf) {
1544 			--vm->composite_fence_seqno;
1545 			err = -ENOMEM;
1546 			goto err_fences;
1547 		}
1548 	}
1549 
1550 	for (i = 0; i < num_syncs; i++)
1551 		xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1552 
1553 	return cf ? &cf->base : !fence ? dma_fence_get_stub() : fence;
1554 
1555 err_fences:
1556 	if (fences) {
1557 		while (cur_fence) {
1558 			/* FIXME: Rewind the previous binds? */
1559 			dma_fence_put(fences[--cur_fence]);
1560 		}
1561 		kfree(fences);
1562 	}
1563 
1564 	return ERR_PTR(err);
1565 }
1566 
1567 static struct dma_fence *
1568 xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
1569 	       struct xe_sync_entry *syncs, u32 num_syncs)
1570 {
1571 	struct xe_gt *gt;
1572 	struct dma_fence *fence;
1573 	struct dma_fence **fences = NULL;
1574 	struct dma_fence_array *cf = NULL;
1575 	struct xe_vm *vm = vma->vm;
1576 	int cur_fence = 0, i;
1577 	int number_gts = hweight_long(vma->gt_mask);
1578 	int err;
1579 	u8 id;
1580 
1581 	trace_xe_vma_bind(vma);
1582 
1583 	if (number_gts > 1) {
1584 		fences = kmalloc_array(number_gts, sizeof(*fences),
1585 				       GFP_KERNEL);
1586 		if (!fences)
1587 			return ERR_PTR(-ENOMEM);
1588 	}
1589 
1590 	for_each_gt(gt, vm->xe, id) {
1591 		if (!(vma->gt_mask & BIT(id)))
1592 			goto next;
1593 
1594 		XE_BUG_ON(xe_gt_is_media_type(gt));
1595 		fence = __xe_pt_bind_vma(gt, vma, e, syncs, num_syncs,
1596 					 vma->gt_present & BIT(id));
1597 		if (IS_ERR(fence)) {
1598 			err = PTR_ERR(fence);
1599 			goto err_fences;
1600 		}
1601 
1602 		if (fences)
1603 			fences[cur_fence++] = fence;
1604 
1605 next:
1606 		if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
1607 			e = list_next_entry(e, multi_gt_list);
1608 	}
1609 
1610 	if (fences) {
1611 		cf = dma_fence_array_create(number_gts, fences,
1612 					    vm->composite_fence_ctx,
1613 					    vm->composite_fence_seqno++,
1614 					    false);
1615 		if (!cf) {
1616 			--vm->composite_fence_seqno;
1617 			err = -ENOMEM;
1618 			goto err_fences;
1619 		}
1620 	}
1621 
1622 	for (i = 0; i < num_syncs; i++)
1623 		xe_sync_entry_signal(&syncs[i], NULL, cf ? &cf->base : fence);
1624 
1625 	return cf ? &cf->base : fence;
1626 
1627 err_fences:
1628 	if (fences) {
1629 		while (cur_fence) {
1630 			/* FIXME: Rewind the previous binds? */
1631 			dma_fence_put(fences[--cur_fence]);
1632 		}
1633 		kfree(fences);
1634 	}
1635 
1636 	return ERR_PTR(err);
1637 }
1638 
1639 struct async_op_fence {
1640 	struct dma_fence fence;
1641 	struct dma_fence_cb cb;
1642 	struct xe_vm *vm;
1643 	wait_queue_head_t wq;
1644 	bool started;
1645 };
1646 
1647 static const char *async_op_fence_get_driver_name(struct dma_fence *dma_fence)
1648 {
1649 	return "xe";
1650 }
1651 
1652 static const char *
1653 async_op_fence_get_timeline_name(struct dma_fence *dma_fence)
1654 {
1655 	return "async_op_fence";
1656 }
1657 
1658 static const struct dma_fence_ops async_op_fence_ops = {
1659 	.get_driver_name = async_op_fence_get_driver_name,
1660 	.get_timeline_name = async_op_fence_get_timeline_name,
1661 };
1662 
1663 static void async_op_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1664 {
1665 	struct async_op_fence *afence =
1666 		container_of(cb, struct async_op_fence, cb);
1667 
1668 	dma_fence_signal(&afence->fence);
1669 	xe_vm_put(afence->vm);
1670 	dma_fence_put(&afence->fence);
1671 }
1672 
1673 static void add_async_op_fence_cb(struct xe_vm *vm,
1674 				  struct dma_fence *fence,
1675 				  struct async_op_fence *afence)
1676 {
1677 	int ret;
1678 
1679 	if (!xe_vm_no_dma_fences(vm)) {
1680 		afence->started = true;
1681 		smp_wmb();
1682 		wake_up_all(&afence->wq);
1683 	}
1684 
1685 	afence->vm = xe_vm_get(vm);
1686 	dma_fence_get(&afence->fence);
1687 	ret = dma_fence_add_callback(fence, &afence->cb, async_op_fence_cb);
1688 	if (ret == -ENOENT)
1689 		dma_fence_signal(&afence->fence);
1690 	if (ret) {
1691 		xe_vm_put(vm);
1692 		dma_fence_put(&afence->fence);
1693 	}
1694 	XE_WARN_ON(ret && ret != -ENOENT);
1695 }
1696 
1697 int xe_vm_async_fence_wait_start(struct dma_fence *fence)
1698 {
1699 	if (fence->ops == &async_op_fence_ops) {
1700 		struct async_op_fence *afence =
1701 			container_of(fence, struct async_op_fence, fence);
1702 
1703 		XE_BUG_ON(xe_vm_no_dma_fences(afence->vm));
1704 
1705 		smp_rmb();
1706 		return wait_event_interruptible(afence->wq, afence->started);
1707 	}
1708 
1709 	return 0;
1710 }
1711 
1712 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
1713 			struct xe_engine *e, struct xe_sync_entry *syncs,
1714 			u32 num_syncs, struct async_op_fence *afence)
1715 {
1716 	struct dma_fence *fence;
1717 
1718 	xe_vm_assert_held(vm);
1719 
1720 	fence = xe_vm_bind_vma(vma, e, syncs, num_syncs);
1721 	if (IS_ERR(fence))
1722 		return PTR_ERR(fence);
1723 	if (afence)
1724 		add_async_op_fence_cb(vm, fence, afence);
1725 
1726 	dma_fence_put(fence);
1727 	return 0;
1728 }
1729 
1730 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
1731 		      struct xe_bo *bo, struct xe_sync_entry *syncs,
1732 		      u32 num_syncs, struct async_op_fence *afence)
1733 {
1734 	int err;
1735 
1736 	xe_vm_assert_held(vm);
1737 	xe_bo_assert_held(bo);
1738 
1739 	if (bo) {
1740 		err = xe_bo_validate(bo, vm, true);
1741 		if (err)
1742 			return err;
1743 	}
1744 
1745 	return __xe_vm_bind(vm, vma, e, syncs, num_syncs, afence);
1746 }
1747 
1748 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1749 			struct xe_engine *e, struct xe_sync_entry *syncs,
1750 			u32 num_syncs, struct async_op_fence *afence)
1751 {
1752 	struct dma_fence *fence;
1753 
1754 	xe_vm_assert_held(vm);
1755 	xe_bo_assert_held(vma->bo);
1756 
1757 	fence = xe_vm_unbind_vma(vma, e, syncs, num_syncs);
1758 	if (IS_ERR(fence))
1759 		return PTR_ERR(fence);
1760 	if (afence)
1761 		add_async_op_fence_cb(vm, fence, afence);
1762 
1763 	xe_vma_destroy(vma, fence);
1764 	dma_fence_put(fence);
1765 
1766 	return 0;
1767 }
1768 
1769 static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
1770 					u64 value)
1771 {
1772 	if (XE_IOCTL_ERR(xe, !value))
1773 		return -EINVAL;
1774 
1775 	if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
1776 		return -ENOTSUPP;
1777 
1778 	if (XE_IOCTL_ERR(xe, vm->async_ops.error_capture.addr))
1779 		return -ENOTSUPP;
1780 
1781 	vm->async_ops.error_capture.mm = current->mm;
1782 	vm->async_ops.error_capture.addr = value;
1783 	init_waitqueue_head(&vm->async_ops.error_capture.wq);
1784 
1785 	return 0;
1786 }
1787 
1788 typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
1789 				     u64 value);
1790 
1791 static const xe_vm_set_property_fn vm_set_property_funcs[] = {
1792 	[XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
1793 		vm_set_error_capture_address,
1794 };
1795 
1796 static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
1797 				    u64 extension)
1798 {
1799 	u64 __user *address = u64_to_user_ptr(extension);
1800 	struct drm_xe_ext_vm_set_property ext;
1801 	int err;
1802 
1803 	err = __copy_from_user(&ext, address, sizeof(ext));
1804 	if (XE_IOCTL_ERR(xe, err))
1805 		return -EFAULT;
1806 
1807 	if (XE_IOCTL_ERR(xe, ext.property >=
1808 			 ARRAY_SIZE(vm_set_property_funcs)))
1809 		return -EINVAL;
1810 
1811 	return vm_set_property_funcs[ext.property](xe, vm, ext.value);
1812 }
1813 
1814 typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
1815 				       u64 extension);
1816 
1817 static const xe_vm_set_property_fn vm_user_extension_funcs[] = {
1818 	[XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
1819 };
1820 
1821 #define MAX_USER_EXTENSIONS	16
1822 static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
1823 			      u64 extensions, int ext_number)
1824 {
1825 	u64 __user *address = u64_to_user_ptr(extensions);
1826 	struct xe_user_extension ext;
1827 	int err;
1828 
1829 	if (XE_IOCTL_ERR(xe, ext_number >= MAX_USER_EXTENSIONS))
1830 		return -E2BIG;
1831 
1832 	err = __copy_from_user(&ext, address, sizeof(ext));
1833 	if (XE_IOCTL_ERR(xe, err))
1834 		return -EFAULT;
1835 
1836 	if (XE_IOCTL_ERR(xe, ext.name >=
1837 			 ARRAY_SIZE(vm_user_extension_funcs)))
1838 		return -EINVAL;
1839 
1840 	err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
1841 	if (XE_IOCTL_ERR(xe, err))
1842 		return err;
1843 
1844 	if (ext.next_extension)
1845 		return vm_user_extensions(xe, vm, ext.next_extension,
1846 					  ++ext_number);
1847 
1848 	return 0;
1849 }
1850 
1851 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
1852 				    DRM_XE_VM_CREATE_COMPUTE_MODE | \
1853 				    DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
1854 				    DRM_XE_VM_CREATE_FAULT_MODE)
1855 
1856 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1857 		       struct drm_file *file)
1858 {
1859 	struct xe_device *xe = to_xe_device(dev);
1860 	struct xe_file *xef = to_xe_file(file);
1861 	struct drm_xe_vm_create *args = data;
1862 	struct xe_vm *vm;
1863 	u32 id, asid;
1864 	int err;
1865 	u32 flags = 0;
1866 
1867 	if (XE_IOCTL_ERR(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1868 		return -EINVAL;
1869 
1870 	if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE &&
1871 			 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1872 		return -EINVAL;
1873 
1874 	if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE &&
1875 			 args->flags & DRM_XE_VM_CREATE_FAULT_MODE))
1876 		return -EINVAL;
1877 
1878 	if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1879 			 xe_device_in_non_fault_mode(xe)))
1880 		return -EINVAL;
1881 
1882 	if (XE_IOCTL_ERR(xe, !(args->flags & DRM_XE_VM_CREATE_FAULT_MODE) &&
1883 			 xe_device_in_fault_mode(xe)))
1884 		return -EINVAL;
1885 
1886 	if (XE_IOCTL_ERR(xe, args->flags & DRM_XE_VM_CREATE_FAULT_MODE &&
1887 			 !xe->info.supports_usm))
1888 		return -EINVAL;
1889 
1890 	if (args->flags & DRM_XE_VM_CREATE_SCRATCH_PAGE)
1891 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
1892 	if (args->flags & DRM_XE_VM_CREATE_COMPUTE_MODE)
1893 		flags |= XE_VM_FLAG_COMPUTE_MODE;
1894 	if (args->flags & DRM_XE_VM_CREATE_ASYNC_BIND_OPS)
1895 		flags |= XE_VM_FLAG_ASYNC_BIND_OPS;
1896 	if (args->flags & DRM_XE_VM_CREATE_FAULT_MODE)
1897 		flags |= XE_VM_FLAG_FAULT_MODE;
1898 
1899 	vm = xe_vm_create(xe, flags);
1900 	if (IS_ERR(vm))
1901 		return PTR_ERR(vm);
1902 
1903 	if (args->extensions) {
1904 		err = vm_user_extensions(xe, vm, args->extensions, 0);
1905 		if (XE_IOCTL_ERR(xe, err)) {
1906 			xe_vm_close_and_put(vm);
1907 			return err;
1908 		}
1909 	}
1910 
1911 	mutex_lock(&xef->vm.lock);
1912 	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1913 	mutex_unlock(&xef->vm.lock);
1914 	if (err) {
1915 		xe_vm_close_and_put(vm);
1916 		return err;
1917 	}
1918 
1919 	mutex_lock(&xe->usm.lock);
1920 	err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1921 			      XA_LIMIT(0, XE_MAX_ASID - 1),
1922 			      &xe->usm.next_asid, GFP_KERNEL);
1923 	mutex_unlock(&xe->usm.lock);
1924 	if (err) {
1925 		xe_vm_close_and_put(vm);
1926 		return err;
1927 	}
1928 	vm->usm.asid = asid;
1929 
1930 	args->vm_id = id;
1931 
1932 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1933 	/* Warning: Security issue - never enable by default */
1934 	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, GEN8_PAGE_SIZE);
1935 #endif
1936 
1937 	return 0;
1938 }
1939 
1940 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1941 			struct drm_file *file)
1942 {
1943 	struct xe_device *xe = to_xe_device(dev);
1944 	struct xe_file *xef = to_xe_file(file);
1945 	struct drm_xe_vm_destroy *args = data;
1946 	struct xe_vm *vm;
1947 
1948 	if (XE_IOCTL_ERR(xe, args->pad))
1949 		return -EINVAL;
1950 
1951 	vm = xe_vm_lookup(xef, args->vm_id);
1952 	if (XE_IOCTL_ERR(xe, !vm))
1953 		return -ENOENT;
1954 	xe_vm_put(vm);
1955 
1956 	/* FIXME: Extend this check to non-compute mode VMs */
1957 	if (XE_IOCTL_ERR(xe, vm->preempt.num_engines))
1958 		return -EBUSY;
1959 
1960 	mutex_lock(&xef->vm.lock);
1961 	xa_erase(&xef->vm.xa, args->vm_id);
1962 	mutex_unlock(&xef->vm.lock);
1963 
1964 	xe_vm_close_and_put(vm);
1965 
1966 	return 0;
1967 }
1968 
1969 static const u32 region_to_mem_type[] = {
1970 	XE_PL_TT,
1971 	XE_PL_VRAM0,
1972 	XE_PL_VRAM1,
1973 };
1974 
1975 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
1976 			  struct xe_engine *e, u32 region,
1977 			  struct xe_sync_entry *syncs, u32 num_syncs,
1978 			  struct async_op_fence *afence)
1979 {
1980 	int err;
1981 
1982 	XE_BUG_ON(region >= ARRAY_SIZE(region_to_mem_type));
1983 
1984 	if (!xe_vma_is_userptr(vma)) {
1985 		err = xe_bo_migrate(vma->bo, region_to_mem_type[region]);
1986 		if (err)
1987 			return err;
1988 	}
1989 
1990 	if (vma->gt_mask != (vma->gt_present & ~vma->usm.gt_invalidated)) {
1991 		return xe_vm_bind(vm, vma, e, vma->bo, syncs, num_syncs,
1992 				  afence);
1993 	} else {
1994 		int i;
1995 
1996 		/* Nothing to do, signal fences now */
1997 		for (i = 0; i < num_syncs; i++)
1998 			xe_sync_entry_signal(&syncs[i], NULL,
1999 					     dma_fence_get_stub());
2000 		if (afence)
2001 			dma_fence_signal(&afence->fence);
2002 		return 0;
2003 	}
2004 }
2005 
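/*
 * The low 16 bits of a bind op encode the operation (XE_VM_BIND_OP_*); the
 * upper bits carry XE_VM_BIND_FLAG_* modifiers, see SUPPORTED_FLAGS below.
 */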
2006 #define VM_BIND_OP(op)	(op & 0xffff)
2007 
2008 static int __vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2009 			   struct xe_engine *e, struct xe_bo *bo, u32 op,
2010 			   u32 region, struct xe_sync_entry *syncs,
2011 			   u32 num_syncs, struct async_op_fence *afence)
2012 {
2013 	switch (VM_BIND_OP(op)) {
2014 	case XE_VM_BIND_OP_MAP:
2015 		return xe_vm_bind(vm, vma, e, bo, syncs, num_syncs, afence);
2016 	case XE_VM_BIND_OP_UNMAP:
2017 	case XE_VM_BIND_OP_UNMAP_ALL:
2018 		return xe_vm_unbind(vm, vma, e, syncs, num_syncs, afence);
2019 	case XE_VM_BIND_OP_MAP_USERPTR:
2020 		return xe_vm_bind(vm, vma, e, NULL, syncs, num_syncs, afence);
2021 	case XE_VM_BIND_OP_PREFETCH:
2022 		return xe_vm_prefetch(vm, vma, e, region, syncs, num_syncs,
2023 				      afence);
2025 	default:
2026 		XE_BUG_ON("NOT POSSIBLE");
2027 		return -EINVAL;
2028 	}
2029 }
2030 
2031 struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
2032 {
2033 	int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
2034 		XE_VM_FLAG_GT_ID(vm->flags) : 0;
2035 
2036 	/* Safe to use index 0 as all BOs in the VM share a single dma-resv lock */
2037 	return &vm->pt_root[idx]->bo->ttm;
2038 }
2039 
2040 static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
2041 {
2042 	tv->num_shared = 1;
2043 	tv->bo = xe_vm_ttm_bo(vm);
2044 }
2045 
2046 static bool is_map_op(u32 op)
2047 {
2048 	return VM_BIND_OP(op) == XE_VM_BIND_OP_MAP ||
2049 		VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR;
2050 }
2051 
2052 static bool is_unmap_op(u32 op)
2053 {
2054 	return VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP ||
2055 		VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL;
2056 }
2057 
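/*
 * Execute a single bind op: reserve the VM's dma-resv (and the VMA's BO, if
 * any) via ttm_eu, dispatch to __vm_bind_ioctl() and back off the
 * reservation. A userptr VMA that raced with an invalidation (-EAGAIN) is
 * repinned and the whole sequence retried.
 */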
2058 static int vm_bind_ioctl(struct xe_vm *vm, struct xe_vma *vma,
2059 			 struct xe_engine *e, struct xe_bo *bo,
2060 			 struct drm_xe_vm_bind_op *bind_op,
2061 			 struct xe_sync_entry *syncs, u32 num_syncs,
2062 			 struct async_op_fence *afence)
2063 {
2064 	LIST_HEAD(objs);
2065 	LIST_HEAD(dups);
2066 	struct ttm_validate_buffer tv_bo, tv_vm;
2067 	struct ww_acquire_ctx ww;
2068 	struct xe_bo *vbo;
2069 	int err, i;
2070 
2071 	lockdep_assert_held(&vm->lock);
2072 	XE_BUG_ON(!list_empty(&vma->unbind_link));
2073 
2074 	/* Binds deferred to faults, signal fences now */
2075 	if (xe_vm_in_fault_mode(vm) && is_map_op(bind_op->op) &&
2076 	    !(bind_op->op & XE_VM_BIND_FLAG_IMMEDIATE)) {
2077 		for (i = 0; i < num_syncs; i++)
2078 			xe_sync_entry_signal(&syncs[i], NULL,
2079 					     dma_fence_get_stub());
2080 		if (afence)
2081 			dma_fence_signal(&afence->fence);
2082 		return 0;
2083 	}
2084 
2085 	xe_vm_tv_populate(vm, &tv_vm);
2086 	list_add_tail(&tv_vm.head, &objs);
2087 	vbo = vma->bo;
2088 	if (vbo) {
2089 		/*
2090 		 * An unbind can drop the last reference to the BO and
2091 		 * the BO is needed for ttm_eu_backoff_reservation so
2092 		 * take a reference here.
2093 		 */
2094 		xe_bo_get(vbo);
2095 
2096 		tv_bo.bo = &vbo->ttm;
2097 		tv_bo.num_shared = 1;
2098 		list_add(&tv_bo.head, &objs);
2099 	}
2100 
2101 again:
2102 	err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
2103 	if (!err) {
2104 		err = __vm_bind_ioctl(vm, vma, e, bo,
2105 				      bind_op->op, bind_op->region, syncs,
2106 				      num_syncs, afence);
2107 		ttm_eu_backoff_reservation(&ww, &objs);
2108 		if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
2109 			lockdep_assert_held_write(&vm->lock);
2110 			err = xe_vma_userptr_pin_pages(vma);
2111 			if (!err)
2112 				goto again;
2113 		}
2114 	}
2115 	xe_bo_put(vbo);
2116 
2117 	return err;
2118 }
2119 
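/*
 * A deferred bind queued on vm->async_ops.pending and consumed by
 * async_op_work_func(). The embedded references (VM, engine, BO, syncs and
 * the optional fence) are dropped by async_op_cleanup() once the op has run.
 */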
2120 struct async_op {
2121 	struct xe_vma *vma;
2122 	struct xe_engine *engine;
2123 	struct xe_bo *bo;
2124 	struct drm_xe_vm_bind_op bind_op;
2125 	struct xe_sync_entry *syncs;
2126 	u32 num_syncs;
2127 	struct list_head link;
2128 	struct async_op_fence *fence;
2129 };
2130 
2131 static void async_op_cleanup(struct xe_vm *vm, struct async_op *op)
2132 {
2133 	while (op->num_syncs--)
2134 		xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
2135 	kfree(op->syncs);
2136 	xe_bo_put(op->bo);
2137 	if (op->engine)
2138 		xe_engine_put(op->engine);
2139 	xe_vm_put(vm);
2140 	if (op->fence)
2141 		dma_fence_put(&op->fence->fence);
2142 	kfree(op);
2143 }
2144 
2145 static struct async_op *next_async_op(struct xe_vm *vm)
2146 {
2147 	return list_first_entry_or_null(&vm->async_ops.pending,
2148 					struct async_op, link);
2149 }
2150 
2151 static void vm_set_async_error(struct xe_vm *vm, int err)
2152 {
2153 	lockdep_assert_held(&vm->lock);
2154 	vm->async_ops.error = err;
2155 }
2156 
2157 static void async_op_work_func(struct work_struct *w)
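/*
 * Worker that drains vm->async_ops.pending. Each op is executed under the
 * write side of vm->lock; a munmap style unbind + rebind chain is executed
 * back to back under a single lock hold so the dma-resv slots are programmed
 * atomically with respect to execs and the rebind worker. On failure the op
 * is pushed back to the head of the list, vm->async_ops.error is set and the
 * worker stops until userspace issues XE_VM_BIND_OP_RESTART.
 */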
2158 {
2159 	struct xe_vm *vm = container_of(w, struct xe_vm, async_ops.work);
2160 
2161 	for (;;) {
2162 		struct async_op *op;
2163 		int err;
2164 
2165 		if (vm->async_ops.error && !xe_vm_is_closed(vm))
2166 			break;
2167 
2168 		spin_lock_irq(&vm->async_ops.lock);
2169 		op = next_async_op(vm);
2170 		if (op)
2171 			list_del_init(&op->link);
2172 		spin_unlock_irq(&vm->async_ops.lock);
2173 
2174 		if (!op)
2175 			break;
2176 
2177 		if (!xe_vm_is_closed(vm)) {
2178 			bool first, last;
2179 
2180 			down_write(&vm->lock);
2181 again:
2182 			first = op->vma->first_munmap_rebind;
2183 			last = op->vma->last_munmap_rebind;
2184 #ifdef TEST_VM_ASYNC_OPS_ERROR
2185 #define FORCE_ASYNC_OP_ERROR	BIT(31)
2186 			if (!(op->bind_op.op & FORCE_ASYNC_OP_ERROR)) {
2187 				err = vm_bind_ioctl(vm, op->vma, op->engine,
2188 						    op->bo, &op->bind_op,
2189 						    op->syncs, op->num_syncs,
2190 						    op->fence);
2191 			} else {
2192 				err = -ENOMEM;
2193 				op->bind_op.op &= ~FORCE_ASYNC_OP_ERROR;
2194 			}
2195 #else
2196 			err = vm_bind_ioctl(vm, op->vma, op->engine, op->bo,
2197 					    &op->bind_op, op->syncs,
2198 					    op->num_syncs, op->fence);
2199 #endif
2200 			/*
2201 			 * In order for the fencing to work (stall behind
2202 			 * existing jobs / prevent new jobs from running) all
2203 			 * the dma-resv slots need to be programmed in a batch
2204 			 * relative to execs / the rebind worker. The vm->lock
2205 			 * ensures this.
2206 			 */
2207 			if (!err && ((first && VM_BIND_OP(op->bind_op.op) ==
2208 				      XE_VM_BIND_OP_UNMAP) ||
2209 				     vm->async_ops.munmap_rebind_inflight)) {
2210 				if (last) {
2211 					op->vma->last_munmap_rebind = false;
2212 					vm->async_ops.munmap_rebind_inflight =
2213 						false;
2214 				} else {
2215 					vm->async_ops.munmap_rebind_inflight =
2216 						true;
2217 
2218 					async_op_cleanup(vm, op);
2219 
2220 					spin_lock_irq(&vm->async_ops.lock);
2221 					op = next_async_op(vm);
2222 					XE_BUG_ON(!op);
2223 					list_del_init(&op->link);
2224 					spin_unlock_irq(&vm->async_ops.lock);
2225 
2226 					goto again;
2227 				}
2228 			}
2229 			if (err) {
2230 				trace_xe_vma_fail(op->vma);
2231 				drm_warn(&vm->xe->drm, "Async VM op(%d) failed with %d",
2232 					 VM_BIND_OP(op->bind_op.op),
2233 					 err);
2234 
2235 				spin_lock_irq(&vm->async_ops.lock);
2236 				list_add(&op->link, &vm->async_ops.pending);
2237 				spin_unlock_irq(&vm->async_ops.lock);
2238 
2239 				vm_set_async_error(vm, err);
2240 				up_write(&vm->lock);
2241 
2242 				if (vm->async_ops.error_capture.addr)
2243 					vm_error_capture(vm, err,
2244 							 op->bind_op.op,
2245 							 op->bind_op.addr,
2246 							 op->bind_op.range);
2247 				break;
2248 			}
2249 			up_write(&vm->lock);
2250 		} else {
2251 			trace_xe_vma_flush(op->vma);
2252 
2253 			if (is_unmap_op(op->bind_op.op)) {
2254 				down_write(&vm->lock);
2255 				xe_vma_destroy_unlocked(op->vma);
2256 				up_write(&vm->lock);
2257 			}
2258 
2259 			if (op->fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2260 						   &op->fence->fence.flags)) {
2261 				if (!xe_vm_no_dma_fences(vm)) {
2262 					op->fence->started = true;
2263 					smp_wmb();
2264 					wake_up_all(&op->fence->wq);
2265 				}
2266 				dma_fence_signal(&op->fence->fence);
2267 			}
2268 		}
2269 
2270 		async_op_cleanup(vm, op);
2271 	}
2272 }
2273 
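/*
 * Queue a single bind op for the async worker. If the op carries syncs an
 * async_op_fence is created on the VM's (or bind engine's) fence timeline and
 * installed in any out-syncs; it is signalled once the bind has completed (or
 * immediately, if no out-sync consumed it).
 */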
2274 static int __vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2275 				 struct xe_engine *e, struct xe_bo *bo,
2276 				 struct drm_xe_vm_bind_op *bind_op,
2277 				 struct xe_sync_entry *syncs, u32 num_syncs)
2278 {
2279 	struct async_op *op;
2280 	bool installed = false;
2281 	u64 seqno;
2282 	int i;
2283 
2284 	lockdep_assert_held(&vm->lock);
2285 
2286 	op = kmalloc(sizeof(*op), GFP_KERNEL);
2287 	if (!op)
2288 		return -ENOMEM;
2290 
2291 	if (num_syncs) {
2292 		op->fence = kmalloc(sizeof(*op->fence), GFP_KERNEL);
2293 		if (!op->fence) {
2294 			kfree(op);
2295 			return -ENOMEM;
2296 		}
2297 
2298 		seqno = e ? ++e->bind.fence_seqno : ++vm->async_ops.fence.seqno;
2299 		dma_fence_init(&op->fence->fence, &async_op_fence_ops,
2300 			       &vm->async_ops.lock, e ? e->bind.fence_ctx :
2301 			       vm->async_ops.fence.context, seqno);
2302 
2303 		if (!xe_vm_no_dma_fences(vm)) {
2304 			op->fence->vm = vm;
2305 			op->fence->started = false;
2306 			init_waitqueue_head(&op->fence->wq);
2307 		}
2308 	} else {
2309 		op->fence = NULL;
2310 	}
2311 	op->vma = vma;
2312 	op->engine = e;
2313 	op->bo = bo;
2314 	op->bind_op = *bind_op;
2315 	op->syncs = syncs;
2316 	op->num_syncs = num_syncs;
2317 	INIT_LIST_HEAD(&op->link);
2318 
2319 	for (i = 0; i < num_syncs; i++)
2320 		installed |= xe_sync_entry_signal(&syncs[i], NULL,
2321 						  &op->fence->fence);
2322 
2323 	if (!installed && op->fence)
2324 		dma_fence_signal(&op->fence->fence);
2325 
2326 	spin_lock_irq(&vm->async_ops.lock);
2327 	list_add_tail(&op->link, &vm->async_ops.pending);
2328 	spin_unlock_irq(&vm->async_ops.lock);
2329 
2330 	if (!vm->async_ops.error)
2331 		queue_work(system_unbound_wq, &vm->async_ops.work);
2332 
2333 	return 0;
2334 }
2335 
2336 static int vm_bind_ioctl_async(struct xe_vm *vm, struct xe_vma *vma,
2337 			       struct xe_engine *e, struct xe_bo *bo,
2338 			       struct drm_xe_vm_bind_op *bind_op,
2339 			       struct xe_sync_entry *syncs, u32 num_syncs)
2340 {
2341 	struct xe_vma *__vma, *next;
2342 	struct list_head rebind_list;
2343 	struct xe_sync_entry *in_syncs = NULL, *out_syncs = NULL;
2344 	u32 num_in_syncs = 0, num_out_syncs = 0;
2345 	bool first = true, last;
2346 	int err;
2347 	int i;
2348 
2349 	lockdep_assert_held(&vm->lock);
2350 
2351 	/* Not a linked list of unbinds + rebinds, easy */
2352 	if (list_empty(&vma->unbind_link))
2353 		return __vm_bind_ioctl_async(vm, vma, e, bo, bind_op,
2354 					     syncs, num_syncs);
2355 
2356 	/*
2357 	 * Linked list of unbinds + rebinds: decompose the syncs into 'in / out',
2358 	 * passing the 'in' syncs to the first operation and the 'out' syncs to
2359 	 * the last. The reference counting is also a little tricky: increment
2360 	 * the VM / bind engine ref count on all but the last operation, and
2361 	 * increment the BO ref count on each rebind.
2362 	 */
2363 
2364 	XE_BUG_ON(VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP &&
2365 		  VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_UNMAP_ALL &&
2366 		  VM_BIND_OP(bind_op->op) != XE_VM_BIND_OP_PREFETCH);
2367 
2368 	/* Decompose syncs */
2369 	if (num_syncs) {
2370 		in_syncs = kmalloc(sizeof(*in_syncs) * num_syncs, GFP_KERNEL);
2371 		out_syncs = kmalloc(sizeof(*out_syncs) * num_syncs, GFP_KERNEL);
2372 		if (!in_syncs || !out_syncs) {
2373 			err = -ENOMEM;
2374 			goto out_error;
2375 		}
2376 
2377 		for (i = 0; i < num_syncs; ++i) {
2378 			bool signal = syncs[i].flags & DRM_XE_SYNC_SIGNAL;
2379 
2380 			if (signal)
2381 				out_syncs[num_out_syncs++] = syncs[i];
2382 			else
2383 				in_syncs[num_in_syncs++] = syncs[i];
2384 		}
2385 	}
2386 
2387 	/* Do unbinds + move rebinds to new list */
2388 	INIT_LIST_HEAD(&rebind_list);
2389 	list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link) {
2390 		if (__vma->destroyed ||
2391 		    VM_BIND_OP(bind_op->op) == XE_VM_BIND_OP_PREFETCH) {
2392 			list_del_init(&__vma->unbind_link);
2393 			xe_bo_get(bo);
2394 			err = __vm_bind_ioctl_async(xe_vm_get(vm), __vma,
2395 						    e ? xe_engine_get(e) : NULL,
2396 						    bo, bind_op, first ?
2397 						    in_syncs : NULL,
2398 						    first ? num_in_syncs : 0);
2399 			if (err) {
2400 				xe_bo_put(bo);
2401 				xe_vm_put(vm);
2402 				if (e)
2403 					xe_engine_put(e);
2404 				goto out_error;
2405 			}
2406 			in_syncs = NULL;
2407 			first = false;
2408 		} else {
2409 			list_move_tail(&__vma->unbind_link, &rebind_list);
2410 		}
2411 	}
2412 	last = list_empty(&rebind_list);
2413 	if (!last) {
2414 		xe_vm_get(vm);
2415 		if (e)
2416 			xe_engine_get(e);
2417 	}
2418 	err = __vm_bind_ioctl_async(vm, vma, e,
2419 				    bo, bind_op,
2420 				    first ? in_syncs :
2421 				    last ? out_syncs : NULL,
2422 				    first ? num_in_syncs :
2423 				    last ? num_out_syncs : 0);
2424 	if (err) {
2425 		if (!last) {
2426 			xe_vm_put(vm);
2427 			if (e)
2428 				xe_engine_put(e);
2429 		}
2430 		goto out_error;
2431 	}
2432 	in_syncs = NULL;
2433 
2434 	/* Do rebinds */
2435 	list_for_each_entry_safe(__vma, next, &rebind_list, unbind_link) {
2436 		list_del_init(&__vma->unbind_link);
2437 		last = list_empty(&rebind_list);
2438 
2439 		if (xe_vma_is_userptr(__vma)) {
2440 			bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2441 				XE_VM_BIND_OP_MAP_USERPTR;
2442 		} else {
2443 			bind_op->op = XE_VM_BIND_FLAG_ASYNC |
2444 				XE_VM_BIND_OP_MAP;
2445 			xe_bo_get(__vma->bo);
2446 		}
2447 
2448 		if (!last) {
2449 			xe_vm_get(vm);
2450 			if (e)
2451 				xe_engine_get(e);
2452 		}
2453 
2454 		err = __vm_bind_ioctl_async(vm, __vma, e,
2455 					    __vma->bo, bind_op, last ?
2456 					    out_syncs : NULL,
2457 					    last ? num_out_syncs : 0);
2458 		if (err) {
2459 			if (!last) {
2460 				xe_vm_put(vm);
2461 				if (e)
2462 					xe_engine_put(e);
2463 			}
2464 			goto out_error;
2465 		}
2466 	}
2467 
2468 	kfree(syncs);
2469 	return 0;
2470 
2471 out_error:
2472 	kfree(in_syncs);
2473 	kfree(out_syncs);
2474 	kfree(syncs);
2475 
2476 	return err;
2477 }
2478 
2479 static int __vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
2480 				      u64 addr, u64 range, u32 op)
2481 {
2482 	struct xe_device *xe = vm->xe;
2483 	struct xe_vma *vma, lookup;
2484 	bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2485 
2486 	lockdep_assert_held(&vm->lock);
2487 
2488 	lookup.start = addr;
2489 	lookup.end = addr + range - 1;
2490 
2491 	switch (VM_BIND_OP(op)) {
2492 	case XE_VM_BIND_OP_MAP:
2493 	case XE_VM_BIND_OP_MAP_USERPTR:
2494 		vma = xe_vm_find_overlapping_vma(vm, &lookup);
2495 		if (XE_IOCTL_ERR(xe, vma))
2496 			return -EBUSY;
2497 		break;
2498 	case XE_VM_BIND_OP_UNMAP:
2499 	case XE_VM_BIND_OP_PREFETCH:
2500 		vma = xe_vm_find_overlapping_vma(vm, &lookup);
2501 		if (XE_IOCTL_ERR(xe, !vma) ||
2502 		    XE_IOCTL_ERR(xe, (vma->start != addr ||
2503 				 vma->end != addr + range - 1) && !async))
2504 			return -EINVAL;
2505 		break;
2506 	case XE_VM_BIND_OP_UNMAP_ALL:
2507 		break;
2508 	default:
2509 		XE_BUG_ON("NOT POSSIBLE");
2510 		return -EINVAL;
2511 	}
2512 
2513 	return 0;
2514 }
2515 
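/*
 * Mark a VMA as destroyed under the userptr notifier lock (so a concurrent
 * invalidation / repin skips it) and remove it from the VM's VMA tree.
 */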
2516 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma)
2517 {
2518 	down_read(&vm->userptr.notifier_lock);
2519 	vma->destroyed = true;
2520 	up_read(&vm->userptr.notifier_lock);
2521 	xe_vm_remove_vma(vm, vma);
2522 }
2523 
2524 static int prep_replacement_vma(struct xe_vm *vm, struct xe_vma *vma)
2525 {
2526 	int err;
2527 
2528 	if (vma->bo && !vma->bo->vm) {
2529 		vm_insert_extobj(vm, vma);
2530 		err = add_preempt_fences(vm, vma->bo);
2531 		if (err)
2532 			return err;
2533 	}
2534 
2535 	return 0;
2536 }
2537 
2538 /*
2539  * Find all overlapping VMAs in the lookup range and add them to a list in the
2540  * returned VMA; all of the VMAs found will be unbound. Also possibly add 2 new
2541  * VMAs that need to be bound if the first / last VMAs are not fully unbound.
2542  * This is akin to how munmap works.
2543  */
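/*
 * Worked example: with a single VMA covering [0x0000, 0x3fff], an unmap of
 * [0x1000, 0x1fff] unbinds the whole VMA and creates two replacement VMAs,
 * new_first = [0x0000, 0x0fff] and new_last = [0x2000, 0x3fff]. The
 * first_munmap_rebind / last_munmap_rebind flags let the async worker treat
 * the resulting unbind + rebind chain as one atomic update.
 */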
2544 static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
2545 					    struct xe_vma *lookup)
2546 {
2547 	struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup);
2548 	struct rb_node *node;
2549 	struct xe_vma *first = vma, *last = vma, *new_first = NULL,
2550 		      *new_last = NULL, *__vma, *next;
2551 	int err = 0;
2552 	bool first_munmap_rebind = false;
2553 
2554 	lockdep_assert_held(&vm->lock);
2555 	XE_BUG_ON(!vma);
2556 
2557 	node = &vma->vm_node;
2558 	while ((node = rb_next(node))) {
2559 		if (!xe_vma_cmp_vma_cb(lookup, node)) {
2560 			__vma = to_xe_vma(node);
2561 			list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2562 			last = __vma;
2563 		} else {
2564 			break;
2565 		}
2566 	}
2567 
2568 	node = &vma->vm_node;
2569 	while ((node = rb_prev(node))) {
2570 		if (!xe_vma_cmp_vma_cb(lookup, node)) {
2571 			__vma = to_xe_vma(node);
2572 			list_add(&__vma->unbind_link, &vma->unbind_link);
2573 			first = __vma;
2574 		} else {
2575 			break;
2576 		}
2577 	}
2578 
2579 	if (first->start != lookup->start) {
2580 		struct ww_acquire_ctx ww;
2581 
2582 		if (first->bo)
2583 			err = xe_bo_lock(first->bo, &ww, 0, true);
2584 		if (err)
2585 			goto unwind;
2586 		new_first = xe_vma_create(first->vm, first->bo,
2587 					  first->bo ? first->bo_offset :
2588 					  first->userptr.ptr,
2589 					  first->start,
2590 					  lookup->start - 1,
2591 					  (first->pte_flags & PTE_READ_ONLY),
2592 					  first->gt_mask);
2593 		if (first->bo)
2594 			xe_bo_unlock(first->bo, &ww);
2595 		if (!new_first) {
2596 			err = -ENOMEM;
2597 			goto unwind;
2598 		}
2599 		if (!first->bo) {
2600 			err = xe_vma_userptr_pin_pages(new_first);
2601 			if (err)
2602 				goto unwind;
2603 		}
2604 		err = prep_replacement_vma(vm, new_first);
2605 		if (err)
2606 			goto unwind;
2607 	}
2608 
2609 	if (last->end != lookup->end) {
2610 		struct ww_acquire_ctx ww;
2611 		u64 chunk = lookup->end + 1 - last->start;
2612 
2613 		if (last->bo)
2614 			err = xe_bo_lock(last->bo, &ww, 0, true);
2615 		if (err)
2616 			goto unwind;
2617 		new_last = xe_vma_create(last->vm, last->bo,
2618 					 last->bo ? last->bo_offset + chunk :
2619 					 last->userptr.ptr + chunk,
2620 					 last->start + chunk,
2621 					 last->end,
2622 					 (last->pte_flags & PTE_READ_ONLY),
2623 					 last->gt_mask);
2624 		if (last->bo)
2625 			xe_bo_unlock(last->bo, &ww);
2626 		if (!new_last) {
2627 			err = -ENOMEM;
2628 			goto unwind;
2629 		}
2630 		if (!last->bo) {
2631 			err = xe_vma_userptr_pin_pages(new_last);
2632 			if (err)
2633 				goto unwind;
2634 		}
2635 		err = prep_replacement_vma(vm, new_last);
2636 		if (err)
2637 			goto unwind;
2638 	}
2639 
2640 	prep_vma_destroy(vm, vma);
2641 	if (list_empty(&vma->unbind_link) && (new_first || new_last))
2642 		vma->first_munmap_rebind = true;
2643 	list_for_each_entry(__vma, &vma->unbind_link, unbind_link) {
2644 		if ((new_first || new_last) && !first_munmap_rebind) {
2645 			__vma->first_munmap_rebind = true;
2646 			first_munmap_rebind = true;
2647 		}
2648 		prep_vma_destroy(vm, __vma);
2649 	}
2650 	if (new_first) {
2651 		xe_vm_insert_vma(vm, new_first);
2652 		list_add_tail(&new_first->unbind_link, &vma->unbind_link);
2653 		if (!new_last)
2654 			new_first->last_munmap_rebind = true;
2655 	}
2656 	if (new_last) {
2657 		xe_vm_insert_vma(vm, new_last);
2658 		list_add_tail(&new_last->unbind_link, &vma->unbind_link);
2659 		new_last->last_munmap_rebind = true;
2660 	}
2661 
2662 	return vma;
2663 
2664 unwind:
2665 	list_for_each_entry_safe(__vma, next, &vma->unbind_link, unbind_link)
2666 		list_del_init(&__vma->unbind_link);
2667 	if (new_last) {
2668 		prep_vma_destroy(vm, new_last);
2669 		xe_vma_destroy_unlocked(new_last);
2670 	}
2671 	if (new_first) {
2672 		prep_vma_destroy(vm, new_first);
2673 		xe_vma_destroy_unlocked(new_first);
2674 	}
2675 
2676 	return ERR_PTR(err);
2677 }
2678 
2679 /*
2680  * Similar to vm_unbind_lookup_vmas, find all VMAs in lookup range to prefetch
2681  */
2682 static struct xe_vma *vm_prefetch_lookup_vmas(struct xe_vm *vm,
2683 					      struct xe_vma *lookup,
2684 					      u32 region)
2685 {
2686 	struct xe_vma *vma = xe_vm_find_overlapping_vma(vm, lookup), *__vma,
2687 		      *next;
2688 	struct rb_node *node;
2689 
2690 	if (!xe_vma_is_userptr(vma)) {
2691 		if (!xe_bo_can_migrate(vma->bo, region_to_mem_type[region]))
2692 			return ERR_PTR(-EINVAL);
2693 	}
2694 
2695 	node = &vma->vm_node;
2696 	while ((node = rb_next(node))) {
2697 		if (!xe_vma_cmp_vma_cb(lookup, node)) {
2698 			__vma = to_xe_vma(node);
2699 			if (!xe_vma_is_userptr(__vma)) {
2700 				if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
2701 					goto flush_list;
2702 			}
2703 			list_add_tail(&__vma->unbind_link, &vma->unbind_link);
2704 		} else {
2705 			break;
2706 		}
2707 	}
2708 
2709 	node = &vma->vm_node;
2710 	while ((node = rb_prev(node))) {
2711 		if (!xe_vma_cmp_vma_cb(lookup, node)) {
2712 			__vma = to_xe_vma(node);
2713 			if (!xe_vma_is_userptr(__vma)) {
2714 				if (!xe_bo_can_migrate(__vma->bo, region_to_mem_type[region]))
2715 					goto flush_list;
2716 			}
2717 			list_add(&__vma->unbind_link, &vma->unbind_link);
2718 		} else {
2719 			break;
2720 		}
2721 	}
2722 
2723 	return vma;
2724 
2725 flush_list:
2726 	list_for_each_entry_safe(__vma, next, &vma->unbind_link,
2727 				 unbind_link)
2728 		list_del_init(&__vma->unbind_link);
2729 
2730 	return ERR_PTR(-EINVAL);
2731 }
2732 
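/*
 * For UNMAP_ALL: collect every VMA in this VM that is backed by @bo, marking
 * each for destruction and chaining them on the first VMA's unbind_link list.
 */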
2733 static struct xe_vma *vm_unbind_all_lookup_vmas(struct xe_vm *vm,
2734 						struct xe_bo *bo)
2735 {
2736 	struct xe_vma *first = NULL, *vma;
2737 
2738 	lockdep_assert_held(&vm->lock);
2739 	xe_bo_assert_held(bo);
2740 
2741 	list_for_each_entry(vma, &bo->vmas, bo_link) {
2742 		if (vma->vm != vm)
2743 			continue;
2744 
2745 		prep_vma_destroy(vm, vma);
2746 		if (!first)
2747 			first = vma;
2748 		else
2749 			list_add_tail(&vma->unbind_link, &first->unbind_link);
2750 	}
2751 
2752 	return first;
2753 }
2754 
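/*
 * Translate a bind op into the VMA (or chain of VMAs) it operates on:
 * MAP / MAP_USERPTR create and insert a new VMA, UNMAP looks up and possibly
 * splits existing VMAs (munmap semantics), PREFETCH collects the overlapping
 * VMAs, and UNMAP_ALL gathers every VMA backed by the given BO.
 */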
2755 static struct xe_vma *vm_bind_ioctl_lookup_vma(struct xe_vm *vm,
2756 					       struct xe_bo *bo,
2757 					       u64 bo_offset_or_userptr,
2758 					       u64 addr, u64 range, u32 op,
2759 					       u64 gt_mask, u32 region)
2760 {
2761 	struct ww_acquire_ctx ww;
2762 	struct xe_vma *vma, lookup;
2763 	int err;
2764 
2765 	lockdep_assert_held(&vm->lock);
2766 
2767 	lookup.start = addr;
2768 	lookup.end = addr + range - 1;
2769 
2770 	switch (VM_BIND_OP(op)) {
2771 	case XE_VM_BIND_OP_MAP:
2772 		XE_BUG_ON(!bo);
2773 
2774 		err = xe_bo_lock(bo, &ww, 0, true);
2775 		if (err)
2776 			return ERR_PTR(err);
2777 		vma = xe_vma_create(vm, bo, bo_offset_or_userptr, addr,
2778 				    addr + range - 1,
2779 				    op & XE_VM_BIND_FLAG_READONLY,
2780 				    gt_mask);
2781 		xe_bo_unlock(bo, &ww);
2782 		if (!vma)
2783 			return ERR_PTR(-ENOMEM);
2784 
2785 		xe_vm_insert_vma(vm, vma);
2786 		if (!bo->vm) {
2787 			vm_insert_extobj(vm, vma);
2788 			err = add_preempt_fences(vm, bo);
2789 			if (err) {
2790 				prep_vma_destroy(vm, vma);
2791 				xe_vma_destroy_unlocked(vma);
2792 
2793 				return ERR_PTR(err);
2794 			}
2795 		}
2796 		break;
2797 	case XE_VM_BIND_OP_UNMAP:
2798 		vma = vm_unbind_lookup_vmas(vm, &lookup);
2799 		break;
2800 	case XE_VM_BIND_OP_PREFETCH:
2801 		vma = vm_prefetch_lookup_vmas(vm, &lookup, region);
2802 		break;
2803 	case XE_VM_BIND_OP_UNMAP_ALL:
2804 		XE_BUG_ON(!bo);
2805 
2806 		err = xe_bo_lock(bo, &ww, 0, true);
2807 		if (err)
2808 			return ERR_PTR(err);
2809 		vma = vm_unbind_all_lookup_vmas(vm, bo);
2810 		if (!vma)
2811 			vma = ERR_PTR(-EINVAL);
2812 		xe_bo_unlock(bo, &ww);
2813 		break;
2814 	case XE_VM_BIND_OP_MAP_USERPTR:
2815 		XE_BUG_ON(bo);
2816 
2817 		vma = xe_vma_create(vm, NULL, bo_offset_or_userptr, addr,
2818 				    addr + range - 1,
2819 				    op & XE_VM_BIND_FLAG_READONLY,
2820 				    gt_mask);
2821 		if (!vma)
2822 			return ERR_PTR(-ENOMEM);
2823 
2824 		err = xe_vma_userptr_pin_pages(vma);
2825 		if (err) {
2826 			xe_vma_destroy(vma, NULL);
2827 
2828 			return ERR_PTR(err);
2829 		} else {
2830 			xe_vm_insert_vma(vm, vma);
2831 		}
2832 		break;
2833 	default:
2834 		XE_BUG_ON("NOT POSSIBLE");
2835 		vma = ERR_PTR(-EINVAL);
2836 	}
2837 
2838 	return vma;
2839 }
2840 
2841 #ifdef TEST_VM_ASYNC_OPS_ERROR
2842 #define SUPPORTED_FLAGS	\
2843 	(FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
2844 	 XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | 0xffff)
2845 #else
2846 #define SUPPORTED_FLAGS	\
2847 	(XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
2848 	 XE_VM_BIND_FLAG_IMMEDIATE | 0xffff)
2849 #endif
2850 #define XE_64K_PAGE_MASK 0xffffull
2851 
2852 #define MAX_BINDS	512	/* FIXME: Picking random upper limit */
2853 
2854 static int vm_bind_ioctl_check_args(struct xe_device *xe,
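/*
 * Copy in and sanity check the array of bind ops: with more than one op all
 * ops must be async, op / flag combinations must be legal, and addresses,
 * ranges and BO offsets must be page aligned.
 */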
2855 				    struct drm_xe_vm_bind *args,
2856 				    struct drm_xe_vm_bind_op **bind_ops,
2857 				    bool *async)
2858 {
2859 	int err;
2860 	int i;
2861 
2862 	if (XE_IOCTL_ERR(xe, args->extensions) ||
2863 	    XE_IOCTL_ERR(xe, !args->num_binds) ||
2864 	    XE_IOCTL_ERR(xe, args->num_binds > MAX_BINDS))
2865 		return -EINVAL;
2866 
2867 	if (args->num_binds > 1) {
2868 		u64 __user *bind_user =
2869 			u64_to_user_ptr(args->vector_of_binds);
2870 
2871 		*bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
2872 				    args->num_binds, GFP_KERNEL);
2873 		if (!*bind_ops)
2874 			return -ENOMEM;
2875 
2876 		err = __copy_from_user(*bind_ops, bind_user,
2877 				       sizeof(struct drm_xe_vm_bind_op) *
2878 				       args->num_binds);
2879 		if (XE_IOCTL_ERR(xe, err)) {
2880 			err = -EFAULT;
2881 			goto free_bind_ops;
2882 		}
2883 	} else {
2884 		*bind_ops = &args->bind;
2885 	}
2886 
2887 	for (i = 0; i < args->num_binds; ++i) {
2888 		u64 range = (*bind_ops)[i].range;
2889 		u64 addr = (*bind_ops)[i].addr;
2890 		u32 op = (*bind_ops)[i].op;
2891 		u32 obj = (*bind_ops)[i].obj;
2892 		u64 obj_offset = (*bind_ops)[i].obj_offset;
2893 		u32 region = (*bind_ops)[i].region;
2894 
2895 		if (i == 0) {
2896 			*async = !!(op & XE_VM_BIND_FLAG_ASYNC);
2897 		} else if (XE_IOCTL_ERR(xe, !*async) ||
2898 			   XE_IOCTL_ERR(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
2899 			   XE_IOCTL_ERR(xe, VM_BIND_OP(op) ==
2900 					XE_VM_BIND_OP_RESTART)) {
2901 			err = -EINVAL;
2902 			goto free_bind_ops;
2903 		}
2904 
2905 		if (XE_IOCTL_ERR(xe, !*async &&
2906 				 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
2907 			err = -EINVAL;
2908 			goto free_bind_ops;
2909 		}
2910 
2911 		if (XE_IOCTL_ERR(xe, !*async &&
2912 				 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
2913 			err = -EINVAL;
2914 			goto free_bind_ops;
2915 		}
2916 
2917 		if (XE_IOCTL_ERR(xe, VM_BIND_OP(op) >
2918 				 XE_VM_BIND_OP_PREFETCH) ||
2919 		    XE_IOCTL_ERR(xe, op & ~SUPPORTED_FLAGS) ||
2920 		    XE_IOCTL_ERR(xe, !obj &&
2921 				 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP) ||
2922 		    XE_IOCTL_ERR(xe, !obj &&
2923 				 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2924 		    XE_IOCTL_ERR(xe, addr &&
2925 				 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2926 		    XE_IOCTL_ERR(xe, range &&
2927 				 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
2928 		    XE_IOCTL_ERR(xe, obj &&
2929 				 VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
2930 		    XE_IOCTL_ERR(xe, obj &&
2931 				 VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
2932 		    XE_IOCTL_ERR(xe, region &&
2933 				 VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
2934 		    XE_IOCTL_ERR(xe, !(BIT(region) &
2935 				       xe->info.mem_region_mask)) ||
2936 		    XE_IOCTL_ERR(xe, obj &&
2937 				 VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
2938 			err = -EINVAL;
2939 			goto free_bind_ops;
2940 		}
2941 
2942 		if (XE_IOCTL_ERR(xe, obj_offset & ~PAGE_MASK) ||
2943 		    XE_IOCTL_ERR(xe, addr & ~PAGE_MASK) ||
2944 		    XE_IOCTL_ERR(xe, range & ~PAGE_MASK) ||
2945 		    XE_IOCTL_ERR(xe, !range && VM_BIND_OP(op) !=
2946 				 XE_VM_BIND_OP_RESTART &&
2947 				 VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
2948 			err = -EINVAL;
2949 			goto free_bind_ops;
2950 		}
2951 	}
2952 
2953 	return 0;
2954 
2955 free_bind_ops:
2956 	if (args->num_binds > 1)
2957 		kfree(*bind_ops);
2958 	return err;
2959 }
2960 
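/*
 * Rough userspace-side sketch of a single synchronous MAP through this ioctl.
 * This is an editorial illustration only: the field names follow their use in
 * this file, and the canonical struct layout / ioctl macro live in the
 * xe_drm.h uAPI header (DRM_IOCTL_XE_VM_BIND assumed here):
 *
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm_id,
 *		.num_binds = 1,
 *		.bind = {
 *			.obj = bo_handle,
 *			.obj_offset = 0,
 *			.addr = 0x100000,
 *			.range = bo_size,
 *			.op = XE_VM_BIND_OP_MAP,
 *		},
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 *
 * With num_binds > 1 the ops are passed through vector_of_binds instead, and
 * every op in the vector must carry XE_VM_BIND_FLAG_ASYNC.
 */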
2961 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2962 {
2963 	struct xe_device *xe = to_xe_device(dev);
2964 	struct xe_file *xef = to_xe_file(file);
2965 	struct drm_xe_vm_bind *args = data;
2966 	struct drm_xe_sync __user *syncs_user;
2967 	struct xe_bo **bos = NULL;
2968 	struct xe_vma **vmas = NULL;
2969 	struct xe_vm *vm;
2970 	struct xe_engine *e = NULL;
2971 	u32 num_syncs;
2972 	struct xe_sync_entry *syncs = NULL;
2973 	struct drm_xe_vm_bind_op *bind_ops;
2974 	bool async;
2975 	int err;
2976 	int i, j = 0;
2977 
2978 	err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
2979 	if (err)
2980 		return err;
2981 
2982 	vm = xe_vm_lookup(xef, args->vm_id);
2983 	if (XE_IOCTL_ERR(xe, !vm)) {
2984 		err = -EINVAL;
2985 		goto free_objs;
2986 	}
2987 
2988 	if (XE_IOCTL_ERR(xe, xe_vm_is_closed(vm))) {
2989 		DRM_ERROR("VM closed while we were looking it up?\n");
2990 		err = -ENOENT;
2991 		goto put_vm;
2992 	}
2993 
2994 	if (args->engine_id) {
2995 		e = xe_engine_lookup(xef, args->engine_id);
2996 		if (XE_IOCTL_ERR(xe, !e)) {
2997 			err = -ENOENT;
2998 			goto put_vm;
2999 		}
3000 		if (XE_IOCTL_ERR(xe, !(e->flags & ENGINE_FLAG_VM))) {
3001 			err = -EINVAL;
3002 			goto put_engine;
3003 		}
3004 	}
3005 
3006 	if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
3007 		if (XE_IOCTL_ERR(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
3008 			err = -ENOTSUPP;
3009 		if (XE_IOCTL_ERR(xe, !err && args->num_syncs))
3010 			err = -EINVAL;
3011 		if (XE_IOCTL_ERR(xe, !err && !vm->async_ops.error))
3012 			err = -EPROTO;
3013 
3014 		if (!err) {
3015 			down_write(&vm->lock);
3016 			trace_xe_vm_restart(vm);
3017 			vm_set_async_error(vm, 0);
3018 			up_write(&vm->lock);
3019 
3020 			queue_work(system_unbound_wq, &vm->async_ops.work);
3021 
3022 			/* Rebinds may have been blocked, give worker a kick */
3023 			if (xe_vm_in_compute_mode(vm))
3024 				queue_work(vm->xe->ordered_wq,
3025 					   &vm->preempt.rebind_work);
3026 		}
3027 
3028 		goto put_engine;
3029 	}
3030 
3031 	if (XE_IOCTL_ERR(xe, !vm->async_ops.error &&
3032 			 async != !!(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS))) {
3033 		err = -ENOTSUPP;
3034 		goto put_engine;
3035 	}
3036 
3037 	for (i = 0; i < args->num_binds; ++i) {
3038 		u64 range = bind_ops[i].range;
3039 		u64 addr = bind_ops[i].addr;
3040 
3041 		if (XE_IOCTL_ERR(xe, range > vm->size) ||
3042 		    XE_IOCTL_ERR(xe, addr > vm->size - range)) {
3043 			err = -EINVAL;
3044 			goto put_engine;
3045 		}
3046 
3047 		if (bind_ops[i].gt_mask) {
3048 			u64 valid_gts = BIT(xe->info.tile_count) - 1;
3049 
3050 			if (XE_IOCTL_ERR(xe, bind_ops[i].gt_mask &
3051 					 ~valid_gts)) {
3052 				err = -EINVAL;
3053 				goto put_engine;
3054 			}
3055 		}
3056 	}
3057 
3058 	bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
3059 	if (!bos) {
3060 		err = -ENOMEM;
3061 		goto put_engine;
3062 	}
3063 
3064 	vmas = kzalloc(sizeof(*vmas) * args->num_binds, GFP_KERNEL);
3065 	if (!vmas) {
3066 		err = -ENOMEM;
3067 		goto put_engine;
3068 	}
3069 
3070 	for (i = 0; i < args->num_binds; ++i) {
3071 		struct drm_gem_object *gem_obj;
3072 		u64 range = bind_ops[i].range;
3073 		u64 addr = bind_ops[i].addr;
3074 		u32 obj = bind_ops[i].obj;
3075 		u64 obj_offset = bind_ops[i].obj_offset;
3076 
3077 		if (!obj)
3078 			continue;
3079 
3080 		gem_obj = drm_gem_object_lookup(file, obj);
3081 		if (XE_IOCTL_ERR(xe, !gem_obj)) {
3082 			err = -ENOENT;
3083 			goto put_obj;
3084 		}
3085 		bos[i] = gem_to_xe_bo(gem_obj);
3086 
3087 		if (XE_IOCTL_ERR(xe, range > bos[i]->size) ||
3088 		    XE_IOCTL_ERR(xe, obj_offset >
3089 				 bos[i]->size - range)) {
3090 			err = -EINVAL;
3091 			goto put_obj;
3092 		}
3093 
3094 		if (bos[i]->flags & XE_BO_INTERNAL_64K) {
3095 			if (XE_IOCTL_ERR(xe, obj_offset &
3096 					 XE_64K_PAGE_MASK) ||
3097 			    XE_IOCTL_ERR(xe, addr & XE_64K_PAGE_MASK) ||
3098 			    XE_IOCTL_ERR(xe, range & XE_64K_PAGE_MASK)) {
3099 				err = -EINVAL;
3100 				goto put_obj;
3101 			}
3102 		}
3103 	}
3104 
3105 	if (args->num_syncs) {
3106 		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3107 		if (!syncs) {
3108 			err = -ENOMEM;
3109 			goto put_obj;
3110 		}
3111 	}
3112 
3113 	syncs_user = u64_to_user_ptr(args->syncs);
3114 	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3115 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3116 					  &syncs_user[num_syncs], false,
3117 					  xe_vm_no_dma_fences(vm));
3118 		if (err)
3119 			goto free_syncs;
3120 	}
3121 
3122 	err = down_write_killable(&vm->lock);
3123 	if (err)
3124 		goto free_syncs;
3125 
3126 	/* Do some error checking first to make the unwind easier */
3127 	for (i = 0; i < args->num_binds; ++i) {
3128 		u64 range = bind_ops[i].range;
3129 		u64 addr = bind_ops[i].addr;
3130 		u32 op = bind_ops[i].op;
3131 
3132 		err = __vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
3133 		if (err)
3134 			goto release_vm_lock;
3135 	}
3136 
3137 	for (i = 0; i < args->num_binds; ++i) {
3138 		u64 range = bind_ops[i].range;
3139 		u64 addr = bind_ops[i].addr;
3140 		u32 op = bind_ops[i].op;
3141 		u64 obj_offset = bind_ops[i].obj_offset;
3142 		u64 gt_mask = bind_ops[i].gt_mask;
3143 		u32 region = bind_ops[i].region;
3144 
3145 		vmas[i] = vm_bind_ioctl_lookup_vma(vm, bos[i], obj_offset,
3146 						   addr, range, op, gt_mask,
3147 						   region);
3148 		if (IS_ERR(vmas[i])) {
3149 			err = PTR_ERR(vmas[i]);
3150 			vmas[i] = NULL;
3151 			goto destroy_vmas;
3152 		}
3153 	}
3154 
3155 	for (j = 0; j < args->num_binds; ++j) {
3156 		struct xe_sync_entry *__syncs;
3157 		u32 __num_syncs = 0;
3158 		bool first_or_last = j == 0 || j == args->num_binds - 1;
3159 
3160 		if (args->num_binds == 1) {
3161 			__num_syncs = num_syncs;
3162 			__syncs = syncs;
3163 		} else if (first_or_last && num_syncs) {
3164 			bool first = j == 0;
3165 
3166 			__syncs = kmalloc(sizeof(*__syncs) * num_syncs,
3167 					  GFP_KERNEL);
3168 			if (!__syncs) {
3169 				err = -ENOMEM;
3170 				break;
3171 			}
3172 
3173 			/* in-syncs on first bind, out-syncs on last bind */
3174 			for (i = 0; i < num_syncs; ++i) {
3175 				bool signal = syncs[i].flags &
3176 					DRM_XE_SYNC_SIGNAL;
3177 
3178 				if ((first && !signal) || (!first && signal))
3179 					__syncs[__num_syncs++] = syncs[i];
3180 			}
3181 		} else {
3182 			__num_syncs = 0;
3183 			__syncs = NULL;
3184 		}
3185 
3186 		if (async) {
3187 			bool last = j == args->num_binds - 1;
3188 
3189 			/*
3190 			 * Each pass of the async worker drops a VM / engine
3191 			 * ref, so take extra refs here for all but the last
			 * bind; the last one consumes the refs taken above.
3192 			 */
3193 			if (!last) {
3194 				if (e)
3195 					xe_engine_get(e);
3196 				xe_vm_get(vm);
3197 			}
3198 
3199 			err = vm_bind_ioctl_async(vm, vmas[j], e, bos[j],
3200 						  bind_ops + j, __syncs,
3201 						  __num_syncs);
3202 			if (err && !last) {
3203 				if (e)
3204 					xe_engine_put(e);
3205 				xe_vm_put(vm);
3206 			}
3207 			if (err)
3208 				break;
3209 		} else {
3210 			XE_BUG_ON(j != 0);	/* Not supported */
3211 			err = vm_bind_ioctl(vm, vmas[j], e, bos[j],
3212 					    bind_ops + j, __syncs,
3213 					    __num_syncs, NULL);
3214 			break;	/* Needed so cleanup loops work */
3215 		}
3216 	}
3217 
3218 	/* Most of cleanup owned by the async bind worker */
3219 	if (async && !err) {
3220 		up_write(&vm->lock);
3221 		if (args->num_binds > 1)
3222 			kfree(syncs);
3223 		goto free_objs;
3224 	}
3225 
3226 destroy_vmas:
3227 	for (i = j; err && i < args->num_binds; ++i) {
3228 		u32 op = bind_ops[i].op;
3229 		struct xe_vma *vma, *next;
3230 
3231 		if (!vmas[i])
3232 			break;
3233 
3234 		list_for_each_entry_safe(vma, next, &vmas[i]->unbind_link,
3235 					 unbind_link) {
3236 			list_del_init(&vma->unbind_link);
3237 			if (!vma->destroyed) {
3238 				prep_vma_destroy(vm, vma);
3239 				xe_vma_destroy_unlocked(vma);
3240 			}
3241 		}
3242 
3243 		switch (VM_BIND_OP(op)) {
3244 		case XE_VM_BIND_OP_MAP:
3245 			prep_vma_destroy(vm, vmas[i]);
3246 			xe_vma_destroy_unlocked(vmas[i]);
3247 			break;
3248 		case XE_VM_BIND_OP_MAP_USERPTR:
3249 			prep_vma_destroy(vm, vmas[i]);
3250 			xe_vma_destroy_unlocked(vmas[i]);
3251 			break;
3252 		}
3253 	}
3254 release_vm_lock:
3255 	up_write(&vm->lock);
3256 free_syncs:
3257 	while (num_syncs--) {
3258 		if (async && j &&
3259 		    !(syncs[num_syncs].flags & DRM_XE_SYNC_SIGNAL))
3260 			continue;	/* Still in async worker */
3261 		xe_sync_entry_cleanup(&syncs[num_syncs]);
3262 	}
3263 
3264 	kfree(syncs);
3265 put_obj:
3266 	for (i = j; i < args->num_binds; ++i)
3267 		xe_bo_put(bos[i]);
3268 put_engine:
3269 	if (e)
3270 		xe_engine_put(e);
3271 put_vm:
3272 	xe_vm_put(vm);
3273 free_objs:
3274 	kfree(bos);
3275 	kfree(vmas);
3276 	if (args->num_binds > 1)
3277 		kfree(bind_ops);
3278 	return err;
3279 }
3280 
3281 /*
3282  * XXX: Using the TTM wrappers for now, likely can call into dma-resv code
3283  * directly to optimize. Also this likely should be an inline function.
3284  */
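/*
 * Typical usage (sketch):
 *
 *	struct ww_acquire_ctx ww;
 *	int err;
 *
 *	err = xe_vm_lock(vm, &ww, 0, true);
 *	if (err)
 *		return err;
 *	... access state protected by vm->resv ...
 *	xe_vm_unlock(vm, &ww);
 */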
3285 int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
3286 	       int num_resv, bool intr)
3287 {
3288 	struct ttm_validate_buffer tv_vm;
3289 	LIST_HEAD(objs);
3290 	LIST_HEAD(dups);
3291 
3292 	XE_BUG_ON(!ww);
3293 
3294 	tv_vm.num_shared = num_resv;
3295 	tv_vm.bo = xe_vm_ttm_bo(vm);
3296 	list_add_tail(&tv_vm.head, &objs);
3297 
3298 	return ttm_eu_reserve_buffers(ww, &objs, intr, &dups);
3299 }
3300 
3301 void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww)
3302 {
3303 	dma_resv_unlock(&vm->resv);
3304 	ww_acquire_fini(ww);
3305 }
3306 
3307 /**
3308  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3309  * @vma: VMA to invalidate
3310  *
3311  * Walks a list of page table leaves, zeroing (memset) the entries owned by
3312  * this VMA, invalidates the TLBs, and blocks until the TLB invalidation is
3313  * complete.
3314  *
3315  * Return: 0 for success, negative error code otherwise.
3316  */
3317 int xe_vm_invalidate_vma(struct xe_vma *vma)
3318 {
3319 	struct xe_device *xe = vma->vm->xe;
3320 	struct xe_gt *gt;
3321 	u32 gt_needs_invalidate = 0;
3322 	int seqno[XE_MAX_GT];
3323 	u8 id;
3324 	int ret;
3325 
3326 	XE_BUG_ON(!xe_vm_in_fault_mode(vma->vm));
3327 	trace_xe_vma_usm_invalidate(vma);
3328 
3329 	/* Check that we don't race with page-table updates */
3330 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3331 		if (xe_vma_is_userptr(vma)) {
3332 			WARN_ON_ONCE(!mmu_interval_check_retry
3333 				     (&vma->userptr.notifier,
3334 				      vma->userptr.notifier_seq));
3335 			WARN_ON_ONCE(!dma_resv_test_signaled(&vma->vm->resv,
3336 							     DMA_RESV_USAGE_BOOKKEEP));
3337 
3338 		} else {
3339 			xe_bo_assert_held(vma->bo);
3340 		}
3341 	}
3342 
3343 	for_each_gt(gt, xe, id) {
3344 		if (xe_pt_zap_ptes(gt, vma)) {
3345 			gt_needs_invalidate |= BIT(id);
3346 			xe_device_wmb(xe);
3347 			seqno[id] = xe_gt_tlb_invalidation(gt);
3348 			if (seqno[id] < 0)
3349 				return seqno[id];
3350 		}
3351 	}
3352 
3353 	for_each_gt(gt, xe, id) {
3354 		if (gt_needs_invalidate & BIT(id)) {
3355 			ret = xe_gt_tlb_invalidation_wait(gt, seqno[id]);
3356 			if (ret < 0)
3357 				return ret;
3358 		}
3359 	}
3360 
3361 	vma->usm.gt_invalidated = vma->gt_mask;
3362 
3363 	return 0;
3364 }
3365 
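/*
 * Debug helper: dump the VM's page-table root and every VMA (range, size and
 * backing DMA / BO address) for error capture. Compiled out to a stub unless
 * CONFIG_DRM_XE_SIMPLE_ERROR_CAPTURE is enabled.
 */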
3366 #if IS_ENABLED(CONFIG_DRM_XE_SIMPLE_ERROR_CAPTURE)
3367 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3368 {
3369 	struct rb_node *node;
3370 	bool is_lmem;
3371 	u64 addr;
3372 
3373 	if (!down_read_trylock(&vm->lock)) {
3374 		drm_printf(p, " Failed to acquire VM lock to dump capture\n");
3375 		return 0;
3376 	}
3377 	if (vm->pt_root[gt_id]) {
3378 		addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, GEN8_PAGE_SIZE, &is_lmem);
3379 		drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_lmem ? "LMEM" : "SYS");
3380 	}
3381 
3382 	for (node = rb_first(&vm->vmas); node; node = rb_next(node)) {
3383 		struct xe_vma *vma = to_xe_vma(node);
3384 		bool is_userptr = xe_vma_is_userptr(vma);
3385 
3386 		if (is_userptr) {
3387 			struct xe_res_cursor cur;
3388 
3389 			xe_res_first_sg(vma->userptr.sg, 0, GEN8_PAGE_SIZE, &cur);
3390 			addr = xe_res_dma(&cur);
3391 		} else {
3392 			addr = xe_bo_addr(vma->bo, 0, GEN8_PAGE_SIZE, &is_lmem);
3393 		}
3394 		drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
3395 			   vma->start, vma->end, vma->end - vma->start + 1ull,
3396 			   addr, is_userptr ? "USR" : is_lmem ? "VRAM" : "SYS");
3397 	}
3398 	up_read(&vm->lock);
3399 
3400 	return 0;
3401 }
3402 #else
3403 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
3404 {
3405 	return 0;
3406 }
3407 #endif
3408