xref: /linux/drivers/gpu/drm/xe/xe_vm.c (revision a4871e6201c46c8e1d04308265b4b4c5753c8209)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_vm.h"
7 
8 #include <linux/dma-fence-array.h>
9 #include <linux/nospec.h>
10 
11 #include <drm/drm_drv.h>
12 #include <drm/drm_exec.h>
13 #include <drm/drm_print.h>
14 #include <drm/ttm/ttm_tt.h>
15 #include <uapi/drm/xe_drm.h>
16 #include <linux/ascii85.h>
17 #include <linux/delay.h>
18 #include <linux/kthread.h>
19 #include <linux/mm.h>
20 #include <linux/swap.h>
21 
22 #include <generated/xe_wa_oob.h>
23 
24 #include "regs/xe_gtt_defs.h"
25 #include "xe_assert.h"
26 #include "xe_bo.h"
27 #include "xe_device.h"
28 #include "xe_drm_client.h"
29 #include "xe_exec_queue.h"
30 #include "xe_gt_pagefault.h"
31 #include "xe_gt_tlb_invalidation.h"
32 #include "xe_migrate.h"
33 #include "xe_pat.h"
34 #include "xe_pm.h"
35 #include "xe_preempt_fence.h"
36 #include "xe_pt.h"
37 #include "xe_pxp.h"
38 #include "xe_res_cursor.h"
39 #include "xe_svm.h"
40 #include "xe_sync.h"
41 #include "xe_trace_bo.h"
42 #include "xe_wa.h"
43 #include "xe_hmm.h"
44 
45 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
46 {
47 	return vm->gpuvm.r_obj;
48 }
49 
50 /**
51  * xe_vma_userptr_check_repin() - Advisory check for repin needed
52  * @uvma: The userptr vma
53  *
54  * Check if the userptr vma has been invalidated since last successful
55  * repin. The check is advisory only and the function can be called
56  * without the vm->userptr.notifier_lock held. There is no guarantee that the
57  * vma userptr will remain valid after a lockless check, so typically
58  * the call needs to be followed by a proper check under the notifier_lock.
59  *
60  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
61  */
62 int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
63 {
64 	return mmu_interval_check_retry(&uvma->userptr.notifier,
65 					uvma->userptr.notifier_seq) ?
66 		-EAGAIN : 0;
67 }
68 
69 int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
70 {
71 	struct xe_vma *vma = &uvma->vma;
72 	struct xe_vm *vm = xe_vma_vm(vma);
73 	struct xe_device *xe = vm->xe;
74 
75 	lockdep_assert_held(&vm->lock);
76 	xe_assert(xe, xe_vma_is_userptr(vma));
77 
78 	return xe_hmm_userptr_populate_range(uvma, false);
79 }
80 
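/*
 * Return true if any exec queue on the VM is missing its preempt fence or
 * already has signaling enabled on that fence, i.e. a preemption is in
 * flight or pending.
 */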
81 static bool preempt_fences_waiting(struct xe_vm *vm)
82 {
83 	struct xe_exec_queue *q;
84 
85 	lockdep_assert_held(&vm->lock);
86 	xe_vm_assert_held(vm);
87 
88 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
89 		if (!q->lr.pfence ||
90 		    test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
91 			     &q->lr.pfence->flags)) {
92 			return true;
93 		}
94 	}
95 
96 	return false;
97 }
98 
99 static void free_preempt_fences(struct list_head *list)
100 {
101 	struct list_head *link, *next;
102 
103 	list_for_each_safe(link, next, list)
104 		xe_preempt_fence_free(to_preempt_fence_from_link(link));
105 }
106 
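/*
 * Make sure @list holds one pre-allocated preempt fence per exec queue on
 * the VM; @count tracks how many fences are already on the list.
 */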
107 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
108 				unsigned int *count)
109 {
110 	lockdep_assert_held(&vm->lock);
111 	xe_vm_assert_held(vm);
112 
113 	if (*count >= vm->preempt.num_exec_queues)
114 		return 0;
115 
116 	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
117 		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
118 
119 		if (IS_ERR(pfence))
120 			return PTR_ERR(pfence);
121 
122 		list_move_tail(xe_preempt_fence_link(pfence), list);
123 	}
124 
125 	return 0;
126 }
127 
128 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
129 {
130 	struct xe_exec_queue *q;
131 
132 	xe_vm_assert_held(vm);
133 
134 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
135 		if (q->lr.pfence) {
136 			long timeout = dma_fence_wait(q->lr.pfence, false);
137 
138 			/* Only -ETIME on fence indicates VM needs to be killed */
139 			if (timeout < 0 || q->lr.pfence->error == -ETIME)
140 				return -ETIME;
141 
142 			dma_fence_put(q->lr.pfence);
143 			q->lr.pfence = NULL;
144 		}
145 	}
146 
147 	return 0;
148 }
149 
150 static bool xe_vm_is_idle(struct xe_vm *vm)
151 {
152 	struct xe_exec_queue *q;
153 
154 	xe_vm_assert_held(vm);
155 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
156 		if (!xe_exec_queue_is_idle(q))
157 			return false;
158 	}
159 
160 	return true;
161 }
162 
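/*
 * Take one pre-allocated fence from @list per exec queue, arm it with the
 * queue's context and next seqno, and install it as the queue's new preempt
 * fence.
 */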
163 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
164 {
165 	struct list_head *link;
166 	struct xe_exec_queue *q;
167 
168 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
169 		struct dma_fence *fence;
170 
171 		link = list->next;
172 		xe_assert(vm->xe, link != list);
173 
174 		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
175 					     q, q->lr.context,
176 					     ++q->lr.seqno);
177 		dma_fence_put(q->lr.pfence);
178 		q->lr.pfence = fence;
179 	}
180 }
181 
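/* Attach the VM's current preempt fences to @bo's dma-resv as BOOKKEEP fences. */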
182 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
183 {
184 	struct xe_exec_queue *q;
185 	int err;
186 
187 	xe_bo_assert_held(bo);
188 
189 	if (!vm->preempt.num_exec_queues)
190 		return 0;
191 
192 	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
193 	if (err)
194 		return err;
195 
196 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
197 		if (q->lr.pfence) {
198 			dma_resv_add_fence(bo->ttm.base.resv,
199 					   q->lr.pfence,
200 					   DMA_RESV_USAGE_BOOKKEEP);
201 		}
202 
203 	return 0;
204 }
205 
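/*
 * Resume all exec queues on the VM and re-add their newly armed preempt
 * fences to the VM's dma-resv.
 */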
206 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
207 						struct drm_exec *exec)
208 {
209 	struct xe_exec_queue *q;
210 
211 	lockdep_assert_held(&vm->lock);
212 	xe_vm_assert_held(vm);
213 
214 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
215 		q->ops->resume(q);
216 
217 		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence,
218 					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
219 	}
220 }
221 
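/**
 * xe_vm_add_compute_exec_queue() - Add a compute exec queue to the VM
 * @vm: The VM, must be in preempt-fence mode.
 * @q: The exec queue to add.
 *
 * Creates a preempt fence for @q, installs it on the VM's dma-resv and adds
 * @q to the VM's list of preempt-fence exec queues.
 *
 * Return: 0 on success, negative error code on failure.
 */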
222 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
223 {
224 	struct drm_gpuvm_exec vm_exec = {
225 		.vm = &vm->gpuvm,
226 		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
227 		.num_fences = 1,
228 	};
229 	struct drm_exec *exec = &vm_exec.exec;
230 	struct dma_fence *pfence;
231 	int err;
232 	bool wait;
233 
234 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
235 
236 	down_write(&vm->lock);
237 	err = drm_gpuvm_exec_lock(&vm_exec);
238 	if (err)
239 		goto out_up_write;
240 
241 	pfence = xe_preempt_fence_create(q, q->lr.context,
242 					 ++q->lr.seqno);
243 	if (!pfence) {
244 		err = -ENOMEM;
245 		goto out_fini;
246 	}
247 
248 	list_add(&q->lr.link, &vm->preempt.exec_queues);
249 	++vm->preempt.num_exec_queues;
250 	q->lr.pfence = pfence;
251 
252 	down_read(&vm->userptr.notifier_lock);
253 
254 	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
255 				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
256 
257 	/*
258 	 * Check to see if a preemption on the VM or a userptr invalidation is
259 	 * in flight; if so, trigger this preempt fence to sync state with the
260 	 * other preempt fences on the VM.
261 	 */
262 	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
263 	if (wait)
264 		dma_fence_enable_sw_signaling(pfence);
265 
266 	up_read(&vm->userptr.notifier_lock);
267 
268 out_fini:
269 	drm_exec_fini(exec);
270 out_up_write:
271 	up_write(&vm->lock);
272 
273 	return err;
274 }
275 ALLOW_ERROR_INJECTION(xe_vm_add_compute_exec_queue, ERRNO);
276 
277 /**
278  * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
279  * @vm: The VM.
280  * @q: The exec_queue
281  *
282  * Note that this function might be called multiple times on the same queue.
283  */
284 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
285 {
286 	if (!xe_vm_in_preempt_fence_mode(vm))
287 		return;
288 
289 	down_write(&vm->lock);
290 	if (!list_empty(&q->lr.link)) {
291 		list_del_init(&q->lr.link);
292 		--vm->preempt.num_exec_queues;
293 	}
294 	if (q->lr.pfence) {
295 		dma_fence_enable_sw_signaling(q->lr.pfence);
296 		dma_fence_put(q->lr.pfence);
297 		q->lr.pfence = NULL;
298 	}
299 	up_write(&vm->lock);
300 }
301 
302 /**
303  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
304  * that need repinning.
305  * @vm: The VM.
306  *
307  * This function checks whether the VM has userptrs that need repinning,
308  * and provides a release-type barrier on the userptr.notifier_lock after
309  * checking.
310  *
311  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
312  */
313 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
314 {
315 	lockdep_assert_held_read(&vm->userptr.notifier_lock);
316 
317 	return (list_empty(&vm->userptr.repin_list) &&
318 		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
319 }
320 
321 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
322 
323 /**
324  * xe_vm_kill() - VM Kill
325  * @vm: The VM.
326  * @unlocked: Flag indicating the VM's dma-resv is not held
327  *
328  * Kill the VM by setting the banned flag, indicating it is no longer available
329  * for use. If in preempt fence mode, also kill all exec queues attached to the VM.
330  */
331 void xe_vm_kill(struct xe_vm *vm, bool unlocked)
332 {
333 	struct xe_exec_queue *q;
334 
335 	lockdep_assert_held(&vm->lock);
336 
337 	if (unlocked)
338 		xe_vm_lock(vm, false);
339 
340 	vm->flags |= XE_VM_FLAG_BANNED;
341 	trace_xe_vm_kill(vm);
342 
343 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
344 		q->ops->kill(q);
345 
346 	if (unlocked)
347 		xe_vm_unlock(vm);
348 
349 	/* TODO: Inform user the VM is banned */
350 }
351 
352 /**
353  * xe_vm_validate_should_retry() - Whether to retry after a validate error.
354  * @exec: The drm_exec object used for locking before validation.
355  * @err: The error returned from ttm_bo_validate().
356  * @end: A ktime_t cookie that should be set to 0 before first use and
357  * that should be reused on subsequent calls.
358  *
359  * With multiple active VMs, under memory pressure, it is possible that
360  * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
361  * Until ttm properly handles locking in such scenarios, the best thing the
362  * driver can do is retry with a timeout. Check if that is necessary, and
363  * if so unlock the drm_exec's objects while keeping the ticket to prepare
364  * for a rerun.
365  *
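 * An illustrative caller pattern, mirroring the rebind worker further down
 * in this file:
 *
 *	drm_exec_until_all_locked(&exec) {
 *		err = xe_preempt_work_begin(&exec, vm, &done);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err || done) {
 *			drm_exec_fini(&exec);
 *			if (err && xe_vm_validate_should_retry(&exec, err, &end))
 *				err = -EAGAIN;
 *			goto out_unlock_outer;
 *		}
 *	}
 *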
366  * Return: true if a retry after drm_exec_init() is recommended;
367  * false otherwise.
368  */
369 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
370 {
371 	ktime_t cur;
372 
373 	if (err != -ENOMEM)
374 		return false;
375 
376 	cur = ktime_get();
377 	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
378 	if (!ktime_before(cur, *end))
379 		return false;
380 
381 	msleep(20);
382 	return true;
383 }
384 
385 static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
386 {
387 	struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
388 	struct drm_gpuva *gpuva;
389 	int ret;
390 
391 	lockdep_assert_held(&vm->lock);
392 	drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
393 		list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
394 			       &vm->rebind_list);
395 
396 	ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
397 	if (ret)
398 		return ret;
399 
400 	vm_bo->evicted = false;
401 	return 0;
402 }
403 
404 /**
405  * xe_vm_validate_rebind() - Validate buffer objects and rebind vmas
406  * @vm: The vm for which we are rebinding.
407  * @exec: The struct drm_exec with the locked GEM objects.
408  * @num_fences: The number of fences to reserve for the operation, not
409  * including rebinds and validations.
410  *
411  * Validates all evicted gem objects and rebinds their vmas. Note that
412  * rebindings may cause evictions and hence the validation-rebind
413  * sequence is rerun until there are no more objects to validate.
414  *
415  * Return: 0 on success, negative error code on error. In particular,
416  * may return -EINTR or -ERESTARTSYS if interrupted, and -EDEADLK if
417  * the drm_exec transaction needs to be restarted.
418  */
419 int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
420 			  unsigned int num_fences)
421 {
422 	struct drm_gem_object *obj;
423 	unsigned long index;
424 	int ret;
425 
426 	do {
427 		ret = drm_gpuvm_validate(&vm->gpuvm, exec);
428 		if (ret)
429 			return ret;
430 
431 		ret = xe_vm_rebind(vm, false);
432 		if (ret)
433 			return ret;
434 	} while (!list_empty(&vm->gpuvm.evict.list));
435 
436 	drm_exec_for_each_locked_object(exec, index, obj) {
437 		ret = dma_resv_reserve_fences(obj->resv, num_fences);
438 		if (ret)
439 			return ret;
440 	}
441 
442 	return 0;
443 }
444 
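/*
 * Locking/preparation step of the rebind worker: lock the VM resv and its
 * external objects, set *done if the VM is idle or no preempt fence has
 * been triggered yet, otherwise wait for the existing preempt fences and
 * validate/rebind any evicted objects.
 */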
445 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
446 				 bool *done)
447 {
448 	int err;
449 
450 	err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0);
451 	if (err)
452 		return err;
453 
454 	if (xe_vm_is_idle(vm)) {
455 		vm->preempt.rebind_deactivated = true;
456 		*done = true;
457 		return 0;
458 	}
459 
460 	if (!preempt_fences_waiting(vm)) {
461 		*done = true;
462 		return 0;
463 	}
464 
465 	err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0);
466 	if (err)
467 		return err;
468 
469 	err = wait_for_existing_preempt_fences(vm);
470 	if (err)
471 		return err;
472 
473 	/*
474 	 * Add validation and rebinding to the locking loop since both can
475 	 * cause evictions which may require blocking dma_resv locks.
476 	 * The fence reservation here is intended for the new preempt fences
477 	 * we attach at the end of the rebind work.
478 	 */
479 	return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
480 }
481 
482 static void preempt_rebind_work_func(struct work_struct *w)
483 {
484 	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
485 	struct drm_exec exec;
486 	unsigned int fence_count = 0;
487 	LIST_HEAD(preempt_fences);
488 	ktime_t end = 0;
489 	int err = 0;
490 	long wait;
491 	int __maybe_unused tries = 0;
492 
493 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
494 	trace_xe_vm_rebind_worker_enter(vm);
495 
496 	down_write(&vm->lock);
497 
498 	if (xe_vm_is_closed_or_banned(vm)) {
499 		up_write(&vm->lock);
500 		trace_xe_vm_rebind_worker_exit(vm);
501 		return;
502 	}
503 
504 retry:
505 	if (xe_vm_userptr_check_repin(vm)) {
506 		err = xe_vm_userptr_pin(vm);
507 		if (err)
508 			goto out_unlock_outer;
509 	}
510 
511 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
512 
513 	drm_exec_until_all_locked(&exec) {
514 		bool done = false;
515 
516 		err = xe_preempt_work_begin(&exec, vm, &done);
517 		drm_exec_retry_on_contention(&exec);
518 		if (err || done) {
519 			drm_exec_fini(&exec);
520 			if (err && xe_vm_validate_should_retry(&exec, err, &end))
521 				err = -EAGAIN;
522 
523 			goto out_unlock_outer;
524 		}
525 	}
526 
527 	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
528 	if (err)
529 		goto out_unlock;
530 
531 	err = xe_vm_rebind(vm, true);
532 	if (err)
533 		goto out_unlock;
534 
535 	/* Wait on rebinds and munmap style VM unbinds */
536 	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
537 				     DMA_RESV_USAGE_KERNEL,
538 				     false, MAX_SCHEDULE_TIMEOUT);
539 	if (wait <= 0) {
540 		err = -ETIME;
541 		goto out_unlock;
542 	}
543 
544 #define retry_required(__tries, __vm) \
545 	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
546 	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
547 	__xe_vm_userptr_needs_repin(__vm))
548 
549 	down_read(&vm->userptr.notifier_lock);
550 	if (retry_required(tries, vm)) {
551 		up_read(&vm->userptr.notifier_lock);
552 		err = -EAGAIN;
553 		goto out_unlock;
554 	}
555 
556 #undef retry_required
557 
558 	spin_lock(&vm->xe->ttm.lru_lock);
559 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
560 	spin_unlock(&vm->xe->ttm.lru_lock);
561 
562 	/* Point of no return. */
563 	arm_preempt_fences(vm, &preempt_fences);
564 	resume_and_reinstall_preempt_fences(vm, &exec);
565 	up_read(&vm->userptr.notifier_lock);
566 
567 out_unlock:
568 	drm_exec_fini(&exec);
569 out_unlock_outer:
570 	if (err == -EAGAIN) {
571 		trace_xe_vm_rebind_worker_retry(vm);
572 		goto retry;
573 	}
574 
575 	if (err) {
576 		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
577 		xe_vm_kill(vm, true);
578 	}
579 	up_write(&vm->lock);
580 
581 	free_preempt_fences(&preempt_fences);
582 
583 	trace_xe_vm_rebind_worker_exit(vm);
584 }
585 
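/*
 * Common userptr invalidation path: queue the VMA for repin (unless the VM
 * is in fault mode or the VMA is already destroyed), force the preempt
 * fences to signal, wait for pending GPU work on the VM, zap the GPU
 * page-table entries in fault mode if the VMA was ever bound, and finally
 * unmap the userptr's hmm range.
 */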
586 static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
587 {
588 	struct xe_userptr *userptr = &uvma->userptr;
589 	struct xe_vma *vma = &uvma->vma;
590 	struct dma_resv_iter cursor;
591 	struct dma_fence *fence;
592 	long err;
593 
594 	/*
595 	 * Tell exec and rebind worker they need to repin and rebind this
596 	 * userptr.
597 	 */
598 	if (!xe_vm_in_fault_mode(vm) &&
599 	    !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
600 		spin_lock(&vm->userptr.invalidated_lock);
601 		list_move_tail(&userptr->invalidate_link,
602 			       &vm->userptr.invalidated);
603 		spin_unlock(&vm->userptr.invalidated_lock);
604 	}
605 
606 	/*
607 	 * Preempt fences turn into schedule disables, pipeline these.
608 	 * Note that even in fault mode, we need to wait for binds and
609 	 * unbinds to complete, and those are attached as BOOKKEEP fences
610 	 * to the vm.
611 	 */
612 	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
613 			    DMA_RESV_USAGE_BOOKKEEP);
614 	dma_resv_for_each_fence_unlocked(&cursor, fence)
615 		dma_fence_enable_sw_signaling(fence);
616 	dma_resv_iter_end(&cursor);
617 
618 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
619 				    DMA_RESV_USAGE_BOOKKEEP,
620 				    false, MAX_SCHEDULE_TIMEOUT);
621 	XE_WARN_ON(err <= 0);
622 
623 	if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
624 		err = xe_vm_invalidate_vma(vma);
625 		XE_WARN_ON(err);
626 	}
627 
628 	xe_hmm_userptr_unmap(uvma);
629 }
630 
631 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
632 				   const struct mmu_notifier_range *range,
633 				   unsigned long cur_seq)
634 {
635 	struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
636 	struct xe_vma *vma = &uvma->vma;
637 	struct xe_vm *vm = xe_vma_vm(vma);
638 
639 	xe_assert(vm->xe, xe_vma_is_userptr(vma));
640 	trace_xe_vma_userptr_invalidate(vma);
641 
642 	if (!mmu_notifier_range_blockable(range))
643 		return false;
644 
645 	vm_dbg(&xe_vma_vm(vma)->xe->drm,
646 	       "NOTIFIER: addr=0x%016llx, range=0x%016llx",
647 		xe_vma_start(vma), xe_vma_size(vma));
648 
649 	down_write(&vm->userptr.notifier_lock);
650 	mmu_interval_set_seq(mni, cur_seq);
651 
652 	__vma_userptr_invalidate(vm, uvma);
653 	up_write(&vm->userptr.notifier_lock);
654 	trace_xe_vma_userptr_invalidate_complete(vma);
655 
656 	return true;
657 }
658 
659 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
660 	.invalidate = vma_userptr_invalidate,
661 };
662 
663 #if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
664 /**
665  * xe_vma_userptr_force_invalidate() - force invalidate a userptr
666  * @uvma: The userptr vma to invalidate
667  *
668  * Perform a forced userptr invalidation for testing purposes.
669  */
670 void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
671 {
672 	struct xe_vm *vm = xe_vma_vm(&uvma->vma);
673 
674 	/* Protect against concurrent userptr pinning */
675 	lockdep_assert_held(&vm->lock);
676 	/* Protect against concurrent notifiers */
677 	lockdep_assert_held(&vm->userptr.notifier_lock);
678 	/*
679 	 * Protect against concurrent instances of this function and
680 	 * the critical exec sections
681 	 */
682 	xe_vm_assert_held(vm);
683 
684 	if (!mmu_interval_read_retry(&uvma->userptr.notifier,
685 				     uvma->userptr.notifier_seq))
686 		uvma->userptr.notifier_seq -= 2;
687 	__vma_userptr_invalidate(vm, uvma);
688 }
689 #endif
690 
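/**
 * xe_vm_userptr_pin() - Pin all invalidated userptrs on the VM
 * @vm: The VM.
 *
 * Repin all userptr vmas that have been invalidated since the last pin and
 * move them to the VM's rebind list. A vma that faults with -EFAULT has its
 * GPU mappings invalidated instead and is dropped from the rebind list.
 *
 * Return: 0 on success, negative error code on failure.
 */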
691 int xe_vm_userptr_pin(struct xe_vm *vm)
692 {
693 	struct xe_userptr_vma *uvma, *next;
694 	int err = 0;
695 
696 	xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
697 	lockdep_assert_held_write(&vm->lock);
698 
699 	/* Collect invalidated userptrs */
700 	spin_lock(&vm->userptr.invalidated_lock);
701 	xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
702 	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
703 				 userptr.invalidate_link) {
704 		list_del_init(&uvma->userptr.invalidate_link);
705 		list_add_tail(&uvma->userptr.repin_link,
706 			      &vm->userptr.repin_list);
707 	}
708 	spin_unlock(&vm->userptr.invalidated_lock);
709 
710 	/* Pin and move to bind list */
711 	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
712 				 userptr.repin_link) {
713 		err = xe_vma_userptr_pin_pages(uvma);
714 		if (err == -EFAULT) {
715 			list_del_init(&uvma->userptr.repin_link);
716 			/*
717 			 * We might have already done the pin once, but then had
718 			 * to retry before the re-bind happened due to some other
719 			 * condition in the caller. In the meantime the userptr
720 			 * got dinged by the notifier such that we need to
721 			 * revalidate here, but this time we hit the EFAULT. In
722 			 * such a case make sure we remove ourselves from the
723 			 * rebind list to avoid going down in
724 			 * flames.
725 			 */
726 			if (!list_empty(&uvma->vma.combined_links.rebind))
727 				list_del_init(&uvma->vma.combined_links.rebind);
728 
729 			/* Wait for pending binds */
730 			xe_vm_lock(vm, false);
731 			dma_resv_wait_timeout(xe_vm_resv(vm),
732 					      DMA_RESV_USAGE_BOOKKEEP,
733 					      false, MAX_SCHEDULE_TIMEOUT);
734 
735 			err = xe_vm_invalidate_vma(&uvma->vma);
736 			xe_vm_unlock(vm);
737 			if (err)
738 				break;
739 		} else {
740 			if (err)
741 				break;
742 
743 			list_del_init(&uvma->userptr.repin_link);
744 			list_move_tail(&uvma->vma.combined_links.rebind,
745 				       &vm->rebind_list);
746 		}
747 	}
748 
749 	if (err) {
750 		down_write(&vm->userptr.notifier_lock);
751 		spin_lock(&vm->userptr.invalidated_lock);
752 		list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
753 					 userptr.repin_link) {
754 			list_del_init(&uvma->userptr.repin_link);
755 			list_move_tail(&uvma->userptr.invalidate_link,
756 				       &vm->userptr.invalidated);
757 		}
758 		spin_unlock(&vm->userptr.invalidated_lock);
759 		up_write(&vm->userptr.notifier_lock);
760 	}
761 	return err;
762 }
763 
764 /**
765  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
766  * that need repinning.
767  * @vm: The VM.
768  *
769  * This function does an advisory check for whether the VM has userptrs that
770  * need repinning.
771  *
772  * Return: 0 if there are no indications of userptrs needing repinning,
773  * -EAGAIN if there are.
774  */
775 int xe_vm_userptr_check_repin(struct xe_vm *vm)
776 {
777 	return (list_empty_careful(&vm->userptr.repin_list) &&
778 		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
779 }
780 
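/*
 * Allocate the per-tile arrays of page-table update ops for @vops. On
 * allocation failure, return -ENOBUFS for an array of binds and -ENOMEM
 * otherwise.
 */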
781 static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
782 {
783 	int i;
784 
785 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) {
786 		if (!vops->pt_update_ops[i].num_ops)
787 			continue;
788 
789 		vops->pt_update_ops[i].ops =
790 			kmalloc_array(vops->pt_update_ops[i].num_ops,
791 				      sizeof(*vops->pt_update_ops[i].ops),
792 				      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
793 		if (!vops->pt_update_ops[i].ops)
794 			return array_of_binds ? -ENOBUFS : -ENOMEM;
795 	}
796 
797 	return 0;
798 }
799 ALLOW_ERROR_INJECTION(xe_vma_ops_alloc, ERRNO);
800 
801 static void xe_vma_ops_fini(struct xe_vma_ops *vops)
802 {
803 	int i;
804 
805 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
806 		kfree(vops->pt_update_ops[i].ops);
807 }
808 
809 static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask)
810 {
811 	int i;
812 
813 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
814 		if (BIT(i) & tile_mask)
815 			++vops->pt_update_ops[i].num_ops;
816 }
817 
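/*
 * Fill @op as a MAP operation that re-creates @vma's existing mapping on
 * the tiles in @tile_mask.
 */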
818 static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
819 				  u8 tile_mask)
820 {
821 	INIT_LIST_HEAD(&op->link);
822 	op->tile_mask = tile_mask;
823 	op->base.op = DRM_GPUVA_OP_MAP;
824 	op->base.map.va.addr = vma->gpuva.va.addr;
825 	op->base.map.va.range = vma->gpuva.va.range;
826 	op->base.map.gem.obj = vma->gpuva.gem.obj;
827 	op->base.map.gem.offset = vma->gpuva.gem.offset;
828 	op->map.vma = vma;
829 	op->map.immediate = true;
830 	op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
831 	op->map.is_null = xe_vma_is_null(vma);
832 }
833 
834 static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
835 				u8 tile_mask)
836 {
837 	struct xe_vma_op *op;
838 
839 	op = kzalloc(sizeof(*op), GFP_KERNEL);
840 	if (!op)
841 		return -ENOMEM;
842 
843 	xe_vm_populate_rebind(op, vma, tile_mask);
844 	list_add_tail(&op->link, &vops->list);
845 	xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
846 
847 	return 0;
848 }
849 
850 static struct dma_fence *ops_execute(struct xe_vm *vm,
851 				     struct xe_vma_ops *vops);
852 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
853 			    struct xe_exec_queue *q,
854 			    struct xe_sync_entry *syncs, u32 num_syncs);
855 
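/**
 * xe_vm_rebind() - Rebind all vmas on the VM's rebind list
 * @vm: The VM.
 * @rebind_worker: Whether this is called from the preempt rebind worker.
 *
 * Builds and executes map operations for every vma currently on
 * vm->rebind_list, re-creating their GPU page-table entries after eviction.
 *
 * Return: 0 on success, negative error code on failure.
 */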
856 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
857 {
858 	struct dma_fence *fence;
859 	struct xe_vma *vma, *next;
860 	struct xe_vma_ops vops;
861 	struct xe_vma_op *op, *next_op;
862 	int err, i;
863 
864 	lockdep_assert_held(&vm->lock);
865 	if ((xe_vm_in_lr_mode(vm) && !rebind_worker) ||
866 	    list_empty(&vm->rebind_list))
867 		return 0;
868 
869 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
870 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
871 		vops.pt_update_ops[i].wait_vm_bookkeep = true;
872 
873 	xe_vm_assert_held(vm);
874 	list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
875 		xe_assert(vm->xe, vma->tile_present);
876 
877 		if (rebind_worker)
878 			trace_xe_vma_rebind_worker(vma);
879 		else
880 			trace_xe_vma_rebind_exec(vma);
881 
882 		err = xe_vm_ops_add_rebind(&vops, vma,
883 					   vma->tile_present);
884 		if (err)
885 			goto free_ops;
886 	}
887 
888 	err = xe_vma_ops_alloc(&vops, false);
889 	if (err)
890 		goto free_ops;
891 
892 	fence = ops_execute(vm, &vops);
893 	if (IS_ERR(fence)) {
894 		err = PTR_ERR(fence);
895 	} else {
896 		dma_fence_put(fence);
897 		list_for_each_entry_safe(vma, next, &vm->rebind_list,
898 					 combined_links.rebind)
899 			list_del_init(&vma->combined_links.rebind);
900 	}
901 free_ops:
902 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
903 		list_del(&op->link);
904 		kfree(op);
905 	}
906 	xe_vma_ops_fini(&vops);
907 
908 	return err;
909 }
910 
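/**
 * xe_vma_rebind() - Rebind a single vma in fault mode
 * @vm: The VM.
 * @vma: The vma to rebind.
 * @tile_mask: Tiles on which to rebind the vma.
 *
 * Return: dma fence for the rebind to signal completion on success,
 * ERR_PTR on failure.
 */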
911 struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_mask)
912 {
913 	struct dma_fence *fence = NULL;
914 	struct xe_vma_ops vops;
915 	struct xe_vma_op *op, *next_op;
916 	struct xe_tile *tile;
917 	u8 id;
918 	int err;
919 
920 	lockdep_assert_held(&vm->lock);
921 	xe_vm_assert_held(vm);
922 	xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
923 
924 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
925 	for_each_tile(tile, vm->xe, id) {
926 		vops.pt_update_ops[id].wait_vm_bookkeep = true;
927 		vops.pt_update_ops[tile->id].q =
928 			xe_tile_migrate_exec_queue(tile);
929 	}
930 
931 	err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
932 	if (err)
933 		return ERR_PTR(err);
934 
935 	err = xe_vma_ops_alloc(&vops, false);
936 	if (err) {
937 		fence = ERR_PTR(err);
938 		goto free_ops;
939 	}
940 
941 	fence = ops_execute(vm, &vops);
942 
943 free_ops:
944 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
945 		list_del(&op->link);
946 		kfree(op);
947 	}
948 	xe_vma_ops_fini(&vops);
949 
950 	return fence;
951 }
952 
953 static void xe_vm_populate_range_rebind(struct xe_vma_op *op,
954 					struct xe_vma *vma,
955 					struct xe_svm_range *range,
956 					u8 tile_mask)
957 {
958 	INIT_LIST_HEAD(&op->link);
959 	op->tile_mask = tile_mask;
960 	op->base.op = DRM_GPUVA_OP_DRIVER;
961 	op->subop = XE_VMA_SUBOP_MAP_RANGE;
962 	op->map_range.vma = vma;
963 	op->map_range.range = range;
964 }
965 
966 static int
967 xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops,
968 			   struct xe_vma *vma,
969 			   struct xe_svm_range *range,
970 			   u8 tile_mask)
971 {
972 	struct xe_vma_op *op;
973 
974 	op = kzalloc(sizeof(*op), GFP_KERNEL);
975 	if (!op)
976 		return -ENOMEM;
977 
978 	xe_vm_populate_range_rebind(op, vma, range, tile_mask);
979 	list_add_tail(&op->link, &vops->list);
980 	xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
981 
982 	return 0;
983 }
984 
985 /**
986  * xe_vm_range_rebind() - VM range (re)bind
987  * @vm: The VM which the range belongs to.
988  * @vma: The VMA which the range belongs to.
989  * @range: SVM range to rebind.
990  * @tile_mask: Tile mask to bind the range to.
991  *
992  * (re)bind SVM range setting up GPU page tables for the range.
993  *
994  * Return: dma fence for rebind to signal completion on success, ERR_PTR on
995  * failure
996  */
997 struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
998 				     struct xe_vma *vma,
999 				     struct xe_svm_range *range,
1000 				     u8 tile_mask)
1001 {
1002 	struct dma_fence *fence = NULL;
1003 	struct xe_vma_ops vops;
1004 	struct xe_vma_op *op, *next_op;
1005 	struct xe_tile *tile;
1006 	u8 id;
1007 	int err;
1008 
1009 	lockdep_assert_held(&vm->lock);
1010 	xe_vm_assert_held(vm);
1011 	xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1012 	xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
1013 
1014 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
1015 	for_each_tile(tile, vm->xe, id) {
1016 		vops.pt_update_ops[id].wait_vm_bookkeep = true;
1017 		vops.pt_update_ops[tile->id].q =
1018 			xe_tile_migrate_exec_queue(tile);
1019 	}
1020 
1021 	err = xe_vm_ops_add_range_rebind(&vops, vma, range, tile_mask);
1022 	if (err)
1023 		return ERR_PTR(err);
1024 
1025 	err = xe_vma_ops_alloc(&vops, false);
1026 	if (err) {
1027 		fence = ERR_PTR(err);
1028 		goto free_ops;
1029 	}
1030 
1031 	fence = ops_execute(vm, &vops);
1032 
1033 free_ops:
1034 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
1035 		list_del(&op->link);
1036 		kfree(op);
1037 	}
1038 	xe_vma_ops_fini(&vops);
1039 
1040 	return fence;
1041 }
1042 
1043 static void xe_vm_populate_range_unbind(struct xe_vma_op *op,
1044 					struct xe_svm_range *range)
1045 {
1046 	INIT_LIST_HEAD(&op->link);
1047 	op->tile_mask = range->tile_present;
1048 	op->base.op = DRM_GPUVA_OP_DRIVER;
1049 	op->subop = XE_VMA_SUBOP_UNMAP_RANGE;
1050 	op->unmap_range.range = range;
1051 }
1052 
1053 static int
1054 xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops,
1055 			   struct xe_svm_range *range)
1056 {
1057 	struct xe_vma_op *op;
1058 
1059 	op = kzalloc(sizeof(*op), GFP_KERNEL);
1060 	if (!op)
1061 		return -ENOMEM;
1062 
1063 	xe_vm_populate_range_unbind(op, range);
1064 	list_add_tail(&op->link, &vops->list);
1065 	xe_vma_ops_incr_pt_update_ops(vops, range->tile_present);
1066 
1067 	return 0;
1068 }
1069 
1070 /**
1071  * xe_vm_range_unbind() - VM range unbind
1072  * @vm: The VM which the range belongs to.
1073  * @range: SVM range to unbind.
1074  *
1075  * Unbind SVM range removing the GPU page tables for the range.
1076  *
1077  * Return: dma fence for unbind to signal completion on success, ERR_PTR on
1078  * failure
1079  */
1080 struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
1081 				     struct xe_svm_range *range)
1082 {
1083 	struct dma_fence *fence = NULL;
1084 	struct xe_vma_ops vops;
1085 	struct xe_vma_op *op, *next_op;
1086 	struct xe_tile *tile;
1087 	u8 id;
1088 	int err;
1089 
1090 	lockdep_assert_held(&vm->lock);
1091 	xe_vm_assert_held(vm);
1092 	xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1093 
1094 	if (!range->tile_present)
1095 		return dma_fence_get_stub();
1096 
1097 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
1098 	for_each_tile(tile, vm->xe, id) {
1099 		vops.pt_update_ops[id].wait_vm_bookkeep = true;
1100 		vops.pt_update_ops[tile->id].q =
1101 			xe_tile_migrate_exec_queue(tile);
1102 	}
1103 
1104 	err = xe_vm_ops_add_range_unbind(&vops, range);
1105 	if (err)
1106 		return ERR_PTR(err);
1107 
1108 	err = xe_vma_ops_alloc(&vops, false);
1109 	if (err) {
1110 		fence = ERR_PTR(err);
1111 		goto free_ops;
1112 	}
1113 
1114 	fence = ops_execute(vm, &vops);
1115 
1116 free_ops:
1117 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
1118 		list_del(&op->link);
1119 		kfree(op);
1120 	}
1121 	xe_vma_ops_fini(&vops);
1122 
1123 	return fence;
1124 }
1125 
1126 static void xe_vma_free(struct xe_vma *vma)
1127 {
1128 	if (xe_vma_is_userptr(vma))
1129 		kfree(to_userptr_vma(vma));
1130 	else
1131 		kfree(vma);
1132 }
1133 
1134 #define VMA_CREATE_FLAG_READ_ONLY		BIT(0)
1135 #define VMA_CREATE_FLAG_IS_NULL			BIT(1)
1136 #define VMA_CREATE_FLAG_DUMPABLE		BIT(2)
1137 #define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR	BIT(3)
1138 
1139 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
1140 				    struct xe_bo *bo,
1141 				    u64 bo_offset_or_userptr,
1142 				    u64 start, u64 end,
1143 				    u16 pat_index, unsigned int flags)
1144 {
1145 	struct xe_vma *vma;
1146 	struct xe_tile *tile;
1147 	u8 id;
1148 	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
1149 	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
1150 	bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
1151 	bool is_cpu_addr_mirror =
1152 		(flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR);
1153 
1154 	xe_assert(vm->xe, start < end);
1155 	xe_assert(vm->xe, end < vm->size);
1156 
1157 	/*
1158 	 * Allocate and ensure that the return value of xe_vma_is_userptr()
1159 	 * matches what was allocated.
1160 	 */
1161 	if (!bo && !is_null && !is_cpu_addr_mirror) {
1162 		struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
1163 
1164 		if (!uvma)
1165 			return ERR_PTR(-ENOMEM);
1166 
1167 		vma = &uvma->vma;
1168 	} else {
1169 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
1170 		if (!vma)
1171 			return ERR_PTR(-ENOMEM);
1172 
1173 		if (is_cpu_addr_mirror)
1174 			vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR;
1175 		if (is_null)
1176 			vma->gpuva.flags |= DRM_GPUVA_SPARSE;
1177 		if (bo)
1178 			vma->gpuva.gem.obj = &bo->ttm.base;
1179 	}
1180 
1181 	INIT_LIST_HEAD(&vma->combined_links.rebind);
1182 
1183 	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
1184 	vma->gpuva.vm = &vm->gpuvm;
1185 	vma->gpuva.va.addr = start;
1186 	vma->gpuva.va.range = end - start + 1;
1187 	if (read_only)
1188 		vma->gpuva.flags |= XE_VMA_READ_ONLY;
1189 	if (dumpable)
1190 		vma->gpuva.flags |= XE_VMA_DUMPABLE;
1191 
1192 	for_each_tile(tile, vm->xe, id)
1193 		vma->tile_mask |= 0x1 << id;
1194 
1195 	if (vm->xe->info.has_atomic_enable_pte_bit)
1196 		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
1197 
1198 	vma->pat_index = pat_index;
1199 
1200 	if (bo) {
1201 		struct drm_gpuvm_bo *vm_bo;
1202 
1203 		xe_bo_assert_held(bo);
1204 
1205 		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
1206 		if (IS_ERR(vm_bo)) {
1207 			xe_vma_free(vma);
1208 			return ERR_CAST(vm_bo);
1209 		}
1210 
1211 		drm_gpuvm_bo_extobj_add(vm_bo);
1212 		drm_gem_object_get(&bo->ttm.base);
1213 		vma->gpuva.gem.offset = bo_offset_or_userptr;
1214 		drm_gpuva_link(&vma->gpuva, vm_bo);
1215 		drm_gpuvm_bo_put(vm_bo);
1216 	} else /* userptr or null */ {
1217 		if (!is_null && !is_cpu_addr_mirror) {
1218 			struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
1219 			u64 size = end - start + 1;
1220 			int err;
1221 
1222 			INIT_LIST_HEAD(&userptr->invalidate_link);
1223 			INIT_LIST_HEAD(&userptr->repin_link);
1224 			vma->gpuva.gem.offset = bo_offset_or_userptr;
1225 			mutex_init(&userptr->unmap_mutex);
1226 
1227 			err = mmu_interval_notifier_insert(&userptr->notifier,
1228 							   current->mm,
1229 							   xe_vma_userptr(vma), size,
1230 							   &vma_userptr_notifier_ops);
1231 			if (err) {
1232 				xe_vma_free(vma);
1233 				return ERR_PTR(err);
1234 			}
1235 
1236 			userptr->notifier_seq = LONG_MAX;
1237 		}
1238 
1239 		xe_vm_get(vm);
1240 	}
1241 
1242 	return vma;
1243 }
1244 
1245 static void xe_vma_destroy_late(struct xe_vma *vma)
1246 {
1247 	struct xe_vm *vm = xe_vma_vm(vma);
1248 
1249 	if (vma->ufence) {
1250 		xe_sync_ufence_put(vma->ufence);
1251 		vma->ufence = NULL;
1252 	}
1253 
1254 	if (xe_vma_is_userptr(vma)) {
1255 		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
1256 		struct xe_userptr *userptr = &uvma->userptr;
1257 
1258 		if (userptr->sg)
1259 			xe_hmm_userptr_free_sg(uvma);
1260 
1261 		/*
1262 		 * Since userptr pages are not pinned, we can't remove
1263 		 * the notifier until we're sure the GPU is not accessing
1264 		 * them anymore
1265 		 */
1266 		mmu_interval_notifier_remove(&userptr->notifier);
1267 		mutex_destroy(&userptr->unmap_mutex);
1268 		xe_vm_put(vm);
1269 	} else if (xe_vma_is_null(vma) || xe_vma_is_cpu_addr_mirror(vma)) {
1270 		xe_vm_put(vm);
1271 	} else {
1272 		xe_bo_put(xe_vma_bo(vma));
1273 	}
1274 
1275 	xe_vma_free(vma);
1276 }
1277 
1278 static void vma_destroy_work_func(struct work_struct *w)
1279 {
1280 	struct xe_vma *vma =
1281 		container_of(w, struct xe_vma, destroy_work);
1282 
1283 	xe_vma_destroy_late(vma);
1284 }
1285 
1286 static void vma_destroy_cb(struct dma_fence *fence,
1287 			   struct dma_fence_cb *cb)
1288 {
1289 	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1290 
1291 	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1292 	queue_work(system_unbound_wq, &vma->destroy_work);
1293 }
1294 
1295 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1296 {
1297 	struct xe_vm *vm = xe_vma_vm(vma);
1298 
1299 	lockdep_assert_held_write(&vm->lock);
1300 	xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
1301 
1302 	if (xe_vma_is_userptr(vma)) {
1303 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
1304 
1305 		spin_lock(&vm->userptr.invalidated_lock);
1306 		xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link));
1307 		list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
1308 		spin_unlock(&vm->userptr.invalidated_lock);
1309 	} else if (!xe_vma_is_null(vma) && !xe_vma_is_cpu_addr_mirror(vma)) {
1310 		xe_bo_assert_held(xe_vma_bo(vma));
1311 
1312 		drm_gpuva_unlink(&vma->gpuva);
1313 	}
1314 
1315 	xe_vm_assert_held(vm);
1316 	if (fence) {
1317 		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1318 						 vma_destroy_cb);
1319 
1320 		if (ret) {
1321 			XE_WARN_ON(ret != -ENOENT);
1322 			xe_vma_destroy_late(vma);
1323 		}
1324 	} else {
1325 		xe_vma_destroy_late(vma);
1326 	}
1327 }
1328 
1329 /**
1330  * xe_vm_lock_vma() - drm_exec utility to lock a vma
1331  * @exec: The drm_exec object we're currently locking for.
1332  * @vma: The vma for which we want to lock the vm resv and any attached
1333  * object's resv.
1334  *
1335  * Return: 0 on success, negative error code on error. In particular
1336  * may return -EDEADLK on WW transaction contention and -EINTR if
1337  * an interruptible wait is terminated by a signal.
1338  */
1339 int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
1340 {
1341 	struct xe_vm *vm = xe_vma_vm(vma);
1342 	struct xe_bo *bo = xe_vma_bo(vma);
1343 	int err;
1344 
1345 	XE_WARN_ON(!vm);
1346 
1347 	err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
1348 	if (!err && bo && !bo->vm)
1349 		err = drm_exec_lock_obj(exec, &bo->ttm.base);
1350 
1351 	return err;
1352 }
1353 
1354 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1355 {
1356 	struct drm_exec exec;
1357 	int err;
1358 
1359 	drm_exec_init(&exec, 0, 0);
1360 	drm_exec_until_all_locked(&exec) {
1361 		err = xe_vm_lock_vma(&exec, vma);
1362 		drm_exec_retry_on_contention(&exec);
1363 		if (XE_WARN_ON(err))
1364 			break;
1365 	}
1366 
1367 	xe_vma_destroy(vma, NULL);
1368 
1369 	drm_exec_fini(&exec);
1370 }
1371 
1372 struct xe_vma *
1373 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1374 {
1375 	struct drm_gpuva *gpuva;
1376 
1377 	lockdep_assert_held(&vm->lock);
1378 
1379 	if (xe_vm_is_closed_or_banned(vm))
1380 		return NULL;
1381 
1382 	xe_assert(vm->xe, start + range <= vm->size);
1383 
1384 	gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1385 
1386 	return gpuva ? gpuva_to_vma(gpuva) : NULL;
1387 }
1388 
1389 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1390 {
1391 	int err;
1392 
1393 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1394 	lockdep_assert_held(&vm->lock);
1395 
1396 	mutex_lock(&vm->snap_mutex);
1397 	err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1398 	mutex_unlock(&vm->snap_mutex);
1399 	XE_WARN_ON(err);	/* Shouldn't be possible */
1400 
1401 	return err;
1402 }
1403 
1404 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1405 {
1406 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1407 	lockdep_assert_held(&vm->lock);
1408 
1409 	mutex_lock(&vm->snap_mutex);
1410 	drm_gpuva_remove(&vma->gpuva);
1411 	mutex_unlock(&vm->snap_mutex);
1412 	if (vm->usm.last_fault_vma == vma)
1413 		vm->usm.last_fault_vma = NULL;
1414 }
1415 
1416 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1417 {
1418 	struct xe_vma_op *op;
1419 
1420 	op = kzalloc(sizeof(*op), GFP_KERNEL);
1421 
1422 	if (unlikely(!op))
1423 		return NULL;
1424 
1425 	return &op->base;
1426 }
1427 
1428 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1429 
1430 static const struct drm_gpuvm_ops gpuvm_ops = {
1431 	.op_alloc = xe_vm_op_alloc,
1432 	.vm_bo_validate = xe_gpuvm_validate,
1433 	.vm_free = xe_vm_free,
1434 };
1435 
1436 static u64 pde_encode_pat_index(u16 pat_index)
1437 {
1438 	u64 pte = 0;
1439 
1440 	if (pat_index & BIT(0))
1441 		pte |= XE_PPGTT_PTE_PAT0;
1442 
1443 	if (pat_index & BIT(1))
1444 		pte |= XE_PPGTT_PTE_PAT1;
1445 
1446 	return pte;
1447 }
1448 
1449 static u64 pte_encode_pat_index(u16 pat_index, u32 pt_level)
1450 {
1451 	u64 pte = 0;
1452 
1453 	if (pat_index & BIT(0))
1454 		pte |= XE_PPGTT_PTE_PAT0;
1455 
1456 	if (pat_index & BIT(1))
1457 		pte |= XE_PPGTT_PTE_PAT1;
1458 
1459 	if (pat_index & BIT(2)) {
1460 		if (pt_level)
1461 			pte |= XE_PPGTT_PDE_PDPE_PAT2;
1462 		else
1463 			pte |= XE_PPGTT_PTE_PAT2;
1464 	}
1465 
1466 	if (pat_index & BIT(3))
1467 		pte |= XELPG_PPGTT_PTE_PAT3;
1468 
1469 	if (pat_index & (BIT(4)))
1470 		pte |= XE2_PPGTT_PTE_PAT4;
1471 
1472 	return pte;
1473 }
1474 
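/* Encode the page-size bits for huge PTEs: 2M at level 1, 1G at level 2. */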
1475 static u64 pte_encode_ps(u32 pt_level)
1476 {
1477 	XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
1478 
1479 	if (pt_level == 1)
1480 		return XE_PDE_PS_2M;
1481 	else if (pt_level == 2)
1482 		return XE_PDPE_PS_1G;
1483 
1484 	return 0;
1485 }
1486 
1487 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
1488 			      const u16 pat_index)
1489 {
1490 	u64 pde;
1491 
1492 	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1493 	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1494 	pde |= pde_encode_pat_index(pat_index);
1495 
1496 	return pde;
1497 }
1498 
1499 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1500 			      u16 pat_index, u32 pt_level)
1501 {
1502 	u64 pte;
1503 
1504 	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1505 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1506 	pte |= pte_encode_pat_index(pat_index, pt_level);
1507 	pte |= pte_encode_ps(pt_level);
1508 
1509 	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1510 		pte |= XE_PPGTT_PTE_DM;
1511 
1512 	return pte;
1513 }
1514 
1515 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1516 			       u16 pat_index, u32 pt_level)
1517 {
1518 	pte |= XE_PAGE_PRESENT;
1519 
1520 	if (likely(!xe_vma_read_only(vma)))
1521 		pte |= XE_PAGE_RW;
1522 
1523 	pte |= pte_encode_pat_index(pat_index, pt_level);
1524 	pte |= pte_encode_ps(pt_level);
1525 
1526 	if (unlikely(xe_vma_is_null(vma)))
1527 		pte |= XE_PTE_NULL;
1528 
1529 	return pte;
1530 }
1531 
1532 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1533 				u16 pat_index,
1534 				u32 pt_level, bool devmem, u64 flags)
1535 {
1536 	u64 pte;
1537 
1538 	/* Avoid passing random bits directly as flags */
1539 	xe_assert(xe, !(flags & ~XE_PTE_PS64));
1540 
1541 	pte = addr;
1542 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1543 	pte |= pte_encode_pat_index(pat_index, pt_level);
1544 	pte |= pte_encode_ps(pt_level);
1545 
1546 	if (devmem)
1547 		pte |= XE_PPGTT_PTE_DM;
1548 
1549 	pte |= flags;
1550 
1551 	return pte;
1552 }
1553 
1554 static const struct xe_pt_ops xelp_pt_ops = {
1555 	.pte_encode_bo = xelp_pte_encode_bo,
1556 	.pte_encode_vma = xelp_pte_encode_vma,
1557 	.pte_encode_addr = xelp_pte_encode_addr,
1558 	.pde_encode_bo = xelp_pde_encode_bo,
1559 };
1560 
1561 static void vm_destroy_work_func(struct work_struct *w);
1562 
1563 /**
1564  * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
1565  * given tile and vm.
1566  * @xe: xe device.
1567  * @tile: tile to set up for.
1568  * @vm: vm to set up for.
1569  *
1570  * Sets up a pagetable tree with one page-table per level and a single
1571  * leaf PTE. All pagetable entries point to the single page-table or,
1572  * for MAX_HUGEPTE_LEVEL, a NULL huge PTE returning 0 on read and
1573  * for MAX_HUGEPTE_LEVEL, a NULL huge PTE that returns 0 on reads while
1574  * writes become NOPs.
1575  * Return: 0 on success, negative error code on error.
1576  */
1577 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1578 				struct xe_vm *vm)
1579 {
1580 	u8 id = tile->id;
1581 	int i;
1582 
1583 	for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
1584 		vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
1585 		if (IS_ERR(vm->scratch_pt[id][i]))
1586 			return PTR_ERR(vm->scratch_pt[id][i]);
1587 
1588 		xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
1589 	}
1590 
1591 	return 0;
1592 }
1593 ALLOW_ERROR_INJECTION(xe_vm_create_scratch, ERRNO);
1594 
1595 static void xe_vm_free_scratch(struct xe_vm *vm)
1596 {
1597 	struct xe_tile *tile;
1598 	u8 id;
1599 
1600 	if (!xe_vm_has_scratch(vm))
1601 		return;
1602 
1603 	for_each_tile(tile, vm->xe, id) {
1604 		u32 i;
1605 
1606 		if (!vm->pt_root[id])
1607 			continue;
1608 
1609 		for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
1610 			if (vm->scratch_pt[id][i])
1611 				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
1612 	}
1613 }
1614 
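/**
 * xe_vm_create() - Create a VM
 * @xe: The xe device.
 * @flags: XE_VM_FLAG_* flags selecting the VM mode.
 *
 * Allocates a VM, sets up its per-tile page-table roots (and scratch tables
 * when requested), and creates the per-tile bind exec queues for
 * non-migration VMs.
 *
 * Return: Pointer to the new VM on success, ERR_PTR on failure.
 */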
1615 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1616 {
1617 	struct drm_gem_object *vm_resv_obj;
1618 	struct xe_vm *vm;
1619 	int err, number_tiles = 0;
1620 	struct xe_tile *tile;
1621 	u8 id;
1622 
1623 	/*
1624 	 * Since the GSCCS is not user-accessible, we don't expect a GSC VM to
1625 	 * ever be in faulting mode.
1626 	 */
1627 	xe_assert(xe, !((flags & XE_VM_FLAG_GSC) && (flags & XE_VM_FLAG_FAULT_MODE)));
1628 
1629 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1630 	if (!vm)
1631 		return ERR_PTR(-ENOMEM);
1632 
1633 	vm->xe = xe;
1634 
1635 	vm->size = 1ull << xe->info.va_bits;
1636 
1637 	vm->flags = flags;
1638 
1639 	/*
1640 	 * GSC VMs are kernel-owned, only used for PXP ops and can sometimes be
1641 	 * manipulated under the PXP mutex. However, the PXP mutex can be taken
1642 	 * under a user-VM lock when the PXP session is started at exec_queue
1643 	 * creation time. Those are different VMs and therefore there is no risk
1644 	 * of deadlock, but we need to tell lockdep that this is the case or it
1645 	 * will print a warning.
1646 	 */
1647 	if (flags & XE_VM_FLAG_GSC) {
1648 		static struct lock_class_key gsc_vm_key;
1649 
1650 		__init_rwsem(&vm->lock, "gsc_vm", &gsc_vm_key);
1651 	} else {
1652 		init_rwsem(&vm->lock);
1653 	}
1654 	mutex_init(&vm->snap_mutex);
1655 
1656 	INIT_LIST_HEAD(&vm->rebind_list);
1657 
1658 	INIT_LIST_HEAD(&vm->userptr.repin_list);
1659 	INIT_LIST_HEAD(&vm->userptr.invalidated);
1660 	init_rwsem(&vm->userptr.notifier_lock);
1661 	spin_lock_init(&vm->userptr.invalidated_lock);
1662 
1663 	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
1664 
1665 	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1666 
1667 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
1668 	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
1669 
1670 	for_each_tile(tile, xe, id)
1671 		xe_range_fence_tree_init(&vm->rftree[id]);
1672 
1673 	vm->pt_ops = &xelp_pt_ops;
1674 
1675 	/*
1676 	 * Long-running workloads are not protected by the scheduler references.
1677 	 * By design, run_job for long-running workloads returns NULL and the
1678 	 * scheduler drops all references to it, hence protecting the VM
1679 	 * in this case is necessary.
1680 	 */
1681 	if (flags & XE_VM_FLAG_LR_MODE)
1682 		xe_pm_runtime_get_noresume(xe);
1683 
1684 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1685 	if (!vm_resv_obj) {
1686 		err = -ENOMEM;
1687 		goto err_no_resv;
1688 	}
1689 
1690 	drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1691 		       vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
1692 
1693 	drm_gem_object_put(vm_resv_obj);
1694 
1695 	err = xe_vm_lock(vm, true);
1696 	if (err)
1697 		goto err_close;
1698 
1699 	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1700 		vm->flags |= XE_VM_FLAG_64K;
1701 
1702 	for_each_tile(tile, xe, id) {
1703 		if (flags & XE_VM_FLAG_MIGRATION &&
1704 		    tile->id != XE_VM_FLAG_TILE_ID(flags))
1705 			continue;
1706 
1707 		vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1708 		if (IS_ERR(vm->pt_root[id])) {
1709 			err = PTR_ERR(vm->pt_root[id]);
1710 			vm->pt_root[id] = NULL;
1711 			goto err_unlock_close;
1712 		}
1713 	}
1714 
1715 	if (xe_vm_has_scratch(vm)) {
1716 		for_each_tile(tile, xe, id) {
1717 			if (!vm->pt_root[id])
1718 				continue;
1719 
1720 			err = xe_vm_create_scratch(xe, tile, vm);
1721 			if (err)
1722 				goto err_unlock_close;
1723 		}
1724 		vm->batch_invalidate_tlb = true;
1725 	}
1726 
1727 	if (vm->flags & XE_VM_FLAG_LR_MODE) {
1728 		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1729 		vm->batch_invalidate_tlb = false;
1730 	}
1731 
1732 	/* Fill pt_root after allocating scratch tables */
1733 	for_each_tile(tile, xe, id) {
1734 		if (!vm->pt_root[id])
1735 			continue;
1736 
1737 		xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1738 	}
1739 	xe_vm_unlock(vm);
1740 
1741 	/* Kernel migration VM shouldn't have a circular loop.. */
1742 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
1743 		for_each_tile(tile, xe, id) {
1744 			struct xe_exec_queue *q;
1745 			u32 create_flags = EXEC_QUEUE_FLAG_VM;
1746 
1747 			if (!vm->pt_root[id])
1748 				continue;
1749 
1750 			q = xe_exec_queue_create_bind(xe, tile, create_flags, 0);
1751 			if (IS_ERR(q)) {
1752 				err = PTR_ERR(q);
1753 				goto err_close;
1754 			}
1755 			vm->q[id] = q;
1756 			number_tiles++;
1757 		}
1758 	}
1759 
1760 	if (flags & XE_VM_FLAG_FAULT_MODE) {
1761 		err = xe_svm_init(vm);
1762 		if (err)
1763 			goto err_close;
1764 	}
1765 
1766 	if (number_tiles > 1)
1767 		vm->composite_fence_ctx = dma_fence_context_alloc(1);
1768 
1769 	trace_xe_vm_create(vm);
1770 
1771 	return vm;
1772 
1773 err_unlock_close:
1774 	xe_vm_unlock(vm);
1775 err_close:
1776 	xe_vm_close_and_put(vm);
1777 	return ERR_PTR(err);
1778 
1779 err_no_resv:
1780 	mutex_destroy(&vm->snap_mutex);
1781 	for_each_tile(tile, xe, id)
1782 		xe_range_fence_tree_fini(&vm->rftree[id]);
1783 	ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
1784 	kfree(vm);
1785 	if (flags & XE_VM_FLAG_LR_MODE)
1786 		xe_pm_runtime_put(xe);
1787 	return ERR_PTR(err);
1788 }
1789 
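/*
 * Mark the VM closed by zeroing its size, wait for pending binds and, while
 * the device is still present, clear the page-table roots and invalidate
 * the TLBs.
 */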
1790 static void xe_vm_close(struct xe_vm *vm)
1791 {
1792 	struct xe_device *xe = vm->xe;
1793 	bool bound;
1794 	int idx;
1795 
1796 	bound = drm_dev_enter(&xe->drm, &idx);
1797 
1798 	down_write(&vm->lock);
1799 	if (xe_vm_in_fault_mode(vm))
1800 		xe_svm_notifier_lock(vm);
1801 
1802 	vm->size = 0;
1803 
1804 	if (!((vm->flags & XE_VM_FLAG_MIGRATION))) {
1805 		struct xe_tile *tile;
1806 		struct xe_gt *gt;
1807 		u8 id;
1808 
1809 		/* Wait for pending binds */
1810 		dma_resv_wait_timeout(xe_vm_resv(vm),
1811 				      DMA_RESV_USAGE_BOOKKEEP,
1812 				      false, MAX_SCHEDULE_TIMEOUT);
1813 
1814 		if (bound) {
1815 			for_each_tile(tile, xe, id)
1816 				if (vm->pt_root[id])
1817 					xe_pt_clear(xe, vm->pt_root[id]);
1818 
1819 			for_each_gt(gt, xe, id)
1820 				xe_gt_tlb_invalidation_vm(gt, vm);
1821 		}
1822 	}
1823 
1824 	if (xe_vm_in_fault_mode(vm))
1825 		xe_svm_notifier_unlock(vm);
1826 	up_write(&vm->lock);
1827 
1828 	if (bound)
1829 		drm_dev_exit(idx);
1830 }
1831 
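/**
 * xe_vm_close_and_put() - Close a VM and drop the creation reference
 * @vm: The VM.
 *
 * Closes the VM, kills its bind exec queues, destroys all vmas, scratch
 * tables and page-table roots, releases the VM's ASID and finally drops the
 * reference.
 */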
1832 void xe_vm_close_and_put(struct xe_vm *vm)
1833 {
1834 	LIST_HEAD(contested);
1835 	struct xe_device *xe = vm->xe;
1836 	struct xe_tile *tile;
1837 	struct xe_vma *vma, *next_vma;
1838 	struct drm_gpuva *gpuva, *next;
1839 	u8 id;
1840 
1841 	xe_assert(xe, !vm->preempt.num_exec_queues);
1842 
1843 	xe_vm_close(vm);
1844 	if (xe_vm_in_preempt_fence_mode(vm))
1845 		flush_work(&vm->preempt.rebind_work);
1846 	if (xe_vm_in_fault_mode(vm))
1847 		xe_svm_close(vm);
1848 
1849 	down_write(&vm->lock);
1850 	for_each_tile(tile, xe, id) {
1851 		if (vm->q[id])
1852 			xe_exec_queue_last_fence_put(vm->q[id], vm);
1853 	}
1854 	up_write(&vm->lock);
1855 
1856 	for_each_tile(tile, xe, id) {
1857 		if (vm->q[id]) {
1858 			xe_exec_queue_kill(vm->q[id]);
1859 			xe_exec_queue_put(vm->q[id]);
1860 			vm->q[id] = NULL;
1861 		}
1862 	}
1863 
1864 	down_write(&vm->lock);
1865 	xe_vm_lock(vm, false);
1866 	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1867 		vma = gpuva_to_vma(gpuva);
1868 
1869 		if (xe_vma_has_no_bo(vma)) {
1870 			down_read(&vm->userptr.notifier_lock);
1871 			vma->gpuva.flags |= XE_VMA_DESTROYED;
1872 			up_read(&vm->userptr.notifier_lock);
1873 		}
1874 
1875 		xe_vm_remove_vma(vm, vma);
1876 
1877 		/* easy case, remove from VMA? */
1878 		if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1879 			list_del_init(&vma->combined_links.rebind);
1880 			xe_vma_destroy(vma, NULL);
1881 			continue;
1882 		}
1883 
1884 		list_move_tail(&vma->combined_links.destroy, &contested);
1885 		vma->gpuva.flags |= XE_VMA_DESTROYED;
1886 	}
1887 
1888 	/*
1889 	 * All vm operations will add shared fences to resv.
1890 	 * The only exception is eviction for a shared object,
1891 	 * but even so, the unbind when evicted would still
1892 	 * install a fence to resv. Hence it's safe to
1893 	 * destroy the pagetables immediately.
1894 	 */
1895 	xe_vm_free_scratch(vm);
1896 
1897 	for_each_tile(tile, xe, id) {
1898 		if (vm->pt_root[id]) {
1899 			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1900 			vm->pt_root[id] = NULL;
1901 		}
1902 	}
1903 	xe_vm_unlock(vm);
1904 
1905 	/*
1906 	 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL
1907 	 * Since we hold a refcount to the bo, we can remove and free
1908 	 * the members safely without locking.
1909 	 */
1910 	list_for_each_entry_safe(vma, next_vma, &contested,
1911 				 combined_links.destroy) {
1912 		list_del_init(&vma->combined_links.destroy);
1913 		xe_vma_destroy_unlocked(vma);
1914 	}
1915 
1916 	if (xe_vm_in_fault_mode(vm))
1917 		xe_svm_fini(vm);
1918 
1919 	up_write(&vm->lock);
1920 
1921 	down_write(&xe->usm.lock);
1922 	if (vm->usm.asid) {
1923 		void *lookup;
1924 
1925 		xe_assert(xe, xe->info.has_asid);
1926 		xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
1927 
1928 		lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1929 		xe_assert(xe, lookup == vm);
1930 	}
1931 	up_write(&xe->usm.lock);
1932 
1933 	for_each_tile(tile, xe, id)
1934 		xe_range_fence_tree_fini(&vm->rftree[id]);
1935 
1936 	xe_vm_put(vm);
1937 }
1938 
1939 static void vm_destroy_work_func(struct work_struct *w)
1940 {
1941 	struct xe_vm *vm =
1942 		container_of(w, struct xe_vm, destroy_work);
1943 	struct xe_device *xe = vm->xe;
1944 	struct xe_tile *tile;
1945 	u8 id;
1946 
1947 	/* xe_vm_close_and_put was not called? */
1948 	xe_assert(xe, !vm->size);
1949 
1950 	if (xe_vm_in_preempt_fence_mode(vm))
1951 		flush_work(&vm->preempt.rebind_work);
1952 
1953 	mutex_destroy(&vm->snap_mutex);
1954 
1955 	if (vm->flags & XE_VM_FLAG_LR_MODE)
1956 		xe_pm_runtime_put(xe);
1957 
1958 	for_each_tile(tile, xe, id)
1959 		XE_WARN_ON(vm->pt_root[id]);
1960 
1961 	trace_xe_vm_free(vm);
1962 
1963 	ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
1964 
1965 	if (vm->xef)
1966 		xe_file_put(vm->xef);
1967 
1968 	kfree(vm);
1969 }
1970 
1971 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1972 {
1973 	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1974 
1975 	/* To destroy the VM we need to be able to sleep */
1976 	queue_work(system_unbound_wq, &vm->destroy_work);
1977 }
1978 
1979 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1980 {
1981 	struct xe_vm *vm;
1982 
1983 	mutex_lock(&xef->vm.lock);
1984 	vm = xa_load(&xef->vm.xa, id);
1985 	if (vm)
1986 		xe_vm_get(vm);
1987 	mutex_unlock(&xef->vm.lock);
1988 
1989 	return vm;
1990 }
1991 
1992 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1993 {
1994 	return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
1995 					 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
1996 }
1997 
1998 static struct xe_exec_queue *
1999 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
2000 {
2001 	return q ? q : vm->q[0];
2002 }
2003 
2004 static struct xe_user_fence *
2005 find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
2006 {
2007 	unsigned int i;
2008 
2009 	for (i = 0; i < num_syncs; i++) {
2010 		struct xe_sync_entry *e = &syncs[i];
2011 
2012 		if (xe_sync_is_ufence(e))
2013 			return xe_sync_ufence_get(e);
2014 	}
2015 
2016 	return NULL;
2017 }
2018 
2019 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
2020 				    DRM_XE_VM_CREATE_FLAG_LR_MODE | \
2021 				    DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
2022 
2023 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
2024 		       struct drm_file *file)
2025 {
2026 	struct xe_device *xe = to_xe_device(dev);
2027 	struct xe_file *xef = to_xe_file(file);
2028 	struct drm_xe_vm_create *args = data;
2029 	struct xe_tile *tile;
2030 	struct xe_vm *vm;
2031 	u32 id, asid;
2032 	int err;
2033 	u32 flags = 0;
2034 
2035 	if (XE_IOCTL_DBG(xe, args->extensions))
2036 		return -EINVAL;
2037 
2038 	if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
2039 		args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
2040 
2041 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
2042 			 !xe->info.has_usm))
2043 		return -EINVAL;
2044 
2045 	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2046 		return -EINVAL;
2047 
2048 	if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
2049 		return -EINVAL;
2050 
2051 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
2052 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
2053 			 !xe->info.needs_scratch))
2054 		return -EINVAL;
2055 
2056 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
2057 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
2058 		return -EINVAL;
2059 
2060 	if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
2061 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
2062 	if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
2063 		flags |= XE_VM_FLAG_LR_MODE;
2064 	if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
2065 		flags |= XE_VM_FLAG_FAULT_MODE;
2066 
2067 	vm = xe_vm_create(xe, flags);
2068 	if (IS_ERR(vm))
2069 		return PTR_ERR(vm);
2070 
2071 	if (xe->info.has_asid) {
2072 		down_write(&xe->usm.lock);
2073 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
2074 				      XA_LIMIT(1, XE_MAX_ASID - 1),
2075 				      &xe->usm.next_asid, GFP_KERNEL);
2076 		up_write(&xe->usm.lock);
2077 		if (err < 0)
2078 			goto err_close_and_put;
2079 
2080 		vm->usm.asid = asid;
2081 	}
2082 
2083 	vm->xef = xe_file_get(xef);
2084 
2085 	/* Record BO memory for VM pagetable created against client */
2086 	/* Record the VM pagetable BO memory against the client */
2087 		if (vm->pt_root[id])
2088 			xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
2089 
2090 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
2091 	/* Warning: Security issue - never enable by default */
2092 	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
2093 #endif
2094 
2095 	/* user id alloc must always be last in ioctl to prevent UAF */
2096 	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
2097 	if (err)
2098 		goto err_close_and_put;
2099 
2100 	args->vm_id = id;
2101 
2102 	return 0;
2103 
2104 err_close_and_put:
2105 	xe_vm_close_and_put(vm);
2106 
2107 	return err;
2108 }
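
/*
 * Illustrative userspace sketch (not driver code): a minimal call into this
 * ioctl through the uapi in uapi/drm/xe_drm.h, assuming `fd` is an open xe
 * render node and error handling is elided.
 *
 *	struct drm_xe_vm_create create = {
 *		.flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create);
 *
 * The returned create.vm_id is the handle later passed to the bind and
 * destroy ioctls below.
 */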
2109 
2110 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
2111 			struct drm_file *file)
2112 {
2113 	struct xe_device *xe = to_xe_device(dev);
2114 	struct xe_file *xef = to_xe_file(file);
2115 	struct drm_xe_vm_destroy *args = data;
2116 	struct xe_vm *vm;
2117 	int err = 0;
2118 
2119 	if (XE_IOCTL_DBG(xe, args->pad) ||
2120 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2121 		return -EINVAL;
2122 
2123 	mutex_lock(&xef->vm.lock);
2124 	vm = xa_load(&xef->vm.xa, args->vm_id);
2125 	if (XE_IOCTL_DBG(xe, !vm))
2126 		err = -ENOENT;
2127 	else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
2128 		err = -EBUSY;
2129 	else
2130 		xa_erase(&xef->vm.xa, args->vm_id);
2131 	mutex_unlock(&xef->vm.lock);
2132 
2133 	if (!err)
2134 		xe_vm_close_and_put(vm);
2135 
2136 	return err;
2137 }
2138 
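/*
 * Placement used for DRM_XE_VM_BIND_OP_PREFETCH, indexed by the bind op's
 * prefetch_mem_region_instance: instance 0 prefetches to system memory (TT),
 * instances 1 and 2 to the corresponding VRAM region.
 */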
2139 static const u32 region_to_mem_type[] = {
2140 	XE_PL_TT,
2141 	XE_PL_VRAM0,
2142 	XE_PL_VRAM1,
2143 };
2144 
2145 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
2146 			     bool post_commit)
2147 {
2148 	down_read(&vm->userptr.notifier_lock);
2149 	vma->gpuva.flags |= XE_VMA_DESTROYED;
2150 	up_read(&vm->userptr.notifier_lock);
2151 	if (post_commit)
2152 		xe_vm_remove_vma(vm, vma);
2153 }
2154 
2155 #undef ULL
2156 #define ULL	unsigned long long
2157 
2158 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2159 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2160 {
2161 	struct xe_vma *vma;
2162 
2163 	switch (op->op) {
2164 	case DRM_GPUVA_OP_MAP:
2165 		vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2166 		       (ULL)op->map.va.addr, (ULL)op->map.va.range);
2167 		break;
2168 	case DRM_GPUVA_OP_REMAP:
2169 		vma = gpuva_to_vma(op->remap.unmap->va);
2170 		vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2171 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2172 		       op->remap.unmap->keep ? 1 : 0);
2173 		if (op->remap.prev)
2174 			vm_dbg(&xe->drm,
2175 			       "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2176 			       (ULL)op->remap.prev->va.addr,
2177 			       (ULL)op->remap.prev->va.range);
2178 		if (op->remap.next)
2179 			vm_dbg(&xe->drm,
2180 			       "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2181 			       (ULL)op->remap.next->va.addr,
2182 			       (ULL)op->remap.next->va.range);
2183 		break;
2184 	case DRM_GPUVA_OP_UNMAP:
2185 		vma = gpuva_to_vma(op->unmap.va);
2186 		vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2187 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2188 		       op->unmap.keep ? 1 : 0);
2189 		break;
2190 	case DRM_GPUVA_OP_PREFETCH:
2191 		vma = gpuva_to_vma(op->prefetch.va);
2192 		vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2193 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2194 		break;
2195 	default:
2196 		drm_warn(&xe->drm, "NOT POSSIBLE");
2197 	}
2198 }
2199 #else
2200 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2201 {
2202 }
2203 #endif
2204 
2205 static bool __xe_vm_needs_clear_scratch_pages(struct xe_vm *vm, u32 bind_flags)
2206 {
2207 	if (!xe_vm_in_fault_mode(vm))
2208 		return false;
2209 
2210 	if (!xe_vm_has_scratch(vm))
2211 		return false;
2212 
2213 	if (bind_flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE)
2214 		return false;
2215 
2216 	return true;
2217 }
2218 
2219 /*
2220  * Create an operations list from the IOCTL arguments and set up the operation
2221  * fields so the parse and commit steps are decoupled from them. This step can fail.
2222  */
2223 static struct drm_gpuva_ops *
2224 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2225 			 u64 bo_offset_or_userptr, u64 addr, u64 range,
2226 			 u32 operation, u32 flags,
2227 			 u32 prefetch_region, u16 pat_index)
2228 {
2229 	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2230 	struct drm_gpuva_ops *ops;
2231 	struct drm_gpuva_op *__op;
2232 	struct drm_gpuvm_bo *vm_bo;
2233 	int err;
2234 
2235 	lockdep_assert_held_write(&vm->lock);
2236 
2237 	vm_dbg(&vm->xe->drm,
2238 	       "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2239 	       operation, (ULL)addr, (ULL)range,
2240 	       (ULL)bo_offset_or_userptr);
2241 
2242 	switch (operation) {
2243 	case DRM_XE_VM_BIND_OP_MAP:
2244 	case DRM_XE_VM_BIND_OP_MAP_USERPTR:
2245 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2246 						  obj, bo_offset_or_userptr);
2247 		break;
2248 	case DRM_XE_VM_BIND_OP_UNMAP:
2249 		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2250 		break;
2251 	case DRM_XE_VM_BIND_OP_PREFETCH:
2252 		ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2253 		break;
2254 	case DRM_XE_VM_BIND_OP_UNMAP_ALL:
2255 		xe_assert(vm->xe, bo);
2256 
2257 		err = xe_bo_lock(bo, true);
2258 		if (err)
2259 			return ERR_PTR(err);
2260 
2261 		vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
2262 		if (IS_ERR(vm_bo)) {
2263 			xe_bo_unlock(bo);
2264 			return ERR_CAST(vm_bo);
2265 		}
2266 
2267 		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2268 		drm_gpuvm_bo_put(vm_bo);
2269 		xe_bo_unlock(bo);
2270 		break;
2271 	default:
2272 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2273 		ops = ERR_PTR(-EINVAL);
2274 	}
2275 	if (IS_ERR(ops))
2276 		return ops;
2277 
2278 	drm_gpuva_for_each_op(__op, ops) {
2279 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2280 
2281 		if (__op->op == DRM_GPUVA_OP_MAP) {
2282 			op->map.immediate =
2283 				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
2284 			op->map.read_only =
2285 				flags & DRM_XE_VM_BIND_FLAG_READONLY;
2286 			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2287 			op->map.is_cpu_addr_mirror = flags &
2288 				DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
2289 			op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
2290 			op->map.pat_index = pat_index;
2291 			op->map.invalidate_on_bind =
2292 				__xe_vm_needs_clear_scratch_pages(vm, flags);
2293 		} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
2294 			op->prefetch.region = prefetch_region;
2295 		}
2296 
2297 		print_op(vm->xe, __op);
2298 	}
2299 
2300 	return ops;
2301 }
2302 ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
2303 
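/*
 * Build a VMA for a MAP op or for the prev/next pieces of a REMAP: lock the
 * backing BO (and the VM's resv for an external BO), create the VMA, then
 * pin userptr pages or add preempt fences as needed. On failure the half
 * constructed VMA is destroyed and an ERR_PTR is returned.
 */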
2304 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2305 			      u16 pat_index, unsigned int flags)
2306 {
2307 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2308 	struct drm_exec exec;
2309 	struct xe_vma *vma;
2310 	int err = 0;
2311 
2312 	lockdep_assert_held_write(&vm->lock);
2313 
2314 	if (bo) {
2315 		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2316 		drm_exec_until_all_locked(&exec) {
2317 			err = 0;
2318 			if (!bo->vm) {
2319 				err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
2320 				drm_exec_retry_on_contention(&exec);
2321 			}
2322 			if (!err) {
2323 				err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2324 				drm_exec_retry_on_contention(&exec);
2325 			}
2326 			if (err) {
2327 				drm_exec_fini(&exec);
2328 				return ERR_PTR(err);
2329 			}
2330 		}
2331 	}
2332 	vma = xe_vma_create(vm, bo, op->gem.offset,
2333 			    op->va.addr, op->va.addr +
2334 			    op->va.range - 1, pat_index, flags);
2335 	if (IS_ERR(vma))
2336 		goto err_unlock;
2337 
2338 	if (xe_vma_is_userptr(vma))
2339 		err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2340 	else if (!xe_vma_has_no_bo(vma) && !bo->vm)
2341 		err = add_preempt_fences(vm, bo);
2342 
2343 err_unlock:
2344 	if (bo)
2345 		drm_exec_fini(&exec);
2346 
2347 	if (err) {
2348 		prep_vma_destroy(vm, vma, false);
2349 		xe_vma_destroy_unlocked(vma);
2350 		vma = ERR_PTR(err);
2351 	}
2352 
2353 	return vma;
2354 }
2355 
2356 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2357 {
2358 	if (vma->gpuva.flags & XE_VMA_PTE_1G)
2359 		return SZ_1G;
2360 	else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
2361 		return SZ_2M;
2362 	else if (vma->gpuva.flags & XE_VMA_PTE_64K)
2363 		return SZ_64K;
2364 	else if (vma->gpuva.flags & XE_VMA_PTE_4K)
2365 		return SZ_4K;
2366 
2367 	return SZ_1G;	/* Uninitialized, use max size */
2368 }
2369 
2370 static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2371 {
2372 	switch (size) {
2373 	case SZ_1G:
2374 		vma->gpuva.flags |= XE_VMA_PTE_1G;
2375 		break;
2376 	case SZ_2M:
2377 		vma->gpuva.flags |= XE_VMA_PTE_2M;
2378 		break;
2379 	case SZ_64K:
2380 		vma->gpuva.flags |= XE_VMA_PTE_64K;
2381 		break;
2382 	case SZ_4K:
2383 		vma->gpuva.flags |= XE_VMA_PTE_4K;
2384 		break;
2385 	}
2386 }
2387 
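/*
 * Commit a single operation to the VM's VA tree: insert newly created VMAs
 * and mark VMAs that are being unmapped as destroyed, so that a later
 * failure can be rolled back by xe_vma_op_unwind().
 */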
2388 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2389 {
2390 	int err = 0;
2391 
2392 	lockdep_assert_held_write(&vm->lock);
2393 
2394 	switch (op->base.op) {
2395 	case DRM_GPUVA_OP_MAP:
2396 		err |= xe_vm_insert_vma(vm, op->map.vma);
2397 		if (!err)
2398 			op->flags |= XE_VMA_OP_COMMITTED;
2399 		break;
2400 	case DRM_GPUVA_OP_REMAP:
2401 	{
2402 		u8 tile_present =
2403 			gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2404 
2405 		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2406 				 true);
2407 		op->flags |= XE_VMA_OP_COMMITTED;
2408 
2409 		if (op->remap.prev) {
2410 			err |= xe_vm_insert_vma(vm, op->remap.prev);
2411 			if (!err)
2412 				op->flags |= XE_VMA_OP_PREV_COMMITTED;
2413 			if (!err && op->remap.skip_prev) {
2414 				op->remap.prev->tile_present =
2415 					tile_present;
2416 				op->remap.prev = NULL;
2417 			}
2418 		}
2419 		if (op->remap.next) {
2420 			err |= xe_vm_insert_vma(vm, op->remap.next);
2421 			if (!err)
2422 				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2423 			if (!err && op->remap.skip_next) {
2424 				op->remap.next->tile_present =
2425 					tile_present;
2426 				op->remap.next = NULL;
2427 			}
2428 		}
2429 
2430 		/* Adjust for partial unbind after removing VMA from VM */
2431 		if (!err) {
2432 			op->base.remap.unmap->va->va.addr = op->remap.start;
2433 			op->base.remap.unmap->va->va.range = op->remap.range;
2434 		}
2435 		break;
2436 	}
2437 	case DRM_GPUVA_OP_UNMAP:
2438 		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2439 		op->flags |= XE_VMA_OP_COMMITTED;
2440 		break;
2441 	case DRM_GPUVA_OP_PREFETCH:
2442 		op->flags |= XE_VMA_OP_COMMITTED;
2443 		break;
2444 	default:
2445 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2446 	}
2447 
2448 	return err;
2449 }
2450 
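/*
 * Parse a drm_gpuva_ops list into xe_vma_ops: create VMAs for MAP and for
 * the prev/next pieces of a REMAP, reject operations that overlap an SVM
 * mapping, account the page-table updates required per tile and commit each
 * operation to the VM with xe_vma_op_commit().
 */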
2451 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
2452 				   struct xe_vma_ops *vops)
2453 {
2454 	struct xe_device *xe = vm->xe;
2455 	struct drm_gpuva_op *__op;
2456 	struct xe_tile *tile;
2457 	u8 id, tile_mask = 0;
2458 	int err = 0;
2459 
2460 	lockdep_assert_held_write(&vm->lock);
2461 
2462 	for_each_tile(tile, vm->xe, id)
2463 		tile_mask |= 0x1 << id;
2464 
2465 	drm_gpuva_for_each_op(__op, ops) {
2466 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2467 		struct xe_vma *vma;
2468 		unsigned int flags = 0;
2469 
2470 		INIT_LIST_HEAD(&op->link);
2471 		list_add_tail(&op->link, &vops->list);
2472 		op->tile_mask = tile_mask;
2473 
2474 		switch (op->base.op) {
2475 		case DRM_GPUVA_OP_MAP:
2476 		{
2477 			flags |= op->map.read_only ?
2478 				VMA_CREATE_FLAG_READ_ONLY : 0;
2479 			flags |= op->map.is_null ?
2480 				VMA_CREATE_FLAG_IS_NULL : 0;
2481 			flags |= op->map.dumpable ?
2482 				VMA_CREATE_FLAG_DUMPABLE : 0;
2483 			flags |= op->map.is_cpu_addr_mirror ?
2484 				VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
2485 
2486 			vma = new_vma(vm, &op->base.map, op->map.pat_index,
2487 				      flags);
2488 			if (IS_ERR(vma))
2489 				return PTR_ERR(vma);
2490 
2491 			op->map.vma = vma;
2492 			if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
2493 			     !op->map.is_cpu_addr_mirror) ||
2494 			    op->map.invalidate_on_bind)
2495 				xe_vma_ops_incr_pt_update_ops(vops,
2496 							      op->tile_mask);
2497 			break;
2498 		}
2499 		case DRM_GPUVA_OP_REMAP:
2500 		{
2501 			struct xe_vma *old =
2502 				gpuva_to_vma(op->base.remap.unmap->va);
2503 			bool skip = xe_vma_is_cpu_addr_mirror(old);
2504 			u64 start = xe_vma_start(old), end = xe_vma_end(old);
2505 
2506 			if (op->base.remap.prev)
2507 				start = op->base.remap.prev->va.addr +
2508 					op->base.remap.prev->va.range;
2509 			if (op->base.remap.next)
2510 				end = op->base.remap.next->va.addr;
2511 
2512 			if (xe_vma_is_cpu_addr_mirror(old) &&
2513 			    xe_svm_has_mapping(vm, start, end))
2514 				return -EBUSY;
2515 
2516 			op->remap.start = xe_vma_start(old);
2517 			op->remap.range = xe_vma_size(old);
2518 
2519 			flags |= op->base.remap.unmap->va->flags &
2520 				XE_VMA_READ_ONLY ?
2521 				VMA_CREATE_FLAG_READ_ONLY : 0;
2522 			flags |= op->base.remap.unmap->va->flags &
2523 				DRM_GPUVA_SPARSE ?
2524 				VMA_CREATE_FLAG_IS_NULL : 0;
2525 			flags |= op->base.remap.unmap->va->flags &
2526 				XE_VMA_DUMPABLE ?
2527 				VMA_CREATE_FLAG_DUMPABLE : 0;
2528 			flags |= xe_vma_is_cpu_addr_mirror(old) ?
2529 				VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
2530 
2531 			if (op->base.remap.prev) {
2532 				vma = new_vma(vm, op->base.remap.prev,
2533 					      old->pat_index, flags);
2534 				if (IS_ERR(vma))
2535 					return PTR_ERR(vma);
2536 
2537 				op->remap.prev = vma;
2538 
2539 				/*
2540 				 * Userptr creates a new SG mapping so
2541 				 * we must also rebind.
2542 				 */
2543 				op->remap.skip_prev = skip ||
2544 					(!xe_vma_is_userptr(old) &&
2545 					IS_ALIGNED(xe_vma_end(vma),
2546 						   xe_vma_max_pte_size(old)));
2547 				if (op->remap.skip_prev) {
2548 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2549 					op->remap.range -=
2550 						xe_vma_end(vma) -
2551 						xe_vma_start(old);
2552 					op->remap.start = xe_vma_end(vma);
2553 					vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
2554 					       (ULL)op->remap.start,
2555 					       (ULL)op->remap.range);
2556 				} else {
2557 					xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2558 				}
2559 			}
2560 
2561 			if (op->base.remap.next) {
2562 				vma = new_vma(vm, op->base.remap.next,
2563 					      old->pat_index, flags);
2564 				if (IS_ERR(vma))
2565 					return PTR_ERR(vma);
2566 
2567 				op->remap.next = vma;
2568 
2569 				/*
2570 				 * Userptr creates a new SG mapping so
2571 				 * we must also rebind.
2572 				 */
2573 				op->remap.skip_next = skip ||
2574 					(!xe_vma_is_userptr(old) &&
2575 					IS_ALIGNED(xe_vma_start(vma),
2576 						   xe_vma_max_pte_size(old)));
2577 				if (op->remap.skip_next) {
2578 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2579 					op->remap.range -=
2580 						xe_vma_end(old) -
2581 						xe_vma_start(vma);
2582 					vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
2583 					       (ULL)op->remap.start,
2584 					       (ULL)op->remap.range);
2585 				} else {
2586 					xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2587 				}
2588 			}
2589 			if (!skip)
2590 				xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2591 			break;
2592 		}
2593 		case DRM_GPUVA_OP_UNMAP:
2594 			vma = gpuva_to_vma(op->base.unmap.va);
2595 
2596 			if (xe_vma_is_cpu_addr_mirror(vma) &&
2597 			    xe_svm_has_mapping(vm, xe_vma_start(vma),
2598 					       xe_vma_end(vma)))
2599 				return -EBUSY;
2600 
2601 			if (!xe_vma_is_cpu_addr_mirror(vma))
2602 				xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2603 			break;
2604 		case DRM_GPUVA_OP_PREFETCH:
2605 			vma = gpuva_to_vma(op->base.prefetch.va);
2606 
2607 			if (xe_vma_is_userptr(vma)) {
2608 				err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2609 				if (err)
2610 					return err;
2611 			}
2612 
2613 			if (!xe_vma_is_cpu_addr_mirror(vma))
2614 				xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
2615 			break;
2616 		default:
2617 			drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2618 		}
2619 
2620 		err = xe_vma_op_commit(vm, op);
2621 		if (err)
2622 			return err;
2623 	}
2624 
2625 	return 0;
2626 }
2627 
2628 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2629 			     bool post_commit, bool prev_post_commit,
2630 			     bool next_post_commit)
2631 {
2632 	lockdep_assert_held_write(&vm->lock);
2633 
2634 	switch (op->base.op) {
2635 	case DRM_GPUVA_OP_MAP:
2636 		if (op->map.vma) {
2637 			prep_vma_destroy(vm, op->map.vma, post_commit);
2638 			xe_vma_destroy_unlocked(op->map.vma);
2639 		}
2640 		break;
2641 	case DRM_GPUVA_OP_UNMAP:
2642 	{
2643 		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2644 
2645 		if (vma) {
2646 			down_read(&vm->userptr.notifier_lock);
2647 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2648 			up_read(&vm->userptr.notifier_lock);
2649 			if (post_commit)
2650 				xe_vm_insert_vma(vm, vma);
2651 		}
2652 		break;
2653 	}
2654 	case DRM_GPUVA_OP_REMAP:
2655 	{
2656 		struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2657 
2658 		if (op->remap.prev) {
2659 			prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2660 			xe_vma_destroy_unlocked(op->remap.prev);
2661 		}
2662 		if (op->remap.next) {
2663 			prep_vma_destroy(vm, op->remap.next, next_post_commit);
2664 			xe_vma_destroy_unlocked(op->remap.next);
2665 		}
2666 		if (vma) {
2667 			down_read(&vm->userptr.notifier_lock);
2668 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2669 			up_read(&vm->userptr.notifier_lock);
2670 			if (post_commit)
2671 				xe_vm_insert_vma(vm, vma);
2672 		}
2673 		break;
2674 	}
2675 	case DRM_GPUVA_OP_PREFETCH:
2676 		/* Nothing to do */
2677 		break;
2678 	default:
2679 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2680 	}
2681 }
2682 
2683 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2684 				     struct drm_gpuva_ops **ops,
2685 				     int num_ops_list)
2686 {
2687 	int i;
2688 
2689 	for (i = num_ops_list - 1; i >= 0; --i) {
2690 		struct drm_gpuva_ops *__ops = ops[i];
2691 		struct drm_gpuva_op *__op;
2692 
2693 		if (!__ops)
2694 			continue;
2695 
2696 		drm_gpuva_for_each_op_reverse(__op, __ops) {
2697 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2698 
2699 			xe_vma_op_unwind(vm, op,
2700 					 op->flags & XE_VMA_OP_COMMITTED,
2701 					 op->flags & XE_VMA_OP_PREV_COMMITTED,
2702 					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2703 		}
2704 	}
2705 }
2706 
2707 static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
2708 				 bool validate)
2709 {
2710 	struct xe_bo *bo = xe_vma_bo(vma);
2711 	struct xe_vm *vm = xe_vma_vm(vma);
2712 	int err = 0;
2713 
2714 	if (bo) {
2715 		if (!bo->vm)
2716 			err = drm_exec_lock_obj(exec, &bo->ttm.base);
2717 		if (!err && validate)
2718 			err = xe_bo_validate(bo, vm,
2719 					     !xe_vm_in_preempt_fence_mode(vm));
2720 	}
2721 
2722 	return err;
2723 }
2724 
2725 static int check_ufence(struct xe_vma *vma)
2726 {
2727 	if (vma->ufence) {
2728 		struct xe_user_fence * const f = vma->ufence;
2729 
2730 		if (!xe_sync_ufence_get_status(f))
2731 			return -EBUSY;
2732 
2733 		vma->ufence = NULL;
2734 		xe_sync_ufence_put(f);
2735 	}
2736 
2737 	return 0;
2738 }
2739 
2740 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
2741 			    struct xe_vma_op *op)
2742 {
2743 	int err = 0;
2744 
2745 	switch (op->base.op) {
2746 	case DRM_GPUVA_OP_MAP:
2747 		if (!op->map.invalidate_on_bind)
2748 			err = vma_lock_and_validate(exec, op->map.vma,
2749 						    !xe_vm_in_fault_mode(vm) ||
2750 						    op->map.immediate);
2751 		break;
2752 	case DRM_GPUVA_OP_REMAP:
2753 		err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
2754 		if (err)
2755 			break;
2756 
2757 		err = vma_lock_and_validate(exec,
2758 					    gpuva_to_vma(op->base.remap.unmap->va),
2759 					    false);
2760 		if (!err && op->remap.prev)
2761 			err = vma_lock_and_validate(exec, op->remap.prev, true);
2762 		if (!err && op->remap.next)
2763 			err = vma_lock_and_validate(exec, op->remap.next, true);
2764 		break;
2765 	case DRM_GPUVA_OP_UNMAP:
2766 		err = check_ufence(gpuva_to_vma(op->base.unmap.va));
2767 		if (err)
2768 			break;
2769 
2770 		err = vma_lock_and_validate(exec,
2771 					    gpuva_to_vma(op->base.unmap.va),
2772 					    false);
2773 		break;
2774 	case DRM_GPUVA_OP_PREFETCH:
2775 	{
2776 		struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
2777 		u32 region = op->prefetch.region;
2778 
2779 		xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
2780 
2781 		err = vma_lock_and_validate(exec,
2782 					    gpuva_to_vma(op->base.prefetch.va),
2783 					    false);
2784 		if (!err && !xe_vma_has_no_bo(vma))
2785 			err = xe_bo_migrate(xe_vma_bo(vma),
2786 					    region_to_mem_type[region]);
2787 		break;
2788 	}
2789 	default:
2790 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2791 	}
2792 
2793 	return err;
2794 }
2795 
2796 static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
2797 					   struct xe_vm *vm,
2798 					   struct xe_vma_ops *vops)
2799 {
2800 	struct xe_vma_op *op;
2801 	int err;
2802 
2803 	err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
2804 	if (err)
2805 		return err;
2806 
2807 	list_for_each_entry(op, &vops->list, link) {
2808 		err = op_lock_and_prep(exec, vm, op);
2809 		if (err)
2810 			return err;
2811 	}
2812 
2813 #ifdef TEST_VM_OPS_ERROR
2814 	if (vops->inject_error &&
2815 	    vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK)
2816 		return -ENOSPC;
2817 #endif
2818 
2819 	return 0;
2820 }
2821 
2822 static void op_trace(struct xe_vma_op *op)
2823 {
2824 	switch (op->base.op) {
2825 	case DRM_GPUVA_OP_MAP:
2826 		trace_xe_vma_bind(op->map.vma);
2827 		break;
2828 	case DRM_GPUVA_OP_REMAP:
2829 		trace_xe_vma_unbind(gpuva_to_vma(op->base.remap.unmap->va));
2830 		if (op->remap.prev)
2831 			trace_xe_vma_bind(op->remap.prev);
2832 		if (op->remap.next)
2833 			trace_xe_vma_bind(op->remap.next);
2834 		break;
2835 	case DRM_GPUVA_OP_UNMAP:
2836 		trace_xe_vma_unbind(gpuva_to_vma(op->base.unmap.va));
2837 		break;
2838 	case DRM_GPUVA_OP_PREFETCH:
2839 		trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va));
2840 		break;
2841 	case DRM_GPUVA_OP_DRIVER:
2842 		break;
2843 	default:
2844 		XE_WARN_ON("NOT POSSIBLE");
2845 	}
2846 }
2847 
2848 static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops)
2849 {
2850 	struct xe_vma_op *op;
2851 
2852 	list_for_each_entry(op, &vops->list, link)
2853 		op_trace(op);
2854 }
2855 
2856 static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops)
2857 {
2858 	struct xe_exec_queue *q = vops->q;
2859 	struct xe_tile *tile;
2860 	int number_tiles = 0;
2861 	u8 id;
2862 
2863 	for_each_tile(tile, vm->xe, id) {
2864 		if (vops->pt_update_ops[id].num_ops)
2865 			++number_tiles;
2866 
2867 		if (vops->pt_update_ops[id].q)
2868 			continue;
2869 
2870 		if (q) {
2871 			vops->pt_update_ops[id].q = q;
2872 			if (vm->pt_root[id] && !list_empty(&q->multi_gt_list))
2873 				q = list_next_entry(q, multi_gt_list);
2874 		} else {
2875 			vops->pt_update_ops[id].q = vm->q[id];
2876 		}
2877 	}
2878 
2879 	return number_tiles;
2880 }
2881 
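/*
 * Execute the page-table updates for every tile touched by @vops: prepare
 * the updates, run them, and when more than one tile is involved gather the
 * per-tile fences into a single dma_fence_array. On failure the prepared
 * updates are aborted and an ERR_PTR is returned.
 */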
2882 static struct dma_fence *ops_execute(struct xe_vm *vm,
2883 				     struct xe_vma_ops *vops)
2884 {
2885 	struct xe_tile *tile;
2886 	struct dma_fence *fence = NULL;
2887 	struct dma_fence **fences = NULL;
2888 	struct dma_fence_array *cf = NULL;
2889 	int number_tiles = 0, current_fence = 0, err;
2890 	u8 id;
2891 
2892 	number_tiles = vm_ops_setup_tile_args(vm, vops);
2893 	if (number_tiles == 0)
2894 		return ERR_PTR(-ENODATA);
2895 
2896 	if (number_tiles > 1) {
2897 		fences = kmalloc_array(number_tiles, sizeof(*fences),
2898 				       GFP_KERNEL);
2899 		if (!fences) {
2900 			fence = ERR_PTR(-ENOMEM);
2901 			goto err_trace;
2902 		}
2903 	}
2904 
2905 	for_each_tile(tile, vm->xe, id) {
2906 		if (!vops->pt_update_ops[id].num_ops)
2907 			continue;
2908 
2909 		err = xe_pt_update_ops_prepare(tile, vops);
2910 		if (err) {
2911 			fence = ERR_PTR(err);
2912 			goto err_out;
2913 		}
2914 	}
2915 
2916 	trace_xe_vm_ops_execute(vops);
2917 
2918 	for_each_tile(tile, vm->xe, id) {
2919 		if (!vops->pt_update_ops[id].num_ops)
2920 			continue;
2921 
2922 		fence = xe_pt_update_ops_run(tile, vops);
2923 		if (IS_ERR(fence))
2924 			goto err_out;
2925 
2926 		if (fences)
2927 			fences[current_fence++] = fence;
2928 	}
2929 
2930 	if (fences) {
2931 		cf = dma_fence_array_create(number_tiles, fences,
2932 					    vm->composite_fence_ctx,
2933 					    vm->composite_fence_seqno++,
2934 					    false);
2935 		if (!cf) {
2936 			--vm->composite_fence_seqno;
2937 			fence = ERR_PTR(-ENOMEM);
2938 			goto err_out;
2939 		}
2940 		fence = &cf->base;
2941 	}
2942 
2943 	for_each_tile(tile, vm->xe, id) {
2944 		if (!vops->pt_update_ops[id].num_ops)
2945 			continue;
2946 
2947 		xe_pt_update_ops_fini(tile, vops);
2948 	}
2949 
2950 	return fence;
2951 
2952 err_out:
2953 	for_each_tile(tile, vm->xe, id) {
2954 		if (!vops->pt_update_ops[id].num_ops)
2955 			continue;
2956 
2957 		xe_pt_update_ops_abort(tile, vops);
2958 	}
2959 	while (current_fence)
2960 		dma_fence_put(fences[--current_fence]);
2961 	kfree(fences);
2962 	kfree(cf);
2963 
2964 err_trace:
2965 	trace_xe_vm_ops_fail(vm);
2966 	return fence;
2967 }
2968 
2969 static void vma_add_ufence(struct xe_vma *vma, struct xe_user_fence *ufence)
2970 {
2971 	if (vma->ufence)
2972 		xe_sync_ufence_put(vma->ufence);
2973 	vma->ufence = __xe_sync_ufence_get(ufence);
2974 }
2975 
2976 static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op,
2977 			  struct xe_user_fence *ufence)
2978 {
2979 	switch (op->base.op) {
2980 	case DRM_GPUVA_OP_MAP:
2981 		vma_add_ufence(op->map.vma, ufence);
2982 		break;
2983 	case DRM_GPUVA_OP_REMAP:
2984 		if (op->remap.prev)
2985 			vma_add_ufence(op->remap.prev, ufence);
2986 		if (op->remap.next)
2987 			vma_add_ufence(op->remap.next, ufence);
2988 		break;
2989 	case DRM_GPUVA_OP_UNMAP:
2990 		break;
2991 	case DRM_GPUVA_OP_PREFETCH:
2992 		vma_add_ufence(gpuva_to_vma(op->base.prefetch.va), ufence);
2993 		break;
2994 	default:
2995 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2996 	}
2997 }
2998 
2999 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
3000 				   struct dma_fence *fence)
3001 {
3002 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
3003 	struct xe_user_fence *ufence;
3004 	struct xe_vma_op *op;
3005 	int i;
3006 
3007 	ufence = find_ufence_get(vops->syncs, vops->num_syncs);
3008 	list_for_each_entry(op, &vops->list, link) {
3009 		if (ufence)
3010 			op_add_ufence(vm, op, ufence);
3011 
3012 		if (op->base.op == DRM_GPUVA_OP_UNMAP)
3013 			xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
3014 		else if (op->base.op == DRM_GPUVA_OP_REMAP)
3015 			xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
3016 				       fence);
3017 	}
3018 	if (ufence)
3019 		xe_sync_ufence_put(ufence);
3020 	if (fence) {
3021 		for (i = 0; i < vops->num_syncs; i++)
3022 			xe_sync_entry_signal(vops->syncs + i, fence);
3023 		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
3024 	}
3025 }
3026 
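/*
 * Lock and validate everything the operations touch inside a drm_exec loop,
 * execute the page-table updates, then finalize: destroy unmapped VMAs,
 * attach any user fence and signal the syncs with the resulting fence.
 */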
3027 static struct dma_fence *vm_bind_ioctl_ops_execute(struct xe_vm *vm,
3028 						   struct xe_vma_ops *vops)
3029 {
3030 	struct drm_exec exec;
3031 	struct dma_fence *fence;
3032 	int err;
3033 
3034 	lockdep_assert_held_write(&vm->lock);
3035 
3036 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
3037 		      DRM_EXEC_IGNORE_DUPLICATES, 0);
3038 	drm_exec_until_all_locked(&exec) {
3039 		err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
3040 		drm_exec_retry_on_contention(&exec);
3041 		if (err) {
3042 			fence = ERR_PTR(err);
3043 			goto unlock;
3044 		}
3045 
3046 		fence = ops_execute(vm, vops);
3047 		if (IS_ERR(fence)) {
3048 			if (PTR_ERR(fence) == -ENODATA)
3049 				vm_bind_ioctl_ops_fini(vm, vops, NULL);
3050 			goto unlock;
3051 		}
3052 
3053 		vm_bind_ioctl_ops_fini(vm, vops, fence);
3054 	}
3055 
3056 unlock:
3057 	drm_exec_fini(&exec);
3058 	return fence;
3059 }
3060 ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
3061 
3062 #define SUPPORTED_FLAGS_STUB  \
3063 	(DRM_XE_VM_BIND_FLAG_READONLY | \
3064 	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
3065 	 DRM_XE_VM_BIND_FLAG_NULL | \
3066 	 DRM_XE_VM_BIND_FLAG_DUMPABLE | \
3067 	 DRM_XE_VM_BIND_FLAG_CHECK_PXP | \
3068 	 DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
3069 
3070 #ifdef TEST_VM_OPS_ERROR
3071 #define SUPPORTED_FLAGS	(SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
3072 #else
3073 #define SUPPORTED_FLAGS	SUPPORTED_FLAGS_STUB
3074 #endif
3075 
3076 #define XE_64K_PAGE_MASK 0xffffull
3077 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
3078 
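/*
 * Validate the drm_xe_vm_bind arguments and, for more than one bind, copy
 * the bind-op array from userspace. On success *bind_ops points either at
 * the inline args->bind or at a kvmalloc'ed array which the caller must
 * kvfree() when args->num_binds > 1.
 */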
3079 static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
3080 				    struct drm_xe_vm_bind *args,
3081 				    struct drm_xe_vm_bind_op **bind_ops)
3082 {
3083 	int err;
3084 	int i;
3085 
3086 	if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
3087 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
3088 		return -EINVAL;
3089 
3090 	if (XE_IOCTL_DBG(xe, args->extensions))
3091 		return -EINVAL;
3092 
3093 	if (args->num_binds > 1) {
3094 		u64 __user *bind_user =
3095 			u64_to_user_ptr(args->vector_of_binds);
3096 
3097 		*bind_ops = kvmalloc_array(args->num_binds,
3098 					   sizeof(struct drm_xe_vm_bind_op),
3099 					   GFP_KERNEL | __GFP_ACCOUNT |
3100 					   __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
3101 		if (!*bind_ops)
3102 			return args->num_binds > 1 ? -ENOBUFS : -ENOMEM;
3103 
3104 		err = __copy_from_user(*bind_ops, bind_user,
3105 				       sizeof(struct drm_xe_vm_bind_op) *
3106 				       args->num_binds);
3107 		if (XE_IOCTL_DBG(xe, err)) {
3108 			err = -EFAULT;
3109 			goto free_bind_ops;
3110 		}
3111 	} else {
3112 		*bind_ops = &args->bind;
3113 	}
3114 
3115 	for (i = 0; i < args->num_binds; ++i) {
3116 		u64 range = (*bind_ops)[i].range;
3117 		u64 addr = (*bind_ops)[i].addr;
3118 		u32 op = (*bind_ops)[i].op;
3119 		u32 flags = (*bind_ops)[i].flags;
3120 		u32 obj = (*bind_ops)[i].obj;
3121 		u64 obj_offset = (*bind_ops)[i].obj_offset;
3122 		u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
3123 		bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
3124 		bool is_cpu_addr_mirror = flags &
3125 			DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
3126 		u16 pat_index = (*bind_ops)[i].pat_index;
3127 		u16 coh_mode;
3128 
3129 		if (XE_IOCTL_DBG(xe, is_cpu_addr_mirror &&
3130 				 (!xe_vm_in_fault_mode(vm) ||
3131 				 !IS_ENABLED(CONFIG_DRM_XE_GPUSVM)))) {
3132 			err = -EINVAL;
3133 			goto free_bind_ops;
3134 		}
3135 
3136 		if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
3137 			err = -EINVAL;
3138 			goto free_bind_ops;
3139 		}
3140 
3141 		pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
3142 		(*bind_ops)[i].pat_index = pat_index;
3143 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
3144 		if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
3145 			err = -EINVAL;
3146 			goto free_bind_ops;
3147 		}
3148 
3149 		if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
3150 			err = -EINVAL;
3151 			goto free_bind_ops;
3152 		}
3153 
3154 		if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
3155 		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
3156 		    XE_IOCTL_DBG(xe, obj && (is_null || is_cpu_addr_mirror)) ||
3157 		    XE_IOCTL_DBG(xe, obj_offset && (is_null ||
3158 						    is_cpu_addr_mirror)) ||
3159 		    XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
3160 				 (is_null || is_cpu_addr_mirror)) ||
3161 		    XE_IOCTL_DBG(xe, !obj &&
3162 				 op == DRM_XE_VM_BIND_OP_MAP &&
3163 				 !is_null && !is_cpu_addr_mirror) ||
3164 		    XE_IOCTL_DBG(xe, !obj &&
3165 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
3166 		    XE_IOCTL_DBG(xe, addr &&
3167 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
3168 		    XE_IOCTL_DBG(xe, range &&
3169 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
3170 		    XE_IOCTL_DBG(xe, obj &&
3171 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
3172 		    XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
3173 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
3174 		    XE_IOCTL_DBG(xe, obj &&
3175 				 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
3176 		    XE_IOCTL_DBG(xe, prefetch_region &&
3177 				 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
3178 		    XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
3179 				       xe->info.mem_region_mask)) ||
3180 		    XE_IOCTL_DBG(xe, obj &&
3181 				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
3182 			err = -EINVAL;
3183 			goto free_bind_ops;
3184 		}
3185 
3186 		if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
3187 		    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
3188 		    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
3189 		    XE_IOCTL_DBG(xe, !range &&
3190 				 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
3191 			err = -EINVAL;
3192 			goto free_bind_ops;
3193 		}
3194 	}
3195 
3196 	return 0;
3197 
3198 free_bind_ops:
3199 	if (args->num_binds > 1)
3200 		kvfree(*bind_ops);
3201 	return err;
3202 }
3203 
3204 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
3205 				       struct xe_exec_queue *q,
3206 				       struct xe_sync_entry *syncs,
3207 				       int num_syncs)
3208 {
3209 	struct dma_fence *fence;
3210 	int i, err = 0;
3211 
3212 	fence = xe_sync_in_fence_get(syncs, num_syncs,
3213 				     to_wait_exec_queue(vm, q), vm);
3214 	if (IS_ERR(fence))
3215 		return PTR_ERR(fence);
3216 
3217 	for (i = 0; i < num_syncs; i++)
3218 		xe_sync_entry_signal(&syncs[i], fence);
3219 
3220 	xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
3221 				     fence);
3222 	dma_fence_put(fence);
3223 
3224 	return err;
3225 }
3226 
3227 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
3228 			    struct xe_exec_queue *q,
3229 			    struct xe_sync_entry *syncs, u32 num_syncs)
3230 {
3231 	memset(vops, 0, sizeof(*vops));
3232 	INIT_LIST_HEAD(&vops->list);
3233 	vops->vm = vm;
3234 	vops->q = q;
3235 	vops->syncs = syncs;
3236 	vops->num_syncs = num_syncs;
3237 }
3238 
3239 static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
3240 					u64 addr, u64 range, u64 obj_offset,
3241 					u16 pat_index, u32 op, u32 bind_flags)
3242 {
3243 	u16 coh_mode;
3244 
3245 	if (XE_IOCTL_DBG(xe, range > bo->size) ||
3246 	    XE_IOCTL_DBG(xe, obj_offset >
3247 			 bo->size - range)) {
3248 		return -EINVAL;
3249 	}
3250 
3251 	/*
3252 	 * Some platforms require 64k VM_BIND alignment,
3253 	 * specifically those with XE_VRAM_FLAGS_NEED64K.
3254 	 *
3255 	 * Other platforms may have BOs set to 64k physical placement,
3256 	 * but can be mapped at 4k offsets anyway. This check is only
3257 	 * there for the former case.
3258 	 */
3259 	if ((bo->flags & XE_BO_FLAG_INTERNAL_64K) &&
3260 	    (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)) {
3261 		if (XE_IOCTL_DBG(xe, obj_offset &
3262 				 XE_64K_PAGE_MASK) ||
3263 		    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3264 		    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3265 			return -EINVAL;
3266 		}
3267 	}
3268 
3269 	coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
3270 	if (bo->cpu_caching) {
3271 		if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
3272 				 bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
3273 			return -EINVAL;
3274 		}
3275 	} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
3276 		/*
3277 		 * Imported dma-buf from a different device should
3278 		 * require 1way or 2way coherency since we don't know
3279 		 * how it was mapped on the CPU. Just assume it is
3280 		 * potentially cached on the CPU side.
3281 		 */
3282 		return -EINVAL;
3283 	}
3284 
3285 	/* If a BO is protected it can only be mapped if the key is still valid */
3286 	if ((bind_flags & DRM_XE_VM_BIND_FLAG_CHECK_PXP) && xe_bo_is_protected(bo) &&
3287 	    op != DRM_XE_VM_BIND_OP_UNMAP && op != DRM_XE_VM_BIND_OP_UNMAP_ALL)
3288 		if (XE_IOCTL_DBG(xe, xe_pxp_bo_key_check(xe->pxp, bo) != 0))
3289 			return -ENOEXEC;
3290 
3291 	return 0;
3292 }
3293 
3294 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3295 {
3296 	struct xe_device *xe = to_xe_device(dev);
3297 	struct xe_file *xef = to_xe_file(file);
3298 	struct drm_xe_vm_bind *args = data;
3299 	struct drm_xe_sync __user *syncs_user;
3300 	struct xe_bo **bos = NULL;
3301 	struct drm_gpuva_ops **ops = NULL;
3302 	struct xe_vm *vm;
3303 	struct xe_exec_queue *q = NULL;
3304 	u32 num_syncs, num_ufence = 0;
3305 	struct xe_sync_entry *syncs = NULL;
3306 	struct drm_xe_vm_bind_op *bind_ops;
3307 	struct xe_vma_ops vops;
3308 	struct dma_fence *fence;
3309 	int err;
3310 	int i;
3311 
3312 	vm = xe_vm_lookup(xef, args->vm_id);
3313 	if (XE_IOCTL_DBG(xe, !vm))
3314 		return -EINVAL;
3315 
3316 	err = vm_bind_ioctl_check_args(xe, vm, args, &bind_ops);
3317 	if (err)
3318 		goto put_vm;
3319 
3320 	if (args->exec_queue_id) {
3321 		q = xe_exec_queue_lookup(xef, args->exec_queue_id);
3322 		if (XE_IOCTL_DBG(xe, !q)) {
3323 			err = -ENOENT;
3324 			goto put_vm;
3325 		}
3326 
3327 		if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
3328 			err = -EINVAL;
3329 			goto put_exec_queue;
3330 		}
3331 	}
3332 
3333 	/* Ensure all pending UNMAPs are visible */
3334 	if (xe_vm_in_fault_mode(vm))
3335 		flush_work(&vm->svm.garbage_collector.work);
3336 
3337 	err = down_write_killable(&vm->lock);
3338 	if (err)
3339 		goto put_exec_queue;
3340 
3341 	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
3342 		err = -ENOENT;
3343 		goto release_vm_lock;
3344 	}
3345 
3346 	for (i = 0; i < args->num_binds; ++i) {
3347 		u64 range = bind_ops[i].range;
3348 		u64 addr = bind_ops[i].addr;
3349 
3350 		if (XE_IOCTL_DBG(xe, range > vm->size) ||
3351 		    XE_IOCTL_DBG(xe, addr > vm->size - range)) {
3352 			err = -EINVAL;
3353 			goto release_vm_lock;
3354 		}
3355 	}
3356 
3357 	if (args->num_binds) {
3358 		bos = kvcalloc(args->num_binds, sizeof(*bos),
3359 			       GFP_KERNEL | __GFP_ACCOUNT |
3360 			       __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
3361 		if (!bos) {
3362 			err = -ENOMEM;
3363 			goto release_vm_lock;
3364 		}
3365 
3366 		ops = kvcalloc(args->num_binds, sizeof(*ops),
3367 			       GFP_KERNEL | __GFP_ACCOUNT |
3368 			       __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
3369 		if (!ops) {
3370 			err = -ENOMEM;
3371 			goto release_vm_lock;
3372 		}
3373 	}
3374 
3375 	for (i = 0; i < args->num_binds; ++i) {
3376 		struct drm_gem_object *gem_obj;
3377 		u64 range = bind_ops[i].range;
3378 		u64 addr = bind_ops[i].addr;
3379 		u32 obj = bind_ops[i].obj;
3380 		u64 obj_offset = bind_ops[i].obj_offset;
3381 		u16 pat_index = bind_ops[i].pat_index;
3382 		u32 op = bind_ops[i].op;
3383 		u32 bind_flags = bind_ops[i].flags;
3384 
3385 		if (!obj)
3386 			continue;
3387 
3388 		gem_obj = drm_gem_object_lookup(file, obj);
3389 		if (XE_IOCTL_DBG(xe, !gem_obj)) {
3390 			err = -ENOENT;
3391 			goto put_obj;
3392 		}
3393 		bos[i] = gem_to_xe_bo(gem_obj);
3394 
3395 		err = xe_vm_bind_ioctl_validate_bo(xe, bos[i], addr, range,
3396 						   obj_offset, pat_index, op,
3397 						   bind_flags);
3398 		if (err)
3399 			goto put_obj;
3400 	}
3401 
3402 	if (args->num_syncs) {
3403 		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3404 		if (!syncs) {
3405 			err = -ENOMEM;
3406 			goto put_obj;
3407 		}
3408 	}
3409 
3410 	syncs_user = u64_to_user_ptr(args->syncs);
3411 	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3412 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3413 					  &syncs_user[num_syncs],
3414 					  (xe_vm_in_lr_mode(vm) ?
3415 					   SYNC_PARSE_FLAG_LR_MODE : 0) |
3416 					  (!args->num_binds ?
3417 					   SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
3418 		if (err)
3419 			goto free_syncs;
3420 
3421 		if (xe_sync_is_ufence(&syncs[num_syncs]))
3422 			num_ufence++;
3423 	}
3424 
3425 	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
3426 		err = -EINVAL;
3427 		goto free_syncs;
3428 	}
3429 
3430 	if (!args->num_binds) {
3431 		err = -ENODATA;
3432 		goto free_syncs;
3433 	}
3434 
3435 	xe_vma_ops_init(&vops, vm, q, syncs, num_syncs);
3436 	for (i = 0; i < args->num_binds; ++i) {
3437 		u64 range = bind_ops[i].range;
3438 		u64 addr = bind_ops[i].addr;
3439 		u32 op = bind_ops[i].op;
3440 		u32 flags = bind_ops[i].flags;
3441 		u64 obj_offset = bind_ops[i].obj_offset;
3442 		u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
3443 		u16 pat_index = bind_ops[i].pat_index;
3444 
3445 		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3446 						  addr, range, op, flags,
3447 						  prefetch_region, pat_index);
3448 		if (IS_ERR(ops[i])) {
3449 			err = PTR_ERR(ops[i]);
3450 			ops[i] = NULL;
3451 			goto unwind_ops;
3452 		}
3453 
3454 		err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops);
3455 		if (err)
3456 			goto unwind_ops;
3457 
3458 #ifdef TEST_VM_OPS_ERROR
3459 		if (flags & FORCE_OP_ERROR) {
3460 			vops.inject_error = true;
3461 			vm->xe->vm_inject_error_position =
3462 				(vm->xe->vm_inject_error_position + 1) %
3463 				FORCE_OP_ERROR_COUNT;
3464 		}
3465 #endif
3466 	}
3467 
3468 	/* Nothing to do */
3469 	if (list_empty(&vops.list)) {
3470 		err = -ENODATA;
3471 		goto unwind_ops;
3472 	}
3473 
3474 	err = xe_vma_ops_alloc(&vops, args->num_binds > 1);
3475 	if (err)
3476 		goto unwind_ops;
3477 
3478 	fence = vm_bind_ioctl_ops_execute(vm, &vops);
3479 	if (IS_ERR(fence))
3480 		err = PTR_ERR(fence);
3481 	else
3482 		dma_fence_put(fence);
3483 
3484 unwind_ops:
3485 	if (err && err != -ENODATA)
3486 		vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3487 	xe_vma_ops_fini(&vops);
3488 	for (i = args->num_binds - 1; i >= 0; --i)
3489 		if (ops[i])
3490 			drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
3491 free_syncs:
3492 	if (err == -ENODATA)
3493 		err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3494 	while (num_syncs--)
3495 		xe_sync_entry_cleanup(&syncs[num_syncs]);
3496 
3497 	kfree(syncs);
3498 put_obj:
3499 	for (i = 0; i < args->num_binds; ++i)
3500 		xe_bo_put(bos[i]);
3501 release_vm_lock:
3502 	up_write(&vm->lock);
3503 put_exec_queue:
3504 	if (q)
3505 		xe_exec_queue_put(q);
3506 put_vm:
3507 	xe_vm_put(vm);
3508 	kvfree(bos);
3509 	kvfree(ops);
3510 	if (args->num_binds > 1)
3511 		kvfree(bind_ops);
3512 	return err;
3513 }
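
/*
 * Illustrative userspace sketch (not driver code): a single MAP bind through
 * the uapi, assuming `fd`, `vm_id`, a GEM handle `bo` and a valid `pat_index`
 * already exist. Error handling is elided.
 *
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm_id,
 *		.num_binds = 1,
 *		.bind = {
 *			.obj = bo,
 *			.obj_offset = 0,
 *			.addr = 0x100000,
 *			.range = 0x10000,
 *			.op = DRM_XE_VM_BIND_OP_MAP,
 *			.pat_index = pat_index,
 *		},
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 *
 * addr and range must be page aligned, and completion can be tracked by
 * passing struct drm_xe_sync entries via syncs/num_syncs.
 */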
3514 
3515 /**
3516  * xe_vm_bind_kernel_bo - bind a kernel BO to a VM
3517  * @vm: VM to bind the BO to
3518  * @bo: BO to bind
3519  * @q: exec queue to use for the bind (optional)
3520  * @addr: address at which to bind the BO
3521  * @cache_lvl: PAT cache level to use
3522  *
3523  * Execute a VM bind map operation on a kernel-owned BO to bind it into a
3524  * kernel-owned VM.
3525  *
3526  * Returns a dma_fence to track the binding completion if the job to do so was
3527  * successfully submitted, an error pointer otherwise.
3528  */
3529 struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
3530 				       struct xe_exec_queue *q, u64 addr,
3531 				       enum xe_cache_level cache_lvl)
3532 {
3533 	struct xe_vma_ops vops;
3534 	struct drm_gpuva_ops *ops = NULL;
3535 	struct dma_fence *fence;
3536 	int err;
3537 
3538 	xe_bo_get(bo);
3539 	xe_vm_get(vm);
3540 	if (q)
3541 		xe_exec_queue_get(q);
3542 
3543 	down_write(&vm->lock);
3544 
3545 	xe_vma_ops_init(&vops, vm, q, NULL, 0);
3546 
3547 	ops = vm_bind_ioctl_ops_create(vm, bo, 0, addr, bo->size,
3548 				       DRM_XE_VM_BIND_OP_MAP, 0, 0,
3549 				       vm->xe->pat.idx[cache_lvl]);
3550 	if (IS_ERR(ops)) {
3551 		err = PTR_ERR(ops);
3552 		goto release_vm_lock;
3553 	}
3554 
3555 	err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
3556 	if (err)
3557 		goto release_vm_lock;
3558 
3559 	xe_assert(vm->xe, !list_empty(&vops.list));
3560 
3561 	err = xe_vma_ops_alloc(&vops, false);
3562 	if (err)
3563 		goto unwind_ops;
3564 
3565 	fence = vm_bind_ioctl_ops_execute(vm, &vops);
3566 	if (IS_ERR(fence))
3567 		err = PTR_ERR(fence);
3568 
3569 unwind_ops:
3570 	if (err && err != -ENODATA)
3571 		vm_bind_ioctl_ops_unwind(vm, &ops, 1);
3572 
3573 	xe_vma_ops_fini(&vops);
3574 	drm_gpuva_ops_free(&vm->gpuvm, ops);
3575 
3576 release_vm_lock:
3577 	up_write(&vm->lock);
3578 
3579 	if (q)
3580 		xe_exec_queue_put(q);
3581 	xe_vm_put(vm);
3582 	xe_bo_put(bo);
3583 
3584 	if (err)
3585 		fence = ERR_PTR(err);
3586 
3587 	return fence;
3588 }
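
/*
 * Illustrative in-kernel sketch (not part of this file): mapping a
 * kernel-owned BO and waiting for the bind to complete, assuming vm, bo and
 * addr have already been set up by the caller.
 *
 *	struct dma_fence *fence;
 *
 *	fence = xe_vm_bind_kernel_bo(vm, bo, NULL, addr, XE_CACHE_WB);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 *
 * Passing a NULL exec queue makes the bind use the VM's default queues, as
 * selected in vm_ops_setup_tile_args().
 */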
3589 
3590 /**
3591  * xe_vm_lock() - Lock the vm's dma_resv object
3592  * @vm: The struct xe_vm whose lock is to be locked
3593  * @intr: Whether to perform any waits interruptibly
3594  *
3595  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3596  * contended lock was interrupted. If @intr is false, the function
3597  * always returns 0.
3598  */
3599 int xe_vm_lock(struct xe_vm *vm, bool intr)
3600 {
3601 	if (intr)
3602 		return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3603 
3604 	return dma_resv_lock(xe_vm_resv(vm), NULL);
3605 }
3606 
3607 /**
3608  * xe_vm_unlock() - Unlock the vm's dma_resv object
3609  * @vm: The struct xe_vm whose lock is to be released.
3610  *
3611  * Unlock the vm's dma_resv object, which was locked by xe_vm_lock().
3612  */
3613 void xe_vm_unlock(struct xe_vm *vm)
3614 {
3615 	dma_resv_unlock(xe_vm_resv(vm));
3616 }
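
/*
 * Illustrative sketch (not part of this file): the typical pairing of the
 * two helpers above around work that needs the VM's dma-resv held,
 * interruptibly from process context.
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *	... operate on objects protected by the VM's dma-resv ...
 *	xe_vm_unlock(vm);
 */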
3617 
3618 /**
3619  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3620  * @vma: VMA to invalidate
3621  *
3622  * Walks the page-table leaves, zeroing the entries owned by this VMA,
3623  * invalidates the TLBs, and blocks until the TLB invalidation is
3624  * complete.
3625  *
3626  * Return: 0 for success, negative error code otherwise.
3627  */
3628 int xe_vm_invalidate_vma(struct xe_vma *vma)
3629 {
3630 	struct xe_device *xe = xe_vma_vm(vma)->xe;
3631 	struct xe_tile *tile;
3632 	struct xe_gt_tlb_invalidation_fence
3633 		fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
3634 	u8 id;
3635 	u32 fence_id = 0;
3636 	int ret = 0;
3637 
3638 	xe_assert(xe, !xe_vma_is_null(vma));
3639 	xe_assert(xe, !xe_vma_is_cpu_addr_mirror(vma));
3640 	trace_xe_vma_invalidate(vma);
3641 
3642 	vm_dbg(&xe_vma_vm(vma)->xe->drm,
3643 	       "INVALIDATE: addr=0x%016llx, range=0x%016llx",
3644 		xe_vma_start(vma), xe_vma_size(vma));
3645 
3646 	/* Check that we don't race with page-table updates */
3647 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3648 		if (xe_vma_is_userptr(vma)) {
3649 			WARN_ON_ONCE(!mmu_interval_check_retry
3650 				     (&to_userptr_vma(vma)->userptr.notifier,
3651 				      to_userptr_vma(vma)->userptr.notifier_seq));
3652 			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3653 							     DMA_RESV_USAGE_BOOKKEEP));
3654 
3655 		} else {
3656 			xe_bo_assert_held(xe_vma_bo(vma));
3657 		}
3658 	}
3659 
3660 	for_each_tile(tile, xe, id) {
3661 		if (xe_pt_zap_ptes(tile, vma)) {
3662 			xe_device_wmb(xe);
3663 			xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
3664 							  &fence[fence_id],
3665 							  true);
3666 
3667 			ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
3668 							 &fence[fence_id], vma);
3669 			if (ret)
3670 				goto wait;
3671 			++fence_id;
3672 
3673 			if (!tile->media_gt)
3674 				continue;
3675 
3676 			xe_gt_tlb_invalidation_fence_init(tile->media_gt,
3677 							  &fence[fence_id],
3678 							  true);
3679 
3680 			ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
3681 							 &fence[fence_id], vma);
3682 			if (ret)
3683 				goto wait;
3684 			++fence_id;
3685 		}
3686 	}
3687 
3688 wait:
3689 	for (id = 0; id < fence_id; ++id)
3690 		xe_gt_tlb_invalidation_fence_wait(&fence[id]);
3691 
3692 	vma->tile_invalidated = vma->tile_mask;
3693 
3694 	return ret;
3695 }
3696 
3697 int xe_vm_validate_protected(struct xe_vm *vm)
3698 {
3699 	struct drm_gpuva *gpuva;
3700 	int err = 0;
3701 
3702 	if (!vm)
3703 		return -ENODEV;
3704 
3705 	mutex_lock(&vm->snap_mutex);
3706 
3707 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3708 		struct xe_vma *vma = gpuva_to_vma(gpuva);
3709 		struct xe_bo *bo = vma->gpuva.gem.obj ?
3710 			gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
3711 
3712 		if (!bo)
3713 			continue;
3714 
3715 		if (xe_bo_is_protected(bo)) {
3716 			err = xe_pxp_bo_key_check(vm->xe->pxp, bo);
3717 			if (err)
3718 				break;
3719 		}
3720 	}
3721 
3722 	mutex_unlock(&vm->snap_mutex);
3723 	return err;
3724 }
3725 
3726 struct xe_vm_snapshot {
3727 	unsigned long num_snaps;
3728 	struct {
3729 		u64 ofs, bo_ofs;
3730 		unsigned long len;
3731 		struct xe_bo *bo;
3732 		void *data;
3733 		struct mm_struct *mm;
3734 	} snap[];
3735 };
3736 
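/*
 * Capture only records, under snap_mutex, which dumpable VMAs exist and takes
 * references on their BOs or userptr mm; the actual contents are copied later
 * by xe_vm_snapshot_capture_delayed(), which is allowed to allocate and fault.
 */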
3737 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
3738 {
3739 	unsigned long num_snaps = 0, i;
3740 	struct xe_vm_snapshot *snap = NULL;
3741 	struct drm_gpuva *gpuva;
3742 
3743 	if (!vm)
3744 		return NULL;
3745 
3746 	mutex_lock(&vm->snap_mutex);
3747 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3748 		if (gpuva->flags & XE_VMA_DUMPABLE)
3749 			num_snaps++;
3750 	}
3751 
3752 	if (num_snaps)
3753 		snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
3754 	if (!snap) {
3755 		snap = num_snaps ? ERR_PTR(-ENOMEM) : ERR_PTR(-ENODEV);
3756 		goto out_unlock;
3757 	}
3758 
3759 	snap->num_snaps = num_snaps;
3760 	i = 0;
3761 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3762 		struct xe_vma *vma = gpuva_to_vma(gpuva);
3763 		struct xe_bo *bo = vma->gpuva.gem.obj ?
3764 			gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
3765 
3766 		if (!(gpuva->flags & XE_VMA_DUMPABLE))
3767 			continue;
3768 
3769 		snap->snap[i].ofs = xe_vma_start(vma);
3770 		snap->snap[i].len = xe_vma_size(vma);
3771 		if (bo) {
3772 			snap->snap[i].bo = xe_bo_get(bo);
3773 			snap->snap[i].bo_ofs = xe_vma_bo_offset(vma);
3774 		} else if (xe_vma_is_userptr(vma)) {
3775 			struct mm_struct *mm =
3776 				to_userptr_vma(vma)->userptr.notifier.mm;
3777 
3778 			if (mmget_not_zero(mm))
3779 				snap->snap[i].mm = mm;
3780 			else
3781 				snap->snap[i].data = ERR_PTR(-EFAULT);
3782 
3783 			snap->snap[i].bo_ofs = xe_vma_userptr(vma);
3784 		} else {
3785 			snap->snap[i].data = ERR_PTR(-ENOENT);
3786 		}
3787 		i++;
3788 	}
3789 
3790 out_unlock:
3791 	mutex_unlock(&vm->snap_mutex);
3792 	return snap;
3793 }
3794 
3795 void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
3796 {
3797 	if (IS_ERR_OR_NULL(snap))
3798 		return;
3799 
3800 	for (int i = 0; i < snap->num_snaps; i++) {
3801 		struct xe_bo *bo = snap->snap[i].bo;
3802 		int err;
3803 
3804 		if (IS_ERR(snap->snap[i].data))
3805 			continue;
3806 
3807 		snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER);
3808 		if (!snap->snap[i].data) {
3809 			snap->snap[i].data = ERR_PTR(-ENOMEM);
3810 			goto cleanup_bo;
3811 		}
3812 
3813 		if (bo) {
3814 			err = xe_bo_read(bo, snap->snap[i].bo_ofs,
3815 					 snap->snap[i].data, snap->snap[i].len);
3816 		} else {
3817 			void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
3818 
3819 			kthread_use_mm(snap->snap[i].mm);
3820 			if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len))
3821 				err = 0;
3822 			else
3823 				err = -EFAULT;
3824 			kthread_unuse_mm(snap->snap[i].mm);
3825 
3826 			mmput(snap->snap[i].mm);
3827 			snap->snap[i].mm = NULL;
3828 		}
3829 
3830 		if (err) {
3831 			kvfree(snap->snap[i].data);
3832 			snap->snap[i].data = ERR_PTR(err);
3833 		}
3834 
3835 cleanup_bo:
3836 		xe_bo_put(bo);
3837 		snap->snap[i].bo = NULL;
3838 	}
3839 }
3840 
3841 void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
3842 {
3843 	unsigned long i, j;
3844 
3845 	if (IS_ERR_OR_NULL(snap)) {
3846 		drm_printf(p, "[0].error: %li\n", PTR_ERR(snap));
3847 		return;
3848 	}
3849 
3850 	for (i = 0; i < snap->num_snaps; i++) {
3851 		drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
3852 
3853 		if (IS_ERR(snap->snap[i].data)) {
3854 			drm_printf(p, "[%llx].error: %li\n", snap->snap[i].ofs,
3855 				   PTR_ERR(snap->snap[i].data));
3856 			continue;
3857 		}
3858 
3859 		drm_printf(p, "[%llx].data: ", snap->snap[i].ofs);
3860 
3861 		for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
3862 			u32 *val = snap->snap[i].data + j;
3863 			char dumped[ASCII85_BUFSZ];
3864 
3865 			drm_puts(p, ascii85_encode(*val, dumped));
3866 		}
3867 
3868 		drm_puts(p, "\n");
3869 
3870 		if (drm_coredump_printer_is_full(p))
3871 			return;
3872 	}
3873 }
3874 
3875 void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
3876 {
3877 	unsigned long i;
3878 
3879 	if (IS_ERR_OR_NULL(snap))
3880 		return;
3881 
3882 	for (i = 0; i < snap->num_snaps; i++) {
3883 		if (!IS_ERR(snap->snap[i].data))
3884 			kvfree(snap->snap[i].data);
3885 		xe_bo_put(snap->snap[i].bo);
3886 		if (snap->snap[i].mm)
3887 			mmput(snap->snap[i].mm);
3888 	}
3889 	kvfree(snap);
3890 }
3891