xref: /linux/drivers/gpu/drm/xe/xe_vm.c (revision daa2be74b1b2302004945b2a5e32424e177cc7da)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_vm.h"
7 
8 #include <linux/dma-fence-array.h>
9 #include <linux/nospec.h>
10 
11 #include <drm/drm_exec.h>
12 #include <drm/drm_print.h>
13 #include <drm/ttm/ttm_execbuf_util.h>
14 #include <drm/ttm/ttm_tt.h>
15 #include <drm/xe_drm.h>
16 #include <linux/ascii85.h>
17 #include <linux/delay.h>
18 #include <linux/kthread.h>
19 #include <linux/mm.h>
20 #include <linux/swap.h>
21 
22 #include <generated/xe_wa_oob.h>
23 
24 #include "regs/xe_gtt_defs.h"
25 #include "xe_assert.h"
26 #include "xe_bo.h"
27 #include "xe_device.h"
28 #include "xe_drm_client.h"
29 #include "xe_exec_queue.h"
30 #include "xe_gt_pagefault.h"
31 #include "xe_gt_tlb_invalidation.h"
32 #include "xe_migrate.h"
33 #include "xe_pat.h"
34 #include "xe_pm.h"
35 #include "xe_preempt_fence.h"
36 #include "xe_pt.h"
37 #include "xe_res_cursor.h"
38 #include "xe_sync.h"
39 #include "xe_trace_bo.h"
40 #include "xe_wa.h"
41 #include "xe_hmm.h"
42 
43 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
44 {
45 	return vm->gpuvm.r_obj;
46 }
47 
48 /**
49  * xe_vma_userptr_check_repin() - Advisory check for repin needed
50  * @uvma: The userptr vma
51  *
52  * Check if the userptr vma has been invalidated since last successful
53  * repin. The check is advisory only and the function can be called
54  * without the vm->userptr.notifier_lock held. There is no guarantee that the
55  * vma userptr will remain valid after a lockless check, so typically
56  * the call needs to be followed by a proper check under the notifier_lock.
57  *
58  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
59  */
60 int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
61 {
62 	return mmu_interval_check_retry(&uvma->userptr.notifier,
63 					uvma->userptr.notifier_seq) ?
64 		-EAGAIN : 0;
65 }
66 
67 int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
68 {
69 	struct xe_vma *vma = &uvma->vma;
70 	struct xe_vm *vm = xe_vma_vm(vma);
71 	struct xe_device *xe = vm->xe;
72 
73 	lockdep_assert_held(&vm->lock);
74 	xe_assert(xe, xe_vma_is_userptr(vma));
75 
76 	return xe_hmm_userptr_populate_range(uvma, false);
77 }
78 
79 static bool preempt_fences_waiting(struct xe_vm *vm)
80 {
81 	struct xe_exec_queue *q;
82 
83 	lockdep_assert_held(&vm->lock);
84 	xe_vm_assert_held(vm);
85 
86 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
87 		if (!q->lr.pfence ||
88 		    test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
89 			     &q->lr.pfence->flags)) {
90 			return true;
91 		}
92 	}
93 
94 	return false;
95 }
96 
97 static void free_preempt_fences(struct list_head *list)
98 {
99 	struct list_head *link, *next;
100 
101 	list_for_each_safe(link, next, list)
102 		xe_preempt_fence_free(to_preempt_fence_from_link(link));
103 }
104 
105 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
106 				unsigned int *count)
107 {
108 	lockdep_assert_held(&vm->lock);
109 	xe_vm_assert_held(vm);
110 
111 	if (*count >= vm->preempt.num_exec_queues)
112 		return 0;
113 
114 	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
115 		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
116 
117 		if (IS_ERR(pfence))
118 			return PTR_ERR(pfence);
119 
120 		list_move_tail(xe_preempt_fence_link(pfence), list);
121 	}
122 
123 	return 0;
124 }
125 
126 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
127 {
128 	struct xe_exec_queue *q;
129 
130 	xe_vm_assert_held(vm);
131 
132 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
133 		if (q->lr.pfence) {
134 			long timeout = dma_fence_wait(q->lr.pfence, false);
135 
136 			if (timeout < 0)
137 				return -ETIME;
138 			dma_fence_put(q->lr.pfence);
139 			q->lr.pfence = NULL;
140 		}
141 	}
142 
143 	return 0;
144 }
145 
146 static bool xe_vm_is_idle(struct xe_vm *vm)
147 {
148 	struct xe_exec_queue *q;
149 
150 	xe_vm_assert_held(vm);
151 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
152 		if (!xe_exec_queue_is_idle(q))
153 			return false;
154 	}
155 
156 	return true;
157 }
158 
159 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
160 {
161 	struct list_head *link;
162 	struct xe_exec_queue *q;
163 
164 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
165 		struct dma_fence *fence;
166 
167 		link = list->next;
168 		xe_assert(vm->xe, link != list);
169 
170 		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
171 					     q, q->lr.context,
172 					     ++q->lr.seqno);
173 		dma_fence_put(q->lr.pfence);
174 		q->lr.pfence = fence;
175 	}
176 }
177 
178 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
179 {
180 	struct xe_exec_queue *q;
181 	int err;
182 
183 	xe_bo_assert_held(bo);
184 
185 	if (!vm->preempt.num_exec_queues)
186 		return 0;
187 
188 	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
189 	if (err)
190 		return err;
191 
192 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
193 		if (q->lr.pfence) {
194 			dma_resv_add_fence(bo->ttm.base.resv,
195 					   q->lr.pfence,
196 					   DMA_RESV_USAGE_BOOKKEEP);
197 		}
198 
199 	return 0;
200 }
201 
202 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
203 						struct drm_exec *exec)
204 {
205 	struct xe_exec_queue *q;
206 
207 	lockdep_assert_held(&vm->lock);
208 	xe_vm_assert_held(vm);
209 
210 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
211 		q->ops->resume(q);
212 
213 		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence,
214 					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
215 	}
216 }
217 
218 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
219 {
220 	struct drm_gpuvm_exec vm_exec = {
221 		.vm = &vm->gpuvm,
222 		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
223 		.num_fences = 1,
224 	};
225 	struct drm_exec *exec = &vm_exec.exec;
226 	struct dma_fence *pfence;
227 	int err;
228 	bool wait;
229 
230 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
231 
232 	down_write(&vm->lock);
233 	err = drm_gpuvm_exec_lock(&vm_exec);
234 	if (err)
235 		goto out_up_write;
236 
237 	pfence = xe_preempt_fence_create(q, q->lr.context,
238 					 ++q->lr.seqno);
239 	if (!pfence) {
240 		err = -ENOMEM;
241 		goto out_fini;
242 	}
243 
244 	list_add(&q->lr.link, &vm->preempt.exec_queues);
245 	++vm->preempt.num_exec_queues;
246 	q->lr.pfence = pfence;
247 
248 	down_read(&vm->userptr.notifier_lock);
249 
250 	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
251 				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
252 
253 	/*
254 	 * Check to see if a preemption on the VM or a userptr invalidation is
255 	 * in flight; if so, trigger this preempt fence to sync state with the
256 	 * other preempt fences on the VM.
257 	 */
258 	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
259 	if (wait)
260 		dma_fence_enable_sw_signaling(pfence);
261 
262 	up_read(&vm->userptr.notifier_lock);
263 
264 out_fini:
265 	drm_exec_fini(exec);
266 out_up_write:
267 	up_write(&vm->lock);
268 
269 	return err;
270 }
271 
272 /**
273  * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
274  * @vm: The VM.
275  * @q: The exec_queue
276  */
277 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
278 {
279 	if (!xe_vm_in_preempt_fence_mode(vm))
280 		return;
281 
282 	down_write(&vm->lock);
283 	list_del(&q->lr.link);
284 	--vm->preempt.num_exec_queues;
285 	if (q->lr.pfence) {
286 		dma_fence_enable_sw_signaling(q->lr.pfence);
287 		dma_fence_put(q->lr.pfence);
288 		q->lr.pfence = NULL;
289 	}
290 	up_write(&vm->lock);
291 }
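
/*
 * Illustrative sketch (not part of the driver): a caller managing a
 * long-running exec queue is expected to pair the add/remove helpers above
 * around the queue's lifetime, roughly as follows:
 *
 *	err = xe_vm_add_compute_exec_queue(vm, q);
 *	if (err)
 *		return err;
 *
 *	... submit long-running work on q ...
 *
 *	xe_vm_remove_compute_exec_queue(vm, q);
 */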
292 
293 /**
294  * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
295  * that need repinning.
296  * @vm: The VM.
297  *
298  * This function checks whether the VM has userptrs that need repinning,
299  * and provides a release-type barrier on the userptr.notifier_lock after
300  * checking.
301  *
302  * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
303  */
304 int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
305 {
306 	lockdep_assert_held_read(&vm->userptr.notifier_lock);
307 
308 	return (list_empty(&vm->userptr.repin_list) &&
309 		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
310 }
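
/*
 * Illustrative sketch (not part of the driver): the advisory and locked
 * checks are typically combined as in the rebind worker below, i.e. an
 * unlocked xe_vm_userptr_check_repin()/xe_vm_userptr_pin() pass followed by
 * a recheck under the notifier lock before committing:
 *
 *	if (xe_vm_userptr_check_repin(vm)) {
 *		err = xe_vm_userptr_pin(vm);
 *		if (err)
 *			goto retry_or_fail;
 *	}
 *	...
 *	down_read(&vm->userptr.notifier_lock);
 *	if (__xe_vm_userptr_needs_repin(vm)) {
 *		up_read(&vm->userptr.notifier_lock);
 *		goto retry_or_fail;	(-EAGAIN: repin and retry)
 *	}
 *	... commit, then ...
 *	up_read(&vm->userptr.notifier_lock);
 */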
311 
312 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
313 
314 static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
315 {
316 	struct xe_exec_queue *q;
317 
318 	lockdep_assert_held(&vm->lock);
319 
320 	if (unlocked)
321 		xe_vm_lock(vm, false);
322 
323 	vm->flags |= XE_VM_FLAG_BANNED;
324 	trace_xe_vm_kill(vm);
325 
326 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
327 		q->ops->kill(q);
328 
329 	if (unlocked)
330 		xe_vm_unlock(vm);
331 
332 	/* TODO: Inform user the VM is banned */
333 }
334 
335 /**
336  * xe_vm_validate_should_retry() - Whether to retry after a validate error.
337  * @exec: The drm_exec object used for locking before validation.
338  * @err: The error returned from ttm_bo_validate().
339  * @end: A ktime_t cookie that should be set to 0 before first use and
340  * that should be reused on subsequent calls.
341  *
342  * With multiple active VMs, under memory pressure, it is possible that
343  * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
344  * Until ttm properly handles locking in such scenarios, the best thing the
345  * driver can do is retry with a timeout. Check if that is necessary, and
346  * if so unlock the drm_exec's objects while keeping the ticket to prepare
347  * for a rerun.
348  *
349  * Return: true if a retry after drm_exec_init() is recommended;
350  * false otherwise.
351  */
352 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
353 {
354 	ktime_t cur;
355 
356 	if (err != -ENOMEM)
357 		return false;
358 
359 	cur = ktime_get();
360 	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
361 	if (!ktime_before(cur, *end))
362 		return false;
363 
364 	msleep(20);
365 	return true;
366 }
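
/*
 * Illustrative sketch (not part of the driver): callers keep a ktime_t
 * cookie across attempts and restart the drm_exec transaction while this
 * helper keeps returning true, for example:
 *
 *	ktime_t end = 0;
 *	int err;
 *
 *	for (;;) {
 *		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *		... lock and validate, setting err ...
 *		drm_exec_fini(&exec);
 *		if (err && xe_vm_validate_should_retry(&exec, err, &end))
 *			continue;
 *		break;
 *	}
 */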
367 
368 static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
369 {
370 	struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
371 	struct drm_gpuva *gpuva;
372 	int ret;
373 
374 	lockdep_assert_held(&vm->lock);
375 	drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
376 		list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
377 			       &vm->rebind_list);
378 
379 	ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
380 	if (ret)
381 		return ret;
382 
383 	vm_bo->evicted = false;
384 	return 0;
385 }
386 
387 /**
388  * xe_vm_validate_rebind() - Validate buffer objects and rebind vmas
389  * @vm: The vm for which we are rebinding.
390  * @exec: The struct drm_exec with the locked GEM objects.
391  * @num_fences: The number of fences to reserve for the operation, not
392  * including rebinds and validations.
393  *
394  * Validates all evicted gem objects and rebinds their vmas. Note that
395  * rebindings may cause evictions and hence the validation-rebind
396  * sequence is rerun until there are no more objects to validate.
397  *
398  * Return: 0 on success, negative error code on error. In particular,
399  * may return -EINTR or -ERESTARTSYS if interrupted, and -EDEADLK if
400  * the drm_exec transaction needs to be restarted.
401  */
402 int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
403 			  unsigned int num_fences)
404 {
405 	struct drm_gem_object *obj;
406 	unsigned long index;
407 	int ret;
408 
409 	do {
410 		ret = drm_gpuvm_validate(&vm->gpuvm, exec);
411 		if (ret)
412 			return ret;
413 
414 		ret = xe_vm_rebind(vm, false);
415 		if (ret)
416 			return ret;
417 	} while (!list_empty(&vm->gpuvm.evict.list));
418 
419 	drm_exec_for_each_locked_object(exec, index, obj) {
420 		ret = dma_resv_reserve_fences(obj->resv, num_fences);
421 		if (ret)
422 			return ret;
423 	}
424 
425 	return 0;
426 }
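
/*
 * Illustrative sketch (not part of the driver): this helper is meant to be
 * called from inside a drm_exec locking loop, with contention handled by
 * restarting the transaction, e.g.:
 *
 *	drm_exec_until_all_locked(&exec) {
 *		err = drm_gpuvm_prepare_vm(&vm->gpuvm, &exec, 0);
 *		if (!err)
 *			err = xe_vm_validate_rebind(vm, &exec, num_fences);
 *		drm_exec_retry_on_contention(&exec);
 *	}
 */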
427 
428 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
429 				 bool *done)
430 {
431 	int err;
432 
433 	err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0);
434 	if (err)
435 		return err;
436 
437 	if (xe_vm_is_idle(vm)) {
438 		vm->preempt.rebind_deactivated = true;
439 		*done = true;
440 		return 0;
441 	}
442 
443 	if (!preempt_fences_waiting(vm)) {
444 		*done = true;
445 		return 0;
446 	}
447 
448 	err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0);
449 	if (err)
450 		return err;
451 
452 	err = wait_for_existing_preempt_fences(vm);
453 	if (err)
454 		return err;
455 
456 	/*
457 	 * Add validation and rebinding to the locking loop since both can
458 	 * cause evictions which may require blocking dma_resv locks.
459 	 * The fence reservation here is intended for the new preempt fences
460 	 * we attach at the end of the rebind work.
461 	 */
462 	return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
463 }
464 
465 static void preempt_rebind_work_func(struct work_struct *w)
466 {
467 	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
468 	struct drm_exec exec;
469 	unsigned int fence_count = 0;
470 	LIST_HEAD(preempt_fences);
471 	ktime_t end = 0;
472 	int err = 0;
473 	long wait;
474 	int __maybe_unused tries = 0;
475 
476 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
477 	trace_xe_vm_rebind_worker_enter(vm);
478 
479 	down_write(&vm->lock);
480 
481 	if (xe_vm_is_closed_or_banned(vm)) {
482 		up_write(&vm->lock);
483 		trace_xe_vm_rebind_worker_exit(vm);
484 		return;
485 	}
486 
487 retry:
488 	if (xe_vm_userptr_check_repin(vm)) {
489 		err = xe_vm_userptr_pin(vm);
490 		if (err)
491 			goto out_unlock_outer;
492 	}
493 
494 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
495 
496 	drm_exec_until_all_locked(&exec) {
497 		bool done = false;
498 
499 		err = xe_preempt_work_begin(&exec, vm, &done);
500 		drm_exec_retry_on_contention(&exec);
501 		if (err || done) {
502 			drm_exec_fini(&exec);
503 			if (err && xe_vm_validate_should_retry(&exec, err, &end))
504 				err = -EAGAIN;
505 
506 			goto out_unlock_outer;
507 		}
508 	}
509 
510 	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
511 	if (err)
512 		goto out_unlock;
513 
514 	err = xe_vm_rebind(vm, true);
515 	if (err)
516 		goto out_unlock;
517 
518 	/* Wait on rebinds and munmap style VM unbinds */
519 	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
520 				     DMA_RESV_USAGE_KERNEL,
521 				     false, MAX_SCHEDULE_TIMEOUT);
522 	if (wait <= 0) {
523 		err = -ETIME;
524 		goto out_unlock;
525 	}
526 
527 #define retry_required(__tries, __vm) \
528 	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
529 	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
530 	__xe_vm_userptr_needs_repin(__vm))
531 
532 	down_read(&vm->userptr.notifier_lock);
533 	if (retry_required(tries, vm)) {
534 		up_read(&vm->userptr.notifier_lock);
535 		err = -EAGAIN;
536 		goto out_unlock;
537 	}
538 
539 #undef retry_required
540 
541 	spin_lock(&vm->xe->ttm.lru_lock);
542 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
543 	spin_unlock(&vm->xe->ttm.lru_lock);
544 
545 	/* Point of no return. */
546 	arm_preempt_fences(vm, &preempt_fences);
547 	resume_and_reinstall_preempt_fences(vm, &exec);
548 	up_read(&vm->userptr.notifier_lock);
549 
550 out_unlock:
551 	drm_exec_fini(&exec);
552 out_unlock_outer:
553 	if (err == -EAGAIN) {
554 		trace_xe_vm_rebind_worker_retry(vm);
555 		goto retry;
556 	}
557 
558 	if (err) {
559 		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
560 		xe_vm_kill(vm, true);
561 	}
562 	up_write(&vm->lock);
563 
564 	free_preempt_fences(&preempt_fences);
565 
566 	trace_xe_vm_rebind_worker_exit(vm);
567 }
568 
569 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
570 				   const struct mmu_notifier_range *range,
571 				   unsigned long cur_seq)
572 {
573 	struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
574 	struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
575 	struct xe_vma *vma = &uvma->vma;
576 	struct xe_vm *vm = xe_vma_vm(vma);
577 	struct dma_resv_iter cursor;
578 	struct dma_fence *fence;
579 	long err;
580 
581 	xe_assert(vm->xe, xe_vma_is_userptr(vma));
582 	trace_xe_vma_userptr_invalidate(vma);
583 
584 	if (!mmu_notifier_range_blockable(range))
585 		return false;
586 
587 	vm_dbg(&xe_vma_vm(vma)->xe->drm,
588 	       "NOTIFIER: addr=0x%016llx, range=0x%016llx",
589 		xe_vma_start(vma), xe_vma_size(vma));
590 
591 	down_write(&vm->userptr.notifier_lock);
592 	mmu_interval_set_seq(mni, cur_seq);
593 
594 	/* No need to stop gpu access if the userptr is not yet bound. */
595 	if (!userptr->initial_bind) {
596 		up_write(&vm->userptr.notifier_lock);
597 		return true;
598 	}
599 
600 	/*
601 	 * Tell exec and rebind worker they need to repin and rebind this
602 	 * userptr.
603 	 */
604 	if (!xe_vm_in_fault_mode(vm) &&
605 	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
606 		spin_lock(&vm->userptr.invalidated_lock);
607 		list_move_tail(&userptr->invalidate_link,
608 			       &vm->userptr.invalidated);
609 		spin_unlock(&vm->userptr.invalidated_lock);
610 	}
611 
612 	up_write(&vm->userptr.notifier_lock);
613 
614 	/*
615 	 * Preempt fences turn into schedule disables, pipeline these.
616 	 * Note that even in fault mode, we need to wait for binds and
617 	 * unbinds to complete, and those are attached as BOOKKEEP fences
618 	 * to the vm.
619 	 */
620 	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
621 			    DMA_RESV_USAGE_BOOKKEEP);
622 	dma_resv_for_each_fence_unlocked(&cursor, fence)
623 		dma_fence_enable_sw_signaling(fence);
624 	dma_resv_iter_end(&cursor);
625 
626 	err = dma_resv_wait_timeout(xe_vm_resv(vm),
627 				    DMA_RESV_USAGE_BOOKKEEP,
628 				    false, MAX_SCHEDULE_TIMEOUT);
629 	XE_WARN_ON(err <= 0);
630 
631 	if (xe_vm_in_fault_mode(vm)) {
632 		err = xe_vm_invalidate_vma(vma);
633 		XE_WARN_ON(err);
634 	}
635 
636 	trace_xe_vma_userptr_invalidate_complete(vma);
637 
638 	return true;
639 }
640 
641 static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
642 	.invalidate = vma_userptr_invalidate,
643 };
644 
645 int xe_vm_userptr_pin(struct xe_vm *vm)
646 {
647 	struct xe_userptr_vma *uvma, *next;
648 	int err = 0;
649 	LIST_HEAD(tmp_evict);
650 
651 	xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
652 	lockdep_assert_held_write(&vm->lock);
653 
654 	/* Collect invalidated userptrs */
655 	spin_lock(&vm->userptr.invalidated_lock);
656 	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
657 				 userptr.invalidate_link) {
658 		list_del_init(&uvma->userptr.invalidate_link);
659 		list_move_tail(&uvma->userptr.repin_link,
660 			       &vm->userptr.repin_list);
661 	}
662 	spin_unlock(&vm->userptr.invalidated_lock);
663 
664 	/* Pin and move to temporary list */
665 	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
666 				 userptr.repin_link) {
667 		err = xe_vma_userptr_pin_pages(uvma);
668 		if (err == -EFAULT) {
669 			list_del_init(&uvma->userptr.repin_link);
670 
671 			/* Wait for pending binds */
672 			xe_vm_lock(vm, false);
673 			dma_resv_wait_timeout(xe_vm_resv(vm),
674 					      DMA_RESV_USAGE_BOOKKEEP,
675 					      false, MAX_SCHEDULE_TIMEOUT);
676 
677 			err = xe_vm_invalidate_vma(&uvma->vma);
678 			xe_vm_unlock(vm);
679 			if (err)
680 				return err;
681 		} else {
682 			if (err < 0)
683 				return err;
684 
685 			list_del_init(&uvma->userptr.repin_link);
686 			list_move_tail(&uvma->vma.combined_links.rebind,
687 				       &vm->rebind_list);
688 		}
689 	}
690 
691 	return 0;
692 }
693 
694 /**
695  * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
696  * that need repinning.
697  * @vm: The VM.
698  *
699  * This function does an advisory check for whether the VM has userptrs that
700  * need repinning.
701  *
702  * Return: 0 if there are no indications of userptrs needing repinning,
703  * -EAGAIN if there are.
704  */
705 int xe_vm_userptr_check_repin(struct xe_vm *vm)
706 {
707 	return (list_empty_careful(&vm->userptr.repin_list) &&
708 		list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
709 }
710 
711 static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
712 				  u8 tile_mask)
713 {
714 	INIT_LIST_HEAD(&op->link);
715 	op->tile_mask = tile_mask;
716 	op->base.op = DRM_GPUVA_OP_MAP;
717 	op->base.map.va.addr = vma->gpuva.va.addr;
718 	op->base.map.va.range = vma->gpuva.va.range;
719 	op->base.map.gem.obj = vma->gpuva.gem.obj;
720 	op->base.map.gem.offset = vma->gpuva.gem.offset;
721 	op->map.vma = vma;
722 	op->map.immediate = true;
723 	op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
724 	op->map.is_null = xe_vma_is_null(vma);
725 }
726 
727 static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
728 				u8 tile_mask)
729 {
730 	struct xe_vma_op *op;
731 
732 	op = kzalloc(sizeof(*op), GFP_KERNEL);
733 	if (!op)
734 		return -ENOMEM;
735 
736 	xe_vm_populate_rebind(op, vma, tile_mask);
737 	list_add_tail(&op->link, &vops->list);
738 
739 	return 0;
740 }
741 
742 static struct dma_fence *ops_execute(struct xe_vm *vm,
743 				     struct xe_vma_ops *vops);
744 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
745 			    struct xe_exec_queue *q,
746 			    struct xe_sync_entry *syncs, u32 num_syncs);
747 
748 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
749 {
750 	struct dma_fence *fence;
751 	struct xe_vma *vma, *next;
752 	struct xe_vma_ops vops;
753 	struct xe_vma_op *op, *next_op;
754 	int err;
755 
756 	lockdep_assert_held(&vm->lock);
757 	if ((xe_vm_in_lr_mode(vm) && !rebind_worker) ||
758 	    list_empty(&vm->rebind_list))
759 		return 0;
760 
761 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
762 
763 	xe_vm_assert_held(vm);
764 	list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
765 		xe_assert(vm->xe, vma->tile_present);
766 
767 		if (rebind_worker)
768 			trace_xe_vma_rebind_worker(vma);
769 		else
770 			trace_xe_vma_rebind_exec(vma);
771 
772 		err = xe_vm_ops_add_rebind(&vops, vma,
773 					   vma->tile_present);
774 		if (err)
775 			goto free_ops;
776 	}
777 
778 	fence = ops_execute(vm, &vops);
779 	if (IS_ERR(fence)) {
780 		err = PTR_ERR(fence);
781 	} else {
782 		dma_fence_put(fence);
783 		list_for_each_entry_safe(vma, next, &vm->rebind_list,
784 					 combined_links.rebind)
785 			list_del_init(&vma->combined_links.rebind);
786 	}
787 free_ops:
788 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
789 		list_del(&op->link);
790 		kfree(op);
791 	}
792 
793 	return err;
794 }
795 
796 struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_mask)
797 {
798 	struct dma_fence *fence = NULL;
799 	struct xe_vma_ops vops;
800 	struct xe_vma_op *op, *next_op;
801 	int err;
802 
803 	lockdep_assert_held(&vm->lock);
804 	xe_vm_assert_held(vm);
805 	xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
806 
807 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
808 
809 	err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
810 	if (err)
811 		return ERR_PTR(err);
812 
813 	fence = ops_execute(vm, &vops);
814 
815 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
816 		list_del(&op->link);
817 		kfree(op);
818 	}
819 
820 	return fence;
821 }
822 
823 static void xe_vma_free(struct xe_vma *vma)
824 {
825 	if (xe_vma_is_userptr(vma))
826 		kfree(to_userptr_vma(vma));
827 	else
828 		kfree(vma);
829 }
830 
831 #define VMA_CREATE_FLAG_READ_ONLY	BIT(0)
832 #define VMA_CREATE_FLAG_IS_NULL		BIT(1)
833 #define VMA_CREATE_FLAG_DUMPABLE	BIT(2)
834 
835 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
836 				    struct xe_bo *bo,
837 				    u64 bo_offset_or_userptr,
838 				    u64 start, u64 end,
839 				    u16 pat_index, unsigned int flags)
840 {
841 	struct xe_vma *vma;
842 	struct xe_tile *tile;
843 	u8 id;
844 	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
845 	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
846 	bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
847 
848 	xe_assert(vm->xe, start < end);
849 	xe_assert(vm->xe, end < vm->size);
850 
851 	/*
852 	 * Allocate and ensure that the xe_vma_is_userptr() return
853 	 * matches what was allocated.
854 	 */
855 	if (!bo && !is_null) {
856 		struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
857 
858 		if (!uvma)
859 			return ERR_PTR(-ENOMEM);
860 
861 		vma = &uvma->vma;
862 	} else {
863 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
864 		if (!vma)
865 			return ERR_PTR(-ENOMEM);
866 
867 		if (is_null)
868 			vma->gpuva.flags |= DRM_GPUVA_SPARSE;
869 		if (bo)
870 			vma->gpuva.gem.obj = &bo->ttm.base;
871 	}
872 
873 	INIT_LIST_HEAD(&vma->combined_links.rebind);
874 
875 	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
876 	vma->gpuva.vm = &vm->gpuvm;
877 	vma->gpuva.va.addr = start;
878 	vma->gpuva.va.range = end - start + 1;
879 	if (read_only)
880 		vma->gpuva.flags |= XE_VMA_READ_ONLY;
881 	if (dumpable)
882 		vma->gpuva.flags |= XE_VMA_DUMPABLE;
883 
884 	for_each_tile(tile, vm->xe, id)
885 		vma->tile_mask |= 0x1 << id;
886 
887 	if (vm->xe->info.has_atomic_enable_pte_bit)
888 		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
889 
890 	vma->pat_index = pat_index;
891 
892 	if (bo) {
893 		struct drm_gpuvm_bo *vm_bo;
894 
895 		xe_bo_assert_held(bo);
896 
897 		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
898 		if (IS_ERR(vm_bo)) {
899 			xe_vma_free(vma);
900 			return ERR_CAST(vm_bo);
901 		}
902 
903 		drm_gpuvm_bo_extobj_add(vm_bo);
904 		drm_gem_object_get(&bo->ttm.base);
905 		vma->gpuva.gem.offset = bo_offset_or_userptr;
906 		drm_gpuva_link(&vma->gpuva, vm_bo);
907 		drm_gpuvm_bo_put(vm_bo);
908 	} else /* userptr or null */ {
909 		if (!is_null) {
910 			struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
911 			u64 size = end - start + 1;
912 			int err;
913 
914 			INIT_LIST_HEAD(&userptr->invalidate_link);
915 			INIT_LIST_HEAD(&userptr->repin_link);
916 			vma->gpuva.gem.offset = bo_offset_or_userptr;
917 
918 			err = mmu_interval_notifier_insert(&userptr->notifier,
919 							   current->mm,
920 							   xe_vma_userptr(vma), size,
921 							   &vma_userptr_notifier_ops);
922 			if (err) {
923 				xe_vma_free(vma);
924 				return ERR_PTR(err);
925 			}
926 
927 			userptr->notifier_seq = LONG_MAX;
928 		}
929 
930 		xe_vm_get(vm);
931 	}
932 
933 	return vma;
934 }
935 
936 static void xe_vma_destroy_late(struct xe_vma *vma)
937 {
938 	struct xe_vm *vm = xe_vma_vm(vma);
939 
940 	if (vma->ufence) {
941 		xe_sync_ufence_put(vma->ufence);
942 		vma->ufence = NULL;
943 	}
944 
945 	if (xe_vma_is_userptr(vma)) {
946 		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
947 		struct xe_userptr *userptr = &uvma->userptr;
948 
949 		if (userptr->sg)
950 			xe_hmm_userptr_free_sg(uvma);
951 
952 		/*
953 		 * Since userptr pages are not pinned, we can't remove
954 		 * the notifier until we're sure the GPU is not accessing
955 		 * them anymore.
956 		 */
957 		mmu_interval_notifier_remove(&userptr->notifier);
958 		xe_vm_put(vm);
959 	} else if (xe_vma_is_null(vma)) {
960 		xe_vm_put(vm);
961 	} else {
962 		xe_bo_put(xe_vma_bo(vma));
963 	}
964 
965 	xe_vma_free(vma);
966 }
967 
968 static void vma_destroy_work_func(struct work_struct *w)
969 {
970 	struct xe_vma *vma =
971 		container_of(w, struct xe_vma, destroy_work);
972 
973 	xe_vma_destroy_late(vma);
974 }
975 
976 static void vma_destroy_cb(struct dma_fence *fence,
977 			   struct dma_fence_cb *cb)
978 {
979 	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
980 
981 	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
982 	queue_work(system_unbound_wq, &vma->destroy_work);
983 }
984 
985 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
986 {
987 	struct xe_vm *vm = xe_vma_vm(vma);
988 
989 	lockdep_assert_held_write(&vm->lock);
990 	xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
991 
992 	if (xe_vma_is_userptr(vma)) {
993 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
994 
995 		spin_lock(&vm->userptr.invalidated_lock);
996 		list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
997 		spin_unlock(&vm->userptr.invalidated_lock);
998 	} else if (!xe_vma_is_null(vma)) {
999 		xe_bo_assert_held(xe_vma_bo(vma));
1000 
1001 		drm_gpuva_unlink(&vma->gpuva);
1002 	}
1003 
1004 	xe_vm_assert_held(vm);
1005 	if (fence) {
1006 		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1007 						 vma_destroy_cb);
1008 
1009 		if (ret) {
1010 			XE_WARN_ON(ret != -ENOENT);
1011 			xe_vma_destroy_late(vma);
1012 		}
1013 	} else {
1014 		xe_vma_destroy_late(vma);
1015 	}
1016 }
1017 
1018 /**
1019  * xe_vm_lock_vma() - drm_exec utility to lock a vma
1020  * @exec: The drm_exec object we're currently locking for.
1021  * @vma: The vma for which we want to lock the vm resv and any attached
1022  * object's resv.
1023  *
1024  * Return: 0 on success, negative error code on error. In particular
1025  * may return -EDEADLK on WW transaction contention and -EINTR if
1026  * an interruptible wait is terminated by a signal.
1027  */
1028 int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
1029 {
1030 	struct xe_vm *vm = xe_vma_vm(vma);
1031 	struct xe_bo *bo = xe_vma_bo(vma);
1032 	int err;
1033 
1034 	XE_WARN_ON(!vm);
1035 
1036 	err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
1037 	if (!err && bo && !bo->vm)
1038 		err = drm_exec_lock_obj(exec, &bo->ttm.base);
1039 
1040 	return err;
1041 }
1042 
1043 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1044 {
1045 	struct drm_exec exec;
1046 	int err;
1047 
1048 	drm_exec_init(&exec, 0, 0);
1049 	drm_exec_until_all_locked(&exec) {
1050 		err = xe_vm_lock_vma(&exec, vma);
1051 		drm_exec_retry_on_contention(&exec);
1052 		if (XE_WARN_ON(err))
1053 			break;
1054 	}
1055 
1056 	xe_vma_destroy(vma, NULL);
1057 
1058 	drm_exec_fini(&exec);
1059 }
1060 
1061 struct xe_vma *
1062 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1063 {
1064 	struct drm_gpuva *gpuva;
1065 
1066 	lockdep_assert_held(&vm->lock);
1067 
1068 	if (xe_vm_is_closed_or_banned(vm))
1069 		return NULL;
1070 
1071 	xe_assert(vm->xe, start + range <= vm->size);
1072 
1073 	gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1074 
1075 	return gpuva ? gpuva_to_vma(gpuva) : NULL;
1076 }
1077 
1078 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1079 {
1080 	int err;
1081 
1082 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1083 	lockdep_assert_held(&vm->lock);
1084 
1085 	mutex_lock(&vm->snap_mutex);
1086 	err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1087 	mutex_unlock(&vm->snap_mutex);
1088 	XE_WARN_ON(err);	/* Shouldn't be possible */
1089 
1090 	return err;
1091 }
1092 
1093 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1094 {
1095 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1096 	lockdep_assert_held(&vm->lock);
1097 
1098 	mutex_lock(&vm->snap_mutex);
1099 	drm_gpuva_remove(&vma->gpuva);
1100 	mutex_unlock(&vm->snap_mutex);
1101 	if (vm->usm.last_fault_vma == vma)
1102 		vm->usm.last_fault_vma = NULL;
1103 }
1104 
1105 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1106 {
1107 	struct xe_vma_op *op;
1108 
1109 	op = kzalloc(sizeof(*op), GFP_KERNEL);
1110 
1111 	if (unlikely(!op))
1112 		return NULL;
1113 
1114 	return &op->base;
1115 }
1116 
1117 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1118 
1119 static const struct drm_gpuvm_ops gpuvm_ops = {
1120 	.op_alloc = xe_vm_op_alloc,
1121 	.vm_bo_validate = xe_gpuvm_validate,
1122 	.vm_free = xe_vm_free,
1123 };
1124 
1125 static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
1126 {
1127 	u64 pte = 0;
1128 
1129 	if (pat_index & BIT(0))
1130 		pte |= XE_PPGTT_PTE_PAT0;
1131 
1132 	if (pat_index & BIT(1))
1133 		pte |= XE_PPGTT_PTE_PAT1;
1134 
1135 	return pte;
1136 }
1137 
1138 static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
1139 				u32 pt_level)
1140 {
1141 	u64 pte = 0;
1142 
1143 	if (pat_index & BIT(0))
1144 		pte |= XE_PPGTT_PTE_PAT0;
1145 
1146 	if (pat_index & BIT(1))
1147 		pte |= XE_PPGTT_PTE_PAT1;
1148 
1149 	if (pat_index & BIT(2)) {
1150 		if (pt_level)
1151 			pte |= XE_PPGTT_PDE_PDPE_PAT2;
1152 		else
1153 			pte |= XE_PPGTT_PTE_PAT2;
1154 	}
1155 
1156 	if (pat_index & BIT(3))
1157 		pte |= XELPG_PPGTT_PTE_PAT3;
1158 
1159 	if (pat_index & (BIT(4)))
1160 		pte |= XE2_PPGTT_PTE_PAT4;
1161 
1162 	return pte;
1163 }
1164 
1165 static u64 pte_encode_ps(u32 pt_level)
1166 {
1167 	XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
1168 
1169 	if (pt_level == 1)
1170 		return XE_PDE_PS_2M;
1171 	else if (pt_level == 2)
1172 		return XE_PDPE_PS_1G;
1173 
1174 	return 0;
1175 }
1176 
1177 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
1178 			      const u16 pat_index)
1179 {
1180 	struct xe_device *xe = xe_bo_device(bo);
1181 	u64 pde;
1182 
1183 	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1184 	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1185 	pde |= pde_encode_pat_index(xe, pat_index);
1186 
1187 	return pde;
1188 }
1189 
1190 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1191 			      u16 pat_index, u32 pt_level)
1192 {
1193 	struct xe_device *xe = xe_bo_device(bo);
1194 	u64 pte;
1195 
1196 	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1197 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1198 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1199 	pte |= pte_encode_ps(pt_level);
1200 
1201 	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1202 		pte |= XE_PPGTT_PTE_DM;
1203 
1204 	return pte;
1205 }
1206 
1207 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1208 			       u16 pat_index, u32 pt_level)
1209 {
1210 	struct xe_device *xe = xe_vma_vm(vma)->xe;
1211 
1212 	pte |= XE_PAGE_PRESENT;
1213 
1214 	if (likely(!xe_vma_read_only(vma)))
1215 		pte |= XE_PAGE_RW;
1216 
1217 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1218 	pte |= pte_encode_ps(pt_level);
1219 
1220 	if (unlikely(xe_vma_is_null(vma)))
1221 		pte |= XE_PTE_NULL;
1222 
1223 	return pte;
1224 }
1225 
1226 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1227 				u16 pat_index,
1228 				u32 pt_level, bool devmem, u64 flags)
1229 {
1230 	u64 pte;
1231 
1232 	/* Avoid passing random bits directly as flags */
1233 	xe_assert(xe, !(flags & ~XE_PTE_PS64));
1234 
1235 	pte = addr;
1236 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1237 	pte |= pte_encode_pat_index(xe, pat_index, pt_level);
1238 	pte |= pte_encode_ps(pt_level);
1239 
1240 	if (devmem)
1241 		pte |= XE_PPGTT_PTE_DM;
1242 
1243 	pte |= flags;
1244 
1245 	return pte;
1246 }
1247 
1248 static const struct xe_pt_ops xelp_pt_ops = {
1249 	.pte_encode_bo = xelp_pte_encode_bo,
1250 	.pte_encode_vma = xelp_pte_encode_vma,
1251 	.pte_encode_addr = xelp_pte_encode_addr,
1252 	.pde_encode_bo = xelp_pde_encode_bo,
1253 };
1254 
1255 static void vm_destroy_work_func(struct work_struct *w);
1256 
1257 /**
1258  * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
1259  * given tile and vm.
1260  * @xe: xe device.
1261  * @tile: tile to set up for.
1262  * @vm: vm to set up for.
1263  *
1264  * Sets up a pagetable tree with one page-table per level and a single
1265  * leaf PTE. All pagetable entries point to the single page-table or,
1266  * for MAX_HUGEPTE_LEVEL, a NULL huge PTE that returns 0 on reads and
1267  * turns writes into NOPs.
1268  *
1269  * Return: 0 on success, negative error code on error.
1270  */
1271 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1272 				struct xe_vm *vm)
1273 {
1274 	u8 id = tile->id;
1275 	int i;
1276 
1277 	for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
1278 		vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
1279 		if (IS_ERR(vm->scratch_pt[id][i]))
1280 			return PTR_ERR(vm->scratch_pt[id][i]);
1281 
1282 		xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
1283 	}
1284 
1285 	return 0;
1286 }
1287 
1288 static void xe_vm_free_scratch(struct xe_vm *vm)
1289 {
1290 	struct xe_tile *tile;
1291 	u8 id;
1292 
1293 	if (!xe_vm_has_scratch(vm))
1294 		return;
1295 
1296 	for_each_tile(tile, vm->xe, id) {
1297 		u32 i;
1298 
1299 		if (!vm->pt_root[id])
1300 			continue;
1301 
1302 		for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
1303 			if (vm->scratch_pt[id][i])
1304 				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
1305 	}
1306 }
1307 
1308 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
1309 {
1310 	struct drm_gem_object *vm_resv_obj;
1311 	struct xe_vm *vm;
1312 	int err, number_tiles = 0;
1313 	struct xe_tile *tile;
1314 	u8 id;
1315 
1316 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1317 	if (!vm)
1318 		return ERR_PTR(-ENOMEM);
1319 
1320 	vm->xe = xe;
1321 
1322 	vm->size = 1ull << xe->info.va_bits;
1323 
1324 	vm->flags = flags;
1325 
1326 	init_rwsem(&vm->lock);
1327 	mutex_init(&vm->snap_mutex);
1328 
1329 	INIT_LIST_HEAD(&vm->rebind_list);
1330 
1331 	INIT_LIST_HEAD(&vm->userptr.repin_list);
1332 	INIT_LIST_HEAD(&vm->userptr.invalidated);
1333 	init_rwsem(&vm->userptr.notifier_lock);
1334 	spin_lock_init(&vm->userptr.invalidated_lock);
1335 
1336 	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1337 
1338 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
1339 	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
1340 
1341 	for_each_tile(tile, xe, id)
1342 		xe_range_fence_tree_init(&vm->rftree[id]);
1343 
1344 	vm->pt_ops = &xelp_pt_ops;
1345 
1346 	/*
1347 	 * Long-running workloads are not protected by the scheduler references.
1348 	 * By design, run_job for long-running workloads returns NULL and the
1349 	 * scheduler drops all of its references, hence protecting the VM
1350 	 * for this case is necessary.
1351 	 */
1352 	if (flags & XE_VM_FLAG_LR_MODE)
1353 		xe_pm_runtime_get_noresume(xe);
1354 
1355 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1356 	if (!vm_resv_obj) {
1357 		err = -ENOMEM;
1358 		goto err_no_resv;
1359 	}
1360 
1361 	drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1362 		       vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
1363 
1364 	drm_gem_object_put(vm_resv_obj);
1365 
1366 	err = xe_vm_lock(vm, true);
1367 	if (err)
1368 		goto err_close;
1369 
1370 	if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1371 		vm->flags |= XE_VM_FLAG_64K;
1372 
1373 	for_each_tile(tile, xe, id) {
1374 		if (flags & XE_VM_FLAG_MIGRATION &&
1375 		    tile->id != XE_VM_FLAG_TILE_ID(flags))
1376 			continue;
1377 
1378 		vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
1379 		if (IS_ERR(vm->pt_root[id])) {
1380 			err = PTR_ERR(vm->pt_root[id]);
1381 			vm->pt_root[id] = NULL;
1382 			goto err_unlock_close;
1383 		}
1384 	}
1385 
1386 	if (xe_vm_has_scratch(vm)) {
1387 		for_each_tile(tile, xe, id) {
1388 			if (!vm->pt_root[id])
1389 				continue;
1390 
1391 			err = xe_vm_create_scratch(xe, tile, vm);
1392 			if (err)
1393 				goto err_unlock_close;
1394 		}
1395 		vm->batch_invalidate_tlb = true;
1396 	}
1397 
1398 	if (vm->flags & XE_VM_FLAG_LR_MODE) {
1399 		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1400 		vm->batch_invalidate_tlb = false;
1401 	}
1402 
1403 	/* Fill pt_root after allocating scratch tables */
1404 	for_each_tile(tile, xe, id) {
1405 		if (!vm->pt_root[id])
1406 			continue;
1407 
1408 		xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1409 	}
1410 	xe_vm_unlock(vm);
1411 
1412 	/* Kernel migration VM shouldn't have a circular loop. */
1413 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
1414 		for_each_tile(tile, xe, id) {
1415 			struct xe_gt *gt = tile->primary_gt;
1416 			struct xe_vm *migrate_vm;
1417 			struct xe_exec_queue *q;
1418 			u32 create_flags = EXEC_QUEUE_FLAG_VM;
1419 
1420 			if (!vm->pt_root[id])
1421 				continue;
1422 
1423 			migrate_vm = xe_migrate_get_vm(tile->migrate);
1424 			q = xe_exec_queue_create_class(xe, gt, migrate_vm,
1425 						       XE_ENGINE_CLASS_COPY,
1426 						       create_flags);
1427 			xe_vm_put(migrate_vm);
1428 			if (IS_ERR(q)) {
1429 				err = PTR_ERR(q);
1430 				goto err_close;
1431 			}
1432 			vm->q[id] = q;
1433 			number_tiles++;
1434 		}
1435 	}
1436 
1437 	if (number_tiles > 1)
1438 		vm->composite_fence_ctx = dma_fence_context_alloc(1);
1439 
1440 	mutex_lock(&xe->usm.lock);
1441 	if (flags & XE_VM_FLAG_FAULT_MODE)
1442 		xe->usm.num_vm_in_fault_mode++;
1443 	else if (!(flags & XE_VM_FLAG_MIGRATION))
1444 		xe->usm.num_vm_in_non_fault_mode++;
1445 	mutex_unlock(&xe->usm.lock);
1446 
1447 	trace_xe_vm_create(vm);
1448 
1449 	return vm;
1450 
1451 err_unlock_close:
1452 	xe_vm_unlock(vm);
1453 err_close:
1454 	xe_vm_close_and_put(vm);
1455 	return ERR_PTR(err);
1456 
1457 err_no_resv:
1458 	mutex_destroy(&vm->snap_mutex);
1459 	for_each_tile(tile, xe, id)
1460 		xe_range_fence_tree_fini(&vm->rftree[id]);
1461 	kfree(vm);
1462 	if (flags & XE_VM_FLAG_LR_MODE)
1463 		xe_pm_runtime_put(xe);
1464 	return ERR_PTR(err);
1465 }
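
/*
 * Illustrative sketch (not part of the driver): in-kernel users create and
 * tear down a VM as follows, where "flags" is a combination of the
 * XE_VM_FLAG_* bits handled above; xe_vm_close_and_put() is the counterpart
 * that drops the reference returned here:
 *
 *	struct xe_vm *vm = xe_vm_create(xe, flags);
 *
 *	if (IS_ERR(vm))
 *		return PTR_ERR(vm);
 *	...
 *	xe_vm_close_and_put(vm);
 */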
1466 
1467 static void xe_vm_close(struct xe_vm *vm)
1468 {
1469 	down_write(&vm->lock);
1470 	vm->size = 0;
1471 	up_write(&vm->lock);
1472 }
1473 
1474 void xe_vm_close_and_put(struct xe_vm *vm)
1475 {
1476 	LIST_HEAD(contested);
1477 	struct xe_device *xe = vm->xe;
1478 	struct xe_tile *tile;
1479 	struct xe_vma *vma, *next_vma;
1480 	struct drm_gpuva *gpuva, *next;
1481 	u8 id;
1482 
1483 	xe_assert(xe, !vm->preempt.num_exec_queues);
1484 
1485 	xe_vm_close(vm);
1486 	if (xe_vm_in_preempt_fence_mode(vm))
1487 		flush_work(&vm->preempt.rebind_work);
1488 
1489 	down_write(&vm->lock);
1490 	for_each_tile(tile, xe, id) {
1491 		if (vm->q[id])
1492 			xe_exec_queue_last_fence_put(vm->q[id], vm);
1493 	}
1494 	up_write(&vm->lock);
1495 
1496 	for_each_tile(tile, xe, id) {
1497 		if (vm->q[id]) {
1498 			xe_exec_queue_kill(vm->q[id]);
1499 			xe_exec_queue_put(vm->q[id]);
1500 			vm->q[id] = NULL;
1501 		}
1502 	}
1503 
1504 	down_write(&vm->lock);
1505 	xe_vm_lock(vm, false);
1506 	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1507 		vma = gpuva_to_vma(gpuva);
1508 
1509 		if (xe_vma_has_no_bo(vma)) {
1510 			down_read(&vm->userptr.notifier_lock);
1511 			vma->gpuva.flags |= XE_VMA_DESTROYED;
1512 			up_read(&vm->userptr.notifier_lock);
1513 		}
1514 
1515 		xe_vm_remove_vma(vm, vma);
1516 
1517 		/* easy case, remove from VMA? */
1518 		if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1519 			list_del_init(&vma->combined_links.rebind);
1520 			xe_vma_destroy(vma, NULL);
1521 			continue;
1522 		}
1523 
1524 		list_move_tail(&vma->combined_links.destroy, &contested);
1525 		vma->gpuva.flags |= XE_VMA_DESTROYED;
1526 	}
1527 
1528 	/*
1529 	 * All vm operations will add shared fences to resv.
1530 	 * The only exception is eviction for a shared object,
1531 	 * but even so, the unbind when evicted would still
1532 	 * install a fence to resv. Hence it's safe to
1533 	 * destroy the pagetables immediately.
1534 	 */
1535 	xe_vm_free_scratch(vm);
1536 
1537 	for_each_tile(tile, xe, id) {
1538 		if (vm->pt_root[id]) {
1539 			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1540 			vm->pt_root[id] = NULL;
1541 		}
1542 	}
1543 	xe_vm_unlock(vm);
1544 
1545 	/*
1546 	 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL.
1547 	 * Since we hold a refcount to the bo, we can remove and free
1548 	 * the members safely without locking.
1549 	 */
1550 	list_for_each_entry_safe(vma, next_vma, &contested,
1551 				 combined_links.destroy) {
1552 		list_del_init(&vma->combined_links.destroy);
1553 		xe_vma_destroy_unlocked(vma);
1554 	}
1555 
1556 	up_write(&vm->lock);
1557 
1558 	mutex_lock(&xe->usm.lock);
1559 	if (vm->flags & XE_VM_FLAG_FAULT_MODE)
1560 		xe->usm.num_vm_in_fault_mode--;
1561 	else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
1562 		xe->usm.num_vm_in_non_fault_mode--;
1563 
1564 	if (vm->usm.asid) {
1565 		void *lookup;
1566 
1567 		xe_assert(xe, xe->info.has_asid);
1568 		xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
1569 
1570 		lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1571 		xe_assert(xe, lookup == vm);
1572 	}
1573 	mutex_unlock(&xe->usm.lock);
1574 
1575 	for_each_tile(tile, xe, id)
1576 		xe_range_fence_tree_fini(&vm->rftree[id]);
1577 
1578 	xe_vm_put(vm);
1579 }
1580 
1581 static void vm_destroy_work_func(struct work_struct *w)
1582 {
1583 	struct xe_vm *vm =
1584 		container_of(w, struct xe_vm, destroy_work);
1585 	struct xe_device *xe = vm->xe;
1586 	struct xe_tile *tile;
1587 	u8 id;
1588 
1589 	/* xe_vm_close_and_put was not called? */
1590 	xe_assert(xe, !vm->size);
1591 
1592 	if (xe_vm_in_preempt_fence_mode(vm))
1593 		flush_work(&vm->preempt.rebind_work);
1594 
1595 	mutex_destroy(&vm->snap_mutex);
1596 
1597 	if (vm->flags & XE_VM_FLAG_LR_MODE)
1598 		xe_pm_runtime_put(xe);
1599 
1600 	for_each_tile(tile, xe, id)
1601 		XE_WARN_ON(vm->pt_root[id]);
1602 
1603 	trace_xe_vm_free(vm);
1604 	kfree(vm);
1605 }
1606 
1607 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1608 {
1609 	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1610 
1611 	/* To destroy the VM we need to be able to sleep */
1612 	queue_work(system_unbound_wq, &vm->destroy_work);
1613 }
1614 
1615 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1616 {
1617 	struct xe_vm *vm;
1618 
1619 	mutex_lock(&xef->vm.lock);
1620 	vm = xa_load(&xef->vm.xa, id);
1621 	if (vm)
1622 		xe_vm_get(vm);
1623 	mutex_unlock(&xef->vm.lock);
1624 
1625 	return vm;
1626 }
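
/*
 * Illustrative sketch (not part of the driver): xe_vm_lookup() returns a
 * reference that the caller must drop with xe_vm_put() when done, e.g.:
 *
 *	struct xe_vm *vm = xe_vm_lookup(xef, args->vm_id);
 *
 *	if (XE_IOCTL_DBG(xe, !vm))
 *		return -ENOENT;
 *	...
 *	xe_vm_put(vm);
 */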
1627 
1628 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1629 {
1630 	return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
1631 					 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
1632 }
1633 
1634 static struct xe_exec_queue *
1635 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
1636 {
1637 	return q ? q : vm->q[0];
1638 }
1639 
1640 static struct dma_fence *
1641 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1642 		 struct xe_sync_entry *syncs, u32 num_syncs,
1643 		 bool first_op, bool last_op)
1644 {
1645 	struct xe_vm *vm = xe_vma_vm(vma);
1646 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1647 	struct xe_tile *tile;
1648 	struct dma_fence *fence = NULL;
1649 	struct dma_fence **fences = NULL;
1650 	struct dma_fence_array *cf = NULL;
1651 	int cur_fence = 0;
1652 	int number_tiles = hweight8(vma->tile_present);
1653 	int err;
1654 	u8 id;
1655 
1656 	trace_xe_vma_unbind(vma);
1657 
1658 	if (number_tiles > 1) {
1659 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1660 				       GFP_KERNEL);
1661 		if (!fences)
1662 			return ERR_PTR(-ENOMEM);
1663 	}
1664 
1665 	for_each_tile(tile, vm->xe, id) {
1666 		if (!(vma->tile_present & BIT(id)))
1667 			goto next;
1668 
1669 		fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
1670 					   first_op ? syncs : NULL,
1671 					   first_op ? num_syncs : 0);
1672 		if (IS_ERR(fence)) {
1673 			err = PTR_ERR(fence);
1674 			goto err_fences;
1675 		}
1676 
1677 		if (fences)
1678 			fences[cur_fence++] = fence;
1679 
1680 next:
1681 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1682 			q = list_next_entry(q, multi_gt_list);
1683 	}
1684 
1685 	if (fences) {
1686 		cf = dma_fence_array_create(number_tiles, fences,
1687 					    vm->composite_fence_ctx,
1688 					    vm->composite_fence_seqno++,
1689 					    false);
1690 		if (!cf) {
1691 			--vm->composite_fence_seqno;
1692 			err = -ENOMEM;
1693 			goto err_fences;
1694 		}
1695 	}
1696 
1697 	fence = cf ? &cf->base : !fence ?
1698 		xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
1699 
1700 	return fence;
1701 
1702 err_fences:
1703 	if (fences) {
1704 		while (cur_fence)
1705 			dma_fence_put(fences[--cur_fence]);
1706 		kfree(fences);
1707 	}
1708 
1709 	return ERR_PTR(err);
1710 }
1711 
1712 static struct dma_fence *
1713 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
1714 	       struct xe_sync_entry *syncs, u32 num_syncs,
1715 	       u8 tile_mask, bool first_op, bool last_op)
1716 {
1717 	struct xe_tile *tile;
1718 	struct dma_fence *fence;
1719 	struct dma_fence **fences = NULL;
1720 	struct dma_fence_array *cf = NULL;
1721 	struct xe_vm *vm = xe_vma_vm(vma);
1722 	int cur_fence = 0;
1723 	int number_tiles = hweight8(tile_mask);
1724 	int err;
1725 	u8 id;
1726 
1727 	trace_xe_vma_bind(vma);
1728 
1729 	if (number_tiles > 1) {
1730 		fences = kmalloc_array(number_tiles, sizeof(*fences),
1731 				       GFP_KERNEL);
1732 		if (!fences)
1733 			return ERR_PTR(-ENOMEM);
1734 	}
1735 
1736 	for_each_tile(tile, vm->xe, id) {
1737 		if (!(tile_mask & BIT(id)))
1738 			goto next;
1739 
1740 		fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
1741 					 first_op ? syncs : NULL,
1742 					 first_op ? num_syncs : 0,
1743 					 vma->tile_present & BIT(id));
1744 		if (IS_ERR(fence)) {
1745 			err = PTR_ERR(fence);
1746 			goto err_fences;
1747 		}
1748 
1749 		if (fences)
1750 			fences[cur_fence++] = fence;
1751 
1752 next:
1753 		if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
1754 			q = list_next_entry(q, multi_gt_list);
1755 	}
1756 
1757 	if (fences) {
1758 		cf = dma_fence_array_create(number_tiles, fences,
1759 					    vm->composite_fence_ctx,
1760 					    vm->composite_fence_seqno++,
1761 					    false);
1762 		if (!cf) {
1763 			--vm->composite_fence_seqno;
1764 			err = -ENOMEM;
1765 			goto err_fences;
1766 		}
1767 	}
1768 
1769 	return cf ? &cf->base : fence;
1770 
1771 err_fences:
1772 	if (fences) {
1773 		while (cur_fence)
1774 			dma_fence_put(fences[--cur_fence]);
1775 		kfree(fences);
1776 	}
1777 
1778 	return ERR_PTR(err);
1779 }
1780 
1781 static struct xe_user_fence *
1782 find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
1783 {
1784 	unsigned int i;
1785 
1786 	for (i = 0; i < num_syncs; i++) {
1787 		struct xe_sync_entry *e = &syncs[i];
1788 
1789 		if (xe_sync_is_ufence(e))
1790 			return xe_sync_ufence_get(e);
1791 	}
1792 
1793 	return NULL;
1794 }
1795 
1796 static struct dma_fence *
1797 xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
1798 	   struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs,
1799 	   u8 tile_mask, bool immediate, bool first_op, bool last_op)
1800 {
1801 	struct dma_fence *fence;
1802 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1803 
1804 	xe_vm_assert_held(vm);
1805 	xe_bo_assert_held(bo);
1806 
1807 	if (immediate) {
1808 		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, tile_mask,
1809 				       first_op, last_op);
1810 		if (IS_ERR(fence))
1811 			return fence;
1812 	} else {
1813 		xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
1814 
1815 		fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1816 	}
1817 
1818 	return fence;
1819 }
1820 
1821 static struct dma_fence *
1822 xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
1823 	     struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1824 	     u32 num_syncs, bool first_op, bool last_op)
1825 {
1826 	struct dma_fence *fence;
1827 
1828 	xe_vm_assert_held(vm);
1829 	xe_bo_assert_held(xe_vma_bo(vma));
1830 
1831 	fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
1832 	if (IS_ERR(fence))
1833 		return fence;
1834 
1835 	return fence;
1836 }
1837 
1838 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
1839 				    DRM_XE_VM_CREATE_FLAG_LR_MODE | \
1840 				    DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1841 
1842 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1843 		       struct drm_file *file)
1844 {
1845 	struct xe_device *xe = to_xe_device(dev);
1846 	struct xe_file *xef = to_xe_file(file);
1847 	struct drm_xe_vm_create *args = data;
1848 	struct xe_tile *tile;
1849 	struct xe_vm *vm;
1850 	u32 id, asid;
1851 	int err;
1852 	u32 flags = 0;
1853 
1854 	if (XE_IOCTL_DBG(xe, args->extensions))
1855 		return -EINVAL;
1856 
1857 	if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
1858 		args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
1859 
1860 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1861 			 !xe->info.has_usm))
1862 		return -EINVAL;
1863 
1864 	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1865 		return -EINVAL;
1866 
1867 	if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1868 		return -EINVAL;
1869 
1870 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
1871 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1872 		return -EINVAL;
1873 
1874 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
1875 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1876 		return -EINVAL;
1877 
1878 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1879 			 xe_device_in_non_fault_mode(xe)))
1880 		return -EINVAL;
1881 
1882 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
1883 			 xe_device_in_fault_mode(xe)))
1884 		return -EINVAL;
1885 
1886 	if (XE_IOCTL_DBG(xe, args->extensions))
1887 		return -EINVAL;
1888 
1889 	if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
1890 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
1891 	if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
1892 		flags |= XE_VM_FLAG_LR_MODE;
1893 	if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1894 		flags |= XE_VM_FLAG_FAULT_MODE;
1895 
1896 	vm = xe_vm_create(xe, flags);
1897 	if (IS_ERR(vm))
1898 		return PTR_ERR(vm);
1899 
1900 	mutex_lock(&xef->vm.lock);
1901 	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1902 	mutex_unlock(&xef->vm.lock);
1903 	if (err)
1904 		goto err_close_and_put;
1905 
1906 	if (xe->info.has_asid) {
1907 		mutex_lock(&xe->usm.lock);
1908 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1909 				      XA_LIMIT(1, XE_MAX_ASID - 1),
1910 				      &xe->usm.next_asid, GFP_KERNEL);
1911 		mutex_unlock(&xe->usm.lock);
1912 		if (err < 0)
1913 			goto err_free_id;
1914 
1915 		vm->usm.asid = asid;
1916 	}
1917 
1918 	args->vm_id = id;
1919 	vm->xef = xef;
1920 
1921 	/* Record BO memory for VM pagetables created against the client */
1922 	for_each_tile(tile, xe, id)
1923 		if (vm->pt_root[id])
1924 			xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
1925 
1926 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1927 	/* Warning: Security issue - never enable by default */
1928 	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
1929 #endif
1930 
1931 	return 0;
1932 
1933 err_free_id:
1934 	mutex_lock(&xef->vm.lock);
1935 	xa_erase(&xef->vm.xa, id);
1936 	mutex_unlock(&xef->vm.lock);
1937 err_close_and_put:
1938 	xe_vm_close_and_put(vm);
1939 
1940 	return err;
1941 }
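
/*
 * Illustrative userspace sketch (assumptions: the uapi ioctl wrapper
 * DRM_IOCTL_XE_VM_CREATE from xe_drm.h and an already-open DRM fd "fd");
 * it shows how the flags validated above are passed in:
 *
 *	struct drm_xe_vm_create create = {
 *		.flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE |
 *			 DRM_XE_VM_CREATE_FLAG_LR_MODE,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
 *		return -errno;
 *	(create.vm_id now refers to the new VM)
 */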
1942 
1943 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1944 			struct drm_file *file)
1945 {
1946 	struct xe_device *xe = to_xe_device(dev);
1947 	struct xe_file *xef = to_xe_file(file);
1948 	struct drm_xe_vm_destroy *args = data;
1949 	struct xe_vm *vm;
1950 	int err = 0;
1951 
1952 	if (XE_IOCTL_DBG(xe, args->pad) ||
1953 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1954 		return -EINVAL;
1955 
1956 	mutex_lock(&xef->vm.lock);
1957 	vm = xa_load(&xef->vm.xa, args->vm_id);
1958 	if (XE_IOCTL_DBG(xe, !vm))
1959 		err = -ENOENT;
1960 	else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
1961 		err = -EBUSY;
1962 	else
1963 		xa_erase(&xef->vm.xa, args->vm_id);
1964 	mutex_unlock(&xef->vm.lock);
1965 
1966 	if (!err)
1967 		xe_vm_close_and_put(vm);
1968 
1969 	return err;
1970 }
1971 
1972 static const u32 region_to_mem_type[] = {
1973 	XE_PL_TT,
1974 	XE_PL_VRAM0,
1975 	XE_PL_VRAM1,
1976 };
1977 
1978 static struct dma_fence *
1979 xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
1980 	       struct xe_exec_queue *q, struct xe_sync_entry *syncs,
1981 	       u32 num_syncs, bool first_op, bool last_op)
1982 {
1983 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
1984 
1985 	if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
1986 		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
1987 				  vma->tile_mask, true, first_op, last_op);
1988 	} else {
1989 		return xe_exec_queue_last_fence_get(wait_exec_queue, vm);
1990 	}
1991 }
1992 
1993 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
1994 			     bool post_commit)
1995 {
1996 	down_read(&vm->userptr.notifier_lock);
1997 	vma->gpuva.flags |= XE_VMA_DESTROYED;
1998 	up_read(&vm->userptr.notifier_lock);
1999 	if (post_commit)
2000 		xe_vm_remove_vma(vm, vma);
2001 }
2002 
2003 #undef ULL
2004 #define ULL	unsigned long long
2005 
2006 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2007 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2008 {
2009 	struct xe_vma *vma;
2010 
2011 	switch (op->op) {
2012 	case DRM_GPUVA_OP_MAP:
2013 		vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2014 		       (ULL)op->map.va.addr, (ULL)op->map.va.range);
2015 		break;
2016 	case DRM_GPUVA_OP_REMAP:
2017 		vma = gpuva_to_vma(op->remap.unmap->va);
2018 		vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2019 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2020 		       op->remap.unmap->keep ? 1 : 0);
2021 		if (op->remap.prev)
2022 			vm_dbg(&xe->drm,
2023 			       "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2024 			       (ULL)op->remap.prev->va.addr,
2025 			       (ULL)op->remap.prev->va.range);
2026 		if (op->remap.next)
2027 			vm_dbg(&xe->drm,
2028 			       "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2029 			       (ULL)op->remap.next->va.addr,
2030 			       (ULL)op->remap.next->va.range);
2031 		break;
2032 	case DRM_GPUVA_OP_UNMAP:
2033 		vma = gpuva_to_vma(op->unmap.va);
2034 		vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2035 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2036 		       op->unmap.keep ? 1 : 0);
2037 		break;
2038 	case DRM_GPUVA_OP_PREFETCH:
2039 		vma = gpuva_to_vma(op->prefetch.va);
2040 		vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2041 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2042 		break;
2043 	default:
2044 		drm_warn(&xe->drm, "NOT POSSIBLE");
2045 	}
2046 }
2047 #else
2048 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2049 {
2050 }
2051 #endif
2052 
2053 /*
2054  * Create operations list from IOCTL arguments, set up operation fields so parse
2055  * and commit steps are decoupled from IOCTL arguments. This step can fail.
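 *
 * The resulting ops are consumed by vm_bind_ioctl_ops_parse(), committed to
 * the VM's VA tree via xe_vma_op_commit() and finally executed on the GPU by
 * vm_bind_ioctl_ops_execute().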
2056  */
2057 static struct drm_gpuva_ops *
2058 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
2059 			 u64 bo_offset_or_userptr, u64 addr, u64 range,
2060 			 u32 operation, u32 flags,
2061 			 u32 prefetch_region, u16 pat_index)
2062 {
2063 	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2064 	struct drm_gpuva_ops *ops;
2065 	struct drm_gpuva_op *__op;
2066 	struct drm_gpuvm_bo *vm_bo;
2067 	int err;
2068 
2069 	lockdep_assert_held_write(&vm->lock);
2070 
2071 	vm_dbg(&vm->xe->drm,
2072 	       "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2073 	       operation, (ULL)addr, (ULL)range,
2074 	       (ULL)bo_offset_or_userptr);
2075 
2076 	switch (operation) {
2077 	case DRM_XE_VM_BIND_OP_MAP:
2078 	case DRM_XE_VM_BIND_OP_MAP_USERPTR:
2079 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
2080 						  obj, bo_offset_or_userptr);
2081 		break;
2082 	case DRM_XE_VM_BIND_OP_UNMAP:
2083 		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2084 		break;
2085 	case DRM_XE_VM_BIND_OP_PREFETCH:
2086 		ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2087 		break;
2088 	case DRM_XE_VM_BIND_OP_UNMAP_ALL:
2089 		xe_assert(vm->xe, bo);
2090 
2091 		err = xe_bo_lock(bo, true);
2092 		if (err)
2093 			return ERR_PTR(err);
2094 
2095 		vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
2096 		if (IS_ERR(vm_bo)) {
2097 			xe_bo_unlock(bo);
2098 			return ERR_CAST(vm_bo);
2099 		}
2100 
2101 		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2102 		drm_gpuvm_bo_put(vm_bo);
2103 		xe_bo_unlock(bo);
2104 		break;
2105 	default:
2106 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2107 		ops = ERR_PTR(-EINVAL);
2108 	}
2109 	if (IS_ERR(ops))
2110 		return ops;
2111 
2112 	drm_gpuva_for_each_op(__op, ops) {
2113 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2114 
2115 		if (__op->op == DRM_GPUVA_OP_MAP) {
2116 			op->map.immediate =
2117 				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
2118 			op->map.read_only =
2119 				flags & DRM_XE_VM_BIND_FLAG_READONLY;
2120 			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2121 			op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
2122 			op->map.pat_index = pat_index;
2123 		} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
2124 			op->prefetch.region = prefetch_region;
2125 		}
2126 
2127 		print_op(vm->xe, __op);
2128 	}
2129 
2130 	return ops;
2131 }
2132 
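/*
 * Allocate the xe_vma for a MAP (or REMAP prev/next) op: take the required
 * dma_resv locks, create the VMA, then either pin the userptr pages or, for
 * an external BO, add the VM's preempt fences to it.
 */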
2133 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2134 			      u16 pat_index, unsigned int flags)
2135 {
2136 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2137 	struct drm_exec exec;
2138 	struct xe_vma *vma;
2139 	int err = 0;
2140 
2141 	lockdep_assert_held_write(&vm->lock);
2142 
2143 	if (bo) {
2144 		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
2145 		drm_exec_until_all_locked(&exec) {
2146 			err = 0;
2147 			if (!bo->vm) {
2148 				err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
2149 				drm_exec_retry_on_contention(&exec);
2150 			}
2151 			if (!err) {
2152 				err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2153 				drm_exec_retry_on_contention(&exec);
2154 			}
2155 			if (err) {
2156 				drm_exec_fini(&exec);
2157 				return ERR_PTR(err);
2158 			}
2159 		}
2160 	}
2161 	vma = xe_vma_create(vm, bo, op->gem.offset,
2162 			    op->va.addr, op->va.addr +
2163 			    op->va.range - 1, pat_index, flags);
2164 	if (IS_ERR(vma))
2165 		goto err_unlock;
2166 
2167 	if (xe_vma_is_userptr(vma))
2168 		err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2169 	else if (!xe_vma_has_no_bo(vma) && !bo->vm)
2170 		err = add_preempt_fences(vm, bo);
2171 
2172 err_unlock:
2173 	if (bo)
2174 		drm_exec_fini(&exec);
2175 
2176 	if (err) {
2177 		prep_vma_destroy(vm, vma, false);
2178 		xe_vma_destroy_unlocked(vma);
2179 		vma = ERR_PTR(err);
2180 	}
2181 
2182 	return vma;
2183 }
2184 
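/*
 * Largest page-table entry size this VMA has been bound with, as recorded in
 * its XE_VMA_PTE_* flags. The REMAP path uses it to decide whether a prev or
 * next split is aligned well enough that rebinding it can be skipped.
 */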
2185 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2186 {
2187 	if (vma->gpuva.flags & XE_VMA_PTE_1G)
2188 		return SZ_1G;
2189 	else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
2190 		return SZ_2M;
2191 	else if (vma->gpuva.flags & XE_VMA_PTE_64K)
2192 		return SZ_64K;
2193 	else if (vma->gpuva.flags & XE_VMA_PTE_4K)
2194 		return SZ_4K;
2195 
2196 	return SZ_1G;	/* Uninitialized, use max size */
2197 }
2198 
2199 static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2200 {
2201 	switch (size) {
2202 	case SZ_1G:
2203 		vma->gpuva.flags |= XE_VMA_PTE_1G;
2204 		break;
2205 	case SZ_2M:
2206 		vma->gpuva.flags |= XE_VMA_PTE_2M;
2207 		break;
2208 	case SZ_64K:
2209 		vma->gpuva.flags |= XE_VMA_PTE_64K;
2210 		break;
2211 	case SZ_4K:
2212 		vma->gpuva.flags |= XE_VMA_PTE_4K;
2213 		break;
2214 	}
2215 }
2216 
2217 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2218 {
2219 	int err = 0;
2220 
2221 	lockdep_assert_held_write(&vm->lock);
2222 
2223 	switch (op->base.op) {
2224 	case DRM_GPUVA_OP_MAP:
2225 		err |= xe_vm_insert_vma(vm, op->map.vma);
2226 		if (!err)
2227 			op->flags |= XE_VMA_OP_COMMITTED;
2228 		break;
2229 	case DRM_GPUVA_OP_REMAP:
2230 	{
2231 		u8 tile_present =
2232 			gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2233 
2234 		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2235 				 true);
2236 		op->flags |= XE_VMA_OP_COMMITTED;
2237 
2238 		if (op->remap.prev) {
2239 			err |= xe_vm_insert_vma(vm, op->remap.prev);
2240 			if (!err)
2241 				op->flags |= XE_VMA_OP_PREV_COMMITTED;
2242 			if (!err && op->remap.skip_prev) {
2243 				op->remap.prev->tile_present =
2244 					tile_present;
2245 				op->remap.prev = NULL;
2246 			}
2247 		}
2248 		if (op->remap.next) {
2249 			err |= xe_vm_insert_vma(vm, op->remap.next);
2250 			if (!err)
2251 				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2252 			if (!err && op->remap.skip_next) {
2253 				op->remap.next->tile_present =
2254 					tile_present;
2255 				op->remap.next = NULL;
2256 			}
2257 		}
2258 
2259 		/* Adjust for partial unbind after removing VMA from VM */
2260 		if (!err) {
2261 			op->base.remap.unmap->va->va.addr = op->remap.start;
2262 			op->base.remap.unmap->va->va.range = op->remap.range;
2263 		}
2264 		break;
2265 	}
2266 	case DRM_GPUVA_OP_UNMAP:
2267 		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2268 		op->flags |= XE_VMA_OP_COMMITTED;
2269 		break;
2270 	case DRM_GPUVA_OP_PREFETCH:
2271 		op->flags |= XE_VMA_OP_COMMITTED;
2272 		break;
2273 	default:
2274 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2275 	}
2276 
2277 	return err;
2278 }
2279 
2280 
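/*
 * Turn the GPUVM ops into a list of xe_vma_ops: create new VMAs for MAP and
 * for the surviving prev/next pieces of a REMAP, attach the syncs to the
 * first and last op, and commit each op to the VM's VA tree as we go so a
 * failure can be unwound precisely.
 */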
2281 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
2282 				   struct drm_gpuva_ops *ops,
2283 				   struct xe_sync_entry *syncs, u32 num_syncs,
2284 				   struct xe_vma_ops *vops, bool last)
2285 {
2286 	struct xe_device *xe = vm->xe;
2287 	struct xe_vma_op *last_op = NULL;
2288 	struct drm_gpuva_op *__op;
2289 	struct xe_tile *tile;
2290 	u8 id, tile_mask = 0;
2291 	int err = 0;
2292 
2293 	lockdep_assert_held_write(&vm->lock);
2294 
2295 	for_each_tile(tile, vm->xe, id)
2296 		tile_mask |= 0x1 << id;
2297 
2298 	drm_gpuva_for_each_op(__op, ops) {
2299 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2300 		struct xe_vma *vma;
2301 		bool first = list_empty(&vops->list);
2302 		unsigned int flags = 0;
2303 
2304 		INIT_LIST_HEAD(&op->link);
2305 		list_add_tail(&op->link, &vops->list);
2306 
2307 		if (first) {
2308 			op->flags |= XE_VMA_OP_FIRST;
2309 			op->num_syncs = num_syncs;
2310 			op->syncs = syncs;
2311 		}
2312 
2313 		op->q = q;
2314 		op->tile_mask = tile_mask;
2315 
2316 		switch (op->base.op) {
2317 		case DRM_GPUVA_OP_MAP:
2318 		{
2319 			flags |= op->map.read_only ?
2320 				VMA_CREATE_FLAG_READ_ONLY : 0;
2321 			flags |= op->map.is_null ?
2322 				VMA_CREATE_FLAG_IS_NULL : 0;
2323 			flags |= op->map.dumpable ?
2324 				VMA_CREATE_FLAG_DUMPABLE : 0;
2325 
2326 			vma = new_vma(vm, &op->base.map, op->map.pat_index,
2327 				      flags);
2328 			if (IS_ERR(vma))
2329 				return PTR_ERR(vma);
2330 
2331 			op->map.vma = vma;
2332 			break;
2333 		}
2334 		case DRM_GPUVA_OP_REMAP:
2335 		{
2336 			struct xe_vma *old =
2337 				gpuva_to_vma(op->base.remap.unmap->va);
2338 
2339 			op->remap.start = xe_vma_start(old);
2340 			op->remap.range = xe_vma_size(old);
2341 
2342 			if (op->base.remap.prev) {
2343 				flags |= op->base.remap.unmap->va->flags &
2344 					XE_VMA_READ_ONLY ?
2345 					VMA_CREATE_FLAG_READ_ONLY : 0;
2346 				flags |= op->base.remap.unmap->va->flags &
2347 					DRM_GPUVA_SPARSE ?
2348 					VMA_CREATE_FLAG_IS_NULL : 0;
2349 				flags |= op->base.remap.unmap->va->flags &
2350 					XE_VMA_DUMPABLE ?
2351 					VMA_CREATE_FLAG_DUMPABLE : 0;
2352 
2353 				vma = new_vma(vm, op->base.remap.prev,
2354 					      old->pat_index, flags);
2355 				if (IS_ERR(vma))
2356 					return PTR_ERR(vma);
2357 
2358 				op->remap.prev = vma;
2359 
2360 				/*
2361 				 * Userptr creates a new SG mapping so
2362 				 * we must also rebind.
2363 				 */
2364 				op->remap.skip_prev = !xe_vma_is_userptr(old) &&
2365 					IS_ALIGNED(xe_vma_end(vma),
2366 						   xe_vma_max_pte_size(old));
2367 				if (op->remap.skip_prev) {
2368 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2369 					op->remap.range -=
2370 						xe_vma_end(vma) -
2371 						xe_vma_start(old);
2372 					op->remap.start = xe_vma_end(vma);
2373 					vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
2374 					       (ULL)op->remap.start,
2375 					       (ULL)op->remap.range);
2376 				}
2377 			}
2378 
2379 			if (op->base.remap.next) {
2380 				flags |= op->base.remap.unmap->va->flags &
2381 					XE_VMA_READ_ONLY ?
2382 					VMA_CREATE_FLAG_READ_ONLY : 0;
2383 				flags |= op->base.remap.unmap->va->flags &
2384 					DRM_GPUVA_SPARSE ?
2385 					VMA_CREATE_FLAG_IS_NULL : 0;
2386 				flags |= op->base.remap.unmap->va->flags &
2387 					XE_VMA_DUMPABLE ?
2388 					VMA_CREATE_FLAG_DUMPABLE : 0;
2389 
2390 				vma = new_vma(vm, op->base.remap.next,
2391 					      old->pat_index, flags);
2392 				if (IS_ERR(vma))
2393 					return PTR_ERR(vma);
2394 
2395 				op->remap.next = vma;
2396 
2397 				/*
2398 				 * Userptr creates a new SG mapping so
2399 				 * we must also rebind.
2400 				 */
2401 				op->remap.skip_next = !xe_vma_is_userptr(old) &&
2402 					IS_ALIGNED(xe_vma_start(vma),
2403 						   xe_vma_max_pte_size(old));
2404 				if (op->remap.skip_next) {
2405 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2406 					op->remap.range -=
2407 						xe_vma_end(old) -
2408 						xe_vma_start(vma);
2409 					vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
2410 					       (ULL)op->remap.start,
2411 					       (ULL)op->remap.range);
2412 				}
2413 			}
2414 			break;
2415 		}
2416 		case DRM_GPUVA_OP_UNMAP:
2417 		case DRM_GPUVA_OP_PREFETCH:
2418 			/* Nothing to do */
2419 			break;
2420 		default:
2421 			drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2422 		}
2423 
2424 		last_op = op;
2425 
2426 		err = xe_vma_op_commit(vm, op);
2427 		if (err)
2428 			return err;
2429 	}
2430 
2431 	/* FIXME: Unhandled corner case */
2432 	XE_WARN_ON(!last_op && last && !list_empty(&vops->list));
2433 
2434 	if (!last_op)
2435 		return 0;
2436 
2437 	if (last) {
2438 		last_op->flags |= XE_VMA_OP_LAST;
2439 		last_op->num_syncs = num_syncs;
2440 		last_op->syncs = syncs;
2441 	}
2442 
2443 	return 0;
2444 }
2445 
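/*
 * Issue the GPU (un)bind(s) for a single parsed op. A REMAP is an unbind of
 * the old VMA followed by optional rebinds of the surviving prev/next pieces;
 * op->remap.unmap_done lets the -EAGAIN retry path resume mid-op.
 */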
2446 static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
2447 				    struct xe_vma_op *op)
2448 {
2449 	struct dma_fence *fence = NULL;
2450 
2451 	lockdep_assert_held(&vm->lock);
2452 
2453 	xe_vm_assert_held(vm);
2454 	xe_bo_assert_held(xe_vma_bo(vma));
2455 
2456 	switch (op->base.op) {
2457 	case DRM_GPUVA_OP_MAP:
2458 		fence = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
2459 				   op->syncs, op->num_syncs,
2460 				   op->tile_mask,
2461 				   op->map.immediate || !xe_vm_in_fault_mode(vm),
2462 				   op->flags & XE_VMA_OP_FIRST,
2463 				   op->flags & XE_VMA_OP_LAST);
2464 		break;
2465 	case DRM_GPUVA_OP_REMAP:
2466 	{
2467 		bool prev = !!op->remap.prev;
2468 		bool next = !!op->remap.next;
2469 
2470 		if (!op->remap.unmap_done) {
2471 			if (prev || next)
2472 				vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
2473 			fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
2474 					     op->num_syncs,
2475 					     op->flags & XE_VMA_OP_FIRST,
2476 					     op->flags & XE_VMA_OP_LAST &&
2477 					     !prev && !next);
2478 			if (IS_ERR(fence))
2479 				break;
2480 			op->remap.unmap_done = true;
2481 		}
2482 
2483 		if (prev) {
2484 			op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
2485 			dma_fence_put(fence);
2486 			fence = xe_vm_bind(vm, op->remap.prev, op->q,
2487 					   xe_vma_bo(op->remap.prev), op->syncs,
2488 					   op->num_syncs,
2489 					   op->remap.prev->tile_mask, true,
2490 					   false,
2491 					   op->flags & XE_VMA_OP_LAST && !next);
2492 			op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2493 			if (IS_ERR(fence))
2494 				break;
2495 			op->remap.prev = NULL;
2496 		}
2497 
2498 		if (next) {
2499 			op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
2500 			dma_fence_put(fence);
2501 			fence = xe_vm_bind(vm, op->remap.next, op->q,
2502 					   xe_vma_bo(op->remap.next),
2503 					   op->syncs, op->num_syncs,
2504 					   op->remap.next->tile_mask, true,
2505 					   false, op->flags & XE_VMA_OP_LAST);
2506 			op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
2507 			if (IS_ERR(fence))
2508 				break;
2509 			op->remap.next = NULL;
2510 		}
2511 
2512 		break;
2513 	}
2514 	case DRM_GPUVA_OP_UNMAP:
2515 		fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
2516 				     op->num_syncs, op->flags & XE_VMA_OP_FIRST,
2517 				     op->flags & XE_VMA_OP_LAST);
2518 		break;
2519 	case DRM_GPUVA_OP_PREFETCH:
2520 		fence = xe_vm_prefetch(vm, vma, op->q, op->syncs, op->num_syncs,
2521 				       op->flags & XE_VMA_OP_FIRST,
2522 				       op->flags & XE_VMA_OP_LAST);
2523 		break;
2524 	default:
2525 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2526 	}
2527 
2528 	if (IS_ERR(fence))
2529 		trace_xe_vma_fail(vma);
2530 
2531 	return fence;
2532 }
2533 
2534 static struct dma_fence *
2535 __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
2536 		    struct xe_vma_op *op)
2537 {
2538 	struct dma_fence *fence;
2539 	int err;
2540 
2541 retry_userptr:
2542 	fence = op_execute(vm, vma, op);
2543 	if (IS_ERR(fence) && PTR_ERR(fence) == -EAGAIN) {
2544 		lockdep_assert_held_write(&vm->lock);
2545 
2546 		if (op->base.op == DRM_GPUVA_OP_REMAP) {
2547 			if (!op->remap.unmap_done)
2548 				vma = gpuva_to_vma(op->base.remap.unmap->va);
2549 			else if (op->remap.prev)
2550 				vma = op->remap.prev;
2551 			else
2552 				vma = op->remap.next;
2553 		}
2554 
2555 		if (xe_vma_is_userptr(vma)) {
2556 			err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2557 			if (!err)
2558 				goto retry_userptr;
2559 
2560 			fence = ERR_PTR(err);
2561 			trace_xe_vma_fail(vma);
2562 		}
2563 	}
2564 
2565 	return fence;
2566 }
2567 
2568 static struct dma_fence *
2569 xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
2570 {
2571 	struct dma_fence *fence = ERR_PTR(-ENOMEM);
2572 
2573 	lockdep_assert_held(&vm->lock);
2574 
2575 	switch (op->base.op) {
2576 	case DRM_GPUVA_OP_MAP:
2577 		fence = __xe_vma_op_execute(vm, op->map.vma, op);
2578 		break;
2579 	case DRM_GPUVA_OP_REMAP:
2580 	{
2581 		struct xe_vma *vma;
2582 
2583 		if (!op->remap.unmap_done)
2584 			vma = gpuva_to_vma(op->base.remap.unmap->va);
2585 		else if (op->remap.prev)
2586 			vma = op->remap.prev;
2587 		else
2588 			vma = op->remap.next;
2589 
2590 		fence = __xe_vma_op_execute(vm, vma, op);
2591 		break;
2592 	}
2593 	case DRM_GPUVA_OP_UNMAP:
2594 		fence = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
2595 					    op);
2596 		break;
2597 	case DRM_GPUVA_OP_PREFETCH:
2598 		fence = __xe_vma_op_execute(vm,
2599 					    gpuva_to_vma(op->base.prefetch.va),
2600 					    op);
2601 		break;
2602 	default:
2603 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2604 	}
2605 
2606 	return fence;
2607 }
2608 
2609 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2610 			     bool post_commit, bool prev_post_commit,
2611 			     bool next_post_commit)
2612 {
2613 	lockdep_assert_held_write(&vm->lock);
2614 
2615 	switch (op->base.op) {
2616 	case DRM_GPUVA_OP_MAP:
2617 		if (op->map.vma) {
2618 			prep_vma_destroy(vm, op->map.vma, post_commit);
2619 			xe_vma_destroy_unlocked(op->map.vma);
2620 		}
2621 		break;
2622 	case DRM_GPUVA_OP_UNMAP:
2623 	{
2624 		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2625 
2626 		if (vma) {
2627 			down_read(&vm->userptr.notifier_lock);
2628 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2629 			up_read(&vm->userptr.notifier_lock);
2630 			if (post_commit)
2631 				xe_vm_insert_vma(vm, vma);
2632 		}
2633 		break;
2634 	}
2635 	case DRM_GPUVA_OP_REMAP:
2636 	{
2637 		struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2638 
2639 		if (op->remap.prev) {
2640 			prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2641 			xe_vma_destroy_unlocked(op->remap.prev);
2642 		}
2643 		if (op->remap.next) {
2644 			prep_vma_destroy(vm, op->remap.next, next_post_commit);
2645 			xe_vma_destroy_unlocked(op->remap.next);
2646 		}
2647 		if (vma) {
2648 			down_read(&vm->userptr.notifier_lock);
2649 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2650 			up_read(&vm->userptr.notifier_lock);
2651 			if (post_commit)
2652 				xe_vm_insert_vma(vm, vma);
2653 		}
2654 		break;
2655 	}
2656 	case DRM_GPUVA_OP_PREFETCH:
2657 		/* Nothing to do */
2658 		break;
2659 	default:
2660 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2661 	}
2662 }
2663 
2664 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2665 				     struct drm_gpuva_ops **ops,
2666 				     int num_ops_list)
2667 {
2668 	int i;
2669 
2670 	for (i = num_ops_list - 1; i >= 0; --i) {
2671 		struct drm_gpuva_ops *__ops = ops[i];
2672 		struct drm_gpuva_op *__op;
2673 
2674 		if (!__ops)
2675 			continue;
2676 
2677 		drm_gpuva_for_each_op_reverse(__op, __ops) {
2678 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2679 
2680 			xe_vma_op_unwind(vm, op,
2681 					 op->flags & XE_VMA_OP_COMMITTED,
2682 					 op->flags & XE_VMA_OP_PREV_COMMITTED,
2683 					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2684 		}
2685 	}
2686 }
2687 
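/*
 * Lock the VMA's backing BO if it is external (a VM-private BO shares the
 * VM's dma_resv, which is already locked) and optionally validate it so the
 * backing store is resident before the bind executes.
 */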
2688 static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
2689 				 bool validate)
2690 {
2691 	struct xe_bo *bo = xe_vma_bo(vma);
2692 	int err = 0;
2693 
2694 	if (bo) {
2695 		if (!bo->vm)
2696 			err = drm_exec_lock_obj(exec, &bo->ttm.base);
2697 		if (!err && validate)
2698 			err = xe_bo_validate(bo, xe_vma_vm(vma), true);
2699 	}
2700 
2701 	return err;
2702 }
2703 
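/*
 * A VMA that still carries an unsignalled user fence from a previous bind
 * cannot be unmapped yet; fail with -EBUSY. Once signalled, drop our
 * reference to the fence.
 */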
2704 static int check_ufence(struct xe_vma *vma)
2705 {
2706 	if (vma->ufence) {
2707 		struct xe_user_fence * const f = vma->ufence;
2708 
2709 		if (!xe_sync_ufence_get_status(f))
2710 			return -EBUSY;
2711 
2712 		vma->ufence = NULL;
2713 		xe_sync_ufence_put(f);
2714 	}
2715 
2716 	return 0;
2717 }
2718 
2719 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
2720 			    struct xe_vma_op *op)
2721 {
2722 	int err = 0;
2723 
2724 	switch (op->base.op) {
2725 	case DRM_GPUVA_OP_MAP:
2726 		err = vma_lock_and_validate(exec, op->map.vma,
2727 					    !xe_vm_in_fault_mode(vm) ||
2728 					    op->map.immediate);
2729 		break;
2730 	case DRM_GPUVA_OP_REMAP:
2731 		err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
2732 		if (err)
2733 			break;
2734 
2735 		err = vma_lock_and_validate(exec,
2736 					    gpuva_to_vma(op->base.remap.unmap->va),
2737 					    false);
2738 		if (!err && op->remap.prev)
2739 			err = vma_lock_and_validate(exec, op->remap.prev, true);
2740 		if (!err && op->remap.next)
2741 			err = vma_lock_and_validate(exec, op->remap.next, true);
2742 		break;
2743 	case DRM_GPUVA_OP_UNMAP:
2744 		err = check_ufence(gpuva_to_vma(op->base.unmap.va));
2745 		if (err)
2746 			break;
2747 
2748 		err = vma_lock_and_validate(exec,
2749 					    gpuva_to_vma(op->base.unmap.va),
2750 					    false);
2751 		break;
2752 	case DRM_GPUVA_OP_PREFETCH:
2753 	{
2754 		struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
2755 		u32 region = op->prefetch.region;
2756 
2757 		xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
2758 
2759 		err = vma_lock_and_validate(exec,
2760 					    gpuva_to_vma(op->base.prefetch.va),
2761 					    false);
2762 		if (!err && !xe_vma_has_no_bo(vma))
2763 			err = xe_bo_migrate(xe_vma_bo(vma),
2764 					    region_to_mem_type[region]);
2765 		break;
2766 	}
2767 	default:
2768 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2769 	}
2770 
2771 	return err;
2772 }
2773 
2774 static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
2775 					   struct xe_vm *vm,
2776 					   struct xe_vma_ops *vops)
2777 {
2778 	struct xe_vma_op *op;
2779 	int err;
2780 
2781 	err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
2782 	if (err)
2783 		return err;
2784 
2785 	list_for_each_entry(op, &vops->list, link) {
2786 		err = op_lock_and_prep(exec, vm, op);
2787 		if (err)
2788 			return err;
2789 	}
2790 
2791 	return 0;
2792 }
2793 
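/*
 * Execute the parsed ops in order. Only the fence of the last successful op
 * is kept; earlier fences are dropped as each op supersedes the previous one.
 */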
2794 static struct dma_fence *ops_execute(struct xe_vm *vm,
2795 				     struct xe_vma_ops *vops)
2796 {
2797 	struct xe_vma_op *op, *next;
2798 	struct dma_fence *fence = NULL;
2799 
2800 	list_for_each_entry_safe(op, next, &vops->list, link) {
2801 		dma_fence_put(fence);
2802 		fence = xe_vma_op_execute(vm, op);
2803 		if (IS_ERR(fence)) {
2804 			drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld",
2805 				 op->base.op, PTR_ERR(fence));
2806 			fence = ERR_PTR(-ENOSPC);
2807 			break;
2808 		}
2809 	}
2810 
2811 	return fence;
2812 }
2813 
2814 static void vma_add_ufence(struct xe_vma *vma, struct xe_user_fence *ufence)
2815 {
2816 	if (vma->ufence)
2817 		xe_sync_ufence_put(vma->ufence);
2818 	vma->ufence = __xe_sync_ufence_get(ufence);
2819 }
2820 
2821 static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op,
2822 			  struct xe_user_fence *ufence)
2823 {
2824 	switch (op->base.op) {
2825 	case DRM_GPUVA_OP_MAP:
2826 		vma_add_ufence(op->map.vma, ufence);
2827 		break;
2828 	case DRM_GPUVA_OP_REMAP:
2829 		if (op->remap.prev)
2830 			vma_add_ufence(op->remap.prev, ufence);
2831 		if (op->remap.next)
2832 			vma_add_ufence(op->remap.next, ufence);
2833 		break;
2834 	case DRM_GPUVA_OP_UNMAP:
2835 		break;
2836 	case DRM_GPUVA_OP_PREFETCH:
2837 		vma_add_ufence(gpuva_to_vma(op->base.prefetch.va), ufence);
2838 		break;
2839 	default:
2840 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2841 	}
2842 }
2843 
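/*
 * Post-execution bookkeeping: attach any user fence to the VMAs it covers,
 * destroy the VMAs removed by UNMAP/REMAP against @fence, signal the ioctl's
 * syncs and publish @fence as the queue's last fence.
 */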
2844 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
2845 				   struct dma_fence *fence)
2846 {
2847 	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
2848 	struct xe_user_fence *ufence;
2849 	struct xe_vma_op *op;
2850 	int i;
2851 
2852 	ufence = find_ufence_get(vops->syncs, vops->num_syncs);
2853 	list_for_each_entry(op, &vops->list, link) {
2854 		if (ufence)
2855 			op_add_ufence(vm, op, ufence);
2856 
2857 		if (op->base.op == DRM_GPUVA_OP_UNMAP)
2858 			xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
2859 		else if (op->base.op == DRM_GPUVA_OP_REMAP)
2860 			xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
2861 				       fence);
2862 	}
2863 	if (ufence)
2864 		xe_sync_ufence_put(ufence);
2865 	for (i = 0; i < vops->num_syncs; i++)
2866 		xe_sync_entry_signal(vops->syncs + i, fence);
2867 	xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
2868 	dma_fence_put(fence);
2869 }
2870 
2871 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
2872 				     struct xe_vma_ops *vops)
2873 {
2874 	struct drm_exec exec;
2875 	struct dma_fence *fence;
2876 	int err;
2877 
2878 	lockdep_assert_held_write(&vm->lock);
2879 
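	/*
	 * Lock the VM's dma_resv and any external BOs the ops touch, retrying
	 * the whole prep step if a contended lock forces a back-off.
	 */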
2880 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
2881 		      DRM_EXEC_IGNORE_DUPLICATES, 0);
2882 	drm_exec_until_all_locked(&exec) {
2883 		err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
2884 		drm_exec_retry_on_contention(&exec);
2885 		if (err)
2886 			goto unlock;
2887 
2888 		fence = ops_execute(vm, vops);
2889 		if (IS_ERR(fence)) {
2890 			err = PTR_ERR(fence);
2891 			/* FIXME: Killing VM rather than proper error handling */
2892 			xe_vm_kill(vm, false);
2893 			goto unlock;
2894 		} else {
2895 			vm_bind_ioctl_ops_fini(vm, vops, fence);
2896 		}
2897 	}
2898 
2899 unlock:
2900 	drm_exec_fini(&exec);
2901 	return err;
2902 }
2903 
2904 #define SUPPORTED_FLAGS	\
2905 	(DRM_XE_VM_BIND_FLAG_READONLY | \
2906 	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
2907 	 DRM_XE_VM_BIND_FLAG_NULL | \
2908 	 DRM_XE_VM_BIND_FLAG_DUMPABLE)
2909 #define XE_64K_PAGE_MASK 0xffffull
2910 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
2911 
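/*
 * Copy the bind-op array from user space (a single op lives inline in the
 * ioctl args, multiple ops come via vector_of_binds) and validate per-op
 * flags, PAT index and page alignment before touching any VM state.
 */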
2912 static int vm_bind_ioctl_check_args(struct xe_device *xe,
2913 				    struct drm_xe_vm_bind *args,
2914 				    struct drm_xe_vm_bind_op **bind_ops)
2915 {
2916 	int err;
2917 	int i;
2918 
2919 	if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
2920 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2921 		return -EINVAL;
2922 
2923 	if (XE_IOCTL_DBG(xe, args->extensions))
2924 		return -EINVAL;
2925 
2926 	if (args->num_binds > 1) {
2927 		u64 __user *bind_user =
2928 			u64_to_user_ptr(args->vector_of_binds);
2929 
2930 		*bind_ops = kvmalloc_array(args->num_binds,
2931 					   sizeof(struct drm_xe_vm_bind_op),
2932 					   GFP_KERNEL | __GFP_ACCOUNT);
2933 		if (!*bind_ops)
2934 			return -ENOMEM;
2935 
2936 		err = __copy_from_user(*bind_ops, bind_user,
2937 				       sizeof(struct drm_xe_vm_bind_op) *
2938 				       args->num_binds);
2939 		if (XE_IOCTL_DBG(xe, err)) {
2940 			err = -EFAULT;
2941 			goto free_bind_ops;
2942 		}
2943 	} else {
2944 		*bind_ops = &args->bind;
2945 	}
2946 
2947 	for (i = 0; i < args->num_binds; ++i) {
2948 		u64 range = (*bind_ops)[i].range;
2949 		u64 addr = (*bind_ops)[i].addr;
2950 		u32 op = (*bind_ops)[i].op;
2951 		u32 flags = (*bind_ops)[i].flags;
2952 		u32 obj = (*bind_ops)[i].obj;
2953 		u64 obj_offset = (*bind_ops)[i].obj_offset;
2954 		u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
2955 		bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
2956 		u16 pat_index = (*bind_ops)[i].pat_index;
2957 		u16 coh_mode;
2958 
2959 		if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
2960 			err = -EINVAL;
2961 			goto free_bind_ops;
2962 		}
2963 
2964 		pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
2965 		(*bind_ops)[i].pat_index = pat_index;
2966 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
2967 		if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
2968 			err = -EINVAL;
2969 			goto free_bind_ops;
2970 		}
2971 
2972 		if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
2973 			err = -EINVAL;
2974 			goto free_bind_ops;
2975 		}
2976 
2977 		if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
2978 		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
2979 		    XE_IOCTL_DBG(xe, obj && is_null) ||
2980 		    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
2981 		    XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
2982 				 is_null) ||
2983 		    XE_IOCTL_DBG(xe, !obj &&
2984 				 op == DRM_XE_VM_BIND_OP_MAP &&
2985 				 !is_null) ||
2986 		    XE_IOCTL_DBG(xe, !obj &&
2987 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2988 		    XE_IOCTL_DBG(xe, addr &&
2989 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2990 		    XE_IOCTL_DBG(xe, range &&
2991 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
2992 		    XE_IOCTL_DBG(xe, obj &&
2993 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2994 		    XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
2995 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
2996 		    XE_IOCTL_DBG(xe, obj &&
2997 				 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
2998 		    XE_IOCTL_DBG(xe, prefetch_region &&
2999 				 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
3000 		    XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
3001 				       xe->info.mem_region_mask)) ||
3002 		    XE_IOCTL_DBG(xe, obj &&
3003 				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
3004 			err = -EINVAL;
3005 			goto free_bind_ops;
3006 		}
3007 
3008 		if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
3009 		    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
3010 		    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
3011 		    XE_IOCTL_DBG(xe, !range &&
3012 				 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
3013 			err = -EINVAL;
3014 			goto free_bind_ops;
3015 		}
3016 	}
3017 
3018 	return 0;
3019 
3020 free_bind_ops:
3021 	if (args->num_binds > 1)
3022 		kvfree(*bind_ops);
3023 	return err;
3024 }
3025 
3026 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
3027 				       struct xe_exec_queue *q,
3028 				       struct xe_sync_entry *syncs,
3029 				       int num_syncs)
3030 {
3031 	struct dma_fence *fence;
3032 	int i, err = 0;
3033 
3034 	fence = xe_sync_in_fence_get(syncs, num_syncs,
3035 				     to_wait_exec_queue(vm, q), vm);
3036 	if (IS_ERR(fence))
3037 		return PTR_ERR(fence);
3038 
3039 	for (i = 0; i < num_syncs; i++)
3040 		xe_sync_entry_signal(&syncs[i], fence);
3041 
3042 	xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
3043 				     fence);
3044 	dma_fence_put(fence);
3045 
3046 	return err;
3047 }
3048 
3049 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
3050 			    struct xe_exec_queue *q,
3051 			    struct xe_sync_entry *syncs, u32 num_syncs)
3052 {
3053 	memset(vops, 0, sizeof(*vops));
3054 	INIT_LIST_HEAD(&vops->list);
3055 	vops->vm = vm;
3056 	vops->q = q;
3057 	vops->syncs = syncs;
3058 	vops->num_syncs = num_syncs;
3059 }
3060 
3061 static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
3062 					u64 addr, u64 range, u64 obj_offset,
3063 					u16 pat_index)
3064 {
3065 	u16 coh_mode;
3066 
3067 	if (XE_IOCTL_DBG(xe, range > bo->size) ||
3068 	    XE_IOCTL_DBG(xe, obj_offset >
3069 			 bo->size - range)) {
3070 		return -EINVAL;
3071 	}
3072 
3073 	if (bo->flags & XE_BO_FLAG_INTERNAL_64K) {
3074 		if (XE_IOCTL_DBG(xe, obj_offset &
3075 				 XE_64K_PAGE_MASK) ||
3076 		    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3077 		    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3078 			return -EINVAL;
3079 		}
3080 	}
3081 
3082 	coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
3083 	if (bo->cpu_caching) {
3084 		if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
3085 				 bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
3086 			return -EINVAL;
3087 		}
3088 	} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
3089 		/*
3090 		 * An imported dma-buf from a different device should
3091 		 * require 1-way or 2-way coherency since we don't know
3092 		 * how it was mapped on the CPU. Just assume it is
3093 		 * potentially cached on the CPU side.
3094 		 */
3095 		return -EINVAL;
3096 	}
3097 
3098 	return 0;
3099 }
3100 
3101 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3102 {
3103 	struct xe_device *xe = to_xe_device(dev);
3104 	struct xe_file *xef = to_xe_file(file);
3105 	struct drm_xe_vm_bind *args = data;
3106 	struct drm_xe_sync __user *syncs_user;
3107 	struct xe_bo **bos = NULL;
3108 	struct drm_gpuva_ops **ops = NULL;
3109 	struct xe_vm *vm;
3110 	struct xe_exec_queue *q = NULL;
3111 	u32 num_syncs, num_ufence = 0;
3112 	struct xe_sync_entry *syncs = NULL;
3113 	struct drm_xe_vm_bind_op *bind_ops;
3114 	struct xe_vma_ops vops;
3115 	int err;
3116 	int i;
3117 
3118 	err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
3119 	if (err)
3120 		return err;
3121 
3122 	if (args->exec_queue_id) {
3123 		q = xe_exec_queue_lookup(xef, args->exec_queue_id);
3124 		if (XE_IOCTL_DBG(xe, !q)) {
3125 			err = -ENOENT;
3126 			goto free_objs;
3127 		}
3128 
3129 		if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
3130 			err = -EINVAL;
3131 			goto put_exec_queue;
3132 		}
3133 	}
3134 
3135 	vm = xe_vm_lookup(xef, args->vm_id);
3136 	if (XE_IOCTL_DBG(xe, !vm)) {
3137 		err = -EINVAL;
3138 		goto put_exec_queue;
3139 	}
3140 
3141 	err = down_write_killable(&vm->lock);
3142 	if (err)
3143 		goto put_vm;
3144 
3145 	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
3146 		err = -ENOENT;
3147 		goto release_vm_lock;
3148 	}
3149 
3150 	for (i = 0; i < args->num_binds; ++i) {
3151 		u64 range = bind_ops[i].range;
3152 		u64 addr = bind_ops[i].addr;
3153 
3154 		if (XE_IOCTL_DBG(xe, range > vm->size) ||
3155 		    XE_IOCTL_DBG(xe, addr > vm->size - range)) {
3156 			err = -EINVAL;
3157 			goto release_vm_lock;
3158 		}
3159 	}
3160 
3161 	if (args->num_binds) {
3162 		bos = kvcalloc(args->num_binds, sizeof(*bos),
3163 			       GFP_KERNEL | __GFP_ACCOUNT);
3164 		if (!bos) {
3165 			err = -ENOMEM;
3166 			goto release_vm_lock;
3167 		}
3168 
3169 		ops = kvcalloc(args->num_binds, sizeof(*ops),
3170 			       GFP_KERNEL | __GFP_ACCOUNT);
3171 		if (!ops) {
3172 			err = -ENOMEM;
3173 			goto release_vm_lock;
3174 		}
3175 	}
3176 
3177 	for (i = 0; i < args->num_binds; ++i) {
3178 		struct drm_gem_object *gem_obj;
3179 		u64 range = bind_ops[i].range;
3180 		u64 addr = bind_ops[i].addr;
3181 		u32 obj = bind_ops[i].obj;
3182 		u64 obj_offset = bind_ops[i].obj_offset;
3183 		u16 pat_index = bind_ops[i].pat_index;
3184 
3185 		if (!obj)
3186 			continue;
3187 
3188 		gem_obj = drm_gem_object_lookup(file, obj);
3189 		if (XE_IOCTL_DBG(xe, !gem_obj)) {
3190 			err = -ENOENT;
3191 			goto put_obj;
3192 		}
3193 		bos[i] = gem_to_xe_bo(gem_obj);
3194 
3195 		err = xe_vm_bind_ioctl_validate_bo(xe, bos[i], addr, range,
3196 						   obj_offset, pat_index);
3197 		if (err)
3198 			goto put_obj;
3199 	}
3200 
3201 	if (args->num_syncs) {
3202 		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3203 		if (!syncs) {
3204 			err = -ENOMEM;
3205 			goto put_obj;
3206 		}
3207 	}
3208 
3209 	syncs_user = u64_to_user_ptr(args->syncs);
3210 	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3211 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3212 					  &syncs_user[num_syncs],
3213 					  (xe_vm_in_lr_mode(vm) ?
3214 					   SYNC_PARSE_FLAG_LR_MODE : 0) |
3215 					  (!args->num_binds ?
3216 					   SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
3217 		if (err)
3218 			goto free_syncs;
3219 
3220 		if (xe_sync_is_ufence(&syncs[num_syncs]))
3221 			num_ufence++;
3222 	}
3223 
3224 	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
3225 		err = -EINVAL;
3226 		goto free_syncs;
3227 	}
3228 
3229 	if (!args->num_binds) {
3230 		err = -ENODATA;
3231 		goto free_syncs;
3232 	}
3233 
3234 	xe_vma_ops_init(&vops, vm, q, syncs, num_syncs);
3235 	for (i = 0; i < args->num_binds; ++i) {
3236 		u64 range = bind_ops[i].range;
3237 		u64 addr = bind_ops[i].addr;
3238 		u32 op = bind_ops[i].op;
3239 		u32 flags = bind_ops[i].flags;
3240 		u64 obj_offset = bind_ops[i].obj_offset;
3241 		u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
3242 		u16 pat_index = bind_ops[i].pat_index;
3243 
3244 		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
3245 						  addr, range, op, flags,
3246 						  prefetch_region, pat_index);
3247 		if (IS_ERR(ops[i])) {
3248 			err = PTR_ERR(ops[i]);
3249 			ops[i] = NULL;
3250 			goto unwind_ops;
3251 		}
3252 
3253 		err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
3254 					      &vops, i == args->num_binds - 1);
3255 		if (err)
3256 			goto unwind_ops;
3257 	}
3258 
3259 	/* Nothing to do */
3260 	if (list_empty(&vops.list)) {
3261 		err = -ENODATA;
3262 		goto unwind_ops;
3263 	}
3264 
3265 	err = vm_bind_ioctl_ops_execute(vm, &vops);
3266 
3267 unwind_ops:
3268 	if (err && err != -ENODATA)
3269 		vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3270 	for (i = args->num_binds - 1; i >= 0; --i)
3271 		if (ops[i])
3272 			drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
3273 free_syncs:
3274 	if (err == -ENODATA)
3275 		err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3276 	while (num_syncs--)
3277 		xe_sync_entry_cleanup(&syncs[num_syncs]);
3278 
3279 	kfree(syncs);
3280 put_obj:
3281 	for (i = 0; i < args->num_binds; ++i)
3282 		xe_bo_put(bos[i]);
3283 release_vm_lock:
3284 	up_write(&vm->lock);
3285 put_vm:
3286 	xe_vm_put(vm);
3287 put_exec_queue:
3288 	if (q)
3289 		xe_exec_queue_put(q);
3290 free_objs:
3291 	kvfree(bos);
3292 	kvfree(ops);
3293 	if (args->num_binds > 1)
3294 		kvfree(bind_ops);
3295 	return err;
3296 }
3297 
3298 /**
3299  * xe_vm_lock() - Lock the vm's dma_resv object
3300  * @vm: The struct xe_vm whose lock is to be locked
3301  * @intr: Whether to perform any waits interruptibly
3302  *
3303  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3304  * contended lock was interrupted. If @intr is false, the function
3305  * always returns 0.
3306  */
3307 int xe_vm_lock(struct xe_vm *vm, bool intr)
3308 {
3309 	if (intr)
3310 		return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3311 
3312 	return dma_resv_lock(xe_vm_resv(vm), NULL);
3313 }
3314 
3315 /**
3316  * xe_vm_unlock() - Unlock the vm's dma_resv object
3317  * @vm: The struct xe_vm whose lock is to be released.
3318  *
3319  * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
3320  */
3321 void xe_vm_unlock(struct xe_vm *vm)
3322 {
3323 	dma_resv_unlock(xe_vm_resv(vm));
3324 }
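
/*
 * Minimal illustrative caller pattern for the two helpers above (a sketch
 * only, not lifted from an actual caller):
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *	... touch state protected by the VM's dma_resv ...
 *	xe_vm_unlock(vm);
 */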
3325 
3326 /**
3327  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3328  * @vma: VMA to invalidate
3329  *
3330  * Walks a list of page-table leaves, zeroing the entries owned by this
3331  * VMA, invalidates the TLBs, and blocks until the TLB invalidation is
3332  * complete.
3333  *
3334  * Return: 0 on success, a negative error code otherwise.
3335  */
3336 int xe_vm_invalidate_vma(struct xe_vma *vma)
3337 {
3338 	struct xe_device *xe = xe_vma_vm(vma)->xe;
3339 	struct xe_tile *tile;
3340 	u32 tile_needs_invalidate = 0;
3341 	int seqno[XE_MAX_TILES_PER_DEVICE];
3342 	u8 id;
3343 	int ret;
3344 
3345 	xe_assert(xe, !xe_vma_is_null(vma));
3346 	trace_xe_vma_invalidate(vma);
3347 
3348 	vm_dbg(&xe_vma_vm(vma)->xe->drm,
3349 	       "INVALIDATE: addr=0x%016llx, range=0x%016llx",
3350 		xe_vma_start(vma), xe_vma_size(vma));
3351 
3352 	/* Check that we don't race with page-table updates */
3353 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3354 		if (xe_vma_is_userptr(vma)) {
3355 			WARN_ON_ONCE(!mmu_interval_check_retry
3356 				     (&to_userptr_vma(vma)->userptr.notifier,
3357 				      to_userptr_vma(vma)->userptr.notifier_seq));
3358 			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
3359 							     DMA_RESV_USAGE_BOOKKEEP));
3360 
3361 		} else {
3362 			xe_bo_assert_held(xe_vma_bo(vma));
3363 		}
3364 	}
3365 
3366 	for_each_tile(tile, xe, id) {
3367 		if (xe_pt_zap_ptes(tile, vma)) {
3368 			tile_needs_invalidate |= BIT(id);
3369 			xe_device_wmb(xe);
3370 			/*
3371 			 * FIXME: We potentially need to invalidate multiple
3372 			 * GTs within the tile
3373 			 */
3374 			seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
3375 			if (seqno[id] < 0)
3376 				return seqno[id];
3377 		}
3378 	}
3379 
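	/* Wait for every invalidation issued above to complete on its tile */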
3380 	for_each_tile(tile, xe, id) {
3381 		if (tile_needs_invalidate & BIT(id)) {
3382 			ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
3383 			if (ret < 0)
3384 				return ret;
3385 		}
3386 	}
3387 
3388 	vma->tile_invalidated = vma->tile_mask;
3389 
3390 	return 0;
3391 }
3392 
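/*
 * VM snapshots are captured in two stages: xe_vm_snapshot_capture() records
 * VMA ranges and takes BO/mm references under snap_mutex using GFP_NOWAIT,
 * while xe_vm_snapshot_capture_delayed() later performs the actual memory
 * copies from the BO or the userptr mm.
 */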
3393 struct xe_vm_snapshot {
3394 	unsigned long num_snaps;
3395 	struct {
3396 		u64 ofs, bo_ofs;
3397 		unsigned long len;
3398 		struct xe_bo *bo;
3399 		void *data;
3400 		struct mm_struct *mm;
3401 	} snap[];
3402 };
3403 
3404 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
3405 {
3406 	unsigned long num_snaps = 0, i;
3407 	struct xe_vm_snapshot *snap = NULL;
3408 	struct drm_gpuva *gpuva;
3409 
3410 	if (!vm)
3411 		return NULL;
3412 
3413 	mutex_lock(&vm->snap_mutex);
3414 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3415 		if (gpuva->flags & XE_VMA_DUMPABLE)
3416 			num_snaps++;
3417 	}
3418 
3419 	if (num_snaps)
3420 		snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
3421 	if (!snap) {
3422 		snap = num_snaps ? ERR_PTR(-ENOMEM) : ERR_PTR(-ENODEV);
3423 		goto out_unlock;
3424 	}
3425 
3426 	snap->num_snaps = num_snaps;
3427 	i = 0;
3428 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
3429 		struct xe_vma *vma = gpuva_to_vma(gpuva);
3430 		struct xe_bo *bo = vma->gpuva.gem.obj ?
3431 			gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
3432 
3433 		if (!(gpuva->flags & XE_VMA_DUMPABLE))
3434 			continue;
3435 
3436 		snap->snap[i].ofs = xe_vma_start(vma);
3437 		snap->snap[i].len = xe_vma_size(vma);
3438 		if (bo) {
3439 			snap->snap[i].bo = xe_bo_get(bo);
3440 			snap->snap[i].bo_ofs = xe_vma_bo_offset(vma);
3441 		} else if (xe_vma_is_userptr(vma)) {
3442 			struct mm_struct *mm =
3443 				to_userptr_vma(vma)->userptr.notifier.mm;
3444 
3445 			if (mmget_not_zero(mm))
3446 				snap->snap[i].mm = mm;
3447 			else
3448 				snap->snap[i].data = ERR_PTR(-EFAULT);
3449 
3450 			snap->snap[i].bo_ofs = xe_vma_userptr(vma);
3451 		} else {
3452 			snap->snap[i].data = ERR_PTR(-ENOENT);
3453 		}
3454 		i++;
3455 	}
3456 
3457 out_unlock:
3458 	mutex_unlock(&vm->snap_mutex);
3459 	return snap;
3460 }
3461 
3462 void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
3463 {
3464 	if (IS_ERR_OR_NULL(snap))
3465 		return;
3466 
3467 	for (int i = 0; i < snap->num_snaps; i++) {
3468 		struct xe_bo *bo = snap->snap[i].bo;
3469 		struct iosys_map src;
3470 		int err;
3471 
3472 		if (IS_ERR(snap->snap[i].data))
3473 			continue;
3474 
3475 		snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER);
3476 		if (!snap->snap[i].data) {
3477 			snap->snap[i].data = ERR_PTR(-ENOMEM);
3478 			goto cleanup_bo;
3479 		}
3480 
3481 		if (bo) {
3482 			xe_bo_lock(bo, false);
3483 			err = ttm_bo_vmap(&bo->ttm, &src);
3484 			if (!err) {
3485 				xe_map_memcpy_from(xe_bo_device(bo),
3486 						   snap->snap[i].data,
3487 						   &src, snap->snap[i].bo_ofs,
3488 						   snap->snap[i].len);
3489 				ttm_bo_vunmap(&bo->ttm, &src);
3490 			}
3491 			xe_bo_unlock(bo);
3492 		} else {
3493 			void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
3494 
3495 			kthread_use_mm(snap->snap[i].mm);
3496 			if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len))
3497 				err = 0;
3498 			else
3499 				err = -EFAULT;
3500 			kthread_unuse_mm(snap->snap[i].mm);
3501 
3502 			mmput(snap->snap[i].mm);
3503 			snap->snap[i].mm = NULL;
3504 		}
3505 
3506 		if (err) {
3507 			kvfree(snap->snap[i].data);
3508 			snap->snap[i].data = ERR_PTR(err);
3509 		}
3510 
3511 cleanup_bo:
3512 		xe_bo_put(bo);
3513 		snap->snap[i].bo = NULL;
3514 	}
3515 }
3516 
3517 void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
3518 {
3519 	unsigned long i, j;
3520 
3521 	if (IS_ERR_OR_NULL(snap)) {
3522 		drm_printf(p, "[0].error: %li\n", PTR_ERR(snap));
3523 		return;
3524 	}
3525 
3526 	for (i = 0; i < snap->num_snaps; i++) {
3527 		drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
3528 
3529 		if (IS_ERR(snap->snap[i].data)) {
3530 			drm_printf(p, "[%llx].error: %li\n", snap->snap[i].ofs,
3531 				   PTR_ERR(snap->snap[i].data));
3532 			continue;
3533 		}
3534 
3535 		drm_printf(p, "[%llx].data: ", snap->snap[i].ofs);
3536 
3537 		for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
3538 			u32 *val = snap->snap[i].data + j;
3539 			char dumped[ASCII85_BUFSZ];
3540 
3541 			drm_puts(p, ascii85_encode(*val, dumped));
3542 		}
3543 
3544 		drm_puts(p, "\n");
3545 	}
3546 }
3547 
3548 void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
3549 {
3550 	unsigned long i;
3551 
3552 	if (IS_ERR_OR_NULL(snap))
3553 		return;
3554 
3555 	for (i = 0; i < snap->num_snaps; i++) {
3556 		if (!IS_ERR(snap->snap[i].data))
3557 			kvfree(snap->snap[i].data);
3558 		xe_bo_put(snap->snap[i].bo);
3559 		if (snap->snap[i].mm)
3560 			mmput(snap->snap[i].mm);
3561 	}
3562 	kvfree(snap);
3563 }
3564