xref: /linux/drivers/gpu/drm/xe/xe_vm.c (revision 6704d98a4f48b7424edc0f7ae2a06c0a8af02e2f)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_vm.h"
7 
8 #include <linux/dma-fence-array.h>
9 #include <linux/nospec.h>
10 
11 #include <drm/drm_drv.h>
12 #include <drm/drm_exec.h>
13 #include <drm/drm_print.h>
14 #include <drm/ttm/ttm_tt.h>
15 #include <uapi/drm/xe_drm.h>
16 #include <linux/ascii85.h>
17 #include <linux/delay.h>
18 #include <linux/kthread.h>
19 #include <linux/mm.h>
20 #include <linux/swap.h>
21 
22 #include <generated/xe_wa_oob.h>
23 
24 #include "regs/xe_gtt_defs.h"
25 #include "xe_assert.h"
26 #include "xe_bo.h"
27 #include "xe_device.h"
28 #include "xe_drm_client.h"
29 #include "xe_exec_queue.h"
30 #include "xe_migrate.h"
31 #include "xe_pat.h"
32 #include "xe_pm.h"
33 #include "xe_preempt_fence.h"
34 #include "xe_pt.h"
35 #include "xe_pxp.h"
36 #include "xe_sriov_vf.h"
37 #include "xe_svm.h"
38 #include "xe_sync.h"
39 #include "xe_tile.h"
40 #include "xe_tlb_inval.h"
41 #include "xe_trace_bo.h"
42 #include "xe_wa.h"
43 
44 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
45 {
46 	return vm->gpuvm.r_obj;
47 }
48 
49 /**
50  * xe_vm_drm_exec_lock() - Lock the vm's resv with a drm_exec transaction
51  * @vm: The vm whose resv is to be locked.
52  * @exec: The drm_exec transaction.
53  *
54  * Helper to lock the vm's resv as part of a drm_exec transaction.
55  *
56  * Return: %0 on success. See drm_exec_lock_obj() for error codes.
57  */
58 int xe_vm_drm_exec_lock(struct xe_vm *vm, struct drm_exec *exec)
59 {
60 	return drm_exec_lock_obj(exec, xe_vm_obj(vm));
61 }
62 
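/*
 * Preempt-fence (long-running) mode in brief: each exec queue on the VM
 * keeps a preempt fence installed in the VM's dma-resv with
 * DMA_RESV_USAGE_BOOKKEEP. When signaling is enabled on those fences
 * (e.g. on eviction or userptr invalidation) the queues get preempted,
 * and the rebind worker below revalidates and rebinds the VM before
 * arming fresh fences and resuming the queues. The helpers that follow
 * manage that fence lifecycle.
 */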
63 static bool preempt_fences_waiting(struct xe_vm *vm)
64 {
65 	struct xe_exec_queue *q;
66 
67 	lockdep_assert_held(&vm->lock);
68 	xe_vm_assert_held(vm);
69 
70 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
71 		if (!q->lr.pfence ||
72 		    test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
73 			     &q->lr.pfence->flags)) {
74 			return true;
75 		}
76 	}
77 
78 	return false;
79 }
80 
81 static void free_preempt_fences(struct list_head *list)
82 {
83 	struct list_head *link, *next;
84 
85 	list_for_each_safe(link, next, list)
86 		xe_preempt_fence_free(to_preempt_fence_from_link(link));
87 }
88 
89 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
90 				unsigned int *count)
91 {
92 	lockdep_assert_held(&vm->lock);
93 	xe_vm_assert_held(vm);
94 
95 	if (*count >= vm->preempt.num_exec_queues)
96 		return 0;
97 
98 	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
99 		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
100 
101 		if (IS_ERR(pfence))
102 			return PTR_ERR(pfence);
103 
104 		list_move_tail(xe_preempt_fence_link(pfence), list);
105 	}
106 
107 	return 0;
108 }
109 
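/*
 * Wait for the currently armed preempt fences to signal. On a VF that
 * supports migration the wait uses a short timeout and returns -EAGAIN
 * instead of blocking, so the rebind worker can requeue itself (see the
 * -EAGAIN handling in preempt_rebind_work_func()).
 */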
110 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
111 {
112 	struct xe_exec_queue *q;
113 	bool vf_migration = IS_SRIOV_VF(vm->xe) &&
114 		xe_sriov_vf_migration_supported(vm->xe);
115 	signed long wait_time = vf_migration ? HZ / 5 : MAX_SCHEDULE_TIMEOUT;
116 
117 	xe_vm_assert_held(vm);
118 
119 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
120 		if (q->lr.pfence) {
121 			long timeout;
122 
123 			timeout = dma_fence_wait_timeout(q->lr.pfence, false,
124 							 wait_time);
125 			if (!timeout) {
126 				xe_assert(vm->xe, vf_migration);
127 				return -EAGAIN;
128 			}
129 
130 			/* Only -ETIME on the fence indicates the VM needs to be killed */
131 			if (timeout < 0 || q->lr.pfence->error == -ETIME)
132 				return -ETIME;
133 
134 			dma_fence_put(q->lr.pfence);
135 			q->lr.pfence = NULL;
136 		}
137 	}
138 
139 	return 0;
140 }
141 
142 static bool xe_vm_is_idle(struct xe_vm *vm)
143 {
144 	struct xe_exec_queue *q;
145 
146 	xe_vm_assert_held(vm);
147 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
148 		if (!xe_exec_queue_is_idle(q))
149 			return false;
150 	}
151 
152 	return true;
153 }
154 
155 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
156 {
157 	struct list_head *link;
158 	struct xe_exec_queue *q;
159 
160 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
161 		struct dma_fence *fence;
162 
163 		link = list->next;
164 		xe_assert(vm->xe, link != list);
165 
166 		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
167 					     q, q->lr.context,
168 					     ++q->lr.seqno);
169 		dma_fence_put(q->lr.pfence);
170 		q->lr.pfence = fence;
171 	}
172 }
173 
174 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
175 {
176 	struct xe_exec_queue *q;
177 	int err;
178 
179 	xe_bo_assert_held(bo);
180 
181 	if (!vm->preempt.num_exec_queues)
182 		return 0;
183 
184 	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
185 	if (err)
186 		return err;
187 
188 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
189 		if (q->lr.pfence) {
190 			dma_resv_add_fence(bo->ttm.base.resv,
191 					   q->lr.pfence,
192 					   DMA_RESV_USAGE_BOOKKEEP);
193 		}
194 
195 	return 0;
196 }
197 
198 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
199 						struct drm_exec *exec)
200 {
201 	struct xe_exec_queue *q;
202 
203 	lockdep_assert_held(&vm->lock);
204 	xe_vm_assert_held(vm);
205 
206 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
207 		q->ops->resume(q);
208 
209 		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence,
210 					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
211 	}
212 }
213 
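/**
 * xe_vm_add_compute_exec_queue() - Add a compute exec queue to the VM
 * @vm: The VM, which must be in preempt-fence mode.
 * @q: The exec queue to add.
 *
 * Creates the queue's initial preempt fence, installs it in the VM's
 * dma-resv and starts tracking the queue on vm->preempt.exec_queues.
 *
 * Return: 0 on success, negative error code on failure.
 */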
214 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
215 {
216 	struct drm_gpuvm_exec vm_exec = {
217 		.vm = &vm->gpuvm,
218 		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
219 		.num_fences = 1,
220 	};
221 	struct drm_exec *exec = &vm_exec.exec;
222 	struct xe_validation_ctx ctx;
223 	struct dma_fence *pfence;
224 	int err;
225 	bool wait;
226 
227 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
228 
229 	down_write(&vm->lock);
230 	err = xe_validation_exec_lock(&ctx, &vm_exec, &vm->xe->val);
231 	if (err)
232 		goto out_up_write;
233 
234 	pfence = xe_preempt_fence_create(q, q->lr.context,
235 					 ++q->lr.seqno);
236 	if (IS_ERR(pfence)) {
237 		err = PTR_ERR(pfence);
238 		goto out_fini;
239 	}
240 
241 	list_add(&q->lr.link, &vm->preempt.exec_queues);
242 	++vm->preempt.num_exec_queues;
243 	q->lr.pfence = pfence;
244 
245 	xe_svm_notifier_lock(vm);
246 
247 	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
248 				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
249 
250 	/*
251 	 * Check whether a preemption on the VM or a userptr invalidation is
252 	 * in flight; if so, trigger this preempt fence to sync state with the
253 	 * other preempt fences on the VM.
254 	 */
255 	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
256 	if (wait)
257 		dma_fence_enable_sw_signaling(pfence);
258 
259 	xe_svm_notifier_unlock(vm);
260 
261 out_fini:
262 	xe_validation_ctx_fini(&ctx);
263 out_up_write:
264 	up_write(&vm->lock);
265 
266 	return err;
267 }
268 ALLOW_ERROR_INJECTION(xe_vm_add_compute_exec_queue, ERRNO);
269 
270 /**
271  * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
272  * @vm: The VM.
273  * @q: The exec_queue
274  *
275  * Note that this function might be called multiple times on the same queue.
276  */
277 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
278 {
279 	if (!xe_vm_in_preempt_fence_mode(vm))
280 		return;
281 
282 	down_write(&vm->lock);
283 	if (!list_empty(&q->lr.link)) {
284 		list_del_init(&q->lr.link);
285 		--vm->preempt.num_exec_queues;
286 	}
287 	if (q->lr.pfence) {
288 		dma_fence_enable_sw_signaling(q->lr.pfence);
289 		dma_fence_put(q->lr.pfence);
290 		q->lr.pfence = NULL;
291 	}
292 	up_write(&vm->lock);
293 }
294 
295 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
296 
297 /**
298  * xe_vm_kill() - VM Kill
299  * @vm: The VM.
300  * @unlocked: Flag indicating the VM's dma-resv is not held
301  *
302  * Kill the VM by setting the banned flag, indicating the VM is no longer available
303  * for use. If in preempt fence mode, also kill all exec queues attached to the VM.
304  */
305 void xe_vm_kill(struct xe_vm *vm, bool unlocked)
306 {
307 	struct xe_exec_queue *q;
308 
309 	lockdep_assert_held(&vm->lock);
310 
311 	if (unlocked)
312 		xe_vm_lock(vm, false);
313 
314 	vm->flags |= XE_VM_FLAG_BANNED;
315 	trace_xe_vm_kill(vm);
316 
317 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
318 		q->ops->kill(q);
319 
320 	if (unlocked)
321 		xe_vm_unlock(vm);
322 
323 	/* TODO: Inform user the VM is banned */
324 }
325 
326 static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
327 {
328 	struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
329 	struct drm_gpuva *gpuva;
330 	int ret;
331 
332 	lockdep_assert_held(&vm->lock);
333 	drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
334 		list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
335 			       &vm->rebind_list);
336 
337 	if (!try_wait_for_completion(&vm->xe->pm_block))
338 		return -EAGAIN;
339 
340 	ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false, exec);
341 	if (ret)
342 		return ret;
343 
344 	vm_bo->evicted = false;
345 	return 0;
346 }
347 
348 /**
349  * xe_vm_validate_rebind() - Validate buffer objects and rebind vmas
350  * @vm: The vm for which we are rebinding.
351  * @exec: The struct drm_exec with the locked GEM objects.
352  * @num_fences: The number of fences to reserve for the operation, not
353  * including rebinds and validations.
354  *
355  * Validates all evicted gem objects and rebinds their vmas. Note that
356  * rebindings may cause evictions and hence the validation-rebind
357  * sequence is rerun until there are no more objects to validate.
358  *
359  * Return: 0 on success, negative error code on error. In particular,
360  * may return -EINTR or -ERESTARTSYS if interrupted, and -EDEADLK if
361  * the drm_exec transaction needs to be restarted.
362  */
363 int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
364 			  unsigned int num_fences)
365 {
366 	struct drm_gem_object *obj;
367 	unsigned long index;
368 	int ret;
369 
370 	do {
371 		ret = drm_gpuvm_validate(&vm->gpuvm, exec);
372 		if (ret)
373 			return ret;
374 
375 		ret = xe_vm_rebind(vm, false);
376 		if (ret)
377 			return ret;
378 	} while (!list_empty(&vm->gpuvm.evict.list));
379 
380 	drm_exec_for_each_locked_object(exec, index, obj) {
381 		ret = dma_resv_reserve_fences(obj->resv, num_fences);
382 		if (ret)
383 			return ret;
384 	}
385 
386 	return 0;
387 }
388 
389 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
390 				 bool *done)
391 {
392 	int err;
393 
394 	err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0);
395 	if (err)
396 		return err;
397 
398 	if (xe_vm_is_idle(vm)) {
399 		vm->preempt.rebind_deactivated = true;
400 		*done = true;
401 		return 0;
402 	}
403 
404 	if (!preempt_fences_waiting(vm)) {
405 		*done = true;
406 		return 0;
407 	}
408 
409 	err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0);
410 	if (err)
411 		return err;
412 
413 	err = wait_for_existing_preempt_fences(vm);
414 	if (err)
415 		return err;
416 
417 	/*
418 	 * Add validation and rebinding to the locking loop since both can
419 	 * cause evictions which may require blocking dma_resv locks.
420 	 * The fence reservation here is intended for the new preempt fences
421 	 * we attach at the end of the rebind work.
422 	 */
423 	return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
424 }
425 
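/*
 * Park the rebind worker while a suspend is in progress: if the device's
 * pm_block completion is not yet set, put the VM on
 * xe->rebind_resume_list so the worker can be requeued later via
 * xe_vm_resume_rebind_worker().
 */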
426 static bool vm_suspend_rebind_worker(struct xe_vm *vm)
427 {
428 	struct xe_device *xe = vm->xe;
429 	bool ret = false;
430 
431 	mutex_lock(&xe->rebind_resume_lock);
432 	if (!try_wait_for_completion(&vm->xe->pm_block)) {
433 		ret = true;
434 		list_move_tail(&vm->preempt.pm_activate_link, &xe->rebind_resume_list);
435 	}
436 	mutex_unlock(&xe->rebind_resume_lock);
437 
438 	return ret;
439 }
440 
441 /**
442  * xe_vm_resume_rebind_worker() - Resume the rebind worker.
443  * @vm: The vm whose preempt worker to resume.
444  *
445  * Resume a preempt worker that was previously suspended by
446  * vm_suspend_rebind_worker().
447  */
448 void xe_vm_resume_rebind_worker(struct xe_vm *vm)
449 {
450 	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
451 }
452 
453 static void preempt_rebind_work_func(struct work_struct *w)
454 {
455 	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
456 	struct xe_validation_ctx ctx;
457 	struct drm_exec exec;
458 	unsigned int fence_count = 0;
459 	LIST_HEAD(preempt_fences);
460 	int err = 0;
461 	long wait;
462 	int __maybe_unused tries = 0;
463 
464 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
465 	trace_xe_vm_rebind_worker_enter(vm);
466 
467 	down_write(&vm->lock);
468 
469 	if (xe_vm_is_closed_or_banned(vm)) {
470 		up_write(&vm->lock);
471 		trace_xe_vm_rebind_worker_exit(vm);
472 		return;
473 	}
474 
475 retry:
476 	if (!try_wait_for_completion(&vm->xe->pm_block) && vm_suspend_rebind_worker(vm)) {
477 		up_write(&vm->lock);
478 		/* We don't actually block here, but we don't make progress either. */
479 		xe_pm_might_block_on_suspend();
480 		return;
481 	}
482 
483 	if (xe_vm_userptr_check_repin(vm)) {
484 		err = xe_vm_userptr_pin(vm);
485 		if (err)
486 			goto out_unlock_outer;
487 	}
488 
489 	err = xe_validation_ctx_init(&ctx, &vm->xe->val, &exec,
490 				     (struct xe_val_flags) {.interruptible = true});
491 	if (err)
492 		goto out_unlock_outer;
493 
494 	drm_exec_until_all_locked(&exec) {
495 		bool done = false;
496 
497 		err = xe_preempt_work_begin(&exec, vm, &done);
498 		drm_exec_retry_on_contention(&exec);
499 		xe_validation_retry_on_oom(&ctx, &err);
500 		if (err || done) {
501 			xe_validation_ctx_fini(&ctx);
502 			goto out_unlock_outer;
503 		}
504 	}
505 
506 	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
507 	if (err)
508 		goto out_unlock;
509 
510 	xe_vm_set_validation_exec(vm, &exec);
511 	err = xe_vm_rebind(vm, true);
512 	xe_vm_set_validation_exec(vm, NULL);
513 	if (err)
514 		goto out_unlock;
515 
516 	/* Wait on rebinds and munmap style VM unbinds */
517 	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
518 				     DMA_RESV_USAGE_KERNEL,
519 				     false, MAX_SCHEDULE_TIMEOUT);
520 	if (wait <= 0) {
521 		err = -ETIME;
522 		goto out_unlock;
523 	}
524 
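/*
 * With CONFIG_DRM_XE_USERPTR_INVAL_INJECT the first pass through this
 * macro always reports that a retry is required, exercising the -EAGAIN
 * path below; otherwise a retry is only required if the userptrs need
 * to be repinned.
 */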
525 #define retry_required(__tries, __vm) \
526 	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
527 	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
528 	__xe_vm_userptr_needs_repin(__vm))
529 
530 	xe_svm_notifier_lock(vm);
531 	if (retry_required(tries, vm)) {
532 		xe_svm_notifier_unlock(vm);
533 		err = -EAGAIN;
534 		goto out_unlock;
535 	}
536 
537 #undef retry_required
538 
539 	spin_lock(&vm->xe->ttm.lru_lock);
540 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
541 	spin_unlock(&vm->xe->ttm.lru_lock);
542 
543 	/* Point of no return. */
544 	arm_preempt_fences(vm, &preempt_fences);
545 	resume_and_reinstall_preempt_fences(vm, &exec);
546 	xe_svm_notifier_unlock(vm);
547 
548 out_unlock:
549 	xe_validation_ctx_fini(&ctx);
550 out_unlock_outer:
551 	if (err == -EAGAIN) {
552 		trace_xe_vm_rebind_worker_retry(vm);
553 
554 		/*
555 		 * We can't block in workers on a VF that supports migration,
556 		 * since doing so can prevent the VF post-migration workers
557 		 * from getting scheduled.
558 		 */
559 		if (IS_SRIOV_VF(vm->xe) &&
560 		    xe_sriov_vf_migration_supported(vm->xe)) {
561 			up_write(&vm->lock);
562 			xe_vm_queue_rebind_worker(vm);
563 			return;
564 		}
565 
566 		goto retry;
567 	}
568 
569 	if (err) {
570 		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
571 		xe_vm_kill(vm, true);
572 	}
573 	up_write(&vm->lock);
574 
575 	free_preempt_fences(&preempt_fences);
576 
577 	trace_xe_vm_rebind_worker_exit(vm);
578 }
579 
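/*
 * Allocate the per-tile arrays of page-table update ops. For an array
 * of binds the failure is reported as -ENOBUFS rather than -ENOMEM,
 * presumably so the caller can tell "retry with a smaller array" apart
 * from a plain allocation failure.
 */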
580 static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
581 {
582 	int i;
583 
584 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) {
585 		if (!vops->pt_update_ops[i].num_ops)
586 			continue;
587 
588 		vops->pt_update_ops[i].ops =
589 			kmalloc_array(vops->pt_update_ops[i].num_ops,
590 				      sizeof(*vops->pt_update_ops[i].ops),
591 				      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
592 		if (!vops->pt_update_ops[i].ops)
593 			return array_of_binds ? -ENOBUFS : -ENOMEM;
594 	}
595 
596 	return 0;
597 }
598 ALLOW_ERROR_INJECTION(xe_vma_ops_alloc, ERRNO);
599 
600 static void xe_vma_svm_prefetch_op_fini(struct xe_vma_op *op)
601 {
602 	struct xe_vma *vma;
603 
604 	vma = gpuva_to_vma(op->base.prefetch.va);
605 
606 	if (op->base.op == DRM_GPUVA_OP_PREFETCH && xe_vma_is_cpu_addr_mirror(vma))
607 		xa_destroy(&op->prefetch_range.range);
608 }
609 
610 static void xe_vma_svm_prefetch_ops_fini(struct xe_vma_ops *vops)
611 {
612 	struct xe_vma_op *op;
613 
614 	if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH))
615 		return;
616 
617 	list_for_each_entry(op, &vops->list, link)
618 		xe_vma_svm_prefetch_op_fini(op);
619 }
620 
621 static void xe_vma_ops_fini(struct xe_vma_ops *vops)
622 {
623 	int i;
624 
625 	xe_vma_svm_prefetch_ops_fini(vops);
626 
627 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
628 		kfree(vops->pt_update_ops[i].ops);
629 }
630 
631 static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask, int inc_val)
632 {
633 	int i;
634 
635 	if (!inc_val)
636 		return;
637 
638 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
639 		if (BIT(i) & tile_mask)
640 			vops->pt_update_ops[i].num_ops += inc_val;
641 }
642 
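/*
 * gpuva flags that are carried over when a new VMA is created from an
 * existing one, e.g. by xe_vm_populate_rebind() below.
 */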
643 #define XE_VMA_CREATE_MASK (		    \
644 	XE_VMA_READ_ONLY |		    \
645 	XE_VMA_DUMPABLE |		    \
646 	XE_VMA_SYSTEM_ALLOCATOR |           \
647 	DRM_GPUVA_SPARSE |		    \
648 	XE_VMA_MADV_AUTORESET)
649 
650 static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
651 				  u8 tile_mask)
652 {
653 	INIT_LIST_HEAD(&op->link);
654 	op->tile_mask = tile_mask;
655 	op->base.op = DRM_GPUVA_OP_MAP;
656 	op->base.map.va.addr = vma->gpuva.va.addr;
657 	op->base.map.va.range = vma->gpuva.va.range;
658 	op->base.map.gem.obj = vma->gpuva.gem.obj;
659 	op->base.map.gem.offset = vma->gpuva.gem.offset;
660 	op->map.vma = vma;
661 	op->map.immediate = true;
662 	op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK;
663 }
664 
665 static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
666 				u8 tile_mask)
667 {
668 	struct xe_vma_op *op;
669 
670 	op = kzalloc(sizeof(*op), GFP_KERNEL);
671 	if (!op)
672 		return -ENOMEM;
673 
674 	xe_vm_populate_rebind(op, vma, tile_mask);
675 	list_add_tail(&op->link, &vops->list);
676 	xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1);
677 
678 	return 0;
679 }
680 
681 static struct dma_fence *ops_execute(struct xe_vm *vm,
682 				     struct xe_vma_ops *vops);
683 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
684 			    struct xe_exec_queue *q,
685 			    struct xe_sync_entry *syncs, u32 num_syncs);
686 
687 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
688 {
689 	struct dma_fence *fence;
690 	struct xe_vma *vma, *next;
691 	struct xe_vma_ops vops;
692 	struct xe_vma_op *op, *next_op;
693 	int err, i;
694 
695 	lockdep_assert_held(&vm->lock);
696 	if ((xe_vm_in_lr_mode(vm) && !rebind_worker) ||
697 	    list_empty(&vm->rebind_list))
698 		return 0;
699 
700 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
701 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
702 		vops.pt_update_ops[i].wait_vm_bookkeep = true;
703 
704 	xe_vm_assert_held(vm);
705 	list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
706 		xe_assert(vm->xe, vma->tile_present);
707 
708 		if (rebind_worker)
709 			trace_xe_vma_rebind_worker(vma);
710 		else
711 			trace_xe_vma_rebind_exec(vma);
712 
713 		err = xe_vm_ops_add_rebind(&vops, vma,
714 					   vma->tile_present);
715 		if (err)
716 			goto free_ops;
717 	}
718 
719 	err = xe_vma_ops_alloc(&vops, false);
720 	if (err)
721 		goto free_ops;
722 
723 	fence = ops_execute(vm, &vops);
724 	if (IS_ERR(fence)) {
725 		err = PTR_ERR(fence);
726 	} else {
727 		dma_fence_put(fence);
728 		list_for_each_entry_safe(vma, next, &vm->rebind_list,
729 					 combined_links.rebind)
730 			list_del_init(&vma->combined_links.rebind);
731 	}
732 free_ops:
733 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
734 		list_del(&op->link);
735 		kfree(op);
736 	}
737 	xe_vma_ops_fini(&vops);
738 
739 	return err;
740 }
741 
742 struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_mask)
743 {
744 	struct dma_fence *fence = NULL;
745 	struct xe_vma_ops vops;
746 	struct xe_vma_op *op, *next_op;
747 	struct xe_tile *tile;
748 	u8 id;
749 	int err;
750 
751 	lockdep_assert_held(&vm->lock);
752 	xe_vm_assert_held(vm);
753 	xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
754 
755 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
756 	vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT;
757 	for_each_tile(tile, vm->xe, id) {
758 		vops.pt_update_ops[id].wait_vm_bookkeep = true;
759 		vops.pt_update_ops[tile->id].q =
760 			xe_migrate_exec_queue(tile->migrate);
761 	}
762 
763 	err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
764 	if (err)
765 		return ERR_PTR(err);
766 
767 	err = xe_vma_ops_alloc(&vops, false);
768 	if (err) {
769 		fence = ERR_PTR(err);
770 		goto free_ops;
771 	}
772 
773 	fence = ops_execute(vm, &vops);
774 
775 free_ops:
776 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
777 		list_del(&op->link);
778 		kfree(op);
779 	}
780 	xe_vma_ops_fini(&vops);
781 
782 	return fence;
783 }
784 
785 static void xe_vm_populate_range_rebind(struct xe_vma_op *op,
786 					struct xe_vma *vma,
787 					struct xe_svm_range *range,
788 					u8 tile_mask)
789 {
790 	INIT_LIST_HEAD(&op->link);
791 	op->tile_mask = tile_mask;
792 	op->base.op = DRM_GPUVA_OP_DRIVER;
793 	op->subop = XE_VMA_SUBOP_MAP_RANGE;
794 	op->map_range.vma = vma;
795 	op->map_range.range = range;
796 }
797 
798 static int
799 xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops,
800 			   struct xe_vma *vma,
801 			   struct xe_svm_range *range,
802 			   u8 tile_mask)
803 {
804 	struct xe_vma_op *op;
805 
806 	op = kzalloc(sizeof(*op), GFP_KERNEL);
807 	if (!op)
808 		return -ENOMEM;
809 
810 	xe_vm_populate_range_rebind(op, vma, range, tile_mask);
811 	list_add_tail(&op->link, &vops->list);
812 	xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1);
813 
814 	return 0;
815 }
816 
817 /**
818  * xe_vm_range_rebind() - VM range (re)bind
819  * @vm: The VM which the range belongs to.
820  * @vma: The VMA which the range belongs to.
821  * @range: SVM range to rebind.
822  * @tile_mask: Tile mask to bind the range to.
823  *
824  * (re)bind SVM range setting up GPU page tables for the range.
825  *
826  * Return: dma fence for rebind to signal completion on success, ERR_PTR on
827  * failure
828  */
829 struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
830 				     struct xe_vma *vma,
831 				     struct xe_svm_range *range,
832 				     u8 tile_mask)
833 {
834 	struct dma_fence *fence = NULL;
835 	struct xe_vma_ops vops;
836 	struct xe_vma_op *op, *next_op;
837 	struct xe_tile *tile;
838 	u8 id;
839 	int err;
840 
841 	lockdep_assert_held(&vm->lock);
842 	xe_vm_assert_held(vm);
843 	xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
844 	xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
845 
846 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
847 	vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT;
848 	for_each_tile(tile, vm->xe, id) {
849 		vops.pt_update_ops[id].wait_vm_bookkeep = true;
850 		vops.pt_update_ops[tile->id].q =
851 			xe_migrate_exec_queue(tile->migrate);
852 	}
853 
854 	err = xe_vm_ops_add_range_rebind(&vops, vma, range, tile_mask);
855 	if (err)
856 		return ERR_PTR(err);
857 
858 	err = xe_vma_ops_alloc(&vops, false);
859 	if (err) {
860 		fence = ERR_PTR(err);
861 		goto free_ops;
862 	}
863 
864 	fence = ops_execute(vm, &vops);
865 
866 free_ops:
867 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
868 		list_del(&op->link);
869 		kfree(op);
870 	}
871 	xe_vma_ops_fini(&vops);
872 
873 	return fence;
874 }
875 
876 static void xe_vm_populate_range_unbind(struct xe_vma_op *op,
877 					struct xe_svm_range *range)
878 {
879 	INIT_LIST_HEAD(&op->link);
880 	op->tile_mask = range->tile_present;
881 	op->base.op = DRM_GPUVA_OP_DRIVER;
882 	op->subop = XE_VMA_SUBOP_UNMAP_RANGE;
883 	op->unmap_range.range = range;
884 }
885 
886 static int
887 xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops,
888 			   struct xe_svm_range *range)
889 {
890 	struct xe_vma_op *op;
891 
892 	op = kzalloc(sizeof(*op), GFP_KERNEL);
893 	if (!op)
894 		return -ENOMEM;
895 
896 	xe_vm_populate_range_unbind(op, range);
897 	list_add_tail(&op->link, &vops->list);
898 	xe_vma_ops_incr_pt_update_ops(vops, range->tile_present, 1);
899 
900 	return 0;
901 }
902 
903 /**
904  * xe_vm_range_unbind() - VM range unbind
905  * @vm: The VM which the range belongs to.
906  * @range: SVM range to unbind.
907  *
908  * Unbind an SVM range, removing the GPU page tables for the range.
909  *
910  * Return: dma fence for unbind to signal completion on success, ERR_PTR on
911  * failure
912  */
913 struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
914 				     struct xe_svm_range *range)
915 {
916 	struct dma_fence *fence = NULL;
917 	struct xe_vma_ops vops;
918 	struct xe_vma_op *op, *next_op;
919 	struct xe_tile *tile;
920 	u8 id;
921 	int err;
922 
923 	lockdep_assert_held(&vm->lock);
924 	xe_vm_assert_held(vm);
925 	xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
926 
927 	if (!range->tile_present)
928 		return dma_fence_get_stub();
929 
930 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
931 	for_each_tile(tile, vm->xe, id) {
932 		vops.pt_update_ops[id].wait_vm_bookkeep = true;
933 		vops.pt_update_ops[tile->id].q =
934 			xe_migrate_exec_queue(tile->migrate);
935 	}
936 
937 	err = xe_vm_ops_add_range_unbind(&vops, range);
938 	if (err)
939 		return ERR_PTR(err);
940 
941 	err = xe_vma_ops_alloc(&vops, false);
942 	if (err) {
943 		fence = ERR_PTR(err);
944 		goto free_ops;
945 	}
946 
947 	fence = ops_execute(vm, &vops);
948 
949 free_ops:
950 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
951 		list_del(&op->link);
952 		kfree(op);
953 	}
954 	xe_vma_ops_fini(&vops);
955 
956 	return fence;
957 }
958 
959 static void xe_vma_mem_attr_fini(struct xe_vma_mem_attr *attr)
960 {
961 	drm_pagemap_put(attr->preferred_loc.dpagemap);
962 }
963 
964 static void xe_vma_free(struct xe_vma *vma)
965 {
966 	xe_vma_mem_attr_fini(&vma->attr);
967 
968 	if (xe_vma_is_userptr(vma))
969 		kfree(to_userptr_vma(vma));
970 	else
971 		kfree(vma);
972 }
973 
974 /**
975  * xe_vma_mem_attr_copy() - copy an xe_vma_mem_attr structure.
976  * @to: Destination.
977  * @from: Source.
978  *
979  * Copies an xe_vma_mem_attr structure taking care to get reference
980  * counting of individual members right.
981  */
982 void xe_vma_mem_attr_copy(struct xe_vma_mem_attr *to, struct xe_vma_mem_attr *from)
983 {
984 	xe_vma_mem_attr_fini(to);
985 	*to = *from;
986 	if (to->preferred_loc.dpagemap)
987 		drm_pagemap_get(to->preferred_loc.dpagemap);
988 }
989 
990 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
991 				    struct xe_bo *bo,
992 				    u64 bo_offset_or_userptr,
993 				    u64 start, u64 end,
994 				    struct xe_vma_mem_attr *attr,
995 				    unsigned int flags)
996 {
997 	struct xe_vma *vma;
998 	struct xe_tile *tile;
999 	u8 id;
1000 	bool is_null = (flags & DRM_GPUVA_SPARSE);
1001 	bool is_cpu_addr_mirror = (flags & XE_VMA_SYSTEM_ALLOCATOR);
1002 
1003 	xe_assert(vm->xe, start < end);
1004 	xe_assert(vm->xe, end < vm->size);
1005 
1006 	/*
1007 	 * Allocate and ensure that the xe_vma_is_userptr() return
1008 	 * matches what was allocated.
1009 	 */
1010 	if (!bo && !is_null && !is_cpu_addr_mirror) {
1011 		struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
1012 
1013 		if (!uvma)
1014 			return ERR_PTR(-ENOMEM);
1015 
1016 		vma = &uvma->vma;
1017 	} else {
1018 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
1019 		if (!vma)
1020 			return ERR_PTR(-ENOMEM);
1021 
1022 		if (bo)
1023 			vma->gpuva.gem.obj = &bo->ttm.base;
1024 	}
1025 
1026 	INIT_LIST_HEAD(&vma->combined_links.rebind);
1027 
1028 	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
1029 	vma->gpuva.vm = &vm->gpuvm;
1030 	vma->gpuva.va.addr = start;
1031 	vma->gpuva.va.range = end - start + 1;
1032 	vma->gpuva.flags = flags;
1033 
1034 	for_each_tile(tile, vm->xe, id)
1035 		vma->tile_mask |= 0x1 << id;
1036 
1037 	if (vm->xe->info.has_atomic_enable_pte_bit)
1038 		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
1039 
1040 	xe_vma_mem_attr_copy(&vma->attr, attr);
1041 	if (bo) {
1042 		struct drm_gpuvm_bo *vm_bo;
1043 
1044 		xe_bo_assert_held(bo);
1045 
1046 		vm_bo = drm_gpuvm_bo_obtain_locked(vma->gpuva.vm, &bo->ttm.base);
1047 		if (IS_ERR(vm_bo)) {
1048 			xe_vma_free(vma);
1049 			return ERR_CAST(vm_bo);
1050 		}
1051 
1052 		drm_gpuvm_bo_extobj_add(vm_bo);
1053 		drm_gem_object_get(&bo->ttm.base);
1054 		vma->gpuva.gem.offset = bo_offset_or_userptr;
1055 		drm_gpuva_link(&vma->gpuva, vm_bo);
1056 		drm_gpuvm_bo_put(vm_bo);
1057 	} else /* userptr or null */ {
1058 		if (!is_null && !is_cpu_addr_mirror) {
1059 			struct xe_userptr_vma *uvma = to_userptr_vma(vma);
1060 			u64 size = end - start + 1;
1061 			int err;
1062 
1063 			vma->gpuva.gem.offset = bo_offset_or_userptr;
1064 
1065 			err = xe_userptr_setup(uvma, xe_vma_userptr(vma), size);
1066 			if (err) {
1067 				xe_vma_free(vma);
1068 				return ERR_PTR(err);
1069 			}
1070 		}
1071 
1072 		xe_vm_get(vm);
1073 	}
1074 
1075 	return vma;
1076 }
1077 
1078 static void xe_vma_destroy_late(struct xe_vma *vma)
1079 {
1080 	struct xe_vm *vm = xe_vma_vm(vma);
1081 
1082 	if (vma->ufence) {
1083 		xe_sync_ufence_put(vma->ufence);
1084 		vma->ufence = NULL;
1085 	}
1086 
1087 	if (xe_vma_is_userptr(vma)) {
1088 		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
1089 
1090 		xe_userptr_remove(uvma);
1091 		xe_vm_put(vm);
1092 	} else if (xe_vma_is_null(vma) || xe_vma_is_cpu_addr_mirror(vma)) {
1093 		xe_vm_put(vm);
1094 	} else {
1095 		xe_bo_put(xe_vma_bo(vma));
1096 	}
1097 
1098 	xe_vma_free(vma);
1099 }
1100 
1101 static void vma_destroy_work_func(struct work_struct *w)
1102 {
1103 	struct xe_vma *vma =
1104 		container_of(w, struct xe_vma, destroy_work);
1105 
1106 	xe_vma_destroy_late(vma);
1107 }
1108 
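/*
 * Final destruction is deferred to a workqueue: dma-fence callbacks may
 * run in atomic context, while xe_vma_destroy_late() may sleep (it drops
 * BO/VM references and tears down userptr state).
 */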
1109 static void vma_destroy_cb(struct dma_fence *fence,
1110 			   struct dma_fence_cb *cb)
1111 {
1112 	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1113 
1114 	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1115 	queue_work(system_unbound_wq, &vma->destroy_work);
1116 }
1117 
1118 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1119 {
1120 	struct xe_vm *vm = xe_vma_vm(vma);
1121 
1122 	lockdep_assert_held_write(&vm->lock);
1123 	xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
1124 
1125 	if (xe_vma_is_userptr(vma)) {
1126 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
1127 		xe_userptr_destroy(to_userptr_vma(vma));
1128 	} else if (!xe_vma_is_null(vma) && !xe_vma_is_cpu_addr_mirror(vma)) {
1129 		xe_bo_assert_held(xe_vma_bo(vma));
1130 
1131 		drm_gpuva_unlink(&vma->gpuva);
1132 	}
1133 
1134 	xe_vm_assert_held(vm);
1135 	if (fence) {
1136 		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1137 						 vma_destroy_cb);
1138 
1139 		if (ret) {
1140 			XE_WARN_ON(ret != -ENOENT);
1141 			xe_vma_destroy_late(vma);
1142 		}
1143 	} else {
1144 		xe_vma_destroy_late(vma);
1145 	}
1146 }
1147 
1148 /**
1149  * xe_vm_lock_vma() - drm_exec utility to lock a vma
1150  * @exec: The drm_exec object we're currently locking for.
1151  * @vma: The vma for which we want to lock the vm resv and any attached
1152  * object's resv.
1153  *
1154  * Return: 0 on success, negative error code on error. In particular
1155  * may return -EDEADLK on WW transaction contention and -EINTR if
1156  * an interruptible wait is terminated by a signal.
1157  */
1158 int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
1159 {
1160 	struct xe_vm *vm = xe_vma_vm(vma);
1161 	struct xe_bo *bo = xe_vma_bo(vma);
1162 	int err;
1163 
1164 	XE_WARN_ON(!vm);
1165 
1166 	err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
1167 	if (!err && bo && !bo->vm)
1168 		err = drm_exec_lock_obj(exec, &bo->ttm.base);
1169 
1170 	return err;
1171 }
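
/*
 * A minimal usage sketch (hypothetical caller), mirroring
 * xe_vma_destroy_unlocked() below: the helper is intended to be called
 * from within a drm_exec loop so that contention can be retried:
 *
 *	drm_exec_until_all_locked(&exec) {
 *		err = xe_vm_lock_vma(&exec, vma);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err)
 *			break;
 *		...
 *	}
 */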
1172 
1173 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1174 {
1175 	struct xe_device *xe = xe_vma_vm(vma)->xe;
1176 	struct xe_validation_ctx ctx;
1177 	struct drm_exec exec;
1178 	int err = 0;
1179 
1180 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
1181 		err = xe_vm_lock_vma(&exec, vma);
1182 		drm_exec_retry_on_contention(&exec);
1183 		if (XE_WARN_ON(err))
1184 			break;
1185 		xe_vma_destroy(vma, NULL);
1186 	}
1187 	xe_assert(xe, !err);
1188 }
1189 
1190 struct xe_vma *
1191 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1192 {
1193 	struct drm_gpuva *gpuva;
1194 
1195 	lockdep_assert_held(&vm->lock);
1196 
1197 	if (xe_vm_is_closed_or_banned(vm))
1198 		return NULL;
1199 
1200 	xe_assert(vm->xe, start + range <= vm->size);
1201 
1202 	gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1203 
1204 	return gpuva ? gpuva_to_vma(gpuva) : NULL;
1205 }
1206 
1207 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1208 {
1209 	int err;
1210 
1211 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1212 	lockdep_assert_held(&vm->lock);
1213 
1214 	mutex_lock(&vm->snap_mutex);
1215 	err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1216 	mutex_unlock(&vm->snap_mutex);
1217 	XE_WARN_ON(err);	/* Shouldn't be possible */
1218 
1219 	return err;
1220 }
1221 
1222 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1223 {
1224 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1225 	lockdep_assert_held(&vm->lock);
1226 
1227 	mutex_lock(&vm->snap_mutex);
1228 	drm_gpuva_remove(&vma->gpuva);
1229 	mutex_unlock(&vm->snap_mutex);
1230 	if (vm->usm.last_fault_vma == vma)
1231 		vm->usm.last_fault_vma = NULL;
1232 }
1233 
1234 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1235 {
1236 	struct xe_vma_op *op;
1237 
1238 	op = kzalloc(sizeof(*op), GFP_KERNEL);
1239 
1240 	if (unlikely(!op))
1241 		return NULL;
1242 
1243 	return &op->base;
1244 }
1245 
1246 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1247 
1248 static const struct drm_gpuvm_ops gpuvm_ops = {
1249 	.op_alloc = xe_vm_op_alloc,
1250 	.vm_bo_validate = xe_gpuvm_validate,
1251 	.vm_free = xe_vm_free,
1252 };
1253 
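/*
 * The PAT index is not a contiguous field in the page-table entries;
 * the helpers below scatter its bits into the individual PAT0..PAT4
 * PTE bits, while PDEs only carry the low two bits.
 */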
1254 static u64 pde_encode_pat_index(u16 pat_index)
1255 {
1256 	u64 pte = 0;
1257 
1258 	if (pat_index & BIT(0))
1259 		pte |= XE_PPGTT_PTE_PAT0;
1260 
1261 	if (pat_index & BIT(1))
1262 		pte |= XE_PPGTT_PTE_PAT1;
1263 
1264 	return pte;
1265 }
1266 
1267 static u64 pte_encode_pat_index(u16 pat_index, u32 pt_level)
1268 {
1269 	u64 pte = 0;
1270 
1271 	if (pat_index & BIT(0))
1272 		pte |= XE_PPGTT_PTE_PAT0;
1273 
1274 	if (pat_index & BIT(1))
1275 		pte |= XE_PPGTT_PTE_PAT1;
1276 
1277 	if (pat_index & BIT(2)) {
1278 		if (pt_level)
1279 			pte |= XE_PPGTT_PDE_PDPE_PAT2;
1280 		else
1281 			pte |= XE_PPGTT_PTE_PAT2;
1282 	}
1283 
1284 	if (pat_index & BIT(3))
1285 		pte |= XELPG_PPGTT_PTE_PAT3;
1286 
1287 	if (pat_index & (BIT(4)))
1288 		pte |= XE2_PPGTT_PTE_PAT4;
1289 
1290 	return pte;
1291 }
1292 
1293 static u64 pte_encode_ps(u32 pt_level)
1294 {
1295 	XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
1296 
1297 	if (pt_level == 1)
1298 		return XE_PDE_PS_2M;
1299 	else if (pt_level == 2)
1300 		return XE_PDPE_PS_1G;
1301 
1302 	return 0;
1303 }
1304 
1305 static u16 pde_pat_index(struct xe_bo *bo)
1306 {
1307 	struct xe_device *xe = xe_bo_device(bo);
1308 	u16 pat_index;
1309 
1310 	/*
1311 	 * We only have two bits to encode the PAT index in non-leaf nodes, but
1312 	 * these only point to other paging structures so we only need a minimal
1313 	 * selection of options. The user PAT index is only for encoding leaf
1314  * nodes, where we have more bits available to do the encoding. The
1315 	 * non-leaf nodes are instead under driver control so the chosen index
1316 	 * here should be distinct from the user PAT index. Also the
1317 	 * corresponding coherency of the PAT index should be tied to the
1318 	 * allocation type of the page table (or at least we should pick
1319 	 * something which is always safe).
1320 	 */
1321 	if (!xe_bo_is_vram(bo) && bo->ttm.ttm->caching == ttm_cached)
1322 		pat_index = xe->pat.idx[XE_CACHE_WB];
1323 	else
1324 		pat_index = xe->pat.idx[XE_CACHE_NONE];
1325 
1326 	xe_assert(xe, pat_index <= 3);
1327 
1328 	return pat_index;
1329 }
1330 
1331 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset)
1332 {
1333 	u64 pde;
1334 
1335 	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1336 	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1337 	pde |= pde_encode_pat_index(pde_pat_index(bo));
1338 
1339 	return pde;
1340 }
1341 
1342 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1343 			      u16 pat_index, u32 pt_level)
1344 {
1345 	u64 pte;
1346 
1347 	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1348 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1349 	pte |= pte_encode_pat_index(pat_index, pt_level);
1350 	pte |= pte_encode_ps(pt_level);
1351 
1352 	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1353 		pte |= XE_PPGTT_PTE_DM;
1354 
1355 	return pte;
1356 }
1357 
1358 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1359 			       u16 pat_index, u32 pt_level)
1360 {
1361 	pte |= XE_PAGE_PRESENT;
1362 
1363 	if (likely(!xe_vma_read_only(vma)))
1364 		pte |= XE_PAGE_RW;
1365 
1366 	pte |= pte_encode_pat_index(pat_index, pt_level);
1367 	pte |= pte_encode_ps(pt_level);
1368 
1369 	if (unlikely(xe_vma_is_null(vma)))
1370 		pte |= XE_PTE_NULL;
1371 
1372 	return pte;
1373 }
1374 
1375 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1376 				u16 pat_index,
1377 				u32 pt_level, bool devmem, u64 flags)
1378 {
1379 	u64 pte;
1380 
1381 	/* Avoid passing random bits directly as flags */
1382 	xe_assert(xe, !(flags & ~XE_PTE_PS64));
1383 
1384 	pte = addr;
1385 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1386 	pte |= pte_encode_pat_index(pat_index, pt_level);
1387 	pte |= pte_encode_ps(pt_level);
1388 
1389 	if (devmem)
1390 		pte |= XE_PPGTT_PTE_DM;
1391 
1392 	pte |= flags;
1393 
1394 	return pte;
1395 }
1396 
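/*
 * PTE/PDE encode callbacks for XeLP and later platforms. They are
 * installed as vm->pt_ops in xe_vm_create() and used wherever entries
 * are built, e.g. by xe_vm_pdp4_descriptor() below.
 */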
1397 static const struct xe_pt_ops xelp_pt_ops = {
1398 	.pte_encode_bo = xelp_pte_encode_bo,
1399 	.pte_encode_vma = xelp_pte_encode_vma,
1400 	.pte_encode_addr = xelp_pte_encode_addr,
1401 	.pde_encode_bo = xelp_pde_encode_bo,
1402 };
1403 
1404 static void vm_destroy_work_func(struct work_struct *w);
1405 
1406 /**
1407  * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
1408  * given tile and vm.
1409  * @xe: xe device.
1410  * @tile: tile to set up for.
1411  * @vm: vm to set up for.
1412  * @exec: The struct drm_exec object used to lock the vm resv.
1413  *
1414  * Sets up a pagetable tree with one page-table per level and a single
1415  * leaf PTE. All pagetable entries point to the single page-table or,
1416  * for MAX_HUGEPTE_LEVEL, a NULL huge PTE that returns 0 on reads and
1417  * turns writes into NOPs.
1418  *
1419  * Return: 0 on success, negative error code on error.
1420  */
1421 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1422 				struct xe_vm *vm, struct drm_exec *exec)
1423 {
1424 	u8 id = tile->id;
1425 	int i;
1426 
1427 	for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
1428 		vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i, exec);
1429 		if (IS_ERR(vm->scratch_pt[id][i])) {
1430 			int err = PTR_ERR(vm->scratch_pt[id][i]);
1431 
1432 			vm->scratch_pt[id][i] = NULL;
1433 			return err;
1434 		}
1435 		xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
1436 	}
1437 
1438 	return 0;
1439 }
1440 ALLOW_ERROR_INJECTION(xe_vm_create_scratch, ERRNO);
1441 
1442 static void xe_vm_free_scratch(struct xe_vm *vm)
1443 {
1444 	struct xe_tile *tile;
1445 	u8 id;
1446 
1447 	if (!xe_vm_has_scratch(vm))
1448 		return;
1449 
1450 	for_each_tile(tile, vm->xe, id) {
1451 		u32 i;
1452 
1453 		if (!vm->pt_root[id])
1454 			continue;
1455 
1456 		for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
1457 			if (vm->scratch_pt[id][i])
1458 				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
1459 	}
1460 }
1461 
1462 static void xe_vm_pt_destroy(struct xe_vm *vm)
1463 {
1464 	struct xe_tile *tile;
1465 	u8 id;
1466 
1467 	xe_vm_assert_held(vm);
1468 
1469 	for_each_tile(tile, vm->xe, id) {
1470 		if (vm->pt_root[id]) {
1471 			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1472 			vm->pt_root[id] = NULL;
1473 		}
1474 	}
1475 }
1476 
1477 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
1478 {
1479 	struct drm_gem_object *vm_resv_obj;
1480 	struct xe_validation_ctx ctx;
1481 	struct drm_exec exec;
1482 	struct xe_vm *vm;
1483 	int err;
1484 	struct xe_tile *tile;
1485 	u8 id;
1486 
1487 	/*
1488 	 * Since the GSCCS is not user-accessible, we don't expect a GSC VM to
1489 	 * ever be in faulting mode.
1490 	 */
1491 	xe_assert(xe, !((flags & XE_VM_FLAG_GSC) && (flags & XE_VM_FLAG_FAULT_MODE)));
1492 
1493 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1494 	if (!vm)
1495 		return ERR_PTR(-ENOMEM);
1496 
1497 	vm->xe = xe;
1498 
1499 	vm->size = 1ull << xe->info.va_bits;
1500 	vm->flags = flags;
1501 
1502 	if (xef)
1503 		vm->xef = xe_file_get(xef);
1504 	/*
1505 	 * GSC VMs are kernel-owned, only used for PXP ops and can sometimes be
1506 	 * manipulated under the PXP mutex. However, the PXP mutex can be taken
1507 	 * under a user-VM lock when the PXP session is started at exec_queue
1508 	 * creation time. Those are different VMs and therefore there is no risk
1509 	 * of deadlock, but we need to tell lockdep that this is the case or it
1510 	 * will print a warning.
1511 	 */
1512 	if (flags & XE_VM_FLAG_GSC) {
1513 		static struct lock_class_key gsc_vm_key;
1514 
1515 		__init_rwsem(&vm->lock, "gsc_vm", &gsc_vm_key);
1516 	} else {
1517 		init_rwsem(&vm->lock);
1518 	}
1519 	mutex_init(&vm->snap_mutex);
1520 
1521 	INIT_LIST_HEAD(&vm->rebind_list);
1522 
1523 	INIT_LIST_HEAD(&vm->userptr.repin_list);
1524 	INIT_LIST_HEAD(&vm->userptr.invalidated);
1525 	spin_lock_init(&vm->userptr.invalidated_lock);
1526 
1527 	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
1528 
1529 	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1530 
1531 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
1532 	if (flags & XE_VM_FLAG_FAULT_MODE)
1533 		vm->preempt.min_run_period_ms = xe->min_run_period_pf_ms;
1534 	else
1535 		vm->preempt.min_run_period_ms = xe->min_run_period_lr_ms;
1536 
1537 	for_each_tile(tile, xe, id)
1538 		xe_range_fence_tree_init(&vm->rftree[id]);
1539 
1540 	vm->pt_ops = &xelp_pt_ops;
1541 
1542 	/*
1543 	 * Long-running workloads are not protected by the scheduler references.
1544 	 * By design, run_job for long-running workloads returns NULL and the
1545 	 * scheduler drops all of its references, hence protecting the VM
1546 	 * for this case is necessary.
1547 	 */
1548 	if (flags & XE_VM_FLAG_LR_MODE) {
1549 		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1550 		xe_pm_runtime_get_noresume(xe);
1551 		INIT_LIST_HEAD(&vm->preempt.pm_activate_link);
1552 	}
1553 
1554 	err = xe_svm_init(vm);
1555 	if (err)
1556 		goto err_no_resv;
1557 
1558 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1559 	if (!vm_resv_obj) {
1560 		err = -ENOMEM;
1561 		goto err_svm_fini;
1562 	}
1563 
1564 	drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1565 		       vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
1566 
1567 	drm_gem_object_put(vm_resv_obj);
1568 
1569 	err = 0;
1570 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
1571 			    err) {
1572 		err = xe_vm_drm_exec_lock(vm, &exec);
1573 		drm_exec_retry_on_contention(&exec);
1574 
1575 		if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1576 			vm->flags |= XE_VM_FLAG_64K;
1577 
1578 		for_each_tile(tile, xe, id) {
1579 			if (flags & XE_VM_FLAG_MIGRATION &&
1580 			    tile->id != XE_VM_FLAG_TILE_ID(flags))
1581 				continue;
1582 
1583 			vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level,
1584 						       &exec);
1585 			if (IS_ERR(vm->pt_root[id])) {
1586 				err = PTR_ERR(vm->pt_root[id]);
1587 				vm->pt_root[id] = NULL;
1588 				xe_vm_pt_destroy(vm);
1589 				drm_exec_retry_on_contention(&exec);
1590 				xe_validation_retry_on_oom(&ctx, &err);
1591 				break;
1592 			}
1593 		}
1594 		if (err)
1595 			break;
1596 
1597 		if (xe_vm_has_scratch(vm)) {
1598 			for_each_tile(tile, xe, id) {
1599 				if (!vm->pt_root[id])
1600 					continue;
1601 
1602 				err = xe_vm_create_scratch(xe, tile, vm, &exec);
1603 				if (err) {
1604 					xe_vm_free_scratch(vm);
1605 					xe_vm_pt_destroy(vm);
1606 					drm_exec_retry_on_contention(&exec);
1607 					xe_validation_retry_on_oom(&ctx, &err);
1608 					break;
1609 				}
1610 			}
1611 			if (err)
1612 				break;
1613 			vm->batch_invalidate_tlb = true;
1614 		}
1615 
1616 		if (vm->flags & XE_VM_FLAG_LR_MODE) {
1617 			INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1618 			vm->batch_invalidate_tlb = false;
1619 		}
1620 
1621 		/* Fill pt_root after allocating scratch tables */
1622 		for_each_tile(tile, xe, id) {
1623 			if (!vm->pt_root[id])
1624 				continue;
1625 
1626 			xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1627 		}
1628 	}
1629 	if (err)
1630 		goto err_close;
1631 
1632 	/* Kernel migration VM shouldn't have a circular loop. */
1633 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
1634 		for_each_tile(tile, xe, id) {
1635 			struct xe_exec_queue *q;
1636 			u32 create_flags = EXEC_QUEUE_FLAG_VM;
1637 
1638 			if (!vm->pt_root[id])
1639 				continue;
1640 
1641 			q = xe_exec_queue_create_bind(xe, tile, vm, create_flags, 0);
1642 			if (IS_ERR(q)) {
1643 				err = PTR_ERR(q);
1644 				goto err_close;
1645 			}
1646 			vm->q[id] = q;
1647 		}
1648 	}
1649 
1650 	if (xef && xe->info.has_asid) {
1651 		u32 asid;
1652 
1653 		down_write(&xe->usm.lock);
1654 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1655 				      XA_LIMIT(1, XE_MAX_ASID - 1),
1656 				      &xe->usm.next_asid, GFP_KERNEL);
1657 		up_write(&xe->usm.lock);
1658 		if (err < 0)
1659 			goto err_close;
1660 
1661 		vm->usm.asid = asid;
1662 	}
1663 
1664 	trace_xe_vm_create(vm);
1665 
1666 	return vm;
1667 
1668 err_close:
1669 	xe_vm_close_and_put(vm);
1670 	return ERR_PTR(err);
1671 
1672 err_svm_fini:
1673 	if (flags & XE_VM_FLAG_FAULT_MODE) {
1674 		vm->size = 0; /* close the vm */
1675 		xe_svm_fini(vm);
1676 	}
1677 err_no_resv:
1678 	mutex_destroy(&vm->snap_mutex);
1679 	for_each_tile(tile, xe, id)
1680 		xe_range_fence_tree_fini(&vm->rftree[id]);
1681 	ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
1682 	if (vm->xef)
1683 		xe_file_put(vm->xef);
1684 	kfree(vm);
1685 	if (flags & XE_VM_FLAG_LR_MODE)
1686 		xe_pm_runtime_put(xe);
1687 	return ERR_PTR(err);
1688 }
1689 
1690 static void xe_vm_close(struct xe_vm *vm)
1691 {
1692 	struct xe_device *xe = vm->xe;
1693 	bool bound;
1694 	int idx;
1695 
1696 	bound = drm_dev_enter(&xe->drm, &idx);
1697 
1698 	down_write(&vm->lock);
1699 	if (xe_vm_in_fault_mode(vm))
1700 		xe_svm_notifier_lock(vm);
1701 
1702 	vm->size = 0;
1703 
1704 	if (!((vm->flags & XE_VM_FLAG_MIGRATION))) {
1705 		struct xe_tile *tile;
1706 		struct xe_gt *gt;
1707 		u8 id;
1708 
1709 		/* Wait for pending binds */
1710 		dma_resv_wait_timeout(xe_vm_resv(vm),
1711 				      DMA_RESV_USAGE_BOOKKEEP,
1712 				      false, MAX_SCHEDULE_TIMEOUT);
1713 
1714 		if (bound) {
1715 			for_each_tile(tile, xe, id)
1716 				if (vm->pt_root[id])
1717 					xe_pt_clear(xe, vm->pt_root[id]);
1718 
1719 			for_each_gt(gt, xe, id)
1720 				xe_tlb_inval_vm(&gt->tlb_inval, vm);
1721 		}
1722 	}
1723 
1724 	if (xe_vm_in_fault_mode(vm))
1725 		xe_svm_notifier_unlock(vm);
1726 	up_write(&vm->lock);
1727 
1728 	if (bound)
1729 		drm_dev_exit(idx);
1730 }
1731 
1732 void xe_vm_close_and_put(struct xe_vm *vm)
1733 {
1734 	LIST_HEAD(contested);
1735 	struct xe_device *xe = vm->xe;
1736 	struct xe_tile *tile;
1737 	struct xe_vma *vma, *next_vma;
1738 	struct drm_gpuva *gpuva, *next;
1739 	u8 id;
1740 
1741 	xe_assert(xe, !vm->preempt.num_exec_queues);
1742 
1743 	xe_vm_close(vm);
1744 	if (xe_vm_in_preempt_fence_mode(vm)) {
1745 		mutex_lock(&xe->rebind_resume_lock);
1746 		list_del_init(&vm->preempt.pm_activate_link);
1747 		mutex_unlock(&xe->rebind_resume_lock);
1748 		flush_work(&vm->preempt.rebind_work);
1749 	}
1750 	if (xe_vm_in_fault_mode(vm))
1751 		xe_svm_close(vm);
1752 
1753 	down_write(&vm->lock);
1754 	for_each_tile(tile, xe, id) {
1755 		if (vm->q[id]) {
1756 			int i;
1757 
1758 			xe_exec_queue_last_fence_put(vm->q[id], vm);
1759 			for_each_tlb_inval(i)
1760 				xe_exec_queue_tlb_inval_last_fence_put(vm->q[id], vm, i);
1761 		}
1762 	}
1763 	up_write(&vm->lock);
1764 
1765 	for_each_tile(tile, xe, id) {
1766 		if (vm->q[id]) {
1767 			xe_exec_queue_kill(vm->q[id]);
1768 			xe_exec_queue_put(vm->q[id]);
1769 			vm->q[id] = NULL;
1770 		}
1771 	}
1772 
1773 	down_write(&vm->lock);
1774 	xe_vm_lock(vm, false);
1775 	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1776 		vma = gpuva_to_vma(gpuva);
1777 
1778 		if (xe_vma_has_no_bo(vma)) {
1779 			xe_svm_notifier_lock(vm);
1780 			vma->gpuva.flags |= XE_VMA_DESTROYED;
1781 			xe_svm_notifier_unlock(vm);
1782 		}
1783 
1784 		xe_vm_remove_vma(vm, vma);
1785 
1786 		/* Easy case: no BO or a VM-private BO; destroy the VMA right away */
1787 		if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1788 			list_del_init(&vma->combined_links.rebind);
1789 			xe_vma_destroy(vma, NULL);
1790 			continue;
1791 		}
1792 
1793 		list_move_tail(&vma->combined_links.destroy, &contested);
1794 		vma->gpuva.flags |= XE_VMA_DESTROYED;
1795 	}
1796 
1797 	/*
1798 	 * All vm operations will add shared fences to resv.
1799 	 * The only exception is eviction for a shared object,
1800 	 * but even so, the unbind when evicted would still
1801 	 * install a fence to resv. Hence it's safe to
1802 	 * destroy the pagetables immediately.
1803 	 */
1804 	xe_vm_free_scratch(vm);
1805 	xe_vm_pt_destroy(vm);
1806 	xe_vm_unlock(vm);
1807 
1808 	/*
1809 	 * The VM is now dead, so no new nodes can be added to it.
1810 	 * Since we hold a refcount to the bo, we can remove and free
1811 	 * the members safely without locking.
1812 	 */
1813 	list_for_each_entry_safe(vma, next_vma, &contested,
1814 				 combined_links.destroy) {
1815 		list_del_init(&vma->combined_links.destroy);
1816 		xe_vma_destroy_unlocked(vma);
1817 	}
1818 
1819 	xe_svm_fini(vm);
1820 
1821 	up_write(&vm->lock);
1822 
1823 	down_write(&xe->usm.lock);
1824 	if (vm->usm.asid) {
1825 		void *lookup;
1826 
1827 		xe_assert(xe, xe->info.has_asid);
1828 		xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
1829 
1830 		lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1831 		xe_assert(xe, lookup == vm);
1832 	}
1833 	up_write(&xe->usm.lock);
1834 
1835 	for_each_tile(tile, xe, id)
1836 		xe_range_fence_tree_fini(&vm->rftree[id]);
1837 
1838 	xe_vm_put(vm);
1839 }
1840 
1841 static void vm_destroy_work_func(struct work_struct *w)
1842 {
1843 	struct xe_vm *vm =
1844 		container_of(w, struct xe_vm, destroy_work);
1845 	struct xe_device *xe = vm->xe;
1846 	struct xe_tile *tile;
1847 	u8 id;
1848 
1849 	/* xe_vm_close_and_put was not called? */
1850 	xe_assert(xe, !vm->size);
1851 
1852 	if (xe_vm_in_preempt_fence_mode(vm))
1853 		flush_work(&vm->preempt.rebind_work);
1854 
1855 	mutex_destroy(&vm->snap_mutex);
1856 
1857 	if (vm->flags & XE_VM_FLAG_LR_MODE)
1858 		xe_pm_runtime_put(xe);
1859 
1860 	for_each_tile(tile, xe, id)
1861 		XE_WARN_ON(vm->pt_root[id]);
1862 
1863 	trace_xe_vm_free(vm);
1864 
1865 	ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
1866 
1867 	if (vm->xef)
1868 		xe_file_put(vm->xef);
1869 
1870 	kfree(vm);
1871 }
1872 
1873 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1874 {
1875 	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1876 
1877 	/* To destroy the VM we need to be able to sleep */
1878 	queue_work(system_unbound_wq, &vm->destroy_work);
1879 }
1880 
1881 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1882 {
1883 	struct xe_vm *vm;
1884 
1885 	mutex_lock(&xef->vm.lock);
1886 	vm = xa_load(&xef->vm.xa, id);
1887 	if (vm)
1888 		xe_vm_get(vm);
1889 	mutex_unlock(&xef->vm.lock);
1890 
1891 	return vm;
1892 }
1893 
1894 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1895 {
1896 	return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0);
1897 }
1898 
1899 static struct xe_exec_queue *
1900 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
1901 {
1902 	return q ? q : vm->q[0];
1903 }
1904 
1905 static struct xe_user_fence *
1906 find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
1907 {
1908 	unsigned int i;
1909 
1910 	for (i = 0; i < num_syncs; i++) {
1911 		struct xe_sync_entry *e = &syncs[i];
1912 
1913 		if (xe_sync_is_ufence(e))
1914 			return xe_sync_ufence_get(e);
1915 	}
1916 
1917 	return NULL;
1918 }
1919 
1920 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
1921 				    DRM_XE_VM_CREATE_FLAG_LR_MODE | \
1922 				    DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1923 
1924 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1925 		       struct drm_file *file)
1926 {
1927 	struct xe_device *xe = to_xe_device(dev);
1928 	struct xe_file *xef = to_xe_file(file);
1929 	struct drm_xe_vm_create *args = data;
1930 	struct xe_gt *wa_gt = xe_root_mmio_gt(xe);
1931 	struct xe_vm *vm;
1932 	u32 id;
1933 	int err;
1934 	u32 flags = 0;
1935 
1936 	if (XE_IOCTL_DBG(xe, args->extensions))
1937 		return -EINVAL;
1938 
1939 	if (wa_gt && XE_GT_WA(wa_gt, 22014953428))
1940 		args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
1941 
1942 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1943 			 !xe->info.has_usm))
1944 		return -EINVAL;
1945 
1946 	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1947 		return -EINVAL;
1948 
1949 	if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1950 		return -EINVAL;
1951 
1952 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
1953 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1954 			 !xe->info.needs_scratch))
1955 		return -EINVAL;
1956 
1957 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
1958 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1959 		return -EINVAL;
1960 
1961 	if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
1962 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
1963 	if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
1964 		flags |= XE_VM_FLAG_LR_MODE;
1965 	if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1966 		flags |= XE_VM_FLAG_FAULT_MODE;
1967 
1968 	vm = xe_vm_create(xe, flags, xef);
1969 	if (IS_ERR(vm))
1970 		return PTR_ERR(vm);
1971 
1972 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1973 	/* Warning: Security issue - never enable by default */
1974 	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
1975 #endif
1976 
1977 	/* user id alloc must always be last in ioctl to prevent UAF */
1978 	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1979 	if (err)
1980 		goto err_close_and_put;
1981 
1982 	args->vm_id = id;
1983 
1984 	return 0;
1985 
1986 err_close_and_put:
1987 	xe_vm_close_and_put(vm);
1988 
1989 	return err;
1990 }
1991 
1992 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1993 			struct drm_file *file)
1994 {
1995 	struct xe_device *xe = to_xe_device(dev);
1996 	struct xe_file *xef = to_xe_file(file);
1997 	struct drm_xe_vm_destroy *args = data;
1998 	struct xe_vm *vm;
1999 	int err = 0;
2000 
2001 	if (XE_IOCTL_DBG(xe, args->pad) ||
2002 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
2003 		return -EINVAL;
2004 
2005 	mutex_lock(&xef->vm.lock);
2006 	vm = xa_load(&xef->vm.xa, args->vm_id);
2007 	if (XE_IOCTL_DBG(xe, !vm))
2008 		err = -ENOENT;
2009 	else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
2010 		err = -EBUSY;
2011 	else
2012 		xa_erase(&xef->vm.xa, args->vm_id);
2013 	mutex_unlock(&xef->vm.lock);
2014 
2015 	if (!err)
2016 		xe_vm_close_and_put(vm);
2017 
2018 	return err;
2019 }
2020 
2021 static int xe_vm_query_vmas(struct xe_vm *vm, u64 start, u64 end)
2022 {
2023 	struct drm_gpuva *gpuva;
2024 	u32 num_vmas = 0;
2025 
2026 	lockdep_assert_held(&vm->lock);
2027 	drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end)
2028 		num_vmas++;
2029 
2030 	return num_vmas;
2031 }
2032 
2033 static int get_mem_attrs(struct xe_vm *vm, u32 *num_vmas, u64 start,
2034 			 u64 end, struct drm_xe_mem_range_attr *attrs)
2035 {
2036 	struct drm_gpuva *gpuva;
2037 	int i = 0;
2038 
2039 	lockdep_assert_held(&vm->lock);
2040 
2041 	drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
2042 		struct xe_vma *vma = gpuva_to_vma(gpuva);
2043 
2044 		if (i == *num_vmas)
2045 			return -ENOSPC;
2046 
2047 		attrs[i].start = xe_vma_start(vma);
2048 		attrs[i].end = xe_vma_end(vma);
2049 		attrs[i].atomic.val = vma->attr.atomic_access;
2050 		attrs[i].pat_index.val = vma->attr.pat_index;
2051 		attrs[i].preferred_mem_loc.devmem_fd = vma->attr.preferred_loc.devmem_fd;
2052 		attrs[i].preferred_mem_loc.migration_policy =
2053 			vma->attr.preferred_loc.migration_policy;
2054 
2055 		i++;
2056 	}
2057 
2058 	*num_vmas = i;
2059 	return 0;
2060 }
2061 
2062 int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2063 {
2064 	struct xe_device *xe = to_xe_device(dev);
2065 	struct xe_file *xef = to_xe_file(file);
2066 	struct drm_xe_mem_range_attr *mem_attrs;
2067 	struct drm_xe_vm_query_mem_range_attr *args = data;
2068 	u64 __user *attrs_user = u64_to_user_ptr(args->vector_of_mem_attr);
2069 	struct xe_vm *vm;
2070 	int err = 0;
2071 
2072 	if (XE_IOCTL_DBG(xe,
2073 			 ((args->num_mem_ranges == 0 &&
2074 			  (attrs_user || args->sizeof_mem_range_attr != 0)) ||
2075 			 (args->num_mem_ranges > 0 &&
2076 			  (!attrs_user ||
2077 			   args->sizeof_mem_range_attr !=
2078 			   sizeof(struct drm_xe_mem_range_attr))))))
2079 		return -EINVAL;
2080 
2081 	vm = xe_vm_lookup(xef, args->vm_id);
2082 	if (XE_IOCTL_DBG(xe, !vm))
2083 		return -EINVAL;
2084 
2085 	err = down_read_interruptible(&vm->lock);
2086 	if (err)
2087 		goto put_vm;
2088 
2089 	attrs_user = u64_to_user_ptr(args->vector_of_mem_attr);
2090 
2091 	if (args->num_mem_ranges == 0 && !attrs_user) {
2092 		args->num_mem_ranges = xe_vm_query_vmas(vm, args->start, args->start + args->range);
2093 		args->sizeof_mem_range_attr = sizeof(struct drm_xe_mem_range_attr);
2094 		goto unlock_vm;
2095 	}
2096 
2097 	mem_attrs = kvmalloc_array(args->num_mem_ranges, args->sizeof_mem_range_attr,
2098 				   GFP_KERNEL | __GFP_ACCOUNT |
2099 				   __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
2100 	if (!mem_attrs) {
2101 		err = args->num_mem_ranges > 1 ? -ENOBUFS : -ENOMEM;
2102 		goto unlock_vm;
2103 	}
2104 
2105 	memset(mem_attrs, 0, args->num_mem_ranges * args->sizeof_mem_range_attr);
2106 	err = get_mem_attrs(vm, &args->num_mem_ranges, args->start,
2107 			    args->start + args->range, mem_attrs);
2108 	if (err)
2109 		goto free_mem_attrs;
2110 
2111 	err = copy_to_user(attrs_user, mem_attrs,
2112 			   args->sizeof_mem_range_attr * args->num_mem_ranges);
2113 	if (err)
2114 		err = -EFAULT;
2115 
2116 free_mem_attrs:
2117 	kvfree(mem_attrs);
2118 unlock_vm:
2119 	up_read(&vm->lock);
2120 put_vm:
2121 	xe_vm_put(vm);
2122 	return err;
2123 }
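
/*
 * Illustrative userspace sketch of the two-pass protocol implemented above:
 * a first call with num_mem_ranges == 0 and no output vector returns the
 * number of ranges and the per-entry size, a second call fills the
 * caller-allocated array. The ioctl request macro name below is an
 * assumption based on the uapi naming convention; error handling is omitted.
 *
 *	struct drm_xe_vm_query_mem_range_attr q = {
 *		.vm_id = vm_id,
 *		.start = addr,
 *		.range = size,
 *	};
 *	struct drm_xe_mem_range_attr *attrs;
 *
 *	drmIoctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTR, &q);
 *	attrs = calloc(q.num_mem_ranges, q.sizeof_mem_range_attr);
 *	q.vector_of_mem_attr = (uintptr_t)attrs;
 *	drmIoctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTR, &q);
 */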
2124 
2125 static bool vma_matches(struct xe_vma *vma, u64 page_addr)
2126 {
2127 	if (page_addr > xe_vma_end(vma) - 1 ||
2128 	    page_addr + SZ_4K - 1 < xe_vma_start(vma))
2129 		return false;
2130 
2131 	return true;
2132 }
2133 
2134 /**
2135  * xe_vm_find_vma_by_addr() - Find a VMA by its address
2136  *
2137  * @vm: the xe_vm the vma belongs to
2138  * @page_addr: address to look up
 *
 * Return: the VMA covering @page_addr, or NULL if no mapping overlaps it.
2139  */
2140 struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr)
2141 {
2142 	struct xe_vma *vma = NULL;
2143 
2144 	if (vm->usm.last_fault_vma) {   /* Fast lookup */
2145 		if (vma_matches(vm->usm.last_fault_vma, page_addr))
2146 			vma = vm->usm.last_fault_vma;
2147 	}
2148 	if (!vma)
2149 		vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);
2150 
2151 	return vma;
2152 }
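
/*
 * Minimal in-kernel usage sketch (illustrative only); the caller is assumed
 * to hold the VM locking required for walking VMAs:
 *
 *	struct xe_vma *vma = xe_vm_find_vma_by_addr(vm, fault_addr);
 *
 *	if (!vma)
 *		return -EINVAL;	(no mapping backs this page)
 */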
2153 
2154 static const u32 region_to_mem_type[] = {
2155 	XE_PL_TT,
2156 	XE_PL_VRAM0,
2157 	XE_PL_VRAM1,
2158 };
2159 
2160 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
2161 			     bool post_commit)
2162 {
2163 	xe_svm_notifier_lock(vm);
2164 	vma->gpuva.flags |= XE_VMA_DESTROYED;
2165 	xe_svm_notifier_unlock(vm);
2166 	if (post_commit)
2167 		xe_vm_remove_vma(vm, vma);
2168 }
2169 
2170 #undef ULL
2171 #define ULL	unsigned long long
2172 
2173 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2174 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2175 {
2176 	struct xe_vma *vma;
2177 
2178 	switch (op->op) {
2179 	case DRM_GPUVA_OP_MAP:
2180 		vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2181 		       (ULL)op->map.va.addr, (ULL)op->map.va.range);
2182 		break;
2183 	case DRM_GPUVA_OP_REMAP:
2184 		vma = gpuva_to_vma(op->remap.unmap->va);
2185 		vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2186 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2187 		       op->remap.unmap->keep ? 1 : 0);
2188 		if (op->remap.prev)
2189 			vm_dbg(&xe->drm,
2190 			       "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2191 			       (ULL)op->remap.prev->va.addr,
2192 			       (ULL)op->remap.prev->va.range);
2193 		if (op->remap.next)
2194 			vm_dbg(&xe->drm,
2195 			       "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2196 			       (ULL)op->remap.next->va.addr,
2197 			       (ULL)op->remap.next->va.range);
2198 		break;
2199 	case DRM_GPUVA_OP_UNMAP:
2200 		vma = gpuva_to_vma(op->unmap.va);
2201 		vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2202 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2203 		       op->unmap.keep ? 1 : 0);
2204 		break;
2205 	case DRM_GPUVA_OP_PREFETCH:
2206 		vma = gpuva_to_vma(op->prefetch.va);
2207 		vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2208 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2209 		break;
2210 	default:
2211 		drm_warn(&xe->drm, "NOT POSSIBLE\n");
2212 	}
2213 }
2214 #else
2215 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2216 {
2217 }
2218 #endif
2219 
2220 static bool __xe_vm_needs_clear_scratch_pages(struct xe_vm *vm, u32 bind_flags)
2221 {
2222 	if (!xe_vm_in_fault_mode(vm))
2223 		return false;
2224 
2225 	if (!xe_vm_has_scratch(vm))
2226 		return false;
2227 
2228 	if (bind_flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE)
2229 		return false;
2230 
2231 	return true;
2232 }
2233 
2234 static void xe_svm_prefetch_gpuva_ops_fini(struct drm_gpuva_ops *ops)
2235 {
2236 	struct drm_gpuva_op *__op;
2237 
2238 	drm_gpuva_for_each_op(__op, ops) {
2239 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2240 
2241 		xe_vma_svm_prefetch_op_fini(op);
2242 	}
2243 }
2244 
2245 /*
2246  * Create an operations list from the IOCTL arguments and set up operation fields
2247  * so the parse and commit steps are decoupled from them. This step can fail.
2248  */
2249 static struct drm_gpuva_ops *
2250 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
2251 			 struct xe_bo *bo, u64 bo_offset_or_userptr,
2252 			 u64 addr, u64 range,
2253 			 u32 operation, u32 flags,
2254 			 u32 prefetch_region, u16 pat_index)
2255 {
2256 	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2257 	struct drm_gpuva_ops *ops;
2258 	struct drm_gpuva_op *__op;
2259 	struct drm_gpuvm_bo *vm_bo;
2260 	u64 range_start = addr;
2261 	u64 range_end = addr + range;
2262 	int err;
2263 
2264 	lockdep_assert_held_write(&vm->lock);
2265 
2266 	vm_dbg(&vm->xe->drm,
2267 	       "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2268 	       operation, (ULL)addr, (ULL)range,
2269 	       (ULL)bo_offset_or_userptr);
2270 
2271 	switch (operation) {
2272 	case DRM_XE_VM_BIND_OP_MAP:
2273 		if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR) {
2274 			xe_vm_find_cpu_addr_mirror_vma_range(vm, &range_start, &range_end);
2275 			vops->flags |= XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP;
2276 		}
2277 
2278 		fallthrough;
2279 	case DRM_XE_VM_BIND_OP_MAP_USERPTR: {
2280 		struct drm_gpuvm_map_req map_req = {
2281 			.map.va.addr = range_start,
2282 			.map.va.range = range_end - range_start,
2283 			.map.gem.obj = obj,
2284 			.map.gem.offset = bo_offset_or_userptr,
2285 		};
2286 
2287 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req);
2288 		break;
2289 	}
2290 	case DRM_XE_VM_BIND_OP_UNMAP:
2291 		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2292 		break;
2293 	case DRM_XE_VM_BIND_OP_PREFETCH:
2294 		ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2295 		break;
2296 	case DRM_XE_VM_BIND_OP_UNMAP_ALL:
2297 		xe_assert(vm->xe, bo);
2298 
2299 		err = xe_bo_lock(bo, true);
2300 		if (err)
2301 			return ERR_PTR(err);
2302 
2303 		vm_bo = drm_gpuvm_bo_obtain_locked(&vm->gpuvm, obj);
2304 		if (IS_ERR(vm_bo)) {
2305 			xe_bo_unlock(bo);
2306 			return ERR_CAST(vm_bo);
2307 		}
2308 
2309 		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2310 		drm_gpuvm_bo_put(vm_bo);
2311 		xe_bo_unlock(bo);
2312 		break;
2313 	default:
2314 		drm_warn(&vm->xe->drm, "NOT POSSIBLE\n");
2315 		ops = ERR_PTR(-EINVAL);
2316 	}
2317 	if (IS_ERR(ops))
2318 		return ops;
2319 
2320 	drm_gpuva_for_each_op(__op, ops) {
2321 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2322 
2323 		if (__op->op == DRM_GPUVA_OP_MAP) {
2324 			op->map.immediate =
2325 				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
2326 			if (flags & DRM_XE_VM_BIND_FLAG_READONLY)
2327 				op->map.vma_flags |= XE_VMA_READ_ONLY;
2328 			if (flags & DRM_XE_VM_BIND_FLAG_NULL)
2329 				op->map.vma_flags |= DRM_GPUVA_SPARSE;
2330 			if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
2331 				op->map.vma_flags |= XE_VMA_SYSTEM_ALLOCATOR;
2332 			if (flags & DRM_XE_VM_BIND_FLAG_DUMPABLE)
2333 				op->map.vma_flags |= XE_VMA_DUMPABLE;
2334 			if (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
2335 				op->map.vma_flags |= XE_VMA_MADV_AUTORESET;
2336 			op->map.pat_index = pat_index;
2337 			op->map.invalidate_on_bind =
2338 				__xe_vm_needs_clear_scratch_pages(vm, flags);
2339 		} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
2340 			struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
2341 			struct xe_tile *tile;
2342 			struct xe_svm_range *svm_range;
2343 			struct drm_gpusvm_ctx ctx = {};
2344 			struct drm_pagemap *dpagemap = NULL;
2345 			u8 id, tile_mask = 0;
2346 			u32 i;
2347 
2348 			if (!xe_vma_is_cpu_addr_mirror(vma)) {
2349 				op->prefetch.region = prefetch_region;
2350 				break;
2351 			}
2352 
2353 			ctx.read_only = xe_vma_read_only(vma);
2354 			ctx.devmem_possible = IS_DGFX(vm->xe) &&
2355 					      IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
2356 
2357 			for_each_tile(tile, vm->xe, id)
2358 				tile_mask |= 0x1 << id;
2359 
2360 			xa_init_flags(&op->prefetch_range.range, XA_FLAGS_ALLOC);
2361 			op->prefetch_range.ranges_count = 0;
2362 
2363 			if (prefetch_region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC) {
2364 				dpagemap = xe_vma_resolve_pagemap(vma,
2365 								  xe_device_get_root_tile(vm->xe));
2366 			} else if (prefetch_region) {
2367 				tile = &vm->xe->tiles[region_to_mem_type[prefetch_region] -
2368 						      XE_PL_VRAM0];
2369 				dpagemap = xe_tile_local_pagemap(tile);
2370 			}
2371 
2372 			op->prefetch_range.dpagemap = dpagemap;
2373 alloc_next_range:
2374 			svm_range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx);
2375 
2376 			if (PTR_ERR(svm_range) == -ENOENT) {
2377 				u64 ret = xe_svm_find_vma_start(vm, addr, range_end, vma);
2378 
2379 				addr = ret == ULONG_MAX ? 0 : ret;
2380 				if (addr)
2381 					goto alloc_next_range;
2382 				else
2383 					goto print_op_label;
2384 			}
2385 
2386 			if (IS_ERR(svm_range)) {
2387 				err = PTR_ERR(svm_range);
2388 				goto unwind_prefetch_ops;
2389 			}
2390 
2391 			if (xe_svm_range_validate(vm, svm_range, tile_mask, dpagemap)) {
2392 				xe_svm_range_debug(svm_range, "PREFETCH - RANGE IS VALID");
2393 				goto check_next_range;
2394 			}
2395 
2396 			err = xa_alloc(&op->prefetch_range.range,
2397 				       &i, svm_range, xa_limit_32b,
2398 				       GFP_KERNEL);
2399 
2400 			if (err)
2401 				goto unwind_prefetch_ops;
2402 
2403 			op->prefetch_range.ranges_count++;
2404 			vops->flags |= XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH;
2405 			xe_svm_range_debug(svm_range, "PREFETCH - RANGE CREATED");
2406 check_next_range:
2407 			if (range_end > xe_svm_range_end(svm_range) &&
2408 			    xe_svm_range_end(svm_range) < xe_vma_end(vma)) {
2409 				addr = xe_svm_range_end(svm_range);
2410 				goto alloc_next_range;
2411 			}
2412 		}
2413 print_op_label:
2414 		print_op(vm->xe, __op);
2415 	}
2416 
2417 	return ops;
2418 
2419 unwind_prefetch_ops:
2420 	xe_svm_prefetch_gpuva_ops_fini(ops);
2421 	drm_gpuva_ops_free(&vm->gpuvm, ops);
2422 	return ERR_PTR(err);
2423 }
2424 
2425 ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
2426 
2427 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2428 			      struct xe_vma_mem_attr *attr, unsigned int flags)
2429 {
2430 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2431 	struct xe_validation_ctx ctx;
2432 	struct drm_exec exec;
2433 	struct xe_vma *vma;
2434 	int err = 0;
2435 
2436 	lockdep_assert_held_write(&vm->lock);
2437 
2438 	if (bo) {
2439 		err = 0;
2440 		xe_validation_guard(&ctx, &vm->xe->val, &exec,
2441 				    (struct xe_val_flags) {.interruptible = true}, err) {
2442 			if (!bo->vm) {
2443 				err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
2444 				drm_exec_retry_on_contention(&exec);
2445 			}
2446 			if (!err) {
2447 				err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2448 				drm_exec_retry_on_contention(&exec);
2449 			}
2450 			if (err)
2451 				return ERR_PTR(err);
2452 
2453 			vma = xe_vma_create(vm, bo, op->gem.offset,
2454 					    op->va.addr, op->va.addr +
2455 					    op->va.range - 1, attr, flags);
2456 			if (IS_ERR(vma))
2457 				return vma;
2458 
2459 			if (!bo->vm) {
2460 				err = add_preempt_fences(vm, bo);
2461 				if (err) {
2462 					prep_vma_destroy(vm, vma, false);
2463 					xe_vma_destroy(vma, NULL);
2464 				}
2465 			}
2466 		}
2467 		if (err)
2468 			return ERR_PTR(err);
2469 	} else {
2470 		vma = xe_vma_create(vm, NULL, op->gem.offset,
2471 				    op->va.addr, op->va.addr +
2472 				    op->va.range - 1, attr, flags);
2473 		if (IS_ERR(vma))
2474 			return vma;
2475 
2476 		if (xe_vma_is_userptr(vma)) {
2477 			err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2478 			/*
2479 			 * -EBUSY has the dedicated meaning that a user fence
2480 			 * attached to the VMA is busy. In practice,
2481 			 * xe_vma_userptr_pin_pages() can only fail with -EBUSY
2482 			 * when we are low on memory, so convert this to -ENOMEM.
2483 			 */
2484 			if (err == -EBUSY)
2485 				err = -ENOMEM;
2486 		}
2487 	}
2488 	if (err) {
2489 		prep_vma_destroy(vm, vma, false);
2490 		xe_vma_destroy_unlocked(vma);
2491 		vma = ERR_PTR(err);
2492 	}
2493 
2494 	return vma;
2495 }
2496 
2497 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2498 {
2499 	if (vma->gpuva.flags & XE_VMA_PTE_1G)
2500 		return SZ_1G;
2501 	else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
2502 		return SZ_2M;
2503 	else if (vma->gpuva.flags & XE_VMA_PTE_64K)
2504 		return SZ_64K;
2505 	else if (vma->gpuva.flags & XE_VMA_PTE_4K)
2506 		return SZ_4K;
2507 
2508 	return SZ_1G;	/* Uninitialized, use max size */
2509 }
2510 
2511 static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2512 {
2513 	switch (size) {
2514 	case SZ_1G:
2515 		vma->gpuva.flags |= XE_VMA_PTE_1G;
2516 		break;
2517 	case SZ_2M:
2518 		vma->gpuva.flags |= XE_VMA_PTE_2M;
2519 		break;
2520 	case SZ_64K:
2521 		vma->gpuva.flags |= XE_VMA_PTE_64K;
2522 		break;
2523 	case SZ_4K:
2524 		vma->gpuva.flags |= XE_VMA_PTE_4K;
2525 		break;
2526 	}
2527 }
2528 
2529 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2530 {
2531 	int err = 0;
2532 
2533 	lockdep_assert_held_write(&vm->lock);
2534 
2535 	switch (op->base.op) {
2536 	case DRM_GPUVA_OP_MAP:
2537 		err |= xe_vm_insert_vma(vm, op->map.vma);
2538 		if (!err)
2539 			op->flags |= XE_VMA_OP_COMMITTED;
2540 		break;
2541 	case DRM_GPUVA_OP_REMAP:
2542 	{
2543 		u8 tile_present =
2544 			gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2545 
2546 		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2547 				 true);
2548 		op->flags |= XE_VMA_OP_COMMITTED;
2549 
2550 		if (op->remap.prev) {
2551 			err |= xe_vm_insert_vma(vm, op->remap.prev);
2552 			if (!err)
2553 				op->flags |= XE_VMA_OP_PREV_COMMITTED;
2554 			if (!err && op->remap.skip_prev) {
2555 				op->remap.prev->tile_present =
2556 					tile_present;
2557 				op->remap.prev = NULL;
2558 			}
2559 		}
2560 		if (op->remap.next) {
2561 			err |= xe_vm_insert_vma(vm, op->remap.next);
2562 			if (!err)
2563 				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2564 			if (!err && op->remap.skip_next) {
2565 				op->remap.next->tile_present =
2566 					tile_present;
2567 				op->remap.next = NULL;
2568 			}
2569 		}
2570 
2571 		/* Adjust for partial unbind after removing VMA from VM */
2572 		if (!err) {
2573 			op->base.remap.unmap->va->va.addr = op->remap.start;
2574 			op->base.remap.unmap->va->va.range = op->remap.range;
2575 		}
2576 		break;
2577 	}
2578 	case DRM_GPUVA_OP_UNMAP:
2579 		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2580 		op->flags |= XE_VMA_OP_COMMITTED;
2581 		break;
2582 	case DRM_GPUVA_OP_PREFETCH:
2583 		op->flags |= XE_VMA_OP_COMMITTED;
2584 		break;
2585 	default:
2586 		drm_warn(&vm->xe->drm, "NOT POSSIBLE\n");
2587 	}
2588 
2589 	return err;
2590 }
2591 
2592 /**
2593  * xe_vma_has_default_mem_attrs - Check if a VMA has default memory attributes
2594  * @vma: Pointer to the xe_vma structure to check
2595  *
2596  * This function determines whether the given VMA (Virtual Memory Area)
2597  * has its memory attributes set to their default values. Specifically,
2598  * it checks the following conditions:
2599  *
2600  * - `atomic_access` is `DRM_XE_ATOMIC_UNDEFINED`
2601  * - `pat_index` is equal to `default_pat_index`
2602  * - `preferred_loc.devmem_fd` is `DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE`
2603  * - `preferred_loc.migration_policy` is `DRM_XE_MIGRATE_ALL_PAGES`
2604  *
2605  * Return: true if all attributes are at their default values, false otherwise.
2606  */
2607 bool xe_vma_has_default_mem_attrs(struct xe_vma *vma)
2608 {
2609 	return (vma->attr.atomic_access == DRM_XE_ATOMIC_UNDEFINED &&
2610 		vma->attr.pat_index == vma->attr.default_pat_index &&
2611 		vma->attr.preferred_loc.devmem_fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE &&
2612 		vma->attr.preferred_loc.migration_policy == DRM_XE_MIGRATE_ALL_PAGES);
2613 }
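
/*
 * Illustrative caller-side sketch (not a specific in-tree user): advise-style
 * code can use this helper to decide whether a VMA still carries only default
 * attributes and can, for example, be merged with a neighbour or skipped:
 *
 *	if (xe_vma_has_default_mem_attrs(vma))
 *		return 0;	(nothing to restore for this VMA)
 */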
2614 
2615 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
2616 				   struct xe_vma_ops *vops)
2617 {
2618 	struct xe_device *xe = vm->xe;
2619 	struct drm_gpuva_op *__op;
2620 	struct xe_tile *tile;
2621 	u8 id, tile_mask = 0;
2622 	int err = 0;
2623 
2624 	lockdep_assert_held_write(&vm->lock);
2625 
2626 	for_each_tile(tile, vm->xe, id)
2627 		tile_mask |= 0x1 << id;
2628 
2629 	drm_gpuva_for_each_op(__op, ops) {
2630 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2631 		struct xe_vma *vma;
2632 		unsigned int flags = 0;
2633 
2634 		INIT_LIST_HEAD(&op->link);
2635 		list_add_tail(&op->link, &vops->list);
2636 		op->tile_mask = tile_mask;
2637 
2638 		switch (op->base.op) {
2639 		case DRM_GPUVA_OP_MAP:
2640 		{
2641 			struct xe_vma_mem_attr default_attr = {
2642 				.preferred_loc = {
2643 					.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
2644 					.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
2645 				},
2646 				.atomic_access = DRM_XE_ATOMIC_UNDEFINED,
2647 				.default_pat_index = op->map.pat_index,
2648 				.pat_index = op->map.pat_index,
2649 			};
2650 
2651 			flags |= op->map.vma_flags & XE_VMA_CREATE_MASK;
2652 
2653 			vma = new_vma(vm, &op->base.map, &default_attr,
2654 				      flags);
2655 			if (IS_ERR(vma))
2656 				return PTR_ERR(vma);
2657 
2658 			op->map.vma = vma;
2659 			if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
2660 			     !(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) ||
2661 			    op->map.invalidate_on_bind)
2662 				xe_vma_ops_incr_pt_update_ops(vops,
2663 							      op->tile_mask, 1);
2664 			break;
2665 		}
2666 		case DRM_GPUVA_OP_REMAP:
2667 		{
2668 			struct xe_vma *old =
2669 				gpuva_to_vma(op->base.remap.unmap->va);
2670 			bool skip = xe_vma_is_cpu_addr_mirror(old);
2671 			u64 start = xe_vma_start(old), end = xe_vma_end(old);
2672 			int num_remap_ops = 0;
2673 
2674 			if (op->base.remap.prev)
2675 				start = op->base.remap.prev->va.addr +
2676 					op->base.remap.prev->va.range;
2677 			if (op->base.remap.next)
2678 				end = op->base.remap.next->va.addr;
2679 
2680 			if (xe_vma_is_cpu_addr_mirror(old) &&
2681 			    xe_svm_has_mapping(vm, start, end)) {
2682 				if (vops->flags & XE_VMA_OPS_FLAG_MADVISE)
2683 					xe_svm_unmap_address_range(vm, start, end);
2684 				else
2685 					return -EBUSY;
2686 			}
2687 
2688 			op->remap.start = xe_vma_start(old);
2689 			op->remap.range = xe_vma_size(old);
2690 
2691 			flags |= op->base.remap.unmap->va->flags & XE_VMA_CREATE_MASK;
2692 			if (op->base.remap.prev) {
2693 				vma = new_vma(vm, op->base.remap.prev,
2694 					      &old->attr, flags);
2695 				if (IS_ERR(vma))
2696 					return PTR_ERR(vma);
2697 
2698 				op->remap.prev = vma;
2699 
2700 				/*
2701 				 * Userptr creates a new SG mapping so
2702 				 * we must also rebind.
2703 				 */
2704 				op->remap.skip_prev = skip ||
2705 					(!xe_vma_is_userptr(old) &&
2706 					IS_ALIGNED(xe_vma_end(vma),
2707 						   xe_vma_max_pte_size(old)));
2708 				if (op->remap.skip_prev) {
2709 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2710 					op->remap.range -=
2711 						xe_vma_end(vma) -
2712 						xe_vma_start(old);
2713 					op->remap.start = xe_vma_end(vma);
2714 					vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
2715 					       (ULL)op->remap.start,
2716 					       (ULL)op->remap.range);
2717 				} else {
2718 					num_remap_ops++;
2719 				}
2720 			}
2721 
2722 			if (op->base.remap.next) {
2723 				vma = new_vma(vm, op->base.remap.next,
2724 					      &old->attr, flags);
2725 				if (IS_ERR(vma))
2726 					return PTR_ERR(vma);
2727 
2728 				op->remap.next = vma;
2729 
2730 				/*
2731 				 * Userptr creates a new SG mapping so
2732 				 * we must also rebind.
2733 				 */
2734 				op->remap.skip_next = skip ||
2735 					(!xe_vma_is_userptr(old) &&
2736 					IS_ALIGNED(xe_vma_start(vma),
2737 						   xe_vma_max_pte_size(old)));
2738 				if (op->remap.skip_next) {
2739 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2740 					op->remap.range -=
2741 						xe_vma_end(old) -
2742 						xe_vma_start(vma);
2743 					vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
2744 					       (ULL)op->remap.start,
2745 					       (ULL)op->remap.range);
2746 				} else {
2747 					num_remap_ops++;
2748 				}
2749 			}
2750 			if (!skip)
2751 				num_remap_ops++;
2752 
2753 			xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, num_remap_ops);
2754 			break;
2755 		}
2756 		case DRM_GPUVA_OP_UNMAP:
2757 			vma = gpuva_to_vma(op->base.unmap.va);
2758 
2759 			if (xe_vma_is_cpu_addr_mirror(vma) &&
2760 			    xe_svm_has_mapping(vm, xe_vma_start(vma),
2761 					       xe_vma_end(vma)) &&
2762 			    !(vops->flags & XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP))
2763 				return -EBUSY;
2764 
2765 			if (!xe_vma_is_cpu_addr_mirror(vma))
2766 				xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);
2767 			break;
2768 		case DRM_GPUVA_OP_PREFETCH:
2769 			vma = gpuva_to_vma(op->base.prefetch.va);
2770 
2771 			if (xe_vma_is_userptr(vma)) {
2772 				err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2773 				if (err)
2774 					return err;
2775 			}
2776 
2777 			if (xe_vma_is_cpu_addr_mirror(vma))
2778 				xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask,
2779 							      op->prefetch_range.ranges_count);
2780 			else
2781 				xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);
2782 
2783 			break;
2784 		default:
2785 			drm_warn(&vm->xe->drm, "NOT POSSIBLE\n");
2786 		}
2787 
2788 		err = xe_vma_op_commit(vm, op);
2789 		if (err)
2790 			return err;
2791 	}
2792 
2793 	return 0;
2794 }
2795 
2796 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2797 			     bool post_commit, bool prev_post_commit,
2798 			     bool next_post_commit)
2799 {
2800 	lockdep_assert_held_write(&vm->lock);
2801 
2802 	switch (op->base.op) {
2803 	case DRM_GPUVA_OP_MAP:
2804 		if (op->map.vma) {
2805 			prep_vma_destroy(vm, op->map.vma, post_commit);
2806 			xe_vma_destroy_unlocked(op->map.vma);
2807 		}
2808 		break;
2809 	case DRM_GPUVA_OP_UNMAP:
2810 	{
2811 		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2812 
2813 		if (vma) {
2814 			xe_svm_notifier_lock(vm);
2815 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2816 			xe_svm_notifier_unlock(vm);
2817 			if (post_commit)
2818 				xe_vm_insert_vma(vm, vma);
2819 		}
2820 		break;
2821 	}
2822 	case DRM_GPUVA_OP_REMAP:
2823 	{
2824 		struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2825 
2826 		if (op->remap.prev) {
2827 			prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2828 			xe_vma_destroy_unlocked(op->remap.prev);
2829 		}
2830 		if (op->remap.next) {
2831 			prep_vma_destroy(vm, op->remap.next, next_post_commit);
2832 			xe_vma_destroy_unlocked(op->remap.next);
2833 		}
2834 		if (vma) {
2835 			xe_svm_notifier_lock(vm);
2836 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2837 			xe_svm_notifier_unlock(vm);
2838 			if (post_commit)
2839 				xe_vm_insert_vma(vm, vma);
2840 		}
2841 		break;
2842 	}
2843 	case DRM_GPUVA_OP_PREFETCH:
2844 		/* Nothing to do */
2845 		break;
2846 	default:
2847 		drm_warn(&vm->xe->drm, "NOT POSSIBLE\n");
2848 	}
2849 }
2850 
2851 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2852 				     struct drm_gpuva_ops **ops,
2853 				     int num_ops_list)
2854 {
2855 	int i;
2856 
2857 	for (i = num_ops_list - 1; i >= 0; --i) {
2858 		struct drm_gpuva_ops *__ops = ops[i];
2859 		struct drm_gpuva_op *__op;
2860 
2861 		if (!__ops)
2862 			continue;
2863 
2864 		drm_gpuva_for_each_op_reverse(__op, __ops) {
2865 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2866 
2867 			xe_vma_op_unwind(vm, op,
2868 					 op->flags & XE_VMA_OP_COMMITTED,
2869 					 op->flags & XE_VMA_OP_PREV_COMMITTED,
2870 					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2871 		}
2872 	}
2873 }
2874 
2875 static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
2876 				 bool res_evict, bool validate)
2877 {
2878 	struct xe_bo *bo = xe_vma_bo(vma);
2879 	struct xe_vm *vm = xe_vma_vm(vma);
2880 	int err = 0;
2881 
2882 	if (bo) {
2883 		if (!bo->vm)
2884 			err = drm_exec_lock_obj(exec, &bo->ttm.base);
2885 		if (!err && validate)
2886 			err = xe_bo_validate(bo, vm,
2887 					     !xe_vm_in_preempt_fence_mode(vm) &&
2888 					     res_evict, exec);
2889 	}
2890 
2891 	return err;
2892 }
2893 
2894 static int check_ufence(struct xe_vma *vma)
2895 {
2896 	if (vma->ufence) {
2897 		struct xe_user_fence * const f = vma->ufence;
2898 
2899 		if (!xe_sync_ufence_get_status(f))
2900 			return -EBUSY;
2901 
2902 		vma->ufence = NULL;
2903 		xe_sync_ufence_put(f);
2904 	}
2905 
2906 	return 0;
2907 }
2908 
2909 static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
2910 {
2911 	bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
2912 	struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
2913 	struct drm_pagemap *dpagemap = op->prefetch_range.dpagemap;
2914 	int err = 0;
2915 
2916 	struct xe_svm_range *svm_range;
2917 	struct drm_gpusvm_ctx ctx = {};
2918 	unsigned long i;
2919 
2920 	if (!xe_vma_is_cpu_addr_mirror(vma))
2921 		return 0;
2922 
2923 	ctx.read_only = xe_vma_read_only(vma);
2924 	ctx.devmem_possible = devmem_possible;
2925 	ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
2926 	ctx.device_private_page_owner = xe_svm_private_page_owner(vm, !dpagemap);
2927 
2928 	/* TODO: Threading the migration */
2929 	xa_for_each(&op->prefetch_range.range, i, svm_range) {
2930 		if (!dpagemap)
2931 			xe_svm_range_migrate_to_smem(vm, svm_range);
2932 
2933 		if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)) {
2934 			drm_dbg(&vm->xe->drm,
2935 				"Prefetch pagemap is %s start 0x%016lx end 0x%016lx\n",
2936 				dpagemap ? dpagemap->drm->unique : "system",
2937 				xe_svm_range_start(svm_range), xe_svm_range_end(svm_range));
2938 		}
2939 
2940 		if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, dpagemap)) {
2941 			err = xe_svm_alloc_vram(svm_range, &ctx, dpagemap);
2942 			if (err) {
2943 				drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
2944 					vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
2945 				return -ENODATA;
2946 			}
2947 			xe_svm_range_debug(svm_range, "PREFETCH - RANGE MIGRATED TO VRAM");
2948 		}
2949 
2950 		err = xe_svm_range_get_pages(vm, svm_range, &ctx);
2951 		if (err) {
2952 			drm_dbg(&vm->xe->drm, "Get pages failed, asid=%u, gpusvm=%p, errno=%pe\n",
2953 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
2954 			if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM)
2955 				err = -ENODATA;
2956 			return err;
2957 		}
2958 		xe_svm_range_debug(svm_range, "PREFETCH - RANGE GET PAGES DONE");
2959 	}
2960 
2961 	return err;
2962 }
2963 
2964 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
2965 			    struct xe_vma_ops *vops, struct xe_vma_op *op)
2966 {
2967 	int err = 0;
2968 	bool res_evict;
2969 
2970 	/*
2971 	 * We only allow evicting a BO within the VM if it is not part of an
2972 	 * array of binds, as an array of binds can evict another BO within the
2973 	 * bind.
2974 	 */
2975 	res_evict = !(vops->flags & XE_VMA_OPS_ARRAY_OF_BINDS);
2976 
2977 	switch (op->base.op) {
2978 	case DRM_GPUVA_OP_MAP:
2979 		if (!op->map.invalidate_on_bind)
2980 			err = vma_lock_and_validate(exec, op->map.vma,
2981 						    res_evict,
2982 						    !xe_vm_in_fault_mode(vm) ||
2983 						    op->map.immediate);
2984 		break;
2985 	case DRM_GPUVA_OP_REMAP:
2986 		err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
2987 		if (err)
2988 			break;
2989 
2990 		err = vma_lock_and_validate(exec,
2991 					    gpuva_to_vma(op->base.remap.unmap->va),
2992 					    res_evict, false);
2993 		if (!err && op->remap.prev)
2994 			err = vma_lock_and_validate(exec, op->remap.prev,
2995 						    res_evict, true);
2996 		if (!err && op->remap.next)
2997 			err = vma_lock_and_validate(exec, op->remap.next,
2998 						    res_evict, true);
2999 		break;
3000 	case DRM_GPUVA_OP_UNMAP:
3001 		err = check_ufence(gpuva_to_vma(op->base.unmap.va));
3002 		if (err)
3003 			break;
3004 
3005 		err = vma_lock_and_validate(exec,
3006 					    gpuva_to_vma(op->base.unmap.va),
3007 					    res_evict, false);
3008 		break;
3009 	case DRM_GPUVA_OP_PREFETCH:
3010 	{
3011 		struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
3012 		u32 region;
3013 
3014 		if (!xe_vma_is_cpu_addr_mirror(vma)) {
3015 			region = op->prefetch.region;
3016 			xe_assert(vm->xe, region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC ||
3017 				  region <= ARRAY_SIZE(region_to_mem_type));
3018 		}
3019 
3020 		err = vma_lock_and_validate(exec,
3021 					    gpuva_to_vma(op->base.prefetch.va),
3022 					    res_evict, false);
3023 		if (!err && !xe_vma_has_no_bo(vma))
3024 			err = xe_bo_migrate(xe_vma_bo(vma),
3025 					    region_to_mem_type[region],
3026 					    NULL,
3027 					    exec);
3028 		break;
3029 	}
3030 	default:
3031 		drm_warn(&vm->xe->drm, "NOT POSSIBLE\n");
3032 	}
3033 
3034 	return err;
3035 }
3036 
3037 static int vm_bind_ioctl_ops_prefetch_ranges(struct xe_vm *vm, struct xe_vma_ops *vops)
3038 {
3039 	struct xe_vma_op *op;
3040 	int err;
3041 
3042 	if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH))
3043 		return 0;
3044 
3045 	list_for_each_entry(op, &vops->list, link) {
3046 		if (op->base.op  == DRM_GPUVA_OP_PREFETCH) {
3047 			err = prefetch_ranges(vm, op);
3048 			if (err)
3049 				return err;
3050 		}
3051 	}
3052 
3053 	return 0;
3054 }
3055 
3056 static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
3057 					   struct xe_vm *vm,
3058 					   struct xe_vma_ops *vops)
3059 {
3060 	struct xe_vma_op *op;
3061 	int err;
3062 
3063 	err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
3064 	if (err)
3065 		return err;
3066 
3067 	list_for_each_entry(op, &vops->list, link) {
3068 		err = op_lock_and_prep(exec, vm, vops, op);
3069 		if (err)
3070 			return err;
3071 	}
3072 
3073 #ifdef TEST_VM_OPS_ERROR
3074 	if (vops->inject_error &&
3075 	    vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK)
3076 		return -ENOSPC;
3077 #endif
3078 
3079 	return 0;
3080 }
3081 
3082 static void op_trace(struct xe_vma_op *op)
3083 {
3084 	switch (op->base.op) {
3085 	case DRM_GPUVA_OP_MAP:
3086 		trace_xe_vma_bind(op->map.vma);
3087 		break;
3088 	case DRM_GPUVA_OP_REMAP:
3089 		trace_xe_vma_unbind(gpuva_to_vma(op->base.remap.unmap->va));
3090 		if (op->remap.prev)
3091 			trace_xe_vma_bind(op->remap.prev);
3092 		if (op->remap.next)
3093 			trace_xe_vma_bind(op->remap.next);
3094 		break;
3095 	case DRM_GPUVA_OP_UNMAP:
3096 		trace_xe_vma_unbind(gpuva_to_vma(op->base.unmap.va));
3097 		break;
3098 	case DRM_GPUVA_OP_PREFETCH:
3099 		trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va));
3100 		break;
3101 	case DRM_GPUVA_OP_DRIVER:
3102 		break;
3103 	default:
3104 		XE_WARN_ON("NOT POSSIBLE");
3105 	}
3106 }
3107 
3108 static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops)
3109 {
3110 	struct xe_vma_op *op;
3111 
3112 	list_for_each_entry(op, &vops->list, link)
3113 		op_trace(op);
3114 }
3115 
3116 static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops)
3117 {
3118 	struct xe_exec_queue *q = vops->q;
3119 	struct xe_tile *tile;
3120 	int number_tiles = 0;
3121 	u8 id;
3122 
3123 	for_each_tile(tile, vm->xe, id) {
3124 		if (vops->pt_update_ops[id].num_ops)
3125 			++number_tiles;
3126 
3127 		if (vops->pt_update_ops[id].q)
3128 			continue;
3129 
3130 		if (q) {
3131 			vops->pt_update_ops[id].q = q;
3132 			if (vm->pt_root[id] && !list_empty(&q->multi_gt_list))
3133 				q = list_next_entry(q, multi_gt_list);
3134 		} else {
3135 			vops->pt_update_ops[id].q = vm->q[id];
3136 		}
3137 	}
3138 
3139 	return number_tiles;
3140 }
3141 
3142 static struct dma_fence *ops_execute(struct xe_vm *vm,
3143 				     struct xe_vma_ops *vops)
3144 {
3145 	struct xe_tile *tile;
3146 	struct dma_fence *fence = NULL;
3147 	struct dma_fence **fences = NULL;
3148 	struct dma_fence_array *cf = NULL;
3149 	int number_tiles = 0, current_fence = 0, n_fence = 0, err, i;
3150 	u8 id;
3151 
3152 	number_tiles = vm_ops_setup_tile_args(vm, vops);
3153 	if (number_tiles == 0)
3154 		return ERR_PTR(-ENODATA);
3155 
3156 	for_each_tile(tile, vm->xe, id) {
3157 		++n_fence;
3158 
3159 		if (!(vops->flags & XE_VMA_OPS_FLAG_SKIP_TLB_WAIT))
3160 			for_each_tlb_inval(i)
3161 				++n_fence;
3162 	}
3163 
3164 	fences = kmalloc_array(n_fence, sizeof(*fences), GFP_KERNEL);
3165 	if (!fences) {
3166 		fence = ERR_PTR(-ENOMEM);
3167 		goto err_trace;
3168 	}
3169 
3170 	cf = dma_fence_array_alloc(n_fence);
3171 	if (!cf) {
3172 		fence = ERR_PTR(-ENOMEM);
3173 		goto err_out;
3174 	}
3175 
3176 	for_each_tile(tile, vm->xe, id) {
3177 		if (!vops->pt_update_ops[id].num_ops)
3178 			continue;
3179 
3180 		err = xe_pt_update_ops_prepare(tile, vops);
3181 		if (err) {
3182 			fence = ERR_PTR(err);
3183 			goto err_out;
3184 		}
3185 	}
3186 
3187 	trace_xe_vm_ops_execute(vops);
3188 
3189 	for_each_tile(tile, vm->xe, id) {
3190 		struct xe_exec_queue *q = vops->pt_update_ops[tile->id].q;
3191 
3192 		fence = NULL;
3193 		if (!vops->pt_update_ops[id].num_ops)
3194 			goto collect_fences;
3195 
3196 		fence = xe_pt_update_ops_run(tile, vops);
3197 		if (IS_ERR(fence))
3198 			goto err_out;
3199 
3200 collect_fences:
3201 		fences[current_fence++] = fence ?: dma_fence_get_stub();
3202 		if (vops->flags & XE_VMA_OPS_FLAG_SKIP_TLB_WAIT)
3203 			continue;
3204 
3205 		xe_migrate_job_lock(tile->migrate, q);
3206 		for_each_tlb_inval(i)
3207 			fences[current_fence++] =
3208 				xe_exec_queue_tlb_inval_last_fence_get(q, vm, i);
3209 		xe_migrate_job_unlock(tile->migrate, q);
3210 	}
3211 
3212 	xe_assert(vm->xe, current_fence == n_fence);
3213 	dma_fence_array_init(cf, n_fence, fences, dma_fence_context_alloc(1),
3214 			     1, false);
3215 	fence = &cf->base;
3216 
3217 	for_each_tile(tile, vm->xe, id) {
3218 		if (!vops->pt_update_ops[id].num_ops)
3219 			continue;
3220 
3221 		xe_pt_update_ops_fini(tile, vops);
3222 	}
3223 
3224 	return fence;
3225 
3226 err_out:
3227 	for_each_tile(tile, vm->xe, id) {
3228 		if (!vops->pt_update_ops[id].num_ops)
3229 			continue;
3230 
3231 		xe_pt_update_ops_abort(tile, vops);
3232 	}
3233 	while (current_fence)
3234 		dma_fence_put(fences[--current_fence]);
3235 	kfree(fences);
3236 	kfree(cf);
3237 
3238 err_trace:
3239 	trace_xe_vm_ops_fail(vm);
3240 	return fence;
3241 }
3242 
3243 static void vma_add_ufence(struct xe_vma *vma, struct xe_user_fence *ufence)
3244 {
3245 	if (vma->ufence)
3246 		xe_sync_ufence_put(vma->ufence);
3247 	vma->ufence = __xe_sync_ufence_get(ufence);
3248 }
3249 
3250 static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op,
3251 			  struct xe_user_fence *ufence)
3252 {
3253 	switch (op->base.op) {
3254 	case DRM_GPUVA_OP_MAP:
3255 		if (!xe_vma_is_cpu_addr_mirror(op->map.vma))
3256 			vma_add_ufence(op->map.vma, ufence);
3257 		break;
3258 	case DRM_GPUVA_OP_REMAP:
3259 		if (op->remap.prev)
3260 			vma_add_ufence(op->remap.prev, ufence);
3261 		if (op->remap.next)
3262 			vma_add_ufence(op->remap.next, ufence);
3263 		break;
3264 	case DRM_GPUVA_OP_UNMAP:
3265 		break;
3266 	case DRM_GPUVA_OP_PREFETCH:
3267 		vma_add_ufence(gpuva_to_vma(op->base.prefetch.va), ufence);
3268 		break;
3269 	default:
3270 		drm_warn(&vm->xe->drm, "NOT POSSIBLE\n");
3271 	}
3272 }
3273 
3274 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
3275 				   struct dma_fence *fence)
3276 {
3277 	struct xe_user_fence *ufence;
3278 	struct xe_vma_op *op;
3279 	int i;
3280 
3281 	ufence = find_ufence_get(vops->syncs, vops->num_syncs);
3282 	list_for_each_entry(op, &vops->list, link) {
3283 		if (ufence)
3284 			op_add_ufence(vm, op, ufence);
3285 
3286 		if (op->base.op == DRM_GPUVA_OP_UNMAP)
3287 			xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
3288 		else if (op->base.op == DRM_GPUVA_OP_REMAP)
3289 			xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
3290 				       fence);
3291 	}
3292 	if (ufence)
3293 		xe_sync_ufence_put(ufence);
3294 	if (fence) {
3295 		for (i = 0; i < vops->num_syncs; i++)
3296 			xe_sync_entry_signal(vops->syncs + i, fence);
3297 	}
3298 }
3299 
3300 static struct dma_fence *vm_bind_ioctl_ops_execute(struct xe_vm *vm,
3301 						   struct xe_vma_ops *vops)
3302 {
3303 	struct xe_validation_ctx ctx;
3304 	struct drm_exec exec;
3305 	struct dma_fence *fence;
3306 	int err = 0;
3307 
3308 	lockdep_assert_held_write(&vm->lock);
3309 
3310 	xe_validation_guard(&ctx, &vm->xe->val, &exec,
3311 			    ((struct xe_val_flags) {
3312 				    .interruptible = true,
3313 				    .exec_ignore_duplicates = true,
3314 			    }), err) {
3315 		err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
3316 		drm_exec_retry_on_contention(&exec);
3317 		xe_validation_retry_on_oom(&ctx, &err);
3318 		if (err)
3319 			return ERR_PTR(err);
3320 
3321 		xe_vm_set_validation_exec(vm, &exec);
3322 		fence = ops_execute(vm, vops);
3323 		xe_vm_set_validation_exec(vm, NULL);
3324 		if (IS_ERR(fence)) {
3325 			if (PTR_ERR(fence) == -ENODATA)
3326 				vm_bind_ioctl_ops_fini(vm, vops, NULL);
3327 			return fence;
3328 		}
3329 
3330 		vm_bind_ioctl_ops_fini(vm, vops, fence);
3331 	}
3332 
3333 	return err ? ERR_PTR(err) : fence;
3334 }
3335 ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
3336 
3337 #define SUPPORTED_FLAGS_STUB  \
3338 	(DRM_XE_VM_BIND_FLAG_READONLY | \
3339 	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
3340 	 DRM_XE_VM_BIND_FLAG_NULL | \
3341 	 DRM_XE_VM_BIND_FLAG_DUMPABLE | \
3342 	 DRM_XE_VM_BIND_FLAG_CHECK_PXP | \
3343 	 DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR | \
3344 	 DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
3345 
3346 #ifdef TEST_VM_OPS_ERROR
3347 #define SUPPORTED_FLAGS	(SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
3348 #else
3349 #define SUPPORTED_FLAGS	SUPPORTED_FLAGS_STUB
3350 #endif
3351 
3352 #define XE_64K_PAGE_MASK 0xffffull
3353 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
3354 
3355 static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
3356 				    struct drm_xe_vm_bind *args,
3357 				    struct drm_xe_vm_bind_op **bind_ops)
3358 {
3359 	int err;
3360 	int i;
3361 
3362 	if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
3363 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
3364 		return -EINVAL;
3365 
3366 	if (XE_IOCTL_DBG(xe, args->extensions))
3367 		return -EINVAL;
3368 
3369 	if (XE_IOCTL_DBG(xe, args->num_syncs > DRM_XE_MAX_SYNCS))
3370 		return -EINVAL;
3371 
3372 	if (args->num_binds > 1) {
3373 		u64 __user *bind_user =
3374 			u64_to_user_ptr(args->vector_of_binds);
3375 
3376 		*bind_ops = kvmalloc_array(args->num_binds,
3377 					   sizeof(struct drm_xe_vm_bind_op),
3378 					   GFP_KERNEL | __GFP_ACCOUNT |
3379 					   __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
3380 		if (!*bind_ops)
3381 			return args->num_binds > 1 ? -ENOBUFS : -ENOMEM;
3382 
3383 		err = copy_from_user(*bind_ops, bind_user,
3384 				     sizeof(struct drm_xe_vm_bind_op) *
3385 				     args->num_binds);
3386 		if (XE_IOCTL_DBG(xe, err)) {
3387 			err = -EFAULT;
3388 			goto free_bind_ops;
3389 		}
3390 	} else {
3391 		*bind_ops = &args->bind;
3392 	}
3393 
3394 	for (i = 0; i < args->num_binds; ++i) {
3395 		u64 range = (*bind_ops)[i].range;
3396 		u64 addr = (*bind_ops)[i].addr;
3397 		u32 op = (*bind_ops)[i].op;
3398 		u32 flags = (*bind_ops)[i].flags;
3399 		u32 obj = (*bind_ops)[i].obj;
3400 		u64 obj_offset = (*bind_ops)[i].obj_offset;
3401 		u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
3402 		bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
3403 		bool is_cpu_addr_mirror = flags &
3404 			DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
3405 		u16 pat_index = (*bind_ops)[i].pat_index;
3406 		u16 coh_mode;
3407 		bool comp_en;
3408 
3409 		if (XE_IOCTL_DBG(xe, is_cpu_addr_mirror &&
3410 				 (!xe_vm_in_fault_mode(vm) ||
3411 				 !IS_ENABLED(CONFIG_DRM_XE_GPUSVM)))) {
3412 			err = -EINVAL;
3413 			goto free_bind_ops;
3414 		}
3415 
3416 		if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
3417 			err = -EINVAL;
3418 			goto free_bind_ops;
3419 		}
3420 
3421 		pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
3422 		(*bind_ops)[i].pat_index = pat_index;
3423 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
3424 		comp_en = xe_pat_index_get_comp_en(xe, pat_index);
3425 		if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
3426 			err = -EINVAL;
3427 			goto free_bind_ops;
3428 		}
3429 
3430 		if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
3431 			err = -EINVAL;
3432 			goto free_bind_ops;
3433 		}
3434 
3435 		if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
3436 		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
3437 		    XE_IOCTL_DBG(xe, obj && (is_null || is_cpu_addr_mirror)) ||
3438 		    XE_IOCTL_DBG(xe, obj_offset && (is_null ||
3439 						    is_cpu_addr_mirror)) ||
3440 		    XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
3441 				 (is_null || is_cpu_addr_mirror)) ||
3442 		    XE_IOCTL_DBG(xe, !obj &&
3443 				 op == DRM_XE_VM_BIND_OP_MAP &&
3444 				 !is_null && !is_cpu_addr_mirror) ||
3445 		    XE_IOCTL_DBG(xe, !obj &&
3446 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
3447 		    XE_IOCTL_DBG(xe, addr &&
3448 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
3449 		    XE_IOCTL_DBG(xe, range &&
3450 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
3451 		    XE_IOCTL_DBG(xe, obj &&
3452 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
3453 		    XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
3454 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
3455 		    XE_IOCTL_DBG(xe, comp_en &&
3456 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
3457 		    XE_IOCTL_DBG(xe, op == DRM_XE_VM_BIND_OP_MAP_USERPTR &&
3458 				 !IS_ENABLED(CONFIG_DRM_GPUSVM)) ||
3459 		    XE_IOCTL_DBG(xe, obj &&
3460 				 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
3461 		    XE_IOCTL_DBG(xe, prefetch_region &&
3462 				 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
3463 		    XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
3464 				      /* Guard against undefined shift in BIT(prefetch_region) */
3465 				      (prefetch_region >= (sizeof(xe->info.mem_region_mask) * 8) ||
3466 				      !(BIT(prefetch_region) & xe->info.mem_region_mask)))) ||
3467 		    XE_IOCTL_DBG(xe, obj &&
3468 				 op == DRM_XE_VM_BIND_OP_UNMAP) ||
3469 		    XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) &&
3470 				 (!is_cpu_addr_mirror || op != DRM_XE_VM_BIND_OP_MAP))) {
3471 			err = -EINVAL;
3472 			goto free_bind_ops;
3473 		}
3474 
3475 		if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
3476 		    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
3477 		    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
3478 		    XE_IOCTL_DBG(xe, !range &&
3479 				 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
3480 			err = -EINVAL;
3481 			goto free_bind_ops;
3482 		}
3483 	}
3484 
3485 	return 0;
3486 
3487 free_bind_ops:
3488 	if (args->num_binds > 1)
3489 		kvfree(*bind_ops);
3490 	*bind_ops = NULL;
3491 	return err;
3492 }
3493 
3494 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
3495 				       struct xe_exec_queue *q,
3496 				       struct xe_sync_entry *syncs,
3497 				       int num_syncs)
3498 {
3499 	struct dma_fence *fence = NULL;
3500 	int i, err = 0;
3501 
3502 	if (num_syncs) {
3503 		fence = xe_sync_in_fence_get(syncs, num_syncs,
3504 					     to_wait_exec_queue(vm, q), vm);
3505 		if (IS_ERR(fence))
3506 			return PTR_ERR(fence);
3507 
3508 		for (i = 0; i < num_syncs; i++)
3509 			xe_sync_entry_signal(&syncs[i], fence);
3510 	}
3511 
3512 	dma_fence_put(fence);
3513 
3514 	return err;
3515 }
3516 
3517 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
3518 			    struct xe_exec_queue *q,
3519 			    struct xe_sync_entry *syncs, u32 num_syncs)
3520 {
3521 	memset(vops, 0, sizeof(*vops));
3522 	INIT_LIST_HEAD(&vops->list);
3523 	vops->vm = vm;
3524 	vops->q = q;
3525 	vops->syncs = syncs;
3526 	vops->num_syncs = num_syncs;
3527 	vops->flags = 0;
3528 }
3529 
3530 static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
3531 					u64 addr, u64 range, u64 obj_offset,
3532 					u16 pat_index, u32 op, u32 bind_flags)
3533 {
3534 	u16 coh_mode;
3535 	bool comp_en;
3536 
3537 	if (XE_IOCTL_DBG(xe, (bo->flags & XE_BO_FLAG_NO_COMPRESSION) &&
3538 			 xe_pat_index_get_comp_en(xe, pat_index)))
3539 		return -EINVAL;
3540 
3541 	if (XE_IOCTL_DBG(xe, range > xe_bo_size(bo)) ||
3542 	    XE_IOCTL_DBG(xe, obj_offset >
3543 			 xe_bo_size(bo) - range)) {
3544 		return -EINVAL;
3545 	}
3546 
3547 	/*
3548 	 * Some platforms require 64k VM_BIND alignment,
3549 	 * specifically those with XE_VRAM_FLAGS_NEED64K.
3550 	 *
3551 	 * Other platforms may have BO's set to 64k physical placement,
3552 	 * but can be mapped at 4k offsets anyway. This check is only
3553 	 * there for the former case.
3554 	 */
3555 	if ((bo->flags & XE_BO_FLAG_INTERNAL_64K) &&
3556 	    (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)) {
3557 		if (XE_IOCTL_DBG(xe, obj_offset &
3558 				 XE_64K_PAGE_MASK) ||
3559 		    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3560 		    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3561 			return -EINVAL;
3562 		}
3563 	}
3564 
3565 	coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
3566 	if (bo->cpu_caching) {
3567 		if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
3568 				 bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
3569 			return -EINVAL;
3570 		}
3571 	} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
3572 		/*
3573 		 * Imported dma-buf from a different device should
3574 		 * require 1way or 2way coherency since we don't know
3575 		 * how it was mapped on the CPU. Just assume it is
3576 		 * potentially cached on the CPU side.
3577 		 */
3578 		return -EINVAL;
3579 	}
3580 
3581 	/*
3582 	 * Ensures that imported buffer objects (dma-bufs) are not mapped
3583 	 * with a PAT index that enables compression.
3584 	 */
3585 	comp_en = xe_pat_index_get_comp_en(xe, pat_index);
3586 	if (XE_IOCTL_DBG(xe, bo->ttm.base.import_attach && comp_en))
3587 		return -EINVAL;
3588 
3589 	/* If a BO is protected it can only be mapped if the key is still valid */
3590 	if ((bind_flags & DRM_XE_VM_BIND_FLAG_CHECK_PXP) && xe_bo_is_protected(bo) &&
3591 	    op != DRM_XE_VM_BIND_OP_UNMAP && op != DRM_XE_VM_BIND_OP_UNMAP_ALL)
3592 		if (XE_IOCTL_DBG(xe, xe_pxp_bo_key_check(xe->pxp, bo) != 0))
3593 			return -ENOEXEC;
3594 
3595 	return 0;
3596 }
3597 
3598 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3599 {
3600 	struct xe_device *xe = to_xe_device(dev);
3601 	struct xe_file *xef = to_xe_file(file);
3602 	struct drm_xe_vm_bind *args = data;
3603 	struct drm_xe_sync __user *syncs_user;
3604 	struct xe_bo **bos = NULL;
3605 	struct drm_gpuva_ops **ops = NULL;
3606 	struct xe_vm *vm;
3607 	struct xe_exec_queue *q = NULL;
3608 	u32 num_syncs, num_ufence = 0;
3609 	struct xe_sync_entry *syncs = NULL;
3610 	struct drm_xe_vm_bind_op *bind_ops = NULL;
3611 	struct xe_vma_ops vops;
3612 	struct dma_fence *fence;
3613 	int err;
3614 	int i;
3615 
3616 	vm = xe_vm_lookup(xef, args->vm_id);
3617 	if (XE_IOCTL_DBG(xe, !vm))
3618 		return -EINVAL;
3619 
3620 	err = vm_bind_ioctl_check_args(xe, vm, args, &bind_ops);
3621 	if (err)
3622 		goto put_vm;
3623 
3624 	if (args->exec_queue_id) {
3625 		q = xe_exec_queue_lookup(xef, args->exec_queue_id);
3626 		if (XE_IOCTL_DBG(xe, !q)) {
3627 			err = -ENOENT;
3628 			goto free_bind_ops;
3629 		}
3630 
3631 		if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
3632 			err = -EINVAL;
3633 			goto put_exec_queue;
3634 		}
3635 	}
3636 
3637 	if (XE_IOCTL_DBG(xe, q && vm != q->user_vm)) {
3638 		err = -EINVAL;
3639 		goto put_exec_queue;
3640 	}
3641 
3642 	/* Ensure all UNMAPs visible */
3643 	xe_svm_flush(vm);
3644 
3645 	err = down_write_killable(&vm->lock);
3646 	if (err)
3647 		goto put_exec_queue;
3648 
3649 	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
3650 		err = -ENOENT;
3651 		goto release_vm_lock;
3652 	}
3653 
3654 	for (i = 0; i < args->num_binds; ++i) {
3655 		u64 range = bind_ops[i].range;
3656 		u64 addr = bind_ops[i].addr;
3657 
3658 		if (XE_IOCTL_DBG(xe, range > vm->size) ||
3659 		    XE_IOCTL_DBG(xe, addr > vm->size - range)) {
3660 			err = -EINVAL;
3661 			goto release_vm_lock;
3662 		}
3663 	}
3664 
3665 	if (args->num_binds) {
3666 		bos = kvcalloc(args->num_binds, sizeof(*bos),
3667 			       GFP_KERNEL | __GFP_ACCOUNT |
3668 			       __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
3669 		if (!bos) {
3670 			err = -ENOMEM;
3671 			goto release_vm_lock;
3672 		}
3673 
3674 		ops = kvcalloc(args->num_binds, sizeof(*ops),
3675 			       GFP_KERNEL | __GFP_ACCOUNT |
3676 			       __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
3677 		if (!ops) {
3678 			err = -ENOMEM;
3679 			goto free_bos;
3680 		}
3681 	}
3682 
3683 	for (i = 0; i < args->num_binds; ++i) {
3684 		struct drm_gem_object *gem_obj;
3685 		u64 range = bind_ops[i].range;
3686 		u64 addr = bind_ops[i].addr;
3687 		u32 obj = bind_ops[i].obj;
3688 		u64 obj_offset = bind_ops[i].obj_offset;
3689 		u16 pat_index = bind_ops[i].pat_index;
3690 		u32 op = bind_ops[i].op;
3691 		u32 bind_flags = bind_ops[i].flags;
3692 
3693 		if (!obj)
3694 			continue;
3695 
3696 		gem_obj = drm_gem_object_lookup(file, obj);
3697 		if (XE_IOCTL_DBG(xe, !gem_obj)) {
3698 			err = -ENOENT;
3699 			goto put_obj;
3700 		}
3701 		bos[i] = gem_to_xe_bo(gem_obj);
3702 
3703 		err = xe_vm_bind_ioctl_validate_bo(xe, bos[i], addr, range,
3704 						   obj_offset, pat_index, op,
3705 						   bind_flags);
3706 		if (err)
3707 			goto put_obj;
3708 	}
3709 
3710 	if (args->num_syncs) {
3711 		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3712 		if (!syncs) {
3713 			err = -ENOMEM;
3714 			goto put_obj;
3715 		}
3716 	}
3717 
3718 	syncs_user = u64_to_user_ptr(args->syncs);
3719 	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3720 		struct xe_exec_queue *__q = q ?: vm->q[0];
3721 
3722 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3723 					  &syncs_user[num_syncs],
3724 					  __q->ufence_syncobj,
3725 					  ++__q->ufence_timeline_value,
3726 					  (xe_vm_in_lr_mode(vm) ?
3727 					   SYNC_PARSE_FLAG_LR_MODE : 0) |
3728 					  (!args->num_binds ?
3729 					   SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
3730 		if (err)
3731 			goto free_syncs;
3732 
3733 		if (xe_sync_is_ufence(&syncs[num_syncs]))
3734 			num_ufence++;
3735 	}
3736 
3737 	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
3738 		err = -EINVAL;
3739 		goto free_syncs;
3740 	}
3741 
3742 	if (!args->num_binds) {
3743 		err = -ENODATA;
3744 		goto free_syncs;
3745 	}
3746 
3747 	xe_vma_ops_init(&vops, vm, q, syncs, num_syncs);
3748 	if (args->num_binds > 1)
3749 		vops.flags |= XE_VMA_OPS_ARRAY_OF_BINDS;
3750 	for (i = 0; i < args->num_binds; ++i) {
3751 		u64 range = bind_ops[i].range;
3752 		u64 addr = bind_ops[i].addr;
3753 		u32 op = bind_ops[i].op;
3754 		u32 flags = bind_ops[i].flags;
3755 		u64 obj_offset = bind_ops[i].obj_offset;
3756 		u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
3757 		u16 pat_index = bind_ops[i].pat_index;
3758 
3759 		ops[i] = vm_bind_ioctl_ops_create(vm, &vops, bos[i], obj_offset,
3760 						  addr, range, op, flags,
3761 						  prefetch_region, pat_index);
3762 		if (IS_ERR(ops[i])) {
3763 			err = PTR_ERR(ops[i]);
3764 			ops[i] = NULL;
3765 			goto unwind_ops;
3766 		}
3767 
3768 		err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops);
3769 		if (err)
3770 			goto unwind_ops;
3771 
3772 #ifdef TEST_VM_OPS_ERROR
3773 		if (flags & FORCE_OP_ERROR) {
3774 			vops.inject_error = true;
3775 			vm->xe->vm_inject_error_position =
3776 				(vm->xe->vm_inject_error_position + 1) %
3777 				FORCE_OP_ERROR_COUNT;
3778 		}
3779 #endif
3780 	}
3781 
3782 	/* Nothing to do */
3783 	if (list_empty(&vops.list)) {
3784 		err = -ENODATA;
3785 		goto unwind_ops;
3786 	}
3787 
3788 	err = xe_vma_ops_alloc(&vops, args->num_binds > 1);
3789 	if (err)
3790 		goto unwind_ops;
3791 
3792 	err = vm_bind_ioctl_ops_prefetch_ranges(vm, &vops);
3793 	if (err)
3794 		goto unwind_ops;
3795 
3796 	fence = vm_bind_ioctl_ops_execute(vm, &vops);
3797 	if (IS_ERR(fence))
3798 		err = PTR_ERR(fence);
3799 	else
3800 		dma_fence_put(fence);
3801 
3802 unwind_ops:
3803 	if (err && err != -ENODATA)
3804 		vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3805 	xe_vma_ops_fini(&vops);
3806 	for (i = args->num_binds - 1; i >= 0; --i)
3807 		if (ops[i])
3808 			drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
3809 free_syncs:
3810 	if (err == -ENODATA)
3811 		err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3812 	while (num_syncs--)
3813 		xe_sync_entry_cleanup(&syncs[num_syncs]);
3814 
3815 	kfree(syncs);
3816 put_obj:
3817 	for (i = 0; i < args->num_binds; ++i)
3818 		xe_bo_put(bos[i]);
3819 
3820 	kvfree(ops);
3821 free_bos:
3822 	kvfree(bos);
3823 release_vm_lock:
3824 	up_write(&vm->lock);
3825 put_exec_queue:
3826 	if (q)
3827 		xe_exec_queue_put(q);
3828 free_bind_ops:
3829 	if (args->num_binds > 1)
3830 		kvfree(bind_ops);
3831 put_vm:
3832 	xe_vm_put(vm);
3833 	return err;
3834 }
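
/*
 * Illustrative userspace sketch (not part of this file): a single synchronous
 * map bind through DRM_IOCTL_XE_VM_BIND. gpu_addr, bo_size and pat_index must
 * satisfy the alignment and PAT checks above; drmIoctl() from libdrm is
 * assumed and error handling is omitted.
 *
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm_id,
 *		.num_binds = 1,
 *		.bind = {
 *			.obj = bo_handle,
 *			.obj_offset = 0,
 *			.range = bo_size,
 *			.addr = gpu_addr,
 *			.op = DRM_XE_VM_BIND_OP_MAP,
 *			.pat_index = pat_index,
 *		},
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 */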
3835 
3836 /**
3837  * xe_vm_bind_kernel_bo - bind a kernel BO to a VM
3838  * @vm: VM to bind the BO to
3839  * @bo: BO to bind
3840  * @q: exec queue to use for the bind (optional)
3841  * @addr: address at which to bind the BO
3842  * @cache_lvl: PAT cache level to use
3843  *
3844  * Execute a VM bind map operation on a kernel-owned BO to bind it into a
3845  * kernel-owned VM.
3846  *
3847  * Return: a dma_fence to track the binding completion if the job to do so was
3848  * successfully submitted, an error pointer otherwise.
3849  */
3850 struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
3851 				       struct xe_exec_queue *q, u64 addr,
3852 				       enum xe_cache_level cache_lvl)
3853 {
3854 	struct xe_vma_ops vops;
3855 	struct drm_gpuva_ops *ops = NULL;
3856 	struct dma_fence *fence;
3857 	int err;
3858 
3859 	xe_bo_get(bo);
3860 	xe_vm_get(vm);
3861 	if (q)
3862 		xe_exec_queue_get(q);
3863 
3864 	down_write(&vm->lock);
3865 
3866 	xe_vma_ops_init(&vops, vm, q, NULL, 0);
3867 
3868 	ops = vm_bind_ioctl_ops_create(vm, &vops, bo, 0, addr, xe_bo_size(bo),
3869 				       DRM_XE_VM_BIND_OP_MAP, 0, 0,
3870 				       vm->xe->pat.idx[cache_lvl]);
3871 	if (IS_ERR(ops)) {
3872 		err = PTR_ERR(ops);
3873 		goto release_vm_lock;
3874 	}
3875 
3876 	err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
3877 	if (err)
3878 		goto release_vm_lock;
3879 
3880 	xe_assert(vm->xe, !list_empty(&vops.list));
3881 
3882 	err = xe_vma_ops_alloc(&vops, false);
3883 	if (err)
3884 		goto unwind_ops;
3885 
3886 	fence = vm_bind_ioctl_ops_execute(vm, &vops);
3887 	if (IS_ERR(fence))
3888 		err = PTR_ERR(fence);
3889 
3890 unwind_ops:
3891 	if (err && err != -ENODATA)
3892 		vm_bind_ioctl_ops_unwind(vm, &ops, 1);
3893 
3894 	xe_vma_ops_fini(&vops);
3895 	drm_gpuva_ops_free(&vm->gpuvm, ops);
3896 
3897 release_vm_lock:
3898 	up_write(&vm->lock);
3899 
3900 	if (q)
3901 		xe_exec_queue_put(q);
3902 	xe_vm_put(vm);
3903 	xe_bo_put(bo);
3904 
3905 	if (err)
3906 		fence = ERR_PTR(err);
3907 
3908 	return fence;
3909 }
3910 
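/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * how a hypothetical caller might bind a kernel BO and wait for the bind to
 * complete. The helper name, the NULL exec queue and the XE_CACHE_WB cache
 * level are assumptions made for the example only.
 */
static int __maybe_unused example_bind_kernel_bo(struct xe_vm *vm,
						 struct xe_bo *bo, u64 addr)
{
	struct dma_fence *fence;

	/* Submit the bind on the default queue with write-back caching */
	fence = xe_vm_bind_kernel_bo(vm, bo, NULL, addr, XE_CACHE_WB);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* Block, non-interruptibly, until the bind job has completed */
	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return 0;
}
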
3911 /**
3912  * xe_vm_lock() - Lock the vm's dma_resv object
3913  * @vm: The struct xe_vm whose lock is to be locked
3914  * @intr: Whether any wait for a contended lock should be interruptible
3915  *
3916  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3917  * contended lock was interrupted. If @intr is false, the function
3918  * always returns 0.
3919  */
3920 int xe_vm_lock(struct xe_vm *vm, bool intr)
3921 {
3922 	int ret;
3923 
3924 	if (intr)
3925 		ret = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3926 	else
3927 		ret = dma_resv_lock(xe_vm_resv(vm), NULL);
3928 
3929 	return ret;
3930 }
3931 
3932 /**
3933  * xe_vm_unlock() - Unlock the vm's dma_resv object
3934  * @vm: The struct xe_vm whose lock is to be released.
3935  *
3936  * Unlock a buffer object lock that was locked by xe_vm_lock().
3937  */
3938 void xe_vm_unlock(struct xe_vm *vm)
3939 {
3940 	dma_resv_unlock(xe_vm_resv(vm));
3941 }
3942 
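/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * the typical pattern for taking and releasing the VM's dma_resv lock with
 * xe_vm_lock()/xe_vm_unlock(). The helper name is an assumption made for the
 * example only.
 */
static int __maybe_unused example_vm_locked_section(struct xe_vm *vm)
{
	int err;

	/* Interruptible lock: may return -EINTR if a signal is pending */
	err = xe_vm_lock(vm, true);
	if (err)
		return err;

	/* ... operate on state protected by the VM's dma_resv here ... */

	xe_vm_unlock(vm);
	return 0;
}
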
3943 /**
3944  * xe_vm_range_tilemask_tlb_inval - Issue a TLB invalidation on this tilemask for an
3945  * xe_vm_range_tilemask_tlb_inval - Issue a TLB invalidation for an address
3946  * range on a set of tiles
3947  * @start: start address
3948  * @end: end address
3949  * @tile_mask: mask of tiles whose GTs should receive the TLB invalidation
3950  *
3951  * Issue a range-based TLB invalidation on the GTs of every tile set in @tile_mask.
3952  *
3953  * Return: 0 on success, negative error code otherwise.
3954  */
3955 int xe_vm_range_tilemask_tlb_inval(struct xe_vm *vm, u64 start,
3956 				   u64 end, u8 tile_mask)
3957 {
3958 	struct xe_tlb_inval_fence
3959 		fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
3960 	struct xe_tile *tile;
3961 	u32 fence_id = 0;
3962 	u8 id;
3963 	int err;
3964 
3965 	if (!tile_mask)
3966 		return 0;
3967 
3968 	for_each_tile(tile, vm->xe, id) {
3969 		if (!(tile_mask & BIT(id)))
3970 			continue;
3971 
3972 		xe_tlb_inval_fence_init(&tile->primary_gt->tlb_inval,
3973 					&fence[fence_id], true);
3974 
3975 		err = xe_tlb_inval_range(&tile->primary_gt->tlb_inval,
3976 					 &fence[fence_id], start, end,
3977 					 vm->usm.asid, NULL);
3978 		if (err)
3979 			goto wait;
3980 		++fence_id;
3981 
3982 		if (!tile->media_gt)
3983 			continue;
3984 
3985 		xe_tlb_inval_fence_init(&tile->media_gt->tlb_inval,
3986 					&fence[fence_id], true);
3987 
3988 		err = xe_tlb_inval_range(&tile->media_gt->tlb_inval,
3989 					 &fence[fence_id], start, end,
3990 					 vm->usm.asid, NULL);
3991 		if (err)
3992 			goto wait;
3993 		++fence_id;
3994 	}
3995 
3996 wait:
3997 	for (id = 0; id < fence_id; ++id)
3998 		xe_tlb_inval_fence_wait(&fence[id]);
3999 
4000 	return err;
4001 }
4002 
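/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * building a tile mask and issuing a range-based TLB invalidation. The helper
 * name and the "invalidate on every tile" policy are assumptions made for the
 * example only.
 */
static int __maybe_unused example_inval_range_all_tiles(struct xe_vm *vm,
							u64 start, u64 end)
{
	struct xe_tile *tile;
	u8 tile_mask = 0;
	u8 id;

	/* Select every tile on the device */
	for_each_tile(tile, vm->xe, id)
		tile_mask |= BIT(id);

	/* Blocks until all issued invalidation fences have signaled */
	return xe_vm_range_tilemask_tlb_inval(vm, start, end, tile_mask);
}
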
4003 /**
4004  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
4005  * @vma: VMA to invalidate
4006  *
4007  * Walks the page-table leaves, zeroing the entries owned by this VMA,
4008  * then issues a TLB invalidation and blocks until the invalidation has
4009  * completed.
4010  *
4011  * Return: 0 on success, negative error code otherwise.
4012  */
4013 int xe_vm_invalidate_vma(struct xe_vma *vma)
4014 {
4015 	struct xe_device *xe = xe_vma_vm(vma)->xe;
4016 	struct xe_vm *vm = xe_vma_vm(vma);
4017 	struct xe_tile *tile;
4018 	u8 tile_mask = 0;
4019 	int ret = 0;
4020 	u8 id;
4021 
4022 	xe_assert(xe, !xe_vma_is_null(vma));
4023 	xe_assert(xe, !xe_vma_is_cpu_addr_mirror(vma));
4024 	trace_xe_vma_invalidate(vma);
4025 
4026 	vm_dbg(&vm->xe->drm,
4027 	       "INVALIDATE: addr=0x%016llx, range=0x%016llx",
4028 		xe_vma_start(vma), xe_vma_size(vma));
4029 
4030 	/*
4031 	 * Check that we don't race with page-table updates; the
4032 	 * tile_invalidated update is safe.
4033 	 */
4034 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
4035 		if (xe_vma_is_userptr(vma)) {
4036 			lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 0) ||
4037 				       (lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
4038 					lockdep_is_held(&xe_vm_resv(vm)->lock.base)));
4039 
4040 			WARN_ON_ONCE(!mmu_interval_check_retry
4041 				     (&to_userptr_vma(vma)->userptr.notifier,
4042 				      to_userptr_vma(vma)->userptr.pages.notifier_seq));
4043 			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(vm),
4044 							     DMA_RESV_USAGE_BOOKKEEP));
4045 
4046 		} else {
4047 			xe_bo_assert_held(xe_vma_bo(vma));
4048 		}
4049 	}
4050 
4051 	for_each_tile(tile, xe, id)
4052 		if (xe_pt_zap_ptes(tile, vma))
4053 			tile_mask |= BIT(id);
4054 
4055 	xe_device_wmb(xe);
4056 
4057 	ret = xe_vm_range_tilemask_tlb_inval(xe_vma_vm(vma), xe_vma_start(vma),
4058 					     xe_vma_end(vma), tile_mask);
4059 
4060 	/* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
4061 	WRITE_ONCE(vma->tile_invalidated, vma->tile_mask);
4062 
4063 	return ret;
4064 }
4065 
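/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * invalidating the GPU mappings of a BO-backed VMA. The caller is assumed to
 * already hold the locks checked by the lockdep assertions above; the helper
 * name is an assumption made for the example only.
 */
static int __maybe_unused example_invalidate_bo_vma(struct xe_vma *vma)
{
	/* NULL and CPU-address-mirror VMAs are asserted against above */
	if (xe_vma_is_null(vma) || xe_vma_is_cpu_addr_mirror(vma))
		return -EINVAL;

	/* Zaps the PTEs, then blocks until the TLB invalidation completes */
	return xe_vm_invalidate_vma(vma);
}
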
4066 int xe_vm_validate_protected(struct xe_vm *vm)
4067 {
4068 	struct drm_gpuva *gpuva;
4069 	int err = 0;
4070 
4071 	if (!vm)
4072 		return -ENODEV;
4073 
4074 	mutex_lock(&vm->snap_mutex);
4075 
4076 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
4077 		struct xe_vma *vma = gpuva_to_vma(gpuva);
4078 		struct xe_bo *bo = vma->gpuva.gem.obj ?
4079 			gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
4080 
4081 		if (!bo)
4082 			continue;
4083 
4084 		if (xe_bo_is_protected(bo)) {
4085 			err = xe_pxp_bo_key_check(vm->xe->pxp, bo);
4086 			if (err)
4087 				break;
4088 		}
4089 	}
4090 
4091 	mutex_unlock(&vm->snap_mutex);
4092 	return err;
4093 }
4094 
4095 struct xe_vm_snapshot {
4096 	int uapi_flags;
4097 	unsigned long num_snaps;
4098 	struct {
4099 		u64 ofs, bo_ofs;
4100 		unsigned long len;
4101 #define XE_VM_SNAP_FLAG_USERPTR		BIT(0)
4102 #define XE_VM_SNAP_FLAG_READ_ONLY	BIT(1)
4103 #define XE_VM_SNAP_FLAG_IS_NULL		BIT(2)
4104 		unsigned long flags;
4105 		int uapi_mem_region;
4106 		int pat_index;
4107 		int cpu_caching;
4108 		struct xe_bo *bo;
4109 		void *data;
4110 		struct mm_struct *mm;
4111 	} snap[];
4112 };
4113 
4114 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
4115 {
4116 	unsigned long num_snaps = 0, i;
4117 	struct xe_vm_snapshot *snap = NULL;
4118 	struct drm_gpuva *gpuva;
4119 
4120 	if (!vm)
4121 		return NULL;
4122 
4123 	mutex_lock(&vm->snap_mutex);
4124 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
4125 		if (gpuva->flags & XE_VMA_DUMPABLE)
4126 			num_snaps++;
4127 	}
4128 
4129 	if (num_snaps)
4130 		snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
4131 	if (!snap) {
4132 		snap = num_snaps ? ERR_PTR(-ENOMEM) : ERR_PTR(-ENODEV);
4133 		goto out_unlock;
4134 	}
4135 
4136 	if (vm->flags & XE_VM_FLAG_FAULT_MODE)
4137 		snap->uapi_flags |= DRM_XE_VM_CREATE_FLAG_FAULT_MODE;
4138 	if (vm->flags & XE_VM_FLAG_LR_MODE)
4139 		snap->uapi_flags |= DRM_XE_VM_CREATE_FLAG_LR_MODE;
4140 	if (vm->flags & XE_VM_FLAG_SCRATCH_PAGE)
4141 		snap->uapi_flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
4142 
4143 	snap->num_snaps = num_snaps;
4144 	i = 0;
4145 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
4146 		struct xe_vma *vma = gpuva_to_vma(gpuva);
4147 		struct xe_bo *bo = vma->gpuva.gem.obj ?
4148 			gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
4149 
4150 		if (!(gpuva->flags & XE_VMA_DUMPABLE))
4151 			continue;
4152 
4153 		snap->snap[i].ofs = xe_vma_start(vma);
4154 		snap->snap[i].len = xe_vma_size(vma);
4155 		snap->snap[i].flags = xe_vma_read_only(vma) ?
4156 			XE_VM_SNAP_FLAG_READ_ONLY : 0;
4157 		snap->snap[i].pat_index = vma->attr.pat_index;
4158 		if (bo) {
4159 			snap->snap[i].cpu_caching = bo->cpu_caching;
4160 			snap->snap[i].bo = xe_bo_get(bo);
4161 			snap->snap[i].bo_ofs = xe_vma_bo_offset(vma);
4162 			switch (bo->ttm.resource->mem_type) {
4163 			case XE_PL_SYSTEM:
4164 			case XE_PL_TT:
4165 				snap->snap[i].uapi_mem_region = 0;
4166 				break;
4167 			case XE_PL_VRAM0:
4168 				snap->snap[i].uapi_mem_region = 1;
4169 				break;
4170 			case XE_PL_VRAM1:
4171 				snap->snap[i].uapi_mem_region = 2;
4172 				break;
4173 			}
4174 		} else if (xe_vma_is_userptr(vma)) {
4175 			struct mm_struct *mm =
4176 				to_userptr_vma(vma)->userptr.notifier.mm;
4177 
4178 			if (mmget_not_zero(mm))
4179 				snap->snap[i].mm = mm;
4180 			else
4181 				snap->snap[i].data = ERR_PTR(-EFAULT);
4182 
4183 			snap->snap[i].bo_ofs = xe_vma_userptr(vma);
4184 			snap->snap[i].flags |= XE_VM_SNAP_FLAG_USERPTR;
4185 			snap->snap[i].uapi_mem_region = 0;
4186 		} else if (xe_vma_is_null(vma)) {
4187 			snap->snap[i].flags |= XE_VM_SNAP_FLAG_IS_NULL;
4188 			snap->snap[i].uapi_mem_region = -1;
4189 		} else {
4190 			snap->snap[i].data = ERR_PTR(-ENOENT);
4191 			snap->snap[i].uapi_mem_region = -1;
4192 		}
4193 		i++;
4194 	}
4195 
4196 out_unlock:
4197 	mutex_unlock(&vm->snap_mutex);
4198 	return snap;
4199 }
4200 
4201 void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
4202 {
4203 	if (IS_ERR_OR_NULL(snap))
4204 		return;
4205 
4206 	for (int i = 0; i < snap->num_snaps; i++) {
4207 		struct xe_bo *bo = snap->snap[i].bo;
4208 		int err;
4209 
4210 		if (IS_ERR(snap->snap[i].data) ||
4211 		    snap->snap[i].flags & XE_VM_SNAP_FLAG_IS_NULL)
4212 			continue;
4213 
4214 		snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER);
4215 		if (!snap->snap[i].data) {
4216 			snap->snap[i].data = ERR_PTR(-ENOMEM);
4217 			goto cleanup_bo;
4218 		}
4219 
4220 		if (bo) {
4221 			err = xe_bo_read(bo, snap->snap[i].bo_ofs,
4222 					 snap->snap[i].data, snap->snap[i].len);
4223 		} else {
4224 			void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
4225 
4226 			kthread_use_mm(snap->snap[i].mm);
4227 			if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len))
4228 				err = 0;
4229 			else
4230 				err = -EFAULT;
4231 			kthread_unuse_mm(snap->snap[i].mm);
4232 
4233 			mmput(snap->snap[i].mm);
4234 			snap->snap[i].mm = NULL;
4235 		}
4236 
4237 		if (err) {
4238 			kvfree(snap->snap[i].data);
4239 			snap->snap[i].data = ERR_PTR(err);
4240 		}
4241 
4242 cleanup_bo:
4243 		xe_bo_put(bo);
4244 		snap->snap[i].bo = NULL;
4245 	}
4246 }
4247 
4248 void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
4249 {
4250 	unsigned long i, j;
4251 
4252 	if (IS_ERR_OR_NULL(snap)) {
4253 		drm_printf(p, "[0].error: %li\n", PTR_ERR(snap));
4254 		return;
4255 	}
4256 
4257 	drm_printf(p, "VM.uapi_flags: 0x%x\n", snap->uapi_flags);
4258 	for (i = 0; i < snap->num_snaps; i++) {
4259 		drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
4260 
4261 		drm_printf(p, "[%llx].properties: %s|%s|mem_region=0x%lx|pat_index=%d|cpu_caching=%d\n",
4262 			   snap->snap[i].ofs,
4263 			   snap->snap[i].flags & XE_VM_SNAP_FLAG_READ_ONLY ?
4264 			   "read_only" : "read_write",
4265 			   snap->snap[i].flags & XE_VM_SNAP_FLAG_IS_NULL ?
4266 			   "null_sparse" :
4267 			   snap->snap[i].flags & XE_VM_SNAP_FLAG_USERPTR ?
4268 			   "userptr" : "bo",
4269 			   snap->snap[i].uapi_mem_region == -1 ? 0 :
4270 			   BIT(snap->snap[i].uapi_mem_region),
4271 			   snap->snap[i].pat_index,
4272 			   snap->snap[i].cpu_caching);
4273 
4274 		if (IS_ERR(snap->snap[i].data)) {
4275 			drm_printf(p, "[%llx].error: %li\n", snap->snap[i].ofs,
4276 				   PTR_ERR(snap->snap[i].data));
4277 			continue;
4278 		}
4279 
4280 		if (snap->snap[i].flags & XE_VM_SNAP_FLAG_IS_NULL)
4281 			continue;
4282 
4283 		drm_printf(p, "[%llx].data: ", snap->snap[i].ofs);
4284 
4285 		for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
4286 			u32 *val = snap->snap[i].data + j;
4287 			char dumped[ASCII85_BUFSZ];
4288 
4289 			drm_puts(p, ascii85_encode(*val, dumped));
4290 		}
4291 
4292 		drm_puts(p, "\n");
4293 
4294 		if (drm_coredump_printer_is_full(p))
4295 			return;
4296 	}
4297 }
4298 
4299 void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
4300 {
4301 	unsigned long i;
4302 
4303 	if (IS_ERR_OR_NULL(snap))
4304 		return;
4305 
4306 	for (i = 0; i < snap->num_snaps; i++) {
4307 		if (!IS_ERR(snap->snap[i].data))
4308 			kvfree(snap->snap[i].data);
4309 		xe_bo_put(snap->snap[i].bo);
4310 		if (snap->snap[i].mm)
4311 			mmput(snap->snap[i].mm);
4312 	}
4313 	kvfree(snap);
4314 }
4315 
4316 /**
4317  * xe_vma_need_vram_for_atomic - Check if VMA needs VRAM migration for atomic operations
4318  * @xe: Pointer to the Xe device structure
4319  * @vma: Pointer to the virtual memory area (VMA) structure
4320  * @is_atomic: True when called from the pagefault path for an atomic access
4321  *
4322  * This function determines whether the given VMA needs to be migrated to
4323  * VRAM in order to perform atomic GPU operations.
4324  *
4325  * Return:
4326  *   1        - Migration to VRAM is required
4327  *   0        - Migration is not required
4328  *   -EACCES  - Invalid access for atomic memory attr
4329  *
4330  */
4331 int xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma, bool is_atomic)
4332 {
4333 	u32 atomic_access = xe_vma_bo(vma) ? xe_vma_bo(vma)->attr.atomic_access :
4334 					     vma->attr.atomic_access;
4335 
4336 	if (!IS_DGFX(xe) || !is_atomic)
4337 		return false;
4338 
4339 	/*
4340 	 * NOTE: The checks implemented here are platform-specific. For
4341 	 * instance, on a device supporting CXL atomics, these would ideally
4342 	 * work universally without additional handling.
4343 	 */
4344 	switch (atomic_access) {
4345 	case DRM_XE_ATOMIC_DEVICE:
4346 		return !xe->info.has_device_atomics_on_smem;
4347 
4348 	case DRM_XE_ATOMIC_CPU:
4349 		return -EACCES;
4350 
4351 	case DRM_XE_ATOMIC_UNDEFINED:
4352 	case DRM_XE_ATOMIC_GLOBAL:
4353 	default:
4354 		return 1;
4355 	}
4356 }
4357 
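/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * interpreting the tri-state return value of xe_vma_need_vram_for_atomic()
 * in a hypothetical fault-handling path. The helper name and the bool output
 * parameter are assumptions made for the example only.
 */
static int __maybe_unused example_check_atomic_placement(struct xe_device *xe,
							  struct xe_vma *vma,
							  bool is_atomic,
							  bool *need_vram)
{
	int ret = xe_vma_need_vram_for_atomic(xe, vma, is_atomic);

	/* -EACCES: the atomic access itself is invalid for this VMA */
	if (ret < 0)
		return ret;

	/* 1: migrate to VRAM before servicing the fault; 0: leave in place */
	*need_vram = ret;
	return 0;
}
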
4358 static int xe_vm_alloc_vma(struct xe_vm *vm,
4359 			   struct drm_gpuvm_map_req *map_req,
4360 			   bool is_madvise)
4361 {
4362 	struct xe_vma_ops vops;
4363 	struct drm_gpuva_ops *ops = NULL;
4364 	struct drm_gpuva_op *__op;
4365 	unsigned int vma_flags = 0;
4366 	bool remap_op = false;
4367 	struct xe_vma_mem_attr tmp_attr = {};
4368 	u16 default_pat;
4369 	int err;
4370 
4371 	lockdep_assert_held_write(&vm->lock);
4372 
4373 	if (is_madvise)
4374 		ops = drm_gpuvm_madvise_ops_create(&vm->gpuvm, map_req);
4375 	else
4376 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, map_req);
4377 
4378 	if (IS_ERR(ops))
4379 		return PTR_ERR(ops);
4380 
4381 	if (list_empty(&ops->list)) {
4382 		err = 0;
4383 		goto free_ops;
4384 	}
4385 
4386 	drm_gpuva_for_each_op(__op, ops) {
4387 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
4388 		struct xe_vma *vma = NULL;
4389 
4390 		if (!is_madvise) {
4391 			if (__op->op == DRM_GPUVA_OP_UNMAP) {
4392 				vma = gpuva_to_vma(op->base.unmap.va);
4393 				XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma));
4394 				default_pat = vma->attr.default_pat_index;
4395 				vma_flags = vma->gpuva.flags;
4396 			}
4397 
4398 			if (__op->op == DRM_GPUVA_OP_REMAP) {
4399 				vma = gpuva_to_vma(op->base.remap.unmap->va);
4400 				default_pat = vma->attr.default_pat_index;
4401 				vma_flags = vma->gpuva.flags;
4402 			}
4403 
4404 			if (__op->op == DRM_GPUVA_OP_MAP) {
4405 				op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
4406 				op->map.pat_index = default_pat;
4407 			}
4408 		} else {
4409 			if (__op->op == DRM_GPUVA_OP_REMAP) {
4410 				vma = gpuva_to_vma(op->base.remap.unmap->va);
4411 				xe_assert(vm->xe, !remap_op);
4412 				xe_assert(vm->xe, xe_vma_has_no_bo(vma));
4413 				remap_op = true;
4414 				vma_flags = vma->gpuva.flags;
4415 			}
4416 
4417 			if (__op->op == DRM_GPUVA_OP_MAP) {
4418 				xe_assert(vm->xe, remap_op);
4419 				remap_op = false;
4420 				/*
4421 				 * In case of madvise ops DRM_GPUVA_OP_MAP is
4422 				 * always after DRM_GPUVA_OP_REMAP, so ensure
4423 				 * to propagate the flags from the vma we're
4424 				 * unmapping.
4425 				 */
4426 				op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
4427 			}
4428 		}
4429 		print_op(vm->xe, __op);
4430 	}
4431 
4432 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
4433 
4434 	if (is_madvise)
4435 		vops.flags |= XE_VMA_OPS_FLAG_MADVISE;
4436 	else
4437 		vops.flags |= XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP;
4438 
4439 	err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
4440 	if (err)
4441 		goto unwind_ops;
4442 
4443 	xe_vm_lock(vm, false);
4444 
4445 	drm_gpuva_for_each_op(__op, ops) {
4446 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
4447 		struct xe_vma *vma;
4448 
4449 		if (__op->op == DRM_GPUVA_OP_UNMAP) {
4450 			vma = gpuva_to_vma(op->base.unmap.va);
4451 			/* There should be no unmap for madvise */
4452 			if (is_madvise)
4453 				XE_WARN_ON("UNEXPECTED UNMAP");
4454 
4455 			xe_vma_destroy(vma, NULL);
4456 		} else if (__op->op == DRM_GPUVA_OP_REMAP) {
4457 			vma = gpuva_to_vma(op->base.remap.unmap->va);
4458 			/* For madvise ops, store the attributes of the REMAP-unmapped
4459 			 * VMA so they can be assigned to the newly created MAP VMA.
4460 			 */
4461 			if (is_madvise)
4462 				xe_vma_mem_attr_copy(&tmp_attr, &vma->attr);
4463 
4464 			xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), NULL);
4465 		} else if (__op->op == DRM_GPUVA_OP_MAP) {
4466 			vma = op->map.vma;
4467 			/* For madvise ops, a MAP is always preceded by a REMAP, so
4468 			 * tmp_attr has already been populated with sane values, making
4469 			 * it safe to copy them to the new VMA.
4470 			 */
4471 			if (is_madvise)
4472 				xe_vma_mem_attr_copy(&vma->attr, &tmp_attr);
4473 		}
4474 	}
4475 
4476 	xe_vm_unlock(vm);
4477 	drm_gpuva_ops_free(&vm->gpuvm, ops);
4478 	xe_vma_mem_attr_fini(&tmp_attr);
4479 	return 0;
4480 
4481 unwind_ops:
4482 	vm_bind_ioctl_ops_unwind(vm, &ops, 1);
4483 free_ops:
4484 	drm_gpuva_ops_free(&vm->gpuvm, ops);
4485 	return err;
4486 }
4487 
4488 /**
4489  * xe_vm_alloc_madvise_vma - Allocate VMA's with madvise ops
4490  * xe_vm_alloc_madvise_vma - Allocate VMAs with madvise ops
4491  * @start: Starting input address
4492  * @range: Size of the input range
4493  *
4494  * This function splits existing vma to create new vma for user provided input range
4495  * This function splits existing VMAs to create new VMAs for the user-provided input range.
4496  * Return: 0 on success, negative error code otherwise.
4497  */
4498 int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
4499 {
4500 	struct drm_gpuvm_map_req map_req = {
4501 		.map.va.addr = start,
4502 		.map.va.range = range,
4503 	};
4504 
4505 	lockdep_assert_held_write(&vm->lock);
4506 
4507 	vm_dbg(&vm->xe->drm, "MADVISE_OPS_CREATE: addr=0x%016llx, size=0x%016llx", start, range);
4508 
4509 	return xe_vm_alloc_vma(vm, &map_req, true);
4510 }
4511 
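/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * carving out VMAs for a madvise range. xe_vm_alloc_madvise_vma() must be
 * called with the VM lock held for writing; the helper name is an assumption
 * made for the example only.
 */
static int __maybe_unused example_split_for_madvise(struct xe_vm *vm,
						    u64 start, u64 range)
{
	int err;

	down_write(&vm->lock);
	err = xe_vm_alloc_madvise_vma(vm, start, range);
	up_write(&vm->lock);

	return err;
}
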
4512 static bool is_cpu_addr_vma_with_default_attr(struct xe_vma *vma)
4513 {
4514 	return vma && xe_vma_is_cpu_addr_mirror(vma) &&
4515 	       xe_vma_has_default_mem_attrs(vma);
4516 }
4517 
4518 /**
4519  * xe_vm_find_cpu_addr_mirror_vma_range - Extend a VMA range to include adjacent CPU-mirrored VMAs
4520  * @vm: VM to search within
4521  * @start: Input/output pointer to the starting address of the range
4522  * @end: Input/output pointer to the end address of the range
4523  *
4524  * Given a range defined by @start and @end, this function checks the VMAs
4525  * immediately before and after the range. If those neighboring VMAs are
4526  * CPU-address-mirrored and have default memory attributes, the function
4527  * updates @start and @end to include them. This extended range can then
4528  * be used for merging or other operations that require a unified VMA.
4529  *
4530  * The function does not perform the merge itself; it only computes the
4531  * mergeable boundaries.
4532  */
4533 void xe_vm_find_cpu_addr_mirror_vma_range(struct xe_vm *vm, u64 *start, u64 *end)
4534 {
4535 	struct xe_vma *prev, *next;
4536 
4537 	lockdep_assert_held(&vm->lock);
4538 
4539 	if (*start >= SZ_4K) {
4540 		prev = xe_vm_find_vma_by_addr(vm, *start - SZ_4K);
4541 		if (is_cpu_addr_vma_with_default_attr(prev))
4542 			*start = xe_vma_start(prev);
4543 	}
4544 
4545 	if (*end < vm->size) {
4546 		next = xe_vm_find_vma_by_addr(vm, *end + 1);
4547 		if (is_cpu_addr_vma_with_default_attr(next))
4548 			*end = xe_vma_end(next);
4549 	}
4550 }
4551 
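/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * how the in/out parameters of xe_vm_find_cpu_addr_mirror_vma_range() are
 * updated in place. The helper name is an assumption made for the example
 * only; the VM lock is assumed to be held by the caller.
 */
static u64 __maybe_unused example_mergeable_span(struct xe_vm *vm,
						 u64 addr, u64 range)
{
	u64 start = addr, end = addr + range;

	/* May move @start down and @end up to cover mergeable neighbors */
	xe_vm_find_cpu_addr_mirror_vma_range(vm, &start, &end);

	/* Size of the range a subsequent allocation could cover */
	return end - start;
}
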
4552 /**
4553  * xe_vm_alloc_cpu_addr_mirror_vma - Allocate CPU addr mirror vma
4554  * @vm: Pointer to the xe_vm structure
4555  * @start: Starting input address
4556  * @range: Size of the input range
4557  *
4558  * This function splits/merges existing VMAs to create new VMAs for the user-provided input range.
4559  *
4560  * Return: 0 on success, negative error code otherwise.
4561  */
4562 int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
4563 {
4564 	struct drm_gpuvm_map_req map_req = {
4565 		.map.va.addr = start,
4566 		.map.va.range = range,
4567 	};
4568 
4569 	lockdep_assert_held_write(&vm->lock);
4570 
4571 	vm_dbg(&vm->xe->drm, "CPU_ADDR_MIRROR_VMA_OPS_CREATE: addr=0x%016llx, size=0x%016llx",
4572 	       start, range);
4573 
4574 	return xe_vm_alloc_vma(vm, &map_req, false);
4575 }
4576 
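/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * widening a range to its mergeable boundaries and then allocating a CPU
 * address mirror VMA over it. The helper name is an assumption made for the
 * example only; the VM lock must be held for writing.
 */
static int __maybe_unused example_alloc_mirror_vma(struct xe_vm *vm,
						   u64 addr, u64 range)
{
	u64 start = addr, end = addr + range;
	int err;

	down_write(&vm->lock);
	/* Grow the range to swallow adjacent default-attribute mirror VMAs */
	xe_vm_find_cpu_addr_mirror_vma_range(vm, &start, &end);
	err = xe_vm_alloc_cpu_addr_mirror_vma(vm, start, end - start);
	up_write(&vm->lock);

	return err;
}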
4577