xref: /linux/drivers/gpu/drm/xe/xe_vm.c (revision 6dfafbd0299a60bfb5d5e277fdf100037c7ded07)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_vm.h"
7 
8 #include <linux/dma-fence-array.h>
9 #include <linux/nospec.h>
10 
11 #include <drm/drm_drv.h>
12 #include <drm/drm_exec.h>
13 #include <drm/drm_print.h>
14 #include <drm/ttm/ttm_tt.h>
15 #include <uapi/drm/xe_drm.h>
16 #include <linux/ascii85.h>
17 #include <linux/delay.h>
18 #include <linux/kthread.h>
19 #include <linux/mm.h>
20 #include <linux/swap.h>
21 
22 #include <generated/xe_wa_oob.h>
23 
24 #include "regs/xe_gtt_defs.h"
25 #include "xe_assert.h"
26 #include "xe_bo.h"
27 #include "xe_device.h"
28 #include "xe_drm_client.h"
29 #include "xe_exec_queue.h"
30 #include "xe_migrate.h"
31 #include "xe_pat.h"
32 #include "xe_pm.h"
33 #include "xe_preempt_fence.h"
34 #include "xe_pt.h"
35 #include "xe_pxp.h"
36 #include "xe_res_cursor.h"
37 #include "xe_sriov_vf.h"
38 #include "xe_svm.h"
39 #include "xe_sync.h"
40 #include "xe_tile.h"
41 #include "xe_tlb_inval.h"
42 #include "xe_trace_bo.h"
43 #include "xe_wa.h"
44 
45 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
46 {
47 	return vm->gpuvm.r_obj;
48 }
49 
50 /**
51  * xe_vm_drm_exec_lock() - Lock the vm's resv with a drm_exec transaction
52  * @vm: The vm whose resv is to be locked.
53  * @exec: The drm_exec transaction.
54  *
55  * Helper to lock the vm's resv as part of a drm_exec transaction.
56  *
57  * Return: %0 on success. See drm_exec_lock_obj() for error codes.
58  */
59 int xe_vm_drm_exec_lock(struct xe_vm *vm, struct drm_exec *exec)
60 {
61 	return drm_exec_lock_obj(exec, xe_vm_obj(vm));
62 }
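
/*
 * Example (illustrative sketch, not part of the driver): callers typically
 * take the vm resv inside a drm_exec transaction and retry on contention,
 * roughly like:
 *
 *	struct drm_exec exec;
 *	int err = 0;
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		err = xe_vm_drm_exec_lock(vm, &exec);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err)
 *			break;
 *	}
 *	if (!err) {
 *		... operate with the vm resv held ...
 *	}
 *	drm_exec_fini(&exec);
 */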
63 
64 static bool preempt_fences_waiting(struct xe_vm *vm)
65 {
66 	struct xe_exec_queue *q;
67 
68 	lockdep_assert_held(&vm->lock);
69 	xe_vm_assert_held(vm);
70 
71 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
72 		if (!q->lr.pfence ||
73 		    test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
74 			     &q->lr.pfence->flags)) {
75 			return true;
76 		}
77 	}
78 
79 	return false;
80 }
81 
82 static void free_preempt_fences(struct list_head *list)
83 {
84 	struct list_head *link, *next;
85 
86 	list_for_each_safe(link, next, list)
87 		xe_preempt_fence_free(to_preempt_fence_from_link(link));
88 }
89 
90 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
91 				unsigned int *count)
92 {
93 	lockdep_assert_held(&vm->lock);
94 	xe_vm_assert_held(vm);
95 
96 	if (*count >= vm->preempt.num_exec_queues)
97 		return 0;
98 
99 	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
100 		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
101 
102 		if (IS_ERR(pfence))
103 			return PTR_ERR(pfence);
104 
105 		list_move_tail(xe_preempt_fence_link(pfence), list);
106 	}
107 
108 	return 0;
109 }
110 
111 static int wait_for_existing_preempt_fences(struct xe_vm *vm)
112 {
113 	struct xe_exec_queue *q;
114 	bool vf_migration = IS_SRIOV_VF(vm->xe) &&
115 		xe_sriov_vf_migration_supported(vm->xe);
116 	signed long wait_time = vf_migration ? HZ / 5 : MAX_SCHEDULE_TIMEOUT;
117 
118 	xe_vm_assert_held(vm);
119 
120 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
121 		if (q->lr.pfence) {
122 			long timeout;
123 
124 			timeout = dma_fence_wait_timeout(q->lr.pfence, false,
125 							 wait_time);
126 			if (!timeout) {
127 				xe_assert(vm->xe, vf_migration);
128 				return -EAGAIN;
129 			}
130 
131 			/* Only -ETIME on fence indicates VM needs to be killed */
132 			if (timeout < 0 || q->lr.pfence->error == -ETIME)
133 				return -ETIME;
134 
135 			dma_fence_put(q->lr.pfence);
136 			q->lr.pfence = NULL;
137 		}
138 	}
139 
140 	return 0;
141 }
142 
143 static bool xe_vm_is_idle(struct xe_vm *vm)
144 {
145 	struct xe_exec_queue *q;
146 
147 	xe_vm_assert_held(vm);
148 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
149 		if (!xe_exec_queue_is_idle(q))
150 			return false;
151 	}
152 
153 	return true;
154 }
155 
156 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
157 {
158 	struct list_head *link;
159 	struct xe_exec_queue *q;
160 
161 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
162 		struct dma_fence *fence;
163 
164 		link = list->next;
165 		xe_assert(vm->xe, link != list);
166 
167 		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
168 					     q, q->lr.context,
169 					     ++q->lr.seqno);
170 		dma_fence_put(q->lr.pfence);
171 		q->lr.pfence = fence;
172 	}
173 }
174 
175 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
176 {
177 	struct xe_exec_queue *q;
178 	int err;
179 
180 	xe_bo_assert_held(bo);
181 
182 	if (!vm->preempt.num_exec_queues)
183 		return 0;
184 
185 	err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
186 	if (err)
187 		return err;
188 
189 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
190 		if (q->lr.pfence) {
191 			dma_resv_add_fence(bo->ttm.base.resv,
192 					   q->lr.pfence,
193 					   DMA_RESV_USAGE_BOOKKEEP);
194 		}
195 
196 	return 0;
197 }
198 
199 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
200 						struct drm_exec *exec)
201 {
202 	struct xe_exec_queue *q;
203 
204 	lockdep_assert_held(&vm->lock);
205 	xe_vm_assert_held(vm);
206 
207 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
208 		q->ops->resume(q);
209 
210 		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence,
211 					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
212 	}
213 }
214 
215 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
216 {
217 	struct drm_gpuvm_exec vm_exec = {
218 		.vm = &vm->gpuvm,
219 		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
220 		.num_fences = 1,
221 	};
222 	struct drm_exec *exec = &vm_exec.exec;
223 	struct xe_validation_ctx ctx;
224 	struct dma_fence *pfence;
225 	int err;
226 	bool wait;
227 
228 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
229 
230 	down_write(&vm->lock);
231 	err = xe_validation_exec_lock(&ctx, &vm_exec, &vm->xe->val);
232 	if (err)
233 		goto out_up_write;
234 
235 	pfence = xe_preempt_fence_create(q, q->lr.context,
236 					 ++q->lr.seqno);
237 	if (IS_ERR(pfence)) {
238 		err = PTR_ERR(pfence);
239 		goto out_fini;
240 	}
241 
242 	list_add(&q->lr.link, &vm->preempt.exec_queues);
243 	++vm->preempt.num_exec_queues;
244 	q->lr.pfence = pfence;
245 
246 	xe_svm_notifier_lock(vm);
247 
248 	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
249 				 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
250 
251 	/*
252 	 * Check to see if a preemption on the VM or a userptr invalidation is
253 	 * in flight; if so, trigger this preempt fence to sync state with the
254 	 * other preempt fences on the VM.
255 	 */
256 	wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
257 	if (wait)
258 		dma_fence_enable_sw_signaling(pfence);
259 
260 	xe_svm_notifier_unlock(vm);
261 
262 out_fini:
263 	xe_validation_ctx_fini(&ctx);
264 out_up_write:
265 	up_write(&vm->lock);
266 
267 	return err;
268 }
269 ALLOW_ERROR_INJECTION(xe_vm_add_compute_exec_queue, ERRNO);
270 
271 /**
272  * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
273  * @vm: The VM.
274  * @q: The exec_queue
275  *
276  * Note that this function might be called multiple times on the same queue.
277  */
278 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
279 {
280 	if (!xe_vm_in_preempt_fence_mode(vm))
281 		return;
282 
283 	down_write(&vm->lock);
284 	if (!list_empty(&q->lr.link)) {
285 		list_del_init(&q->lr.link);
286 		--vm->preempt.num_exec_queues;
287 	}
288 	if (q->lr.pfence) {
289 		dma_fence_enable_sw_signaling(q->lr.pfence);
290 		dma_fence_put(q->lr.pfence);
291 		q->lr.pfence = NULL;
292 	}
293 	up_write(&vm->lock);
294 }
295 
296 #define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
297 
298 /**
299  * xe_vm_kill() - VM Kill
300  * @vm: The VM.
301  * @unlocked: Flag indicating the VM's dma-resv is not held
302  *
303  * Kill the VM by setting the banned flag, indicating the VM is no longer available
304  * for use. If in preempt fence mode, also kill all exec queues attached to the VM.
305  */
306 void xe_vm_kill(struct xe_vm *vm, bool unlocked)
307 {
308 	struct xe_exec_queue *q;
309 
310 	lockdep_assert_held(&vm->lock);
311 
312 	if (unlocked)
313 		xe_vm_lock(vm, false);
314 
315 	vm->flags |= XE_VM_FLAG_BANNED;
316 	trace_xe_vm_kill(vm);
317 
318 	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
319 		q->ops->kill(q);
320 
321 	if (unlocked)
322 		xe_vm_unlock(vm);
323 
324 	/* TODO: Inform user the VM is banned */
325 }
326 
327 static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
328 {
329 	struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
330 	struct drm_gpuva *gpuva;
331 	int ret;
332 
333 	lockdep_assert_held(&vm->lock);
334 	drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
335 		list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
336 			       &vm->rebind_list);
337 
338 	if (!try_wait_for_completion(&vm->xe->pm_block))
339 		return -EAGAIN;
340 
341 	ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false, exec);
342 	if (ret)
343 		return ret;
344 
345 	vm_bo->evicted = false;
346 	return 0;
347 }
348 
349 /**
350  * xe_vm_validate_rebind() - Validate buffer objects and rebind vmas
351  * @vm: The vm for which we are rebinding.
352  * @exec: The struct drm_exec with the locked GEM objects.
353  * @num_fences: The number of fences to reserve for the operation, not
354  * including rebinds and validations.
355  *
356  * Validates all evicted gem objects and rebinds their vmas. Note that
357  * rebindings may cause evictions and hence the validation-rebind
358  * sequence is rerun until there are no more objects to validate.
359  *
360  * Return: 0 on success, negative error code on error. In particular,
361  * may return -EINTR or -ERESTARTSYS if interrupted, and -EDEADLK if
362  * the drm_exec transaction needs to be restarted.
363  */
364 int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
365 			  unsigned int num_fences)
366 {
367 	struct drm_gem_object *obj;
368 	unsigned long index;
369 	int ret;
370 
371 	do {
372 		ret = drm_gpuvm_validate(&vm->gpuvm, exec);
373 		if (ret)
374 			return ret;
375 
376 		ret = xe_vm_rebind(vm, false);
377 		if (ret)
378 			return ret;
379 	} while (!list_empty(&vm->gpuvm.evict.list));
380 
381 	drm_exec_for_each_locked_object(exec, index, obj) {
382 		ret = dma_resv_reserve_fences(obj->resv, num_fences);
383 		if (ret)
384 			return ret;
385 	}
386 
387 	return 0;
388 }
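
/*
 * Illustrative sketch (assumes a drm_exec loop set up by the caller):
 * xe_vm_validate_rebind() is intended to run inside a
 * drm_exec_until_all_locked() block after the vm and its external objects
 * have been prepared, e.g.:
 *
 *	drm_exec_until_all_locked(&exec) {
 *		err = drm_gpuvm_prepare_vm(&vm->gpuvm, &exec, 0);
 *		if (!err)
 *			err = drm_gpuvm_prepare_objects(&vm->gpuvm, &exec, 0);
 *		if (!err)
 *			err = xe_vm_validate_rebind(vm, &exec, num_fences);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err)
 *			break;
 *	}
 *
 * mirroring the pattern used by xe_preempt_work_begin() below.
 */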
389 
390 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
391 				 bool *done)
392 {
393 	int err;
394 
395 	err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0);
396 	if (err)
397 		return err;
398 
399 	if (xe_vm_is_idle(vm)) {
400 		vm->preempt.rebind_deactivated = true;
401 		*done = true;
402 		return 0;
403 	}
404 
405 	if (!preempt_fences_waiting(vm)) {
406 		*done = true;
407 		return 0;
408 	}
409 
410 	err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0);
411 	if (err)
412 		return err;
413 
414 	err = wait_for_existing_preempt_fences(vm);
415 	if (err)
416 		return err;
417 
418 	/*
419 	 * Add validation and rebinding to the locking loop since both can
420 	 * cause evictions which may require blocking dma_resv locks.
421 	 * The fence reservation here is intended for the new preempt fences
422 	 * we attach at the end of the rebind work.
423 	 */
424 	return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
425 }
426 
427 static bool vm_suspend_rebind_worker(struct xe_vm *vm)
428 {
429 	struct xe_device *xe = vm->xe;
430 	bool ret = false;
431 
432 	mutex_lock(&xe->rebind_resume_lock);
433 	if (!try_wait_for_completion(&vm->xe->pm_block)) {
434 		ret = true;
435 		list_move_tail(&vm->preempt.pm_activate_link, &xe->rebind_resume_list);
436 	}
437 	mutex_unlock(&xe->rebind_resume_lock);
438 
439 	return ret;
440 }
441 
442 /**
443  * xe_vm_resume_rebind_worker() - Resume the rebind worker.
444  * @vm: The vm whose preempt worker to resume.
445  *
446  * Resume a preempt worker that was previously suspended by
447  * vm_suspend_rebind_worker().
448  */
449 void xe_vm_resume_rebind_worker(struct xe_vm *vm)
450 {
451 	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
452 }
453 
454 static void preempt_rebind_work_func(struct work_struct *w)
455 {
456 	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
457 	struct xe_validation_ctx ctx;
458 	struct drm_exec exec;
459 	unsigned int fence_count = 0;
460 	LIST_HEAD(preempt_fences);
461 	int err = 0;
462 	long wait;
463 	int __maybe_unused tries = 0;
464 
465 	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
466 	trace_xe_vm_rebind_worker_enter(vm);
467 
468 	down_write(&vm->lock);
469 
470 	if (xe_vm_is_closed_or_banned(vm)) {
471 		up_write(&vm->lock);
472 		trace_xe_vm_rebind_worker_exit(vm);
473 		return;
474 	}
475 
476 retry:
477 	if (!try_wait_for_completion(&vm->xe->pm_block) && vm_suspend_rebind_worker(vm)) {
478 		up_write(&vm->lock);
479 		/* We don't actually block here, but we can't make progress either. */
480 		xe_pm_might_block_on_suspend();
481 		return;
482 	}
483 
484 	if (xe_vm_userptr_check_repin(vm)) {
485 		err = xe_vm_userptr_pin(vm);
486 		if (err)
487 			goto out_unlock_outer;
488 	}
489 
490 	err = xe_validation_ctx_init(&ctx, &vm->xe->val, &exec,
491 				     (struct xe_val_flags) {.interruptible = true});
492 	if (err)
493 		goto out_unlock_outer;
494 
495 	drm_exec_until_all_locked(&exec) {
496 		bool done = false;
497 
498 		err = xe_preempt_work_begin(&exec, vm, &done);
499 		drm_exec_retry_on_contention(&exec);
500 		xe_validation_retry_on_oom(&ctx, &err);
501 		if (err || done) {
502 			xe_validation_ctx_fini(&ctx);
503 			goto out_unlock_outer;
504 		}
505 	}
506 
507 	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
508 	if (err)
509 		goto out_unlock;
510 
511 	xe_vm_set_validation_exec(vm, &exec);
512 	err = xe_vm_rebind(vm, true);
513 	xe_vm_set_validation_exec(vm, NULL);
514 	if (err)
515 		goto out_unlock;
516 
517 	/* Wait on rebinds and munmap style VM unbinds */
518 	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
519 				     DMA_RESV_USAGE_KERNEL,
520 				     false, MAX_SCHEDULE_TIMEOUT);
521 	if (wait <= 0) {
522 		err = -ETIME;
523 		goto out_unlock;
524 	}
525 
526 #define retry_required(__tries, __vm) \
527 	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
528 	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
529 	__xe_vm_userptr_needs_repin(__vm))
530 
531 	xe_svm_notifier_lock(vm);
532 	if (retry_required(tries, vm)) {
533 		xe_svm_notifier_unlock(vm);
534 		err = -EAGAIN;
535 		goto out_unlock;
536 	}
537 
538 #undef retry_required
539 
540 	spin_lock(&vm->xe->ttm.lru_lock);
541 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
542 	spin_unlock(&vm->xe->ttm.lru_lock);
543 
544 	/* Point of no return. */
545 	arm_preempt_fences(vm, &preempt_fences);
546 	resume_and_reinstall_preempt_fences(vm, &exec);
547 	xe_svm_notifier_unlock(vm);
548 
549 out_unlock:
550 	xe_validation_ctx_fini(&ctx);
551 out_unlock_outer:
552 	if (err == -EAGAIN) {
553 		trace_xe_vm_rebind_worker_retry(vm);
554 
555 		/*
556 		 * We can't block in workers on a VF which supports migration,
557 		 * because doing so can block the VF post-migration workers from
558 		 * getting scheduled.
559 		 */
560 		if (IS_SRIOV_VF(vm->xe) &&
561 		    xe_sriov_vf_migration_supported(vm->xe)) {
562 			up_write(&vm->lock);
563 			xe_vm_queue_rebind_worker(vm);
564 			return;
565 		}
566 
567 		goto retry;
568 	}
569 
570 	if (err) {
571 		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
572 		xe_vm_kill(vm, true);
573 	}
574 	up_write(&vm->lock);
575 
576 	free_preempt_fences(&preempt_fences);
577 
578 	trace_xe_vm_rebind_worker_exit(vm);
579 }
580 
581 static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
582 {
583 	int i;
584 
585 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) {
586 		if (!vops->pt_update_ops[i].num_ops)
587 			continue;
588 
589 		vops->pt_update_ops[i].ops =
590 			kmalloc_array(vops->pt_update_ops[i].num_ops,
591 				      sizeof(*vops->pt_update_ops[i].ops),
592 				      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
593 		if (!vops->pt_update_ops[i].ops)
594 			return array_of_binds ? -ENOBUFS : -ENOMEM;
595 	}
596 
597 	return 0;
598 }
599 ALLOW_ERROR_INJECTION(xe_vma_ops_alloc, ERRNO);
600 
601 static void xe_vma_svm_prefetch_op_fini(struct xe_vma_op *op)
602 {
603 	struct xe_vma *vma;
604 
605 	vma = gpuva_to_vma(op->base.prefetch.va);
606 
607 	if (op->base.op == DRM_GPUVA_OP_PREFETCH && xe_vma_is_cpu_addr_mirror(vma))
608 		xa_destroy(&op->prefetch_range.range);
609 }
610 
611 static void xe_vma_svm_prefetch_ops_fini(struct xe_vma_ops *vops)
612 {
613 	struct xe_vma_op *op;
614 
615 	if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH))
616 		return;
617 
618 	list_for_each_entry(op, &vops->list, link)
619 		xe_vma_svm_prefetch_op_fini(op);
620 }
621 
622 static void xe_vma_ops_fini(struct xe_vma_ops *vops)
623 {
624 	int i;
625 
626 	xe_vma_svm_prefetch_ops_fini(vops);
627 
628 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
629 		kfree(vops->pt_update_ops[i].ops);
630 }
631 
632 static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask, int inc_val)
633 {
634 	int i;
635 
636 	if (!inc_val)
637 		return;
638 
639 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
640 		if (BIT(i) & tile_mask)
641 			vops->pt_update_ops[i].num_ops += inc_val;
642 }
643 
644 #define XE_VMA_CREATE_MASK (		    \
645 	XE_VMA_READ_ONLY |		    \
646 	XE_VMA_DUMPABLE |		    \
647 	XE_VMA_SYSTEM_ALLOCATOR |           \
648 	DRM_GPUVA_SPARSE |		    \
649 	XE_VMA_MADV_AUTORESET)
650 
651 static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
652 				  u8 tile_mask)
653 {
654 	INIT_LIST_HEAD(&op->link);
655 	op->tile_mask = tile_mask;
656 	op->base.op = DRM_GPUVA_OP_MAP;
657 	op->base.map.va.addr = vma->gpuva.va.addr;
658 	op->base.map.va.range = vma->gpuva.va.range;
659 	op->base.map.gem.obj = vma->gpuva.gem.obj;
660 	op->base.map.gem.offset = vma->gpuva.gem.offset;
661 	op->map.vma = vma;
662 	op->map.immediate = true;
663 	op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK;
664 }
665 
666 static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
667 				u8 tile_mask)
668 {
669 	struct xe_vma_op *op;
670 
671 	op = kzalloc(sizeof(*op), GFP_KERNEL);
672 	if (!op)
673 		return -ENOMEM;
674 
675 	xe_vm_populate_rebind(op, vma, tile_mask);
676 	list_add_tail(&op->link, &vops->list);
677 	xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1);
678 
679 	return 0;
680 }
681 
682 static struct dma_fence *ops_execute(struct xe_vm *vm,
683 				     struct xe_vma_ops *vops);
684 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
685 			    struct xe_exec_queue *q,
686 			    struct xe_sync_entry *syncs, u32 num_syncs);
687 
688 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
689 {
690 	struct dma_fence *fence;
691 	struct xe_vma *vma, *next;
692 	struct xe_vma_ops vops;
693 	struct xe_vma_op *op, *next_op;
694 	int err, i;
695 
696 	lockdep_assert_held(&vm->lock);
697 	if ((xe_vm_in_lr_mode(vm) && !rebind_worker) ||
698 	    list_empty(&vm->rebind_list))
699 		return 0;
700 
701 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
702 	for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
703 		vops.pt_update_ops[i].wait_vm_bookkeep = true;
704 
705 	xe_vm_assert_held(vm);
706 	list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
707 		xe_assert(vm->xe, vma->tile_present);
708 
709 		if (rebind_worker)
710 			trace_xe_vma_rebind_worker(vma);
711 		else
712 			trace_xe_vma_rebind_exec(vma);
713 
714 		err = xe_vm_ops_add_rebind(&vops, vma,
715 					   vma->tile_present);
716 		if (err)
717 			goto free_ops;
718 	}
719 
720 	err = xe_vma_ops_alloc(&vops, false);
721 	if (err)
722 		goto free_ops;
723 
724 	fence = ops_execute(vm, &vops);
725 	if (IS_ERR(fence)) {
726 		err = PTR_ERR(fence);
727 	} else {
728 		dma_fence_put(fence);
729 		list_for_each_entry_safe(vma, next, &vm->rebind_list,
730 					 combined_links.rebind)
731 			list_del_init(&vma->combined_links.rebind);
732 	}
733 free_ops:
734 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
735 		list_del(&op->link);
736 		kfree(op);
737 	}
738 	xe_vma_ops_fini(&vops);
739 
740 	return err;
741 }
742 
743 struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_mask)
744 {
745 	struct dma_fence *fence = NULL;
746 	struct xe_vma_ops vops;
747 	struct xe_vma_op *op, *next_op;
748 	struct xe_tile *tile;
749 	u8 id;
750 	int err;
751 
752 	lockdep_assert_held(&vm->lock);
753 	xe_vm_assert_held(vm);
754 	xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
755 
756 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
757 	vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT;
758 	for_each_tile(tile, vm->xe, id) {
759 		vops.pt_update_ops[id].wait_vm_bookkeep = true;
760 		vops.pt_update_ops[tile->id].q =
761 			xe_migrate_exec_queue(tile->migrate);
762 	}
763 
764 	err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
765 	if (err)
766 		return ERR_PTR(err);
767 
768 	err = xe_vma_ops_alloc(&vops, false);
769 	if (err) {
770 		fence = ERR_PTR(err);
771 		goto free_ops;
772 	}
773 
774 	fence = ops_execute(vm, &vops);
775 
776 free_ops:
777 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
778 		list_del(&op->link);
779 		kfree(op);
780 	}
781 	xe_vma_ops_fini(&vops);
782 
783 	return fence;
784 }
785 
786 static void xe_vm_populate_range_rebind(struct xe_vma_op *op,
787 					struct xe_vma *vma,
788 					struct xe_svm_range *range,
789 					u8 tile_mask)
790 {
791 	INIT_LIST_HEAD(&op->link);
792 	op->tile_mask = tile_mask;
793 	op->base.op = DRM_GPUVA_OP_DRIVER;
794 	op->subop = XE_VMA_SUBOP_MAP_RANGE;
795 	op->map_range.vma = vma;
796 	op->map_range.range = range;
797 }
798 
799 static int
800 xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops,
801 			   struct xe_vma *vma,
802 			   struct xe_svm_range *range,
803 			   u8 tile_mask)
804 {
805 	struct xe_vma_op *op;
806 
807 	op = kzalloc(sizeof(*op), GFP_KERNEL);
808 	if (!op)
809 		return -ENOMEM;
810 
811 	xe_vm_populate_range_rebind(op, vma, range, tile_mask);
812 	list_add_tail(&op->link, &vops->list);
813 	xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1);
814 
815 	return 0;
816 }
817 
818 /**
819  * xe_vm_range_rebind() - VM range (re)bind
820  * @vm: The VM which the range belongs to.
821  * @vma: The VMA which the range belongs to.
822  * @range: SVM range to rebind.
823  * @tile_mask: Tile mask to bind the range to.
824  *
825  * (re)bind SVM range setting up GPU page tables for the range.
826  *
827  * Return: dma fence for rebind to signal completion on success, ERR_PTR on
828  * failure
829  */
830 struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
831 				     struct xe_vma *vma,
832 				     struct xe_svm_range *range,
833 				     u8 tile_mask)
834 {
835 	struct dma_fence *fence = NULL;
836 	struct xe_vma_ops vops;
837 	struct xe_vma_op *op, *next_op;
838 	struct xe_tile *tile;
839 	u8 id;
840 	int err;
841 
842 	lockdep_assert_held(&vm->lock);
843 	xe_vm_assert_held(vm);
844 	xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
845 	xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
846 
847 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
848 	vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT;
849 	for_each_tile(tile, vm->xe, id) {
850 		vops.pt_update_ops[id].wait_vm_bookkeep = true;
851 		vops.pt_update_ops[tile->id].q =
852 			xe_migrate_exec_queue(tile->migrate);
853 	}
854 
855 	err = xe_vm_ops_add_range_rebind(&vops, vma, range, tile_mask);
856 	if (err)
857 		return ERR_PTR(err);
858 
859 	err = xe_vma_ops_alloc(&vops, false);
860 	if (err) {
861 		fence = ERR_PTR(err);
862 		goto free_ops;
863 	}
864 
865 	fence = ops_execute(vm, &vops);
866 
867 free_ops:
868 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
869 		list_del(&op->link);
870 		kfree(op);
871 	}
872 	xe_vma_ops_fini(&vops);
873 
874 	return fence;
875 }
876 
877 static void xe_vm_populate_range_unbind(struct xe_vma_op *op,
878 					struct xe_svm_range *range)
879 {
880 	INIT_LIST_HEAD(&op->link);
881 	op->tile_mask = range->tile_present;
882 	op->base.op = DRM_GPUVA_OP_DRIVER;
883 	op->subop = XE_VMA_SUBOP_UNMAP_RANGE;
884 	op->unmap_range.range = range;
885 }
886 
887 static int
888 xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops,
889 			   struct xe_svm_range *range)
890 {
891 	struct xe_vma_op *op;
892 
893 	op = kzalloc(sizeof(*op), GFP_KERNEL);
894 	if (!op)
895 		return -ENOMEM;
896 
897 	xe_vm_populate_range_unbind(op, range);
898 	list_add_tail(&op->link, &vops->list);
899 	xe_vma_ops_incr_pt_update_ops(vops, range->tile_present, 1);
900 
901 	return 0;
902 }
903 
904 /**
905  * xe_vm_range_unbind() - VM range unbind
906  * @vm: The VM which the range belongs to.
907  * @range: SVM range to unbind.
908  *
909  * Unbind SVM range removing the GPU page tables for the range.
910  *
911  * Return: dma fence for unbind to signal completion on success, ERR_PTR on
912  * failure
913  */
914 struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
915 				     struct xe_svm_range *range)
916 {
917 	struct dma_fence *fence = NULL;
918 	struct xe_vma_ops vops;
919 	struct xe_vma_op *op, *next_op;
920 	struct xe_tile *tile;
921 	u8 id;
922 	int err;
923 
924 	lockdep_assert_held(&vm->lock);
925 	xe_vm_assert_held(vm);
926 	xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
927 
928 	if (!range->tile_present)
929 		return dma_fence_get_stub();
930 
931 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
932 	for_each_tile(tile, vm->xe, id) {
933 		vops.pt_update_ops[id].wait_vm_bookkeep = true;
934 		vops.pt_update_ops[tile->id].q =
935 			xe_migrate_exec_queue(tile->migrate);
936 	}
937 
938 	err = xe_vm_ops_add_range_unbind(&vops, range);
939 	if (err)
940 		return ERR_PTR(err);
941 
942 	err = xe_vma_ops_alloc(&vops, false);
943 	if (err) {
944 		fence = ERR_PTR(err);
945 		goto free_ops;
946 	}
947 
948 	fence = ops_execute(vm, &vops);
949 
950 free_ops:
951 	list_for_each_entry_safe(op, next_op, &vops.list, link) {
952 		list_del(&op->link);
953 		kfree(op);
954 	}
955 	xe_vma_ops_fini(&vops);
956 
957 	return fence;
958 }
959 
960 static void xe_vma_free(struct xe_vma *vma)
961 {
962 	if (xe_vma_is_userptr(vma))
963 		kfree(to_userptr_vma(vma));
964 	else
965 		kfree(vma);
966 }
967 
968 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
969 				    struct xe_bo *bo,
970 				    u64 bo_offset_or_userptr,
971 				    u64 start, u64 end,
972 				    struct xe_vma_mem_attr *attr,
973 				    unsigned int flags)
974 {
975 	struct xe_vma *vma;
976 	struct xe_tile *tile;
977 	u8 id;
978 	bool is_null = (flags & DRM_GPUVA_SPARSE);
979 	bool is_cpu_addr_mirror = (flags & XE_VMA_SYSTEM_ALLOCATOR);
980 
981 	xe_assert(vm->xe, start < end);
982 	xe_assert(vm->xe, end < vm->size);
983 
984 	/*
985 	 * Allocate the vma and ensure that the xe_vma_is_userptr() return
986 	 * value matches the type that was allocated.
987 	 */
988 	if (!bo && !is_null && !is_cpu_addr_mirror) {
989 		struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
990 
991 		if (!uvma)
992 			return ERR_PTR(-ENOMEM);
993 
994 		vma = &uvma->vma;
995 	} else {
996 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
997 		if (!vma)
998 			return ERR_PTR(-ENOMEM);
999 
1000 		if (bo)
1001 			vma->gpuva.gem.obj = &bo->ttm.base;
1002 	}
1003 
1004 	INIT_LIST_HEAD(&vma->combined_links.rebind);
1005 
1006 	INIT_LIST_HEAD(&vma->gpuva.gem.entry);
1007 	vma->gpuva.vm = &vm->gpuvm;
1008 	vma->gpuva.va.addr = start;
1009 	vma->gpuva.va.range = end - start + 1;
1010 	vma->gpuva.flags = flags;
1011 
1012 	for_each_tile(tile, vm->xe, id)
1013 		vma->tile_mask |= 0x1 << id;
1014 
1015 	if (vm->xe->info.has_atomic_enable_pte_bit)
1016 		vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
1017 
1018 	vma->attr = *attr;
1019 
1020 	if (bo) {
1021 		struct drm_gpuvm_bo *vm_bo;
1022 
1023 		xe_bo_assert_held(bo);
1024 
1025 		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
1026 		if (IS_ERR(vm_bo)) {
1027 			xe_vma_free(vma);
1028 			return ERR_CAST(vm_bo);
1029 		}
1030 
1031 		drm_gpuvm_bo_extobj_add(vm_bo);
1032 		drm_gem_object_get(&bo->ttm.base);
1033 		vma->gpuva.gem.offset = bo_offset_or_userptr;
1034 		drm_gpuva_link(&vma->gpuva, vm_bo);
1035 		drm_gpuvm_bo_put(vm_bo);
1036 	} else /* userptr or null */ {
1037 		if (!is_null && !is_cpu_addr_mirror) {
1038 			struct xe_userptr_vma *uvma = to_userptr_vma(vma);
1039 			u64 size = end - start + 1;
1040 			int err;
1041 
1042 			vma->gpuva.gem.offset = bo_offset_or_userptr;
1043 
1044 			err = xe_userptr_setup(uvma, xe_vma_userptr(vma), size);
1045 			if (err) {
1046 				xe_vma_free(vma);
1047 				return ERR_PTR(err);
1048 			}
1049 		}
1050 
1051 		xe_vm_get(vm);
1052 	}
1053 
1054 	return vma;
1055 }
1056 
1057 static void xe_vma_destroy_late(struct xe_vma *vma)
1058 {
1059 	struct xe_vm *vm = xe_vma_vm(vma);
1060 
1061 	if (vma->ufence) {
1062 		xe_sync_ufence_put(vma->ufence);
1063 		vma->ufence = NULL;
1064 	}
1065 
1066 	if (xe_vma_is_userptr(vma)) {
1067 		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
1068 
1069 		xe_userptr_remove(uvma);
1070 		xe_vm_put(vm);
1071 	} else if (xe_vma_is_null(vma) || xe_vma_is_cpu_addr_mirror(vma)) {
1072 		xe_vm_put(vm);
1073 	} else {
1074 		xe_bo_put(xe_vma_bo(vma));
1075 	}
1076 
1077 	xe_vma_free(vma);
1078 }
1079 
1080 static void vma_destroy_work_func(struct work_struct *w)
1081 {
1082 	struct xe_vma *vma =
1083 		container_of(w, struct xe_vma, destroy_work);
1084 
1085 	xe_vma_destroy_late(vma);
1086 }
1087 
1088 static void vma_destroy_cb(struct dma_fence *fence,
1089 			   struct dma_fence_cb *cb)
1090 {
1091 	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
1092 
1093 	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
1094 	queue_work(system_unbound_wq, &vma->destroy_work);
1095 }
1096 
1097 static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
1098 {
1099 	struct xe_vm *vm = xe_vma_vm(vma);
1100 
1101 	lockdep_assert_held_write(&vm->lock);
1102 	xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
1103 
1104 	if (xe_vma_is_userptr(vma)) {
1105 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
1106 		xe_userptr_destroy(to_userptr_vma(vma));
1107 	} else if (!xe_vma_is_null(vma) && !xe_vma_is_cpu_addr_mirror(vma)) {
1108 		xe_bo_assert_held(xe_vma_bo(vma));
1109 
1110 		drm_gpuva_unlink(&vma->gpuva);
1111 	}
1112 
1113 	xe_vm_assert_held(vm);
1114 	if (fence) {
1115 		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
1116 						 vma_destroy_cb);
1117 
1118 		if (ret) {
1119 			XE_WARN_ON(ret != -ENOENT);
1120 			xe_vma_destroy_late(vma);
1121 		}
1122 	} else {
1123 		xe_vma_destroy_late(vma);
1124 	}
1125 }
1126 
1127 /**
1128  * xe_vm_lock_vma() - drm_exec utility to lock a vma
1129  * @exec: The drm_exec object we're currently locking for.
1130  * @vma: The vma for which we want to lock the vm resv and any attached
1131  * object's resv.
1132  *
1133  * Return: 0 on success, negative error code on error. In particular
1134  * may return -EDEADLK on WW transaction contention and -EINTR if
1135  * an interruptible wait is terminated by a signal.
1136  */
1137 int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
1138 {
1139 	struct xe_vm *vm = xe_vma_vm(vma);
1140 	struct xe_bo *bo = xe_vma_bo(vma);
1141 	int err;
1142 
1143 	XE_WARN_ON(!vm);
1144 
1145 	err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
1146 	if (!err && bo && !bo->vm)
1147 		err = drm_exec_lock_obj(exec, &bo->ttm.base);
1148 
1149 	return err;
1150 }
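
/*
 * Illustrative sketch (not part of the driver): the helper is meant to be
 * called from within a drm_exec loop, retrying on contention:
 *
 *	drm_exec_until_all_locked(&exec) {
 *		err = xe_vm_lock_vma(&exec, vma);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err)
 *			break;
 *	}
 *
 * xe_vma_destroy_unlocked() below is an in-tree user of this pattern.
 */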
1151 
1152 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
1153 {
1154 	struct xe_device *xe = xe_vma_vm(vma)->xe;
1155 	struct xe_validation_ctx ctx;
1156 	struct drm_exec exec;
1157 	int err = 0;
1158 
1159 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
1160 		err = xe_vm_lock_vma(&exec, vma);
1161 		drm_exec_retry_on_contention(&exec);
1162 		if (XE_WARN_ON(err))
1163 			break;
1164 		xe_vma_destroy(vma, NULL);
1165 	}
1166 	xe_assert(xe, !err);
1167 }
1168 
1169 struct xe_vma *
1170 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
1171 {
1172 	struct drm_gpuva *gpuva;
1173 
1174 	lockdep_assert_held(&vm->lock);
1175 
1176 	if (xe_vm_is_closed_or_banned(vm))
1177 		return NULL;
1178 
1179 	xe_assert(vm->xe, start + range <= vm->size);
1180 
1181 	gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
1182 
1183 	return gpuva ? gpuva_to_vma(gpuva) : NULL;
1184 }
1185 
1186 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
1187 {
1188 	int err;
1189 
1190 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1191 	lockdep_assert_held(&vm->lock);
1192 
1193 	mutex_lock(&vm->snap_mutex);
1194 	err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
1195 	mutex_unlock(&vm->snap_mutex);
1196 	XE_WARN_ON(err);	/* Shouldn't be possible */
1197 
1198 	return err;
1199 }
1200 
1201 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
1202 {
1203 	xe_assert(vm->xe, xe_vma_vm(vma) == vm);
1204 	lockdep_assert_held(&vm->lock);
1205 
1206 	mutex_lock(&vm->snap_mutex);
1207 	drm_gpuva_remove(&vma->gpuva);
1208 	mutex_unlock(&vm->snap_mutex);
1209 	if (vm->usm.last_fault_vma == vma)
1210 		vm->usm.last_fault_vma = NULL;
1211 }
1212 
1213 static struct drm_gpuva_op *xe_vm_op_alloc(void)
1214 {
1215 	struct xe_vma_op *op;
1216 
1217 	op = kzalloc(sizeof(*op), GFP_KERNEL);
1218 
1219 	if (unlikely(!op))
1220 		return NULL;
1221 
1222 	return &op->base;
1223 }
1224 
1225 static void xe_vm_free(struct drm_gpuvm *gpuvm);
1226 
1227 static const struct drm_gpuvm_ops gpuvm_ops = {
1228 	.op_alloc = xe_vm_op_alloc,
1229 	.vm_bo_validate = xe_gpuvm_validate,
1230 	.vm_free = xe_vm_free,
1231 };
1232 
1233 static u64 pde_encode_pat_index(u16 pat_index)
1234 {
1235 	u64 pte = 0;
1236 
1237 	if (pat_index & BIT(0))
1238 		pte |= XE_PPGTT_PTE_PAT0;
1239 
1240 	if (pat_index & BIT(1))
1241 		pte |= XE_PPGTT_PTE_PAT1;
1242 
1243 	return pte;
1244 }
1245 
1246 static u64 pte_encode_pat_index(u16 pat_index, u32 pt_level)
1247 {
1248 	u64 pte = 0;
1249 
1250 	if (pat_index & BIT(0))
1251 		pte |= XE_PPGTT_PTE_PAT0;
1252 
1253 	if (pat_index & BIT(1))
1254 		pte |= XE_PPGTT_PTE_PAT1;
1255 
1256 	if (pat_index & BIT(2)) {
1257 		if (pt_level)
1258 			pte |= XE_PPGTT_PDE_PDPE_PAT2;
1259 		else
1260 			pte |= XE_PPGTT_PTE_PAT2;
1261 	}
1262 
1263 	if (pat_index & BIT(3))
1264 		pte |= XELPG_PPGTT_PTE_PAT3;
1265 
1266 	if (pat_index & (BIT(4)))
1267 		pte |= XE2_PPGTT_PTE_PAT4;
1268 
1269 	return pte;
1270 }
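
/*
 * Worked example (illustration only, derived from the encoding above): for
 * pat_index = 0b00110 (bits 1 and 2 set),
 *
 *	pte_encode_pat_index(0b110, 0) == XE_PPGTT_PTE_PAT1 | XE_PPGTT_PTE_PAT2;
 *
 * while for a non-leaf level (pt_level > 0) bit 2 selects
 * XE_PPGTT_PDE_PDPE_PAT2 instead of XE_PPGTT_PTE_PAT2.
 */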
1271 
1272 static u64 pte_encode_ps(u32 pt_level)
1273 {
1274 	XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
1275 
1276 	if (pt_level == 1)
1277 		return XE_PDE_PS_2M;
1278 	else if (pt_level == 2)
1279 		return XE_PDPE_PS_1G;
1280 
1281 	return 0;
1282 }
1283 
1284 static u16 pde_pat_index(struct xe_bo *bo)
1285 {
1286 	struct xe_device *xe = xe_bo_device(bo);
1287 	u16 pat_index;
1288 
1289 	/*
1290 	 * We only have two bits to encode the PAT index in non-leaf nodes, but
1291 	 * these only point to other paging structures so we only need a minimal
1292 	 * selection of options. The user PAT index is only for encoding leaf
1293 	 * nodes, where more bits are available for the encoding. The
1294 	 * non-leaf nodes are instead under driver control so the chosen index
1295 	 * here should be distinct from the user PAT index. Also the
1296 	 * corresponding coherency of the PAT index should be tied to the
1297 	 * allocation type of the page table (or at least we should pick
1298 	 * something which is always safe).
1299 	 */
1300 	if (!xe_bo_is_vram(bo) && bo->ttm.ttm->caching == ttm_cached)
1301 		pat_index = xe->pat.idx[XE_CACHE_WB];
1302 	else
1303 		pat_index = xe->pat.idx[XE_CACHE_NONE];
1304 
1305 	xe_assert(xe, pat_index <= 3);
1306 
1307 	return pat_index;
1308 }
1309 
1310 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset)
1311 {
1312 	u64 pde;
1313 
1314 	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1315 	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
1316 	pde |= pde_encode_pat_index(pde_pat_index(bo));
1317 
1318 	return pde;
1319 }
1320 
1321 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
1322 			      u16 pat_index, u32 pt_level)
1323 {
1324 	u64 pte;
1325 
1326 	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
1327 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1328 	pte |= pte_encode_pat_index(pat_index, pt_level);
1329 	pte |= pte_encode_ps(pt_level);
1330 
1331 	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
1332 		pte |= XE_PPGTT_PTE_DM;
1333 
1334 	return pte;
1335 }
1336 
1337 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
1338 			       u16 pat_index, u32 pt_level)
1339 {
1340 	pte |= XE_PAGE_PRESENT;
1341 
1342 	if (likely(!xe_vma_read_only(vma)))
1343 		pte |= XE_PAGE_RW;
1344 
1345 	pte |= pte_encode_pat_index(pat_index, pt_level);
1346 	pte |= pte_encode_ps(pt_level);
1347 
1348 	if (unlikely(xe_vma_is_null(vma)))
1349 		pte |= XE_PTE_NULL;
1350 
1351 	return pte;
1352 }
1353 
1354 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
1355 				u16 pat_index,
1356 				u32 pt_level, bool devmem, u64 flags)
1357 {
1358 	u64 pte;
1359 
1360 	/* Avoid passing random bits directly as flags */
1361 	xe_assert(xe, !(flags & ~XE_PTE_PS64));
1362 
1363 	pte = addr;
1364 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
1365 	pte |= pte_encode_pat_index(pat_index, pt_level);
1366 	pte |= pte_encode_ps(pt_level);
1367 
1368 	if (devmem)
1369 		pte |= XE_PPGTT_PTE_DM;
1370 
1371 	pte |= flags;
1372 
1373 	return pte;
1374 }
1375 
1376 static const struct xe_pt_ops xelp_pt_ops = {
1377 	.pte_encode_bo = xelp_pte_encode_bo,
1378 	.pte_encode_vma = xelp_pte_encode_vma,
1379 	.pte_encode_addr = xelp_pte_encode_addr,
1380 	.pde_encode_bo = xelp_pde_encode_bo,
1381 };
1382 
1383 static void vm_destroy_work_func(struct work_struct *w);
1384 
1385 /**
1386  * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
1387  * given tile and vm.
1388  * @xe: xe device.
1389  * @tile: tile to set up for.
1390  * @vm: vm to set up for.
1391  * @exec: The struct drm_exec object used to lock the vm resv.
1392  *
1393  * Sets up a pagetable tree with one page-table per level and a single
1394  * leaf PTE. All pagetable entries point to the single page-table or,
1395  * for MAX_HUGEPTE_LEVEL, a NULL huge PTE returning 0 on read and
1396  * writes become NOPs.
1397  *
1398  * Return: 0 on success, negative error code on error.
1399  */
1400 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
1401 				struct xe_vm *vm, struct drm_exec *exec)
1402 {
1403 	u8 id = tile->id;
1404 	int i;
1405 
1406 	for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
1407 		vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i, exec);
1408 		if (IS_ERR(vm->scratch_pt[id][i])) {
1409 			int err = PTR_ERR(vm->scratch_pt[id][i]);
1410 
1411 			vm->scratch_pt[id][i] = NULL;
1412 			return err;
1413 		}
1414 		xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
1415 	}
1416 
1417 	return 0;
1418 }
1419 ALLOW_ERROR_INJECTION(xe_vm_create_scratch, ERRNO);
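
/*
 * Illustrative layout (level numbers are hypothetical): with a root page
 * table at level 3 and MAX_HUGEPTE_LEVEL == 1, the loop above creates
 * scratch_pt[id][1] and scratch_pt[id][2], each populated so that entries
 * chain to the scratch table one level down, bottoming out in the NULL
 * huge PTEs described in the kernel-doc above.
 */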
1420 
1421 static void xe_vm_free_scratch(struct xe_vm *vm)
1422 {
1423 	struct xe_tile *tile;
1424 	u8 id;
1425 
1426 	if (!xe_vm_has_scratch(vm))
1427 		return;
1428 
1429 	for_each_tile(tile, vm->xe, id) {
1430 		u32 i;
1431 
1432 		if (!vm->pt_root[id])
1433 			continue;
1434 
1435 		for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
1436 			if (vm->scratch_pt[id][i])
1437 				xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
1438 	}
1439 }
1440 
1441 static void xe_vm_pt_destroy(struct xe_vm *vm)
1442 {
1443 	struct xe_tile *tile;
1444 	u8 id;
1445 
1446 	xe_vm_assert_held(vm);
1447 
1448 	for_each_tile(tile, vm->xe, id) {
1449 		if (vm->pt_root[id]) {
1450 			xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
1451 			vm->pt_root[id] = NULL;
1452 		}
1453 	}
1454 }
1455 
1456 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
1457 {
1458 	struct drm_gem_object *vm_resv_obj;
1459 	struct xe_validation_ctx ctx;
1460 	struct drm_exec exec;
1461 	struct xe_vm *vm;
1462 	int err;
1463 	struct xe_tile *tile;
1464 	u8 id;
1465 
1466 	/*
1467 	 * Since the GSCCS is not user-accessible, we don't expect a GSC VM to
1468 	 * ever be in faulting mode.
1469 	 */
1470 	xe_assert(xe, !((flags & XE_VM_FLAG_GSC) && (flags & XE_VM_FLAG_FAULT_MODE)));
1471 
1472 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1473 	if (!vm)
1474 		return ERR_PTR(-ENOMEM);
1475 
1476 	vm->xe = xe;
1477 
1478 	vm->size = 1ull << xe->info.va_bits;
1479 	vm->flags = flags;
1480 
1481 	if (xef)
1482 		vm->xef = xe_file_get(xef);
1483 	/*
1484 	 * GSC VMs are kernel-owned, only used for PXP ops and can sometimes be
1485 	 * manipulated under the PXP mutex. However, the PXP mutex can be taken
1486 	 * under a user-VM lock when the PXP session is started at exec_queue
1487 	 * creation time. Those are different VMs and therefore there is no risk
1488 	 * of deadlock, but we need to tell lockdep that this is the case or it
1489 	 * will print a warning.
1490 	 */
1491 	if (flags & XE_VM_FLAG_GSC) {
1492 		static struct lock_class_key gsc_vm_key;
1493 
1494 		__init_rwsem(&vm->lock, "gsc_vm", &gsc_vm_key);
1495 	} else {
1496 		init_rwsem(&vm->lock);
1497 	}
1498 	mutex_init(&vm->snap_mutex);
1499 
1500 	INIT_LIST_HEAD(&vm->rebind_list);
1501 
1502 	INIT_LIST_HEAD(&vm->userptr.repin_list);
1503 	INIT_LIST_HEAD(&vm->userptr.invalidated);
1504 	spin_lock_init(&vm->userptr.invalidated_lock);
1505 
1506 	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
1507 
1508 	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
1509 
1510 	INIT_LIST_HEAD(&vm->preempt.exec_queues);
1511 	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
1512 
1513 	for_each_tile(tile, xe, id)
1514 		xe_range_fence_tree_init(&vm->rftree[id]);
1515 
1516 	vm->pt_ops = &xelp_pt_ops;
1517 
1518 	/*
1519 	 * Long-running workloads are not protected by the scheduler references.
1520 	 * By design, run_job for long-running workloads returns NULL and the
1521 	 * scheduler drops all of its references, hence the VM must be
1522 	 * protected explicitly in this case.
1523 	 */
1524 	if (flags & XE_VM_FLAG_LR_MODE) {
1525 		INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1526 		xe_pm_runtime_get_noresume(xe);
1527 		INIT_LIST_HEAD(&vm->preempt.pm_activate_link);
1528 	}
1529 
1530 	err = xe_svm_init(vm);
1531 	if (err)
1532 		goto err_no_resv;
1533 
1534 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
1535 	if (!vm_resv_obj) {
1536 		err = -ENOMEM;
1537 		goto err_svm_fini;
1538 	}
1539 
1540 	drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
1541 		       vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
1542 
1543 	drm_gem_object_put(vm_resv_obj);
1544 
1545 	err = 0;
1546 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
1547 			    err) {
1548 		err = xe_vm_drm_exec_lock(vm, &exec);
1549 		drm_exec_retry_on_contention(&exec);
1550 
1551 		if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
1552 			vm->flags |= XE_VM_FLAG_64K;
1553 
1554 		for_each_tile(tile, xe, id) {
1555 			if (flags & XE_VM_FLAG_MIGRATION &&
1556 			    tile->id != XE_VM_FLAG_TILE_ID(flags))
1557 				continue;
1558 
1559 			vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level,
1560 						       &exec);
1561 			if (IS_ERR(vm->pt_root[id])) {
1562 				err = PTR_ERR(vm->pt_root[id]);
1563 				vm->pt_root[id] = NULL;
1564 				xe_vm_pt_destroy(vm);
1565 				drm_exec_retry_on_contention(&exec);
1566 				xe_validation_retry_on_oom(&ctx, &err);
1567 				break;
1568 			}
1569 		}
1570 		if (err)
1571 			break;
1572 
1573 		if (xe_vm_has_scratch(vm)) {
1574 			for_each_tile(tile, xe, id) {
1575 				if (!vm->pt_root[id])
1576 					continue;
1577 
1578 				err = xe_vm_create_scratch(xe, tile, vm, &exec);
1579 				if (err) {
1580 					xe_vm_free_scratch(vm);
1581 					xe_vm_pt_destroy(vm);
1582 					drm_exec_retry_on_contention(&exec);
1583 					xe_validation_retry_on_oom(&ctx, &err);
1584 					break;
1585 				}
1586 			}
1587 			if (err)
1588 				break;
1589 			vm->batch_invalidate_tlb = true;
1590 		}
1591 
1592 		if (vm->flags & XE_VM_FLAG_LR_MODE) {
1593 			INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
1594 			vm->batch_invalidate_tlb = false;
1595 		}
1596 
1597 		/* Fill pt_root after allocating scratch tables */
1598 		for_each_tile(tile, xe, id) {
1599 			if (!vm->pt_root[id])
1600 				continue;
1601 
1602 			xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
1603 		}
1604 	}
1605 	if (err)
1606 		goto err_close;
1607 
1608 	/* Kernel migration VM shouldn't have a circular loop. */
1609 	if (!(flags & XE_VM_FLAG_MIGRATION)) {
1610 		for_each_tile(tile, xe, id) {
1611 			struct xe_exec_queue *q;
1612 			u32 create_flags = EXEC_QUEUE_FLAG_VM;
1613 
1614 			if (!vm->pt_root[id])
1615 				continue;
1616 
1617 			q = xe_exec_queue_create_bind(xe, tile, create_flags, 0);
1618 			if (IS_ERR(q)) {
1619 				err = PTR_ERR(q);
1620 				goto err_close;
1621 			}
1622 			vm->q[id] = q;
1623 		}
1624 	}
1625 
1626 	if (xef && xe->info.has_asid) {
1627 		u32 asid;
1628 
1629 		down_write(&xe->usm.lock);
1630 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
1631 				      XA_LIMIT(1, XE_MAX_ASID - 1),
1632 				      &xe->usm.next_asid, GFP_KERNEL);
1633 		up_write(&xe->usm.lock);
1634 		if (err < 0)
1635 			goto err_close;
1636 
1637 		vm->usm.asid = asid;
1638 	}
1639 
1640 	trace_xe_vm_create(vm);
1641 
1642 	return vm;
1643 
1644 err_close:
1645 	xe_vm_close_and_put(vm);
1646 	return ERR_PTR(err);
1647 
1648 err_svm_fini:
1649 	if (flags & XE_VM_FLAG_FAULT_MODE) {
1650 		vm->size = 0; /* close the vm */
1651 		xe_svm_fini(vm);
1652 	}
1653 err_no_resv:
1654 	mutex_destroy(&vm->snap_mutex);
1655 	for_each_tile(tile, xe, id)
1656 		xe_range_fence_tree_fini(&vm->rftree[id]);
1657 	ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
1658 	if (vm->xef)
1659 		xe_file_put(vm->xef);
1660 	kfree(vm);
1661 	if (flags & XE_VM_FLAG_LR_MODE)
1662 		xe_pm_runtime_put(xe);
1663 	return ERR_PTR(err);
1664 }
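
/*
 * Illustrative sketch (mirrors xe_vm_create_ioctl() below): driver-internal
 * callers pair xe_vm_create() with xe_vm_close_and_put(), e.g.:
 *
 *	struct xe_vm *vm = xe_vm_create(xe, XE_VM_FLAG_LR_MODE, NULL);
 *
 *	if (IS_ERR(vm))
 *		return PTR_ERR(vm);
 *	...
 *	xe_vm_close_and_put(vm);
 */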
1665 
1666 static void xe_vm_close(struct xe_vm *vm)
1667 {
1668 	struct xe_device *xe = vm->xe;
1669 	bool bound;
1670 	int idx;
1671 
1672 	bound = drm_dev_enter(&xe->drm, &idx);
1673 
1674 	down_write(&vm->lock);
1675 	if (xe_vm_in_fault_mode(vm))
1676 		xe_svm_notifier_lock(vm);
1677 
1678 	vm->size = 0;
1679 
1680 	if (!((vm->flags & XE_VM_FLAG_MIGRATION))) {
1681 		struct xe_tile *tile;
1682 		struct xe_gt *gt;
1683 		u8 id;
1684 
1685 		/* Wait for pending binds */
1686 		dma_resv_wait_timeout(xe_vm_resv(vm),
1687 				      DMA_RESV_USAGE_BOOKKEEP,
1688 				      false, MAX_SCHEDULE_TIMEOUT);
1689 
1690 		if (bound) {
1691 			for_each_tile(tile, xe, id)
1692 				if (vm->pt_root[id])
1693 					xe_pt_clear(xe, vm->pt_root[id]);
1694 
1695 			for_each_gt(gt, xe, id)
1696 				xe_tlb_inval_vm(&gt->tlb_inval, vm);
1697 		}
1698 	}
1699 
1700 	if (xe_vm_in_fault_mode(vm))
1701 		xe_svm_notifier_unlock(vm);
1702 	up_write(&vm->lock);
1703 
1704 	if (bound)
1705 		drm_dev_exit(idx);
1706 }
1707 
1708 void xe_vm_close_and_put(struct xe_vm *vm)
1709 {
1710 	LIST_HEAD(contested);
1711 	struct xe_device *xe = vm->xe;
1712 	struct xe_tile *tile;
1713 	struct xe_vma *vma, *next_vma;
1714 	struct drm_gpuva *gpuva, *next;
1715 	u8 id;
1716 
1717 	xe_assert(xe, !vm->preempt.num_exec_queues);
1718 
1719 	xe_vm_close(vm);
1720 	if (xe_vm_in_preempt_fence_mode(vm)) {
1721 		mutex_lock(&xe->rebind_resume_lock);
1722 		list_del_init(&vm->preempt.pm_activate_link);
1723 		mutex_unlock(&xe->rebind_resume_lock);
1724 		flush_work(&vm->preempt.rebind_work);
1725 	}
1726 	if (xe_vm_in_fault_mode(vm))
1727 		xe_svm_close(vm);
1728 
1729 	down_write(&vm->lock);
1730 	for_each_tile(tile, xe, id) {
1731 		if (vm->q[id]) {
1732 			int i;
1733 
1734 			xe_exec_queue_last_fence_put(vm->q[id], vm);
1735 			for_each_tlb_inval(i)
1736 				xe_exec_queue_tlb_inval_last_fence_put(vm->q[id], vm, i);
1737 		}
1738 	}
1739 	up_write(&vm->lock);
1740 
1741 	for_each_tile(tile, xe, id) {
1742 		if (vm->q[id]) {
1743 			xe_exec_queue_kill(vm->q[id]);
1744 			xe_exec_queue_put(vm->q[id]);
1745 			vm->q[id] = NULL;
1746 		}
1747 	}
1748 
1749 	down_write(&vm->lock);
1750 	xe_vm_lock(vm, false);
1751 	drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
1752 		vma = gpuva_to_vma(gpuva);
1753 
1754 		if (xe_vma_has_no_bo(vma)) {
1755 			xe_svm_notifier_lock(vm);
1756 			vma->gpuva.flags |= XE_VMA_DESTROYED;
1757 			xe_svm_notifier_unlock(vm);
1758 		}
1759 
1760 		xe_vm_remove_vma(vm, vma);
1761 
1762 		/* easy case, remove from VMA? */
1763 		if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
1764 			list_del_init(&vma->combined_links.rebind);
1765 			xe_vma_destroy(vma, NULL);
1766 			continue;
1767 		}
1768 
1769 		list_move_tail(&vma->combined_links.destroy, &contested);
1770 		vma->gpuva.flags |= XE_VMA_DESTROYED;
1771 	}
1772 
1773 	/*
1774 	 * All vm operations will add shared fences to resv.
1775 	 * The only exception is eviction for a shared object,
1776 	 * but even so, the unbind when evicted would still
1777 	 * install a fence to resv. Hence it's safe to
1778 	 * destroy the pagetables immediately.
1779 	 */
1780 	xe_vm_free_scratch(vm);
1781 	xe_vm_pt_destroy(vm);
1782 	xe_vm_unlock(vm);
1783 
1784 	/*
1785 	 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL.
1786 	 * Since we hold a refcount to the bo, we can remove and free
1787 	 * the members safely without locking.
1788 	 */
1789 	list_for_each_entry_safe(vma, next_vma, &contested,
1790 				 combined_links.destroy) {
1791 		list_del_init(&vma->combined_links.destroy);
1792 		xe_vma_destroy_unlocked(vma);
1793 	}
1794 
1795 	xe_svm_fini(vm);
1796 
1797 	up_write(&vm->lock);
1798 
1799 	down_write(&xe->usm.lock);
1800 	if (vm->usm.asid) {
1801 		void *lookup;
1802 
1803 		xe_assert(xe, xe->info.has_asid);
1804 		xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
1805 
1806 		lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
1807 		xe_assert(xe, lookup == vm);
1808 	}
1809 	up_write(&xe->usm.lock);
1810 
1811 	for_each_tile(tile, xe, id)
1812 		xe_range_fence_tree_fini(&vm->rftree[id]);
1813 
1814 	xe_vm_put(vm);
1815 }
1816 
1817 static void vm_destroy_work_func(struct work_struct *w)
1818 {
1819 	struct xe_vm *vm =
1820 		container_of(w, struct xe_vm, destroy_work);
1821 	struct xe_device *xe = vm->xe;
1822 	struct xe_tile *tile;
1823 	u8 id;
1824 
1825 	/* xe_vm_close_and_put was not called? */
1826 	xe_assert(xe, !vm->size);
1827 
1828 	if (xe_vm_in_preempt_fence_mode(vm))
1829 		flush_work(&vm->preempt.rebind_work);
1830 
1831 	mutex_destroy(&vm->snap_mutex);
1832 
1833 	if (vm->flags & XE_VM_FLAG_LR_MODE)
1834 		xe_pm_runtime_put(xe);
1835 
1836 	for_each_tile(tile, xe, id)
1837 		XE_WARN_ON(vm->pt_root[id]);
1838 
1839 	trace_xe_vm_free(vm);
1840 
1841 	ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
1842 
1843 	if (vm->xef)
1844 		xe_file_put(vm->xef);
1845 
1846 	kfree(vm);
1847 }
1848 
1849 static void xe_vm_free(struct drm_gpuvm *gpuvm)
1850 {
1851 	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
1852 
1853 	/* To destroy the VM we need to be able to sleep */
1854 	queue_work(system_unbound_wq, &vm->destroy_work);
1855 }
1856 
1857 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
1858 {
1859 	struct xe_vm *vm;
1860 
1861 	mutex_lock(&xef->vm.lock);
1862 	vm = xa_load(&xef->vm.xa, id);
1863 	if (vm)
1864 		xe_vm_get(vm);
1865 	mutex_unlock(&xef->vm.lock);
1866 
1867 	return vm;
1868 }
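
/*
 * Illustrative sketch (see xe_vm_query_vmas_attrs_ioctl() below for an
 * in-tree user): a successful lookup takes a reference that must be dropped
 * with xe_vm_put() once the caller is done:
 *
 *	vm = xe_vm_lookup(xef, args->vm_id);
 *	if (XE_IOCTL_DBG(xe, !vm))
 *		return -EINVAL;
 *	...
 *	xe_vm_put(vm);
 */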
1869 
1870 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
1871 {
1872 	return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0);
1873 }
1874 
1875 static struct xe_exec_queue *
1876 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
1877 {
1878 	return q ? q : vm->q[0];
1879 }
1880 
1881 static struct xe_user_fence *
1882 find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
1883 {
1884 	unsigned int i;
1885 
1886 	for (i = 0; i < num_syncs; i++) {
1887 		struct xe_sync_entry *e = &syncs[i];
1888 
1889 		if (xe_sync_is_ufence(e))
1890 			return xe_sync_ufence_get(e);
1891 	}
1892 
1893 	return NULL;
1894 }
1895 
1896 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
1897 				    DRM_XE_VM_CREATE_FLAG_LR_MODE | \
1898 				    DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1899 
1900 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
1901 		       struct drm_file *file)
1902 {
1903 	struct xe_device *xe = to_xe_device(dev);
1904 	struct xe_file *xef = to_xe_file(file);
1905 	struct drm_xe_vm_create *args = data;
1906 	struct xe_gt *wa_gt = xe_root_mmio_gt(xe);
1907 	struct xe_vm *vm;
1908 	u32 id;
1909 	int err;
1910 	u32 flags = 0;
1911 
1912 	if (XE_IOCTL_DBG(xe, args->extensions))
1913 		return -EINVAL;
1914 
1915 	if (wa_gt && XE_GT_WA(wa_gt, 22014953428))
1916 		args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
1917 
1918 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1919 			 !xe->info.has_usm))
1920 		return -EINVAL;
1921 
1922 	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1923 		return -EINVAL;
1924 
1925 	if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
1926 		return -EINVAL;
1927 
1928 	if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
1929 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
1930 			 !xe->info.needs_scratch))
1931 		return -EINVAL;
1932 
1933 	if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
1934 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
1935 		return -EINVAL;
1936 
1937 	if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
1938 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
1939 	if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
1940 		flags |= XE_VM_FLAG_LR_MODE;
1941 	if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
1942 		flags |= XE_VM_FLAG_FAULT_MODE;
1943 
1944 	vm = xe_vm_create(xe, flags, xef);
1945 	if (IS_ERR(vm))
1946 		return PTR_ERR(vm);
1947 
1948 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
1949 	/* Warning: Security issue - never enable by default */
1950 	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
1951 #endif
1952 
1953 	/* user id alloc must always be last in ioctl to prevent UAF */
1954 	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1955 	if (err)
1956 		goto err_close_and_put;
1957 
1958 	args->vm_id = id;
1959 
1960 	return 0;
1961 
1962 err_close_and_put:
1963 	xe_vm_close_and_put(vm);
1964 
1965 	return err;
1966 }
1967 
1968 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
1969 			struct drm_file *file)
1970 {
1971 	struct xe_device *xe = to_xe_device(dev);
1972 	struct xe_file *xef = to_xe_file(file);
1973 	struct drm_xe_vm_destroy *args = data;
1974 	struct xe_vm *vm;
1975 	int err = 0;
1976 
1977 	if (XE_IOCTL_DBG(xe, args->pad) ||
1978 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1979 		return -EINVAL;
1980 
1981 	mutex_lock(&xef->vm.lock);
1982 	vm = xa_load(&xef->vm.xa, args->vm_id);
1983 	if (XE_IOCTL_DBG(xe, !vm))
1984 		err = -ENOENT;
1985 	else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
1986 		err = -EBUSY;
1987 	else
1988 		xa_erase(&xef->vm.xa, args->vm_id);
1989 	mutex_unlock(&xef->vm.lock);
1990 
1991 	if (!err)
1992 		xe_vm_close_and_put(vm);
1993 
1994 	return err;
1995 }
1996 
1997 static int xe_vm_query_vmas(struct xe_vm *vm, u64 start, u64 end)
1998 {
1999 	struct drm_gpuva *gpuva;
2000 	u32 num_vmas = 0;
2001 
2002 	lockdep_assert_held(&vm->lock);
2003 	drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end)
2004 		num_vmas++;
2005 
2006 	return num_vmas;
2007 }
2008 
2009 static int get_mem_attrs(struct xe_vm *vm, u32 *num_vmas, u64 start,
2010 			 u64 end, struct drm_xe_mem_range_attr *attrs)
2011 {
2012 	struct drm_gpuva *gpuva;
2013 	int i = 0;
2014 
2015 	lockdep_assert_held(&vm->lock);
2016 
2017 	drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
2018 		struct xe_vma *vma = gpuva_to_vma(gpuva);
2019 
2020 		if (i == *num_vmas)
2021 			return -ENOSPC;
2022 
2023 		attrs[i].start = xe_vma_start(vma);
2024 		attrs[i].end = xe_vma_end(vma);
2025 		attrs[i].atomic.val = vma->attr.atomic_access;
2026 		attrs[i].pat_index.val = vma->attr.pat_index;
2027 		attrs[i].preferred_mem_loc.devmem_fd = vma->attr.preferred_loc.devmem_fd;
2028 		attrs[i].preferred_mem_loc.migration_policy =
2029 		vma->attr.preferred_loc.migration_policy;
2030 
2031 		i++;
2032 	}
2033 
2034 	*num_vmas = i;
2035 	return 0;
2036 }
2037 
2038 int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2039 {
2040 	struct xe_device *xe = to_xe_device(dev);
2041 	struct xe_file *xef = to_xe_file(file);
2042 	struct drm_xe_mem_range_attr *mem_attrs;
2043 	struct drm_xe_vm_query_mem_range_attr *args = data;
2044 	u64 __user *attrs_user = u64_to_user_ptr(args->vector_of_mem_attr);
2045 	struct xe_vm *vm;
2046 	int err = 0;
2047 
2048 	if (XE_IOCTL_DBG(xe,
2049 			 ((args->num_mem_ranges == 0 &&
2050 			  (attrs_user || args->sizeof_mem_range_attr != 0)) ||
2051 			 (args->num_mem_ranges > 0 &&
2052 			  (!attrs_user ||
2053 			   args->sizeof_mem_range_attr !=
2054 			   sizeof(struct drm_xe_mem_range_attr))))))
2055 		return -EINVAL;
2056 
2057 	vm = xe_vm_lookup(xef, args->vm_id);
2058 	if (XE_IOCTL_DBG(xe, !vm))
2059 		return -EINVAL;
2060 
2061 	err = down_read_interruptible(&vm->lock);
2062 	if (err)
2063 		goto put_vm;
2064 
2065 	attrs_user = u64_to_user_ptr(args->vector_of_mem_attr);
2066 
2067 	if (args->num_mem_ranges == 0 && !attrs_user) {
2068 		args->num_mem_ranges = xe_vm_query_vmas(vm, args->start, args->start + args->range);
2069 		args->sizeof_mem_range_attr = sizeof(struct drm_xe_mem_range_attr);
2070 		goto unlock_vm;
2071 	}
2072 
2073 	mem_attrs = kvmalloc_array(args->num_mem_ranges, args->sizeof_mem_range_attr,
2074 				   GFP_KERNEL | __GFP_ACCOUNT |
2075 				   __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
2076 	if (!mem_attrs) {
2077 		err = args->num_mem_ranges > 1 ? -ENOBUFS : -ENOMEM;
2078 		goto unlock_vm;
2079 	}
2080 
2081 	memset(mem_attrs, 0, args->num_mem_ranges * args->sizeof_mem_range_attr);
2082 	err = get_mem_attrs(vm, &args->num_mem_ranges, args->start,
2083 			    args->start + args->range, mem_attrs);
2084 	if (err)
2085 		goto free_mem_attrs;
2086 
2087 	err = copy_to_user(attrs_user, mem_attrs,
2088 			   args->sizeof_mem_range_attr * args->num_mem_ranges);
2089 	if (err)
2090 		err = -EFAULT;
2091 
2092 free_mem_attrs:
2093 	kvfree(mem_attrs);
2094 unlock_vm:
2095 	up_read(&vm->lock);
2096 put_vm:
2097 	xe_vm_put(vm);
2098 	return err;
2099 }
2100 
2101 static bool vma_matches(struct xe_vma *vma, u64 page_addr)
2102 {
2103 	if (page_addr > xe_vma_end(vma) - 1 ||
2104 	    page_addr + SZ_4K - 1 < xe_vma_start(vma))
2105 		return false;
2106 
2107 	return true;
2108 }
2109 
2110 /**
2111  * xe_vm_find_vma_by_addr() - Find a VMA by its address
2112  *
2113  * @vm: the xe_vm the vma belongs to
2114  * @page_addr: address to look up
 *
 * Return: Pointer to the VMA covering @page_addr, or NULL if no VMA maps it.
 */
2116 struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr)
2117 {
2118 	struct xe_vma *vma = NULL;
2119 
2120 	if (vm->usm.last_fault_vma) {   /* Fast lookup */
2121 		if (vma_matches(vm->usm.last_fault_vma, page_addr))
2122 			vma = vm->usm.last_fault_vma;
2123 	}
2124 	if (!vma)
2125 		vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);
2126 
2127 	return vma;
2128 }
2129 
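/* Maps a prefetch memory region instance from the uAPI to a TTM placement. */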
2130 static const u32 region_to_mem_type[] = {
2131 	XE_PL_TT,
2132 	XE_PL_VRAM0,
2133 	XE_PL_VRAM1,
2134 };
2135 
2136 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
2137 			     bool post_commit)
2138 {
2139 	xe_svm_notifier_lock(vm);
2140 	vma->gpuva.flags |= XE_VMA_DESTROYED;
2141 	xe_svm_notifier_unlock(vm);
2142 	if (post_commit)
2143 		xe_vm_remove_vma(vm, vma);
2144 }
2145 
2146 #undef ULL
2147 #define ULL	unsigned long long
2148 
2149 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
2150 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2151 {
2152 	struct xe_vma *vma;
2153 
2154 	switch (op->op) {
2155 	case DRM_GPUVA_OP_MAP:
2156 		vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
2157 		       (ULL)op->map.va.addr, (ULL)op->map.va.range);
2158 		break;
2159 	case DRM_GPUVA_OP_REMAP:
2160 		vma = gpuva_to_vma(op->remap.unmap->va);
2161 		vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2162 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2163 		       op->remap.unmap->keep ? 1 : 0);
2164 		if (op->remap.prev)
2165 			vm_dbg(&xe->drm,
2166 			       "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
2167 			       (ULL)op->remap.prev->va.addr,
2168 			       (ULL)op->remap.prev->va.range);
2169 		if (op->remap.next)
2170 			vm_dbg(&xe->drm,
2171 			       "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
2172 			       (ULL)op->remap.next->va.addr,
2173 			       (ULL)op->remap.next->va.range);
2174 		break;
2175 	case DRM_GPUVA_OP_UNMAP:
2176 		vma = gpuva_to_vma(op->unmap.va);
2177 		vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
2178 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
2179 		       op->unmap.keep ? 1 : 0);
2180 		break;
2181 	case DRM_GPUVA_OP_PREFETCH:
2182 		vma = gpuva_to_vma(op->prefetch.va);
2183 		vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
2184 		       (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
2185 		break;
2186 	default:
2187 		drm_warn(&xe->drm, "NOT POSSIBLE");
2188 	}
2189 }
2190 #else
2191 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
2192 {
2193 }
2194 #endif
2195 
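/*
 * Scratch PTEs covering a range must be cleared at bind time when the VM is in
 * fault mode, uses scratch pages and the bind is not immediate.
 */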
2196 static bool __xe_vm_needs_clear_scratch_pages(struct xe_vm *vm, u32 bind_flags)
2197 {
2198 	if (!xe_vm_in_fault_mode(vm))
2199 		return false;
2200 
2201 	if (!xe_vm_has_scratch(vm))
2202 		return false;
2203 
2204 	if (bind_flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE)
2205 		return false;
2206 
2207 	return true;
2208 }
2209 
2210 static void xe_svm_prefetch_gpuva_ops_fini(struct drm_gpuva_ops *ops)
2211 {
2212 	struct drm_gpuva_op *__op;
2213 
2214 	drm_gpuva_for_each_op(__op, ops) {
2215 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2216 
2217 		xe_vma_svm_prefetch_op_fini(op);
2218 	}
2219 }
2220 
2221 /*
2222  * Create the operations list from the IOCTL arguments and set up the operation
2223  * fields so that the parse and commit steps are decoupled from the IOCTL arguments.
 * This step can fail.
2224  */
2225 static struct drm_gpuva_ops *
2226 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
2227 			 struct xe_bo *bo, u64 bo_offset_or_userptr,
2228 			 u64 addr, u64 range,
2229 			 u32 operation, u32 flags,
2230 			 u32 prefetch_region, u16 pat_index)
2231 {
2232 	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
2233 	struct drm_gpuva_ops *ops;
2234 	struct drm_gpuva_op *__op;
2235 	struct drm_gpuvm_bo *vm_bo;
2236 	u64 range_end = addr + range;
2237 	int err;
2238 
2239 	lockdep_assert_held_write(&vm->lock);
2240 
2241 	vm_dbg(&vm->xe->drm,
2242 	       "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
2243 	       operation, (ULL)addr, (ULL)range,
2244 	       (ULL)bo_offset_or_userptr);
2245 
2246 	switch (operation) {
2247 	case DRM_XE_VM_BIND_OP_MAP:
2248 	case DRM_XE_VM_BIND_OP_MAP_USERPTR: {
2249 		struct drm_gpuvm_map_req map_req = {
2250 			.map.va.addr = addr,
2251 			.map.va.range = range,
2252 			.map.gem.obj = obj,
2253 			.map.gem.offset = bo_offset_or_userptr,
2254 		};
2255 
2256 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req);
2257 		break;
2258 	}
2259 	case DRM_XE_VM_BIND_OP_UNMAP:
2260 		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
2261 		break;
2262 	case DRM_XE_VM_BIND_OP_PREFETCH:
2263 		ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
2264 		break;
2265 	case DRM_XE_VM_BIND_OP_UNMAP_ALL:
2266 		xe_assert(vm->xe, bo);
2267 
2268 		err = xe_bo_lock(bo, true);
2269 		if (err)
2270 			return ERR_PTR(err);
2271 
2272 		vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
2273 		if (IS_ERR(vm_bo)) {
2274 			xe_bo_unlock(bo);
2275 			return ERR_CAST(vm_bo);
2276 		}
2277 
2278 		ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
2279 		drm_gpuvm_bo_put(vm_bo);
2280 		xe_bo_unlock(bo);
2281 		break;
2282 	default:
2283 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2284 		ops = ERR_PTR(-EINVAL);
2285 	}
2286 	if (IS_ERR(ops))
2287 		return ops;
2288 
2289 	drm_gpuva_for_each_op(__op, ops) {
2290 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2291 
2292 		if (__op->op == DRM_GPUVA_OP_MAP) {
2293 			op->map.immediate =
2294 				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
2295 			if (flags & DRM_XE_VM_BIND_FLAG_READONLY)
2296 				op->map.vma_flags |= XE_VMA_READ_ONLY;
2297 			if (flags & DRM_XE_VM_BIND_FLAG_NULL)
2298 				op->map.vma_flags |= DRM_GPUVA_SPARSE;
2299 			if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
2300 				op->map.vma_flags |= XE_VMA_SYSTEM_ALLOCATOR;
2301 			if (flags & DRM_XE_VM_BIND_FLAG_DUMPABLE)
2302 				op->map.vma_flags |= XE_VMA_DUMPABLE;
2303 			if (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
2304 				op->map.vma_flags |= XE_VMA_MADV_AUTORESET;
2305 			op->map.pat_index = pat_index;
2306 			op->map.invalidate_on_bind =
2307 				__xe_vm_needs_clear_scratch_pages(vm, flags);
2308 		} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
2309 			struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
2310 			struct xe_tile *tile;
2311 			struct xe_svm_range *svm_range;
2312 			struct drm_gpusvm_ctx ctx = {};
2313 			struct drm_pagemap *dpagemap;
2314 			u8 id, tile_mask = 0;
2315 			u32 i;
2316 
2317 			if (!xe_vma_is_cpu_addr_mirror(vma)) {
2318 				op->prefetch.region = prefetch_region;
2319 				break;
2320 			}
2321 
2322 			ctx.read_only = xe_vma_read_only(vma);
2323 			ctx.devmem_possible = IS_DGFX(vm->xe) &&
2324 					      IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
2325 
2326 			for_each_tile(tile, vm->xe, id)
2327 				tile_mask |= 0x1 << id;
2328 
2329 			xa_init_flags(&op->prefetch_range.range, XA_FLAGS_ALLOC);
2330 			op->prefetch_range.ranges_count = 0;
2331 			tile = NULL;
2332 
2333 			if (prefetch_region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC) {
2334 				dpagemap = xe_vma_resolve_pagemap(vma,
2335 								  xe_device_get_root_tile(vm->xe));
2336 				/*
2337 				 * TODO: Once multi-GPU support is enabled, we will need
2338 				 * a way to derive the tile from the dpagemap.
2339 				 */
2340 				if (dpagemap)
2341 					tile = xe_device_get_root_tile(vm->xe);
2342 			} else if (prefetch_region) {
2343 				tile = &vm->xe->tiles[region_to_mem_type[prefetch_region] -
2344 						      XE_PL_VRAM0];
2345 			}
2346 
2347 			op->prefetch_range.tile = tile;
2348 alloc_next_range:
2349 			svm_range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx);
2350 
2351 			if (PTR_ERR(svm_range) == -ENOENT) {
2352 				u64 ret = xe_svm_find_vma_start(vm, addr, range_end, vma);
2353 
2354 				addr = ret == ULONG_MAX ? 0 : ret;
2355 				if (addr)
2356 					goto alloc_next_range;
2357 				else
2358 					goto print_op_label;
2359 			}
2360 
2361 			if (IS_ERR(svm_range)) {
2362 				err = PTR_ERR(svm_range);
2363 				goto unwind_prefetch_ops;
2364 			}
2365 
2366 			if (xe_svm_range_validate(vm, svm_range, tile_mask, !!tile)) {
2367 				xe_svm_range_debug(svm_range, "PREFETCH - RANGE IS VALID");
2368 				goto check_next_range;
2369 			}
2370 
2371 			err = xa_alloc(&op->prefetch_range.range,
2372 				       &i, svm_range, xa_limit_32b,
2373 				       GFP_KERNEL);
2374 
2375 			if (err)
2376 				goto unwind_prefetch_ops;
2377 
2378 			op->prefetch_range.ranges_count++;
2379 			vops->flags |= XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH;
2380 			xe_svm_range_debug(svm_range, "PREFETCH - RANGE CREATED");
2381 check_next_range:
2382 			if (range_end > xe_svm_range_end(svm_range) &&
2383 			    xe_svm_range_end(svm_range) < xe_vma_end(vma)) {
2384 				addr = xe_svm_range_end(svm_range);
2385 				goto alloc_next_range;
2386 			}
2387 		}
2388 print_op_label:
2389 		print_op(vm->xe, __op);
2390 	}
2391 
2392 	return ops;
2393 
2394 unwind_prefetch_ops:
2395 	xe_svm_prefetch_gpuva_ops_fini(ops);
2396 	drm_gpuva_ops_free(&vm->gpuvm, ops);
2397 	return ERR_PTR(err);
2398 }
2399 
2400 ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
2401 
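/*
 * Create a VMA for a MAP op, taking the locks required for the backing BO
 * (if any), adding preempt fences for external BOs and pinning pages for
 * userptr VMAs.
 */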
2402 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
2403 			      struct xe_vma_mem_attr *attr, unsigned int flags)
2404 {
2405 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
2406 	struct xe_validation_ctx ctx;
2407 	struct drm_exec exec;
2408 	struct xe_vma *vma;
2409 	int err = 0;
2410 
2411 	lockdep_assert_held_write(&vm->lock);
2412 
2413 	if (bo) {
2414 		err = 0;
2415 		xe_validation_guard(&ctx, &vm->xe->val, &exec,
2416 				    (struct xe_val_flags) {.interruptible = true}, err) {
2417 			if (!bo->vm) {
2418 				err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
2419 				drm_exec_retry_on_contention(&exec);
2420 			}
2421 			if (!err) {
2422 				err = drm_exec_lock_obj(&exec, &bo->ttm.base);
2423 				drm_exec_retry_on_contention(&exec);
2424 			}
2425 			if (err)
2426 				return ERR_PTR(err);
2427 
2428 			vma = xe_vma_create(vm, bo, op->gem.offset,
2429 					    op->va.addr, op->va.addr +
2430 					    op->va.range - 1, attr, flags);
2431 			if (IS_ERR(vma))
2432 				return vma;
2433 
2434 			if (!bo->vm) {
2435 				err = add_preempt_fences(vm, bo);
2436 				if (err) {
2437 					prep_vma_destroy(vm, vma, false);
2438 					xe_vma_destroy(vma, NULL);
2439 				}
2440 			}
2441 		}
2442 		if (err)
2443 			return ERR_PTR(err);
2444 	} else {
2445 		vma = xe_vma_create(vm, NULL, op->gem.offset,
2446 				    op->va.addr, op->va.addr +
2447 				    op->va.range - 1, attr, flags);
2448 		if (IS_ERR(vma))
2449 			return vma;
2450 
2451 		if (xe_vma_is_userptr(vma))
2452 			err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2453 	}
2454 	if (err) {
2455 		prep_vma_destroy(vm, vma, false);
2456 		xe_vma_destroy_unlocked(vma);
2457 		vma = ERR_PTR(err);
2458 	}
2459 
2460 	return vma;
2461 }
2462 
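/* Return the maximum PTE size recorded in the VMA's flags; SZ_1G if unset. */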
2463 static u64 xe_vma_max_pte_size(struct xe_vma *vma)
2464 {
2465 	if (vma->gpuva.flags & XE_VMA_PTE_1G)
2466 		return SZ_1G;
2467 	else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
2468 		return SZ_2M;
2469 	else if (vma->gpuva.flags & XE_VMA_PTE_64K)
2470 		return SZ_64K;
2471 	else if (vma->gpuva.flags & XE_VMA_PTE_4K)
2472 		return SZ_4K;
2473 
2474 	return SZ_1G;	/* Uninitialized, use max size */
2475 }
2476 
2477 static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
2478 {
2479 	switch (size) {
2480 	case SZ_1G:
2481 		vma->gpuva.flags |= XE_VMA_PTE_1G;
2482 		break;
2483 	case SZ_2M:
2484 		vma->gpuva.flags |= XE_VMA_PTE_2M;
2485 		break;
2486 	case SZ_64K:
2487 		vma->gpuva.flags |= XE_VMA_PTE_64K;
2488 		break;
2489 	case SZ_4K:
2490 		vma->gpuva.flags |= XE_VMA_PTE_4K;
2491 		break;
2492 	}
2493 }
2494 
2495 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
2496 {
2497 	int err = 0;
2498 
2499 	lockdep_assert_held_write(&vm->lock);
2500 
2501 	switch (op->base.op) {
2502 	case DRM_GPUVA_OP_MAP:
2503 		err |= xe_vm_insert_vma(vm, op->map.vma);
2504 		if (!err)
2505 			op->flags |= XE_VMA_OP_COMMITTED;
2506 		break;
2507 	case DRM_GPUVA_OP_REMAP:
2508 	{
2509 		u8 tile_present =
2510 			gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
2511 
2512 		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
2513 				 true);
2514 		op->flags |= XE_VMA_OP_COMMITTED;
2515 
2516 		if (op->remap.prev) {
2517 			err |= xe_vm_insert_vma(vm, op->remap.prev);
2518 			if (!err)
2519 				op->flags |= XE_VMA_OP_PREV_COMMITTED;
2520 			if (!err && op->remap.skip_prev) {
2521 				op->remap.prev->tile_present =
2522 					tile_present;
2523 				op->remap.prev = NULL;
2524 			}
2525 		}
2526 		if (op->remap.next) {
2527 			err |= xe_vm_insert_vma(vm, op->remap.next);
2528 			if (!err)
2529 				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
2530 			if (!err && op->remap.skip_next) {
2531 				op->remap.next->tile_present =
2532 					tile_present;
2533 				op->remap.next = NULL;
2534 			}
2535 		}
2536 
2537 		/* Adjust for partial unbind after removing VMA from VM */
2538 		if (!err) {
2539 			op->base.remap.unmap->va->va.addr = op->remap.start;
2540 			op->base.remap.unmap->va->va.range = op->remap.range;
2541 		}
2542 		break;
2543 	}
2544 	case DRM_GPUVA_OP_UNMAP:
2545 		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
2546 		op->flags |= XE_VMA_OP_COMMITTED;
2547 		break;
2548 	case DRM_GPUVA_OP_PREFETCH:
2549 		op->flags |= XE_VMA_OP_COMMITTED;
2550 		break;
2551 	default:
2552 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2553 	}
2554 
2555 	return err;
2556 }
2557 
2558 /**
2559  * xe_vma_has_default_mem_attrs - Check if a VMA has default memory attributes
2560  * @vma: Pointer to the xe_vma structure to check
2561  *
2562  * This function determines whether the given VMA (Virtual Memory Area)
2563  * has its memory attributes set to their default values. Specifically,
2564  * it checks the following conditions:
2565  *
2566  * - `atomic_access` is `DRM_XE_ATOMIC_UNDEFINED`
2567  * - `pat_index` is equal to `default_pat_index`
2568  * - `preferred_loc.devmem_fd` is `DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE`
2569  * - `preferred_loc.migration_policy` is `DRM_XE_MIGRATE_ALL_PAGES`
2570  *
2571  * Return: true if all attributes are at their default values, false otherwise.
2572  */
2573 bool xe_vma_has_default_mem_attrs(struct xe_vma *vma)
2574 {
2575 	return (vma->attr.atomic_access == DRM_XE_ATOMIC_UNDEFINED &&
2576 		vma->attr.pat_index == vma->attr.default_pat_index &&
2577 		vma->attr.preferred_loc.devmem_fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE &&
2578 		vma->attr.preferred_loc.migration_policy == DRM_XE_MIGRATE_ALL_PAGES);
2579 }
2580 
2581 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
2582 				   struct xe_vma_ops *vops)
2583 {
2584 	struct xe_device *xe = vm->xe;
2585 	struct drm_gpuva_op *__op;
2586 	struct xe_tile *tile;
2587 	u8 id, tile_mask = 0;
2588 	int err = 0;
2589 
2590 	lockdep_assert_held_write(&vm->lock);
2591 
2592 	for_each_tile(tile, vm->xe, id)
2593 		tile_mask |= 0x1 << id;
2594 
2595 	drm_gpuva_for_each_op(__op, ops) {
2596 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2597 		struct xe_vma *vma;
2598 		unsigned int flags = 0;
2599 
2600 		INIT_LIST_HEAD(&op->link);
2601 		list_add_tail(&op->link, &vops->list);
2602 		op->tile_mask = tile_mask;
2603 
2604 		switch (op->base.op) {
2605 		case DRM_GPUVA_OP_MAP:
2606 		{
2607 			struct xe_vma_mem_attr default_attr = {
2608 				.preferred_loc = {
2609 					.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
2610 					.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
2611 				},
2612 				.atomic_access = DRM_XE_ATOMIC_UNDEFINED,
2613 				.default_pat_index = op->map.pat_index,
2614 				.pat_index = op->map.pat_index,
2615 			};
2616 
2617 			flags |= op->map.vma_flags & XE_VMA_CREATE_MASK;
2618 
2619 			vma = new_vma(vm, &op->base.map, &default_attr,
2620 				      flags);
2621 			if (IS_ERR(vma))
2622 				return PTR_ERR(vma);
2623 
2624 			op->map.vma = vma;
2625 			if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
2626 			     !(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) ||
2627 			    op->map.invalidate_on_bind)
2628 				xe_vma_ops_incr_pt_update_ops(vops,
2629 							      op->tile_mask, 1);
2630 			break;
2631 		}
2632 		case DRM_GPUVA_OP_REMAP:
2633 		{
2634 			struct xe_vma *old =
2635 				gpuva_to_vma(op->base.remap.unmap->va);
2636 			bool skip = xe_vma_is_cpu_addr_mirror(old);
2637 			u64 start = xe_vma_start(old), end = xe_vma_end(old);
2638 			int num_remap_ops = 0;
2639 
2640 			if (op->base.remap.prev)
2641 				start = op->base.remap.prev->va.addr +
2642 					op->base.remap.prev->va.range;
2643 			if (op->base.remap.next)
2644 				end = op->base.remap.next->va.addr;
2645 
2646 			if (xe_vma_is_cpu_addr_mirror(old) &&
2647 			    xe_svm_has_mapping(vm, start, end)) {
2648 				if (vops->flags & XE_VMA_OPS_FLAG_MADVISE)
2649 					xe_svm_unmap_address_range(vm, start, end);
2650 				else
2651 					return -EBUSY;
2652 			}
2653 
2654 			op->remap.start = xe_vma_start(old);
2655 			op->remap.range = xe_vma_size(old);
2656 
2657 			flags |= op->base.remap.unmap->va->flags & XE_VMA_CREATE_MASK;
2658 			if (op->base.remap.prev) {
2659 				vma = new_vma(vm, op->base.remap.prev,
2660 					      &old->attr, flags);
2661 				if (IS_ERR(vma))
2662 					return PTR_ERR(vma);
2663 
2664 				op->remap.prev = vma;
2665 
2666 				/*
2667 				 * Userptr creates a new SG mapping so
2668 				 * we must also rebind.
2669 				 */
2670 				op->remap.skip_prev = skip ||
2671 					(!xe_vma_is_userptr(old) &&
2672 					IS_ALIGNED(xe_vma_end(vma),
2673 						   xe_vma_max_pte_size(old)));
2674 				if (op->remap.skip_prev) {
2675 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2676 					op->remap.range -=
2677 						xe_vma_end(vma) -
2678 						xe_vma_start(old);
2679 					op->remap.start = xe_vma_end(vma);
2680 					vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
2681 					       (ULL)op->remap.start,
2682 					       (ULL)op->remap.range);
2683 				} else {
2684 					num_remap_ops++;
2685 				}
2686 			}
2687 
2688 			if (op->base.remap.next) {
2689 				vma = new_vma(vm, op->base.remap.next,
2690 					      &old->attr, flags);
2691 				if (IS_ERR(vma))
2692 					return PTR_ERR(vma);
2693 
2694 				op->remap.next = vma;
2695 
2696 				/*
2697 				 * Userptr creates a new SG mapping so
2698 				 * we must also rebind.
2699 				 */
2700 				op->remap.skip_next = skip ||
2701 					(!xe_vma_is_userptr(old) &&
2702 					IS_ALIGNED(xe_vma_start(vma),
2703 						   xe_vma_max_pte_size(old)));
2704 				if (op->remap.skip_next) {
2705 					xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
2706 					op->remap.range -=
2707 						xe_vma_end(old) -
2708 						xe_vma_start(vma);
2709 					vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
2710 					       (ULL)op->remap.start,
2711 					       (ULL)op->remap.range);
2712 				} else {
2713 					num_remap_ops++;
2714 				}
2715 			}
2716 			if (!skip)
2717 				num_remap_ops++;
2718 
2719 			xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, num_remap_ops);
2720 			break;
2721 		}
2722 		case DRM_GPUVA_OP_UNMAP:
2723 			vma = gpuva_to_vma(op->base.unmap.va);
2724 
2725 			if (xe_vma_is_cpu_addr_mirror(vma) &&
2726 			    xe_svm_has_mapping(vm, xe_vma_start(vma),
2727 					       xe_vma_end(vma)))
2728 				return -EBUSY;
2729 
2730 			if (!xe_vma_is_cpu_addr_mirror(vma))
2731 				xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);
2732 			break;
2733 		case DRM_GPUVA_OP_PREFETCH:
2734 			vma = gpuva_to_vma(op->base.prefetch.va);
2735 
2736 			if (xe_vma_is_userptr(vma)) {
2737 				err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
2738 				if (err)
2739 					return err;
2740 			}
2741 
2742 			if (xe_vma_is_cpu_addr_mirror(vma))
2743 				xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask,
2744 							      op->prefetch_range.ranges_count);
2745 			else
2746 				xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);
2747 
2748 			break;
2749 		default:
2750 			drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2751 		}
2752 
2753 		err = xe_vma_op_commit(vm, op);
2754 		if (err)
2755 			return err;
2756 	}
2757 
2758 	return 0;
2759 }
2760 
2761 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
2762 			     bool post_commit, bool prev_post_commit,
2763 			     bool next_post_commit)
2764 {
2765 	lockdep_assert_held_write(&vm->lock);
2766 
2767 	switch (op->base.op) {
2768 	case DRM_GPUVA_OP_MAP:
2769 		if (op->map.vma) {
2770 			prep_vma_destroy(vm, op->map.vma, post_commit);
2771 			xe_vma_destroy_unlocked(op->map.vma);
2772 		}
2773 		break;
2774 	case DRM_GPUVA_OP_UNMAP:
2775 	{
2776 		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2777 
2778 		if (vma) {
2779 			xe_svm_notifier_lock(vm);
2780 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2781 			xe_svm_notifier_unlock(vm);
2782 			if (post_commit)
2783 				xe_vm_insert_vma(vm, vma);
2784 		}
2785 		break;
2786 	}
2787 	case DRM_GPUVA_OP_REMAP:
2788 	{
2789 		struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
2790 
2791 		if (op->remap.prev) {
2792 			prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
2793 			xe_vma_destroy_unlocked(op->remap.prev);
2794 		}
2795 		if (op->remap.next) {
2796 			prep_vma_destroy(vm, op->remap.next, next_post_commit);
2797 			xe_vma_destroy_unlocked(op->remap.next);
2798 		}
2799 		if (vma) {
2800 			xe_svm_notifier_lock(vm);
2801 			vma->gpuva.flags &= ~XE_VMA_DESTROYED;
2802 			xe_svm_notifier_unlock(vm);
2803 			if (post_commit)
2804 				xe_vm_insert_vma(vm, vma);
2805 		}
2806 		break;
2807 	}
2808 	case DRM_GPUVA_OP_PREFETCH:
2809 		/* Nothing to do */
2810 		break;
2811 	default:
2812 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2813 	}
2814 }
2815 
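/* Unwind all committed ops of the given op lists in reverse order. */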
2816 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
2817 				     struct drm_gpuva_ops **ops,
2818 				     int num_ops_list)
2819 {
2820 	int i;
2821 
2822 	for (i = num_ops_list - 1; i >= 0; --i) {
2823 		struct drm_gpuva_ops *__ops = ops[i];
2824 		struct drm_gpuva_op *__op;
2825 
2826 		if (!__ops)
2827 			continue;
2828 
2829 		drm_gpuva_for_each_op_reverse(__op, __ops) {
2830 			struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
2831 
2832 			xe_vma_op_unwind(vm, op,
2833 					 op->flags & XE_VMA_OP_COMMITTED,
2834 					 op->flags & XE_VMA_OP_PREV_COMMITTED,
2835 					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
2836 		}
2837 	}
2838 }
2839 
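/*
 * Lock the backing BO of @vma if it is external to the VM and, if requested,
 * validate it.
 */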
2840 static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
2841 				 bool res_evict, bool validate)
2842 {
2843 	struct xe_bo *bo = xe_vma_bo(vma);
2844 	struct xe_vm *vm = xe_vma_vm(vma);
2845 	int err = 0;
2846 
2847 	if (bo) {
2848 		if (!bo->vm)
2849 			err = drm_exec_lock_obj(exec, &bo->ttm.base);
2850 		if (!err && validate)
2851 			err = xe_bo_validate(bo, vm,
2852 					     !xe_vm_in_preempt_fence_mode(vm) &&
2853 					     res_evict, exec);
2854 	}
2855 
2856 	return err;
2857 }
2858 
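/*
 * Return -EBUSY if a user fence attached to @vma has not signaled yet,
 * otherwise drop the reference to it.
 */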
2859 static int check_ufence(struct xe_vma *vma)
2860 {
2861 	if (vma->ufence) {
2862 		struct xe_user_fence * const f = vma->ufence;
2863 
2864 		if (!xe_sync_ufence_get_status(f))
2865 			return -EBUSY;
2866 
2867 		vma->ufence = NULL;
2868 		xe_sync_ufence_put(f);
2869 	}
2870 
2871 	return 0;
2872 }
2873 
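/*
 * Migrate the SVM ranges attached to a prefetch op to their target location
 * (VRAM or SMEM) and populate their pages.
 */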
2874 static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
2875 {
2876 	bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
2877 	struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
2878 	struct xe_tile *tile = op->prefetch_range.tile;
2879 	int err = 0;
2880 
2881 	struct xe_svm_range *svm_range;
2882 	struct drm_gpusvm_ctx ctx = {};
2883 	unsigned long i;
2884 
2885 	if (!xe_vma_is_cpu_addr_mirror(vma))
2886 		return 0;
2887 
2888 	ctx.read_only = xe_vma_read_only(vma);
2889 	ctx.devmem_possible = devmem_possible;
2890 	ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
2891 	ctx.device_private_page_owner = xe_svm_devm_owner(vm->xe);
2892 
2893 	/* TODO: Thread the migration */
2894 	xa_for_each(&op->prefetch_range.range, i, svm_range) {
2895 		if (!tile)
2896 			xe_svm_range_migrate_to_smem(vm, svm_range);
2897 
2898 		if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, !!tile)) {
2899 			err = xe_svm_alloc_vram(tile, svm_range, &ctx);
2900 			if (err) {
2901 				drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
2902 					vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
2903 				return -ENODATA;
2904 			}
2905 			xe_svm_range_debug(svm_range, "PREFETCH - RANGE MIGRATED TO VRAM");
2906 		}
2907 
2908 		err = xe_svm_range_get_pages(vm, svm_range, &ctx);
2909 		if (err) {
2910 			drm_dbg(&vm->xe->drm, "Get pages failed, asid=%u, gpusvm=%p, errno=%pe\n",
2911 				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
2912 			if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM)
2913 				err = -ENODATA;
2914 			return err;
2915 		}
2916 		xe_svm_range_debug(svm_range, "PREFETCH - RANGE GET PAGES DONE");
2917 	}
2918 
2919 	return err;
2920 }
2921 
2922 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
2923 			    struct xe_vma_ops *vops, struct xe_vma_op *op)
2924 {
2925 	int err = 0;
2926 	bool res_evict;
2927 
2928 	/*
2929 	 * We only allow evicting a BO within the VM if it is not part of an
2930 	 * array of binds, as one bind in an array could otherwise evict a BO
2931 	 * that another bind in the same array depends on.
2932 	 */
2933 	res_evict = !(vops->flags & XE_VMA_OPS_ARRAY_OF_BINDS);
2934 
2935 	switch (op->base.op) {
2936 	case DRM_GPUVA_OP_MAP:
2937 		if (!op->map.invalidate_on_bind)
2938 			err = vma_lock_and_validate(exec, op->map.vma,
2939 						    res_evict,
2940 						    !xe_vm_in_fault_mode(vm) ||
2941 						    op->map.immediate);
2942 		break;
2943 	case DRM_GPUVA_OP_REMAP:
2944 		err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
2945 		if (err)
2946 			break;
2947 
2948 		err = vma_lock_and_validate(exec,
2949 					    gpuva_to_vma(op->base.remap.unmap->va),
2950 					    res_evict, false);
2951 		if (!err && op->remap.prev)
2952 			err = vma_lock_and_validate(exec, op->remap.prev,
2953 						    res_evict, true);
2954 		if (!err && op->remap.next)
2955 			err = vma_lock_and_validate(exec, op->remap.next,
2956 						    res_evict, true);
2957 		break;
2958 	case DRM_GPUVA_OP_UNMAP:
2959 		err = check_ufence(gpuva_to_vma(op->base.unmap.va));
2960 		if (err)
2961 			break;
2962 
2963 		err = vma_lock_and_validate(exec,
2964 					    gpuva_to_vma(op->base.unmap.va),
2965 					    res_evict, false);
2966 		break;
2967 	case DRM_GPUVA_OP_PREFETCH:
2968 	{
2969 		struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
2970 		u32 region;
2971 
2972 		if (!xe_vma_is_cpu_addr_mirror(vma)) {
2973 			region = op->prefetch.region;
2974 			xe_assert(vm->xe, region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC ||
2975 				  region <= ARRAY_SIZE(region_to_mem_type));
2976 		}
2977 
2978 		err = vma_lock_and_validate(exec,
2979 					    gpuva_to_vma(op->base.prefetch.va),
2980 					    res_evict, false);
2981 		if (!err && !xe_vma_has_no_bo(vma))
2982 			err = xe_bo_migrate(xe_vma_bo(vma),
2983 					    region_to_mem_type[region],
2984 					    NULL,
2985 					    exec);
2986 		break;
2987 	}
2988 	default:
2989 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2990 	}
2991 
2992 	return err;
2993 }
2994 
2995 static int vm_bind_ioctl_ops_prefetch_ranges(struct xe_vm *vm, struct xe_vma_ops *vops)
2996 {
2997 	struct xe_vma_op *op;
2998 	int err;
2999 
3000 	if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH))
3001 		return 0;
3002 
3003 	list_for_each_entry(op, &vops->list, link) {
3004 		if (op->base.op  == DRM_GPUVA_OP_PREFETCH) {
3005 			err = prefetch_ranges(vm, op);
3006 			if (err)
3007 				return err;
3008 		}
3009 	}
3010 
3011 	return 0;
3012 }
3013 
3014 static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
3015 					   struct xe_vm *vm,
3016 					   struct xe_vma_ops *vops)
3017 {
3018 	struct xe_vma_op *op;
3019 	int err;
3020 
3021 	err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
3022 	if (err)
3023 		return err;
3024 
3025 	list_for_each_entry(op, &vops->list, link) {
3026 		err = op_lock_and_prep(exec, vm, vops, op);
3027 		if (err)
3028 			return err;
3029 	}
3030 
3031 #ifdef TEST_VM_OPS_ERROR
3032 	if (vops->inject_error &&
3033 	    vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK)
3034 		return -ENOSPC;
3035 #endif
3036 
3037 	return 0;
3038 }
3039 
3040 static void op_trace(struct xe_vma_op *op)
3041 {
3042 	switch (op->base.op) {
3043 	case DRM_GPUVA_OP_MAP:
3044 		trace_xe_vma_bind(op->map.vma);
3045 		break;
3046 	case DRM_GPUVA_OP_REMAP:
3047 		trace_xe_vma_unbind(gpuva_to_vma(op->base.remap.unmap->va));
3048 		if (op->remap.prev)
3049 			trace_xe_vma_bind(op->remap.prev);
3050 		if (op->remap.next)
3051 			trace_xe_vma_bind(op->remap.next);
3052 		break;
3053 	case DRM_GPUVA_OP_UNMAP:
3054 		trace_xe_vma_unbind(gpuva_to_vma(op->base.unmap.va));
3055 		break;
3056 	case DRM_GPUVA_OP_PREFETCH:
3057 		trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va));
3058 		break;
3059 	case DRM_GPUVA_OP_DRIVER:
3060 		break;
3061 	default:
3062 		XE_WARN_ON("NOT POSSIBLE");
3063 	}
3064 }
3065 
3066 static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops)
3067 {
3068 	struct xe_vma_op *op;
3069 
3070 	list_for_each_entry(op, &vops->list, link)
3071 		op_trace(op);
3072 }
3073 
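/*
 * Assign an exec queue to the PT update ops of each tile and return the number
 * of tiles that have work to do.
 */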
3074 static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops)
3075 {
3076 	struct xe_exec_queue *q = vops->q;
3077 	struct xe_tile *tile;
3078 	int number_tiles = 0;
3079 	u8 id;
3080 
3081 	for_each_tile(tile, vm->xe, id) {
3082 		if (vops->pt_update_ops[id].num_ops)
3083 			++number_tiles;
3084 
3085 		if (vops->pt_update_ops[id].q)
3086 			continue;
3087 
3088 		if (q) {
3089 			vops->pt_update_ops[id].q = q;
3090 			if (vm->pt_root[id] && !list_empty(&q->multi_gt_list))
3091 				q = list_next_entry(q, multi_gt_list);
3092 		} else {
3093 			vops->pt_update_ops[id].q = vm->q[id];
3094 		}
3095 	}
3096 
3097 	return number_tiles;
3098 }
3099 
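/*
 * Prepare and run the page-table update jobs for each tile and return a
 * composite fence that also tracks the associated TLB invalidations.
 */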
3100 static struct dma_fence *ops_execute(struct xe_vm *vm,
3101 				     struct xe_vma_ops *vops)
3102 {
3103 	struct xe_tile *tile;
3104 	struct dma_fence *fence = NULL;
3105 	struct dma_fence **fences = NULL;
3106 	struct dma_fence_array *cf = NULL;
3107 	int number_tiles = 0, current_fence = 0, n_fence = 0, err;
3108 	u8 id;
3109 
3110 	number_tiles = vm_ops_setup_tile_args(vm, vops);
3111 	if (number_tiles == 0)
3112 		return ERR_PTR(-ENODATA);
3113 
3114 	if (vops->flags & XE_VMA_OPS_FLAG_SKIP_TLB_WAIT) {
3115 		for_each_tile(tile, vm->xe, id)
3116 			++n_fence;
3117 	} else {
3118 		for_each_tile(tile, vm->xe, id)
3119 			n_fence += (1 + XE_MAX_GT_PER_TILE);
3120 	}
3121 
3122 	fences = kmalloc_array(n_fence, sizeof(*fences), GFP_KERNEL);
3123 	if (!fences) {
3124 		fence = ERR_PTR(-ENOMEM);
3125 		goto err_trace;
3126 	}
3127 
3128 	cf = dma_fence_array_alloc(n_fence);
3129 	if (!cf) {
3130 		fence = ERR_PTR(-ENOMEM);
3131 		goto err_out;
3132 	}
3133 
3134 	for_each_tile(tile, vm->xe, id) {
3135 		if (!vops->pt_update_ops[id].num_ops)
3136 			continue;
3137 
3138 		err = xe_pt_update_ops_prepare(tile, vops);
3139 		if (err) {
3140 			fence = ERR_PTR(err);
3141 			goto err_out;
3142 		}
3143 	}
3144 
3145 	trace_xe_vm_ops_execute(vops);
3146 
3147 	for_each_tile(tile, vm->xe, id) {
3148 		struct xe_exec_queue *q = vops->pt_update_ops[tile->id].q;
3149 		int i;
3150 
3151 		fence = NULL;
3152 		if (!vops->pt_update_ops[id].num_ops)
3153 			goto collect_fences;
3154 
3155 		fence = xe_pt_update_ops_run(tile, vops);
3156 		if (IS_ERR(fence))
3157 			goto err_out;
3158 
3159 collect_fences:
3160 		fences[current_fence++] = fence ?: dma_fence_get_stub();
3161 		if (vops->flags & XE_VMA_OPS_FLAG_SKIP_TLB_WAIT)
3162 			continue;
3163 
3164 		xe_migrate_job_lock(tile->migrate, q);
3165 		for_each_tlb_inval(i)
3166 			fences[current_fence++] =
3167 				xe_exec_queue_tlb_inval_last_fence_get(q, vm, i);
3168 		xe_migrate_job_unlock(tile->migrate, q);
3169 	}
3170 
3171 	xe_assert(vm->xe, current_fence == n_fence);
3172 	dma_fence_array_init(cf, n_fence, fences, dma_fence_context_alloc(1),
3173 			     1, false);
3174 	fence = &cf->base;
3175 
3176 	for_each_tile(tile, vm->xe, id) {
3177 		if (!vops->pt_update_ops[id].num_ops)
3178 			continue;
3179 
3180 		xe_pt_update_ops_fini(tile, vops);
3181 	}
3182 
3183 	return fence;
3184 
3185 err_out:
3186 	for_each_tile(tile, vm->xe, id) {
3187 		if (!vops->pt_update_ops[id].num_ops)
3188 			continue;
3189 
3190 		xe_pt_update_ops_abort(tile, vops);
3191 	}
3192 	while (current_fence)
3193 		dma_fence_put(fences[--current_fence]);
3194 	kfree(fences);
3195 	kfree(cf);
3196 
3197 err_trace:
3198 	trace_xe_vm_ops_fail(vm);
3199 	return fence;
3200 }
3201 
3202 static void vma_add_ufence(struct xe_vma *vma, struct xe_user_fence *ufence)
3203 {
3204 	if (vma->ufence)
3205 		xe_sync_ufence_put(vma->ufence);
3206 	vma->ufence = __xe_sync_ufence_get(ufence);
3207 }
3208 
3209 static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op,
3210 			  struct xe_user_fence *ufence)
3211 {
3212 	switch (op->base.op) {
3213 	case DRM_GPUVA_OP_MAP:
3214 		vma_add_ufence(op->map.vma, ufence);
3215 		break;
3216 	case DRM_GPUVA_OP_REMAP:
3217 		if (op->remap.prev)
3218 			vma_add_ufence(op->remap.prev, ufence);
3219 		if (op->remap.next)
3220 			vma_add_ufence(op->remap.next, ufence);
3221 		break;
3222 	case DRM_GPUVA_OP_UNMAP:
3223 		break;
3224 	case DRM_GPUVA_OP_PREFETCH:
3225 		vma_add_ufence(gpuva_to_vma(op->base.prefetch.va), ufence);
3226 		break;
3227 	default:
3228 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
3229 	}
3230 }
3231 
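/*
 * Attach any user fence to the resulting VMAs, destroy VMAs removed by UNMAP
 * and REMAP ops, and signal the sync entries with @fence.
 */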
3232 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
3233 				   struct dma_fence *fence)
3234 {
3235 	struct xe_user_fence *ufence;
3236 	struct xe_vma_op *op;
3237 	int i;
3238 
3239 	ufence = find_ufence_get(vops->syncs, vops->num_syncs);
3240 	list_for_each_entry(op, &vops->list, link) {
3241 		if (ufence)
3242 			op_add_ufence(vm, op, ufence);
3243 
3244 		if (op->base.op == DRM_GPUVA_OP_UNMAP)
3245 			xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
3246 		else if (op->base.op == DRM_GPUVA_OP_REMAP)
3247 			xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
3248 				       fence);
3249 	}
3250 	if (ufence)
3251 		xe_sync_ufence_put(ufence);
3252 	if (fence) {
3253 		for (i = 0; i < vops->num_syncs; i++)
3254 			xe_sync_entry_signal(vops->syncs + i, fence);
3255 	}
3256 }
3257 
3258 static struct dma_fence *vm_bind_ioctl_ops_execute(struct xe_vm *vm,
3259 						   struct xe_vma_ops *vops)
3260 {
3261 	struct xe_validation_ctx ctx;
3262 	struct drm_exec exec;
3263 	struct dma_fence *fence;
3264 	int err = 0;
3265 
3266 	lockdep_assert_held_write(&vm->lock);
3267 
3268 	xe_validation_guard(&ctx, &vm->xe->val, &exec,
3269 			    ((struct xe_val_flags) {
3270 				    .interruptible = true,
3271 				    .exec_ignore_duplicates = true,
3272 			    }), err) {
3273 		err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
3274 		drm_exec_retry_on_contention(&exec);
3275 		xe_validation_retry_on_oom(&ctx, &err);
3276 		if (err)
3277 			return ERR_PTR(err);
3278 
3279 		xe_vm_set_validation_exec(vm, &exec);
3280 		fence = ops_execute(vm, vops);
3281 		xe_vm_set_validation_exec(vm, NULL);
3282 		if (IS_ERR(fence)) {
3283 			if (PTR_ERR(fence) == -ENODATA)
3284 				vm_bind_ioctl_ops_fini(vm, vops, NULL);
3285 			return fence;
3286 		}
3287 
3288 		vm_bind_ioctl_ops_fini(vm, vops, fence);
3289 	}
3290 
3291 	return err ? ERR_PTR(err) : fence;
3292 }
3293 ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
3294 
3295 #define SUPPORTED_FLAGS_STUB  \
3296 	(DRM_XE_VM_BIND_FLAG_READONLY | \
3297 	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
3298 	 DRM_XE_VM_BIND_FLAG_NULL | \
3299 	 DRM_XE_VM_BIND_FLAG_DUMPABLE | \
3300 	 DRM_XE_VM_BIND_FLAG_CHECK_PXP | \
3301 	 DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR | \
3302 	 DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
3303 
3304 #ifdef TEST_VM_OPS_ERROR
3305 #define SUPPORTED_FLAGS	(SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
3306 #else
3307 #define SUPPORTED_FLAGS	SUPPORTED_FLAGS_STUB
3308 #endif
3309 
3310 #define XE_64K_PAGE_MASK 0xffffull
3311 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
3312 
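/*
 * Copy the bind ops from userspace (when there is more than one) and sanity
 * check each op's arguments before any state is modified.
 */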
3313 static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
3314 				    struct drm_xe_vm_bind *args,
3315 				    struct drm_xe_vm_bind_op **bind_ops)
3316 {
3317 	int err;
3318 	int i;
3319 
3320 	if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
3321 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
3322 		return -EINVAL;
3323 
3324 	if (XE_IOCTL_DBG(xe, args->extensions))
3325 		return -EINVAL;
3326 
3327 	if (args->num_binds > 1) {
3328 		u64 __user *bind_user =
3329 			u64_to_user_ptr(args->vector_of_binds);
3330 
3331 		*bind_ops = kvmalloc_array(args->num_binds,
3332 					   sizeof(struct drm_xe_vm_bind_op),
3333 					   GFP_KERNEL | __GFP_ACCOUNT |
3334 					   __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
3335 		if (!*bind_ops)
3336 			return args->num_binds > 1 ? -ENOBUFS : -ENOMEM;
3337 
3338 		err = copy_from_user(*bind_ops, bind_user,
3339 				     sizeof(struct drm_xe_vm_bind_op) *
3340 				     args->num_binds);
3341 		if (XE_IOCTL_DBG(xe, err)) {
3342 			err = -EFAULT;
3343 			goto free_bind_ops;
3344 		}
3345 	} else {
3346 		*bind_ops = &args->bind;
3347 	}
3348 
3349 	for (i = 0; i < args->num_binds; ++i) {
3350 		u64 range = (*bind_ops)[i].range;
3351 		u64 addr = (*bind_ops)[i].addr;
3352 		u32 op = (*bind_ops)[i].op;
3353 		u32 flags = (*bind_ops)[i].flags;
3354 		u32 obj = (*bind_ops)[i].obj;
3355 		u64 obj_offset = (*bind_ops)[i].obj_offset;
3356 		u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
3357 		bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
3358 		bool is_cpu_addr_mirror = flags &
3359 			DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
3360 		u16 pat_index = (*bind_ops)[i].pat_index;
3361 		u16 coh_mode;
3362 
3363 		if (XE_IOCTL_DBG(xe, is_cpu_addr_mirror &&
3364 				 (!xe_vm_in_fault_mode(vm) ||
3365 				 !IS_ENABLED(CONFIG_DRM_XE_GPUSVM)))) {
3366 			err = -EINVAL;
3367 			goto free_bind_ops;
3368 		}
3369 
3370 		if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
3371 			err = -EINVAL;
3372 			goto free_bind_ops;
3373 		}
3374 
3375 		pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
3376 		(*bind_ops)[i].pat_index = pat_index;
3377 		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
3378 		if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
3379 			err = -EINVAL;
3380 			goto free_bind_ops;
3381 		}
3382 
3383 		if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
3384 			err = -EINVAL;
3385 			goto free_bind_ops;
3386 		}
3387 
3388 		if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
3389 		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
3390 		    XE_IOCTL_DBG(xe, obj && (is_null || is_cpu_addr_mirror)) ||
3391 		    XE_IOCTL_DBG(xe, obj_offset && (is_null ||
3392 						    is_cpu_addr_mirror)) ||
3393 		    XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
3394 				 (is_null || is_cpu_addr_mirror)) ||
3395 		    XE_IOCTL_DBG(xe, !obj &&
3396 				 op == DRM_XE_VM_BIND_OP_MAP &&
3397 				 !is_null && !is_cpu_addr_mirror) ||
3398 		    XE_IOCTL_DBG(xe, !obj &&
3399 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
3400 		    XE_IOCTL_DBG(xe, addr &&
3401 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
3402 		    XE_IOCTL_DBG(xe, range &&
3403 				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
3404 		    XE_IOCTL_DBG(xe, obj &&
3405 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
3406 		    XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
3407 				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
3408 		    XE_IOCTL_DBG(xe, op == DRM_XE_VM_BIND_OP_MAP_USERPTR &&
3409 				 !IS_ENABLED(CONFIG_DRM_GPUSVM)) ||
3410 		    XE_IOCTL_DBG(xe, obj &&
3411 				 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
3412 		    XE_IOCTL_DBG(xe, prefetch_region &&
3413 				 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
3414 		    XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
3415 				      /* Guard against undefined shift in BIT(prefetch_region) */
3416 				      (prefetch_region >= (sizeof(xe->info.mem_region_mask) * 8) ||
3417 				      !(BIT(prefetch_region) & xe->info.mem_region_mask)))) ||
3418 		    XE_IOCTL_DBG(xe, obj &&
3419 				 op == DRM_XE_VM_BIND_OP_UNMAP) ||
3420 		    XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) &&
3421 				 (!is_cpu_addr_mirror || op != DRM_XE_VM_BIND_OP_MAP))) {
3422 			err = -EINVAL;
3423 			goto free_bind_ops;
3424 		}
3425 
3426 		if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
3427 		    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
3428 		    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
3429 		    XE_IOCTL_DBG(xe, !range &&
3430 				 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
3431 			err = -EINVAL;
3432 			goto free_bind_ops;
3433 		}
3434 	}
3435 
3436 	return 0;
3437 
3438 free_bind_ops:
3439 	if (args->num_binds > 1)
3440 		kvfree(*bind_ops);
3441 	*bind_ops = NULL;
3442 	return err;
3443 }
3444 
3445 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
3446 				       struct xe_exec_queue *q,
3447 				       struct xe_sync_entry *syncs,
3448 				       int num_syncs)
3449 {
3450 	struct dma_fence *fence = NULL;
3451 	int i, err = 0;
3452 
3453 	if (num_syncs) {
3454 		fence = xe_sync_in_fence_get(syncs, num_syncs,
3455 					     to_wait_exec_queue(vm, q), vm);
3456 		if (IS_ERR(fence))
3457 			return PTR_ERR(fence);
3458 
3459 		for (i = 0; i < num_syncs; i++)
3460 			xe_sync_entry_signal(&syncs[i], fence);
3461 	}
3462 
3463 	dma_fence_put(fence);
3464 
3465 	return err;
3466 }
3467 
3468 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
3469 			    struct xe_exec_queue *q,
3470 			    struct xe_sync_entry *syncs, u32 num_syncs)
3471 {
3472 	memset(vops, 0, sizeof(*vops));
3473 	INIT_LIST_HEAD(&vops->list);
3474 	vops->vm = vm;
3475 	vops->q = q;
3476 	vops->syncs = syncs;
3477 	vops->num_syncs = num_syncs;
3478 	vops->flags = 0;
3479 }
3480 
3481 static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
3482 					u64 addr, u64 range, u64 obj_offset,
3483 					u16 pat_index, u32 op, u32 bind_flags)
3484 {
3485 	u16 coh_mode;
3486 
3487 	if (XE_IOCTL_DBG(xe, range > xe_bo_size(bo)) ||
3488 	    XE_IOCTL_DBG(xe, obj_offset >
3489 			 xe_bo_size(bo) - range)) {
3490 		return -EINVAL;
3491 	}
3492 
3493 	/*
3494 	 * Some platforms require 64k VM_BIND alignment,
3495 	 * specifically those with XE_VRAM_FLAGS_NEED64K.
3496 	 *
3497 	 * Other platforms may have BOs set to 64k physical placement,
3498 	 * but can be mapped at 4k offsets anyway. This check is only
3499 	 * there for the former case.
3500 	 */
3501 	if ((bo->flags & XE_BO_FLAG_INTERNAL_64K) &&
3502 	    (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)) {
3503 		if (XE_IOCTL_DBG(xe, obj_offset &
3504 				 XE_64K_PAGE_MASK) ||
3505 		    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
3506 		    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
3507 			return -EINVAL;
3508 		}
3509 	}
3510 
3511 	coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
3512 	if (bo->cpu_caching) {
3513 		if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
3514 				 bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
3515 			return -EINVAL;
3516 		}
3517 	} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
3518 		/*
3519 		 * An imported dma-buf from a different device should
3520 		 * require 1-way or 2-way coherency since we don't know
3521 		 * how it was mapped on the CPU. Just assume it is
3522 		 * potentially cached on the CPU side.
3523 		 */
3524 		return -EINVAL;
3525 	}
3526 
3527 	/* If a BO is protected it can only be mapped if the key is still valid */
3528 	if ((bind_flags & DRM_XE_VM_BIND_FLAG_CHECK_PXP) && xe_bo_is_protected(bo) &&
3529 	    op != DRM_XE_VM_BIND_OP_UNMAP && op != DRM_XE_VM_BIND_OP_UNMAP_ALL)
3530 		if (XE_IOCTL_DBG(xe, xe_pxp_bo_key_check(xe->pxp, bo) != 0))
3531 			return -ENOEXEC;
3532 
3533 	return 0;
3534 }
3535 
3536 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3537 {
3538 	struct xe_device *xe = to_xe_device(dev);
3539 	struct xe_file *xef = to_xe_file(file);
3540 	struct drm_xe_vm_bind *args = data;
3541 	struct drm_xe_sync __user *syncs_user;
3542 	struct xe_bo **bos = NULL;
3543 	struct drm_gpuva_ops **ops = NULL;
3544 	struct xe_vm *vm;
3545 	struct xe_exec_queue *q = NULL;
3546 	u32 num_syncs, num_ufence = 0;
3547 	struct xe_sync_entry *syncs = NULL;
3548 	struct drm_xe_vm_bind_op *bind_ops = NULL;
3549 	struct xe_vma_ops vops;
3550 	struct dma_fence *fence;
3551 	int err;
3552 	int i;
3553 
3554 	vm = xe_vm_lookup(xef, args->vm_id);
3555 	if (XE_IOCTL_DBG(xe, !vm))
3556 		return -EINVAL;
3557 
3558 	err = vm_bind_ioctl_check_args(xe, vm, args, &bind_ops);
3559 	if (err)
3560 		goto put_vm;
3561 
3562 	if (args->exec_queue_id) {
3563 		q = xe_exec_queue_lookup(xef, args->exec_queue_id);
3564 		if (XE_IOCTL_DBG(xe, !q)) {
3565 			err = -ENOENT;
3566 			goto free_bind_ops;
3567 		}
3568 
3569 		if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
3570 			err = -EINVAL;
3571 			goto put_exec_queue;
3572 		}
3573 	}
3574 
3575 	/* Ensure all UNMAPs visible */
3576 	xe_svm_flush(vm);
3577 
3578 	err = down_write_killable(&vm->lock);
3579 	if (err)
3580 		goto put_exec_queue;
3581 
3582 	if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
3583 		err = -ENOENT;
3584 		goto release_vm_lock;
3585 	}
3586 
3587 	for (i = 0; i < args->num_binds; ++i) {
3588 		u64 range = bind_ops[i].range;
3589 		u64 addr = bind_ops[i].addr;
3590 
3591 		if (XE_IOCTL_DBG(xe, range > vm->size) ||
3592 		    XE_IOCTL_DBG(xe, addr > vm->size - range)) {
3593 			err = -EINVAL;
3594 			goto release_vm_lock;
3595 		}
3596 	}
3597 
3598 	if (args->num_binds) {
3599 		bos = kvcalloc(args->num_binds, sizeof(*bos),
3600 			       GFP_KERNEL | __GFP_ACCOUNT |
3601 			       __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
3602 		if (!bos) {
3603 			err = -ENOMEM;
3604 			goto release_vm_lock;
3605 		}
3606 
3607 		ops = kvcalloc(args->num_binds, sizeof(*ops),
3608 			       GFP_KERNEL | __GFP_ACCOUNT |
3609 			       __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
3610 		if (!ops) {
3611 			err = -ENOMEM;
3612 			goto free_bos;
3613 		}
3614 	}
3615 
3616 	for (i = 0; i < args->num_binds; ++i) {
3617 		struct drm_gem_object *gem_obj;
3618 		u64 range = bind_ops[i].range;
3619 		u64 addr = bind_ops[i].addr;
3620 		u32 obj = bind_ops[i].obj;
3621 		u64 obj_offset = bind_ops[i].obj_offset;
3622 		u16 pat_index = bind_ops[i].pat_index;
3623 		u32 op = bind_ops[i].op;
3624 		u32 bind_flags = bind_ops[i].flags;
3625 
3626 		if (!obj)
3627 			continue;
3628 
3629 		gem_obj = drm_gem_object_lookup(file, obj);
3630 		if (XE_IOCTL_DBG(xe, !gem_obj)) {
3631 			err = -ENOENT;
3632 			goto put_obj;
3633 		}
3634 		bos[i] = gem_to_xe_bo(gem_obj);
3635 
3636 		err = xe_vm_bind_ioctl_validate_bo(xe, bos[i], addr, range,
3637 						   obj_offset, pat_index, op,
3638 						   bind_flags);
3639 		if (err)
3640 			goto put_obj;
3641 	}
3642 
3643 	if (args->num_syncs) {
3644 		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
3645 		if (!syncs) {
3646 			err = -ENOMEM;
3647 			goto put_obj;
3648 		}
3649 	}
3650 
3651 	syncs_user = u64_to_user_ptr(args->syncs);
3652 	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
3653 		struct xe_exec_queue *__q = q ?: vm->q[0];
3654 
3655 		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
3656 					  &syncs_user[num_syncs],
3657 					  __q->ufence_syncobj,
3658 					  ++__q->ufence_timeline_value,
3659 					  (xe_vm_in_lr_mode(vm) ?
3660 					   SYNC_PARSE_FLAG_LR_MODE : 0) |
3661 					  (!args->num_binds ?
3662 					   SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
3663 		if (err)
3664 			goto free_syncs;
3665 
3666 		if (xe_sync_is_ufence(&syncs[num_syncs]))
3667 			num_ufence++;
3668 	}
3669 
3670 	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
3671 		err = -EINVAL;
3672 		goto free_syncs;
3673 	}
3674 
3675 	if (!args->num_binds) {
3676 		err = -ENODATA;
3677 		goto free_syncs;
3678 	}
3679 
3680 	xe_vma_ops_init(&vops, vm, q, syncs, num_syncs);
3681 	if (args->num_binds > 1)
3682 		vops.flags |= XE_VMA_OPS_ARRAY_OF_BINDS;
3683 	for (i = 0; i < args->num_binds; ++i) {
3684 		u64 range = bind_ops[i].range;
3685 		u64 addr = bind_ops[i].addr;
3686 		u32 op = bind_ops[i].op;
3687 		u32 flags = bind_ops[i].flags;
3688 		u64 obj_offset = bind_ops[i].obj_offset;
3689 		u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
3690 		u16 pat_index = bind_ops[i].pat_index;
3691 
3692 		ops[i] = vm_bind_ioctl_ops_create(vm, &vops, bos[i], obj_offset,
3693 						  addr, range, op, flags,
3694 						  prefetch_region, pat_index);
3695 		if (IS_ERR(ops[i])) {
3696 			err = PTR_ERR(ops[i]);
3697 			ops[i] = NULL;
3698 			goto unwind_ops;
3699 		}
3700 
3701 		err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops);
3702 		if (err)
3703 			goto unwind_ops;
3704 
3705 #ifdef TEST_VM_OPS_ERROR
3706 		if (flags & FORCE_OP_ERROR) {
3707 			vops.inject_error = true;
3708 			vm->xe->vm_inject_error_position =
3709 				(vm->xe->vm_inject_error_position + 1) %
3710 				FORCE_OP_ERROR_COUNT;
3711 		}
3712 #endif
3713 	}
3714 
3715 	/* Nothing to do */
3716 	if (list_empty(&vops.list)) {
3717 		err = -ENODATA;
3718 		goto unwind_ops;
3719 	}
3720 
3721 	err = xe_vma_ops_alloc(&vops, args->num_binds > 1);
3722 	if (err)
3723 		goto unwind_ops;
3724 
3725 	err = vm_bind_ioctl_ops_prefetch_ranges(vm, &vops);
3726 	if (err)
3727 		goto unwind_ops;
3728 
3729 	fence = vm_bind_ioctl_ops_execute(vm, &vops);
3730 	if (IS_ERR(fence))
3731 		err = PTR_ERR(fence);
3732 	else
3733 		dma_fence_put(fence);
3734 
3735 unwind_ops:
3736 	if (err && err != -ENODATA)
3737 		vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
3738 	xe_vma_ops_fini(&vops);
3739 	for (i = args->num_binds - 1; i >= 0; --i)
3740 		if (ops[i])
3741 			drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
3742 free_syncs:
3743 	if (err == -ENODATA)
3744 		err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
3745 	while (num_syncs--)
3746 		xe_sync_entry_cleanup(&syncs[num_syncs]);
3747 
3748 	kfree(syncs);
3749 put_obj:
3750 	for (i = 0; i < args->num_binds; ++i)
3751 		xe_bo_put(bos[i]);
3752 
3753 	kvfree(ops);
3754 free_bos:
3755 	kvfree(bos);
3756 release_vm_lock:
3757 	up_write(&vm->lock);
3758 put_exec_queue:
3759 	if (q)
3760 		xe_exec_queue_put(q);
3761 free_bind_ops:
3762 	if (args->num_binds > 1)
3763 		kvfree(bind_ops);
3764 put_vm:
3765 	xe_vm_put(vm);
3766 	return err;
3767 }
3768 
3769 /**
3770  * xe_vm_bind_kernel_bo - bind a kernel BO to a VM
3771  * @vm: VM to bind the BO to
3772  * @bo: BO to bind
3773  * @q: exec queue to use for the bind (optional)
3774  * @addr: address at which to bind the BO
3775  * @cache_lvl: PAT cache level to use
3776  *
3777  * Execute a VM bind map operation on a kernel-owned BO to bind it into a
3778  * kernel-owned VM.
3779  *
3780  * Returns a dma_fence to track the binding completion if the job to do so was
3781  * successfully submitted, an error pointer otherwise.
3782  */
3783 struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
3784 				       struct xe_exec_queue *q, u64 addr,
3785 				       enum xe_cache_level cache_lvl)
3786 {
3787 	struct xe_vma_ops vops;
3788 	struct drm_gpuva_ops *ops = NULL;
3789 	struct dma_fence *fence;
3790 	int err;
3791 
3792 	xe_bo_get(bo);
3793 	xe_vm_get(vm);
3794 	if (q)
3795 		xe_exec_queue_get(q);
3796 
3797 	down_write(&vm->lock);
3798 
3799 	xe_vma_ops_init(&vops, vm, q, NULL, 0);
3800 
3801 	ops = vm_bind_ioctl_ops_create(vm, &vops, bo, 0, addr, xe_bo_size(bo),
3802 				       DRM_XE_VM_BIND_OP_MAP, 0, 0,
3803 				       vm->xe->pat.idx[cache_lvl]);
3804 	if (IS_ERR(ops)) {
3805 		err = PTR_ERR(ops);
3806 		goto release_vm_lock;
3807 	}
3808 
3809 	err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
3810 	if (err)
3811 		goto release_vm_lock;
3812 
3813 	xe_assert(vm->xe, !list_empty(&vops.list));
3814 
3815 	err = xe_vma_ops_alloc(&vops, false);
3816 	if (err)
3817 		goto unwind_ops;
3818 
3819 	fence = vm_bind_ioctl_ops_execute(vm, &vops);
3820 	if (IS_ERR(fence))
3821 		err = PTR_ERR(fence);
3822 
3823 unwind_ops:
3824 	if (err && err != -ENODATA)
3825 		vm_bind_ioctl_ops_unwind(vm, &ops, 1);
3826 
3827 	xe_vma_ops_fini(&vops);
3828 	drm_gpuva_ops_free(&vm->gpuvm, ops);
3829 
3830 release_vm_lock:
3831 	up_write(&vm->lock);
3832 
3833 	if (q)
3834 		xe_exec_queue_put(q);
3835 	xe_vm_put(vm);
3836 	xe_bo_put(bo);
3837 
3838 	if (err)
3839 		fence = ERR_PTR(err);
3840 
3841 	return fence;
3842 }
3843 
3844 /**
3845  * xe_vm_lock() - Lock the vm's dma_resv object
3846  * @vm: The struct xe_vm whose lock is to be locked
3847  * @intr: Whether to perform any wait interruptible
3848  *
3849  * Return: 0 on success, -EINTR if @intr is true and the wait for a
3850  * contended lock was interrupted. If @intr is false, the function
3851  * always returns 0.
3852  */
3853 int xe_vm_lock(struct xe_vm *vm, bool intr)
3854 {
3855 	int ret;
3856 
3857 	if (intr)
3858 		ret = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
3859 	else
3860 		ret = dma_resv_lock(xe_vm_resv(vm), NULL);
3861 
3862 	return ret;
3863 }
3864 
3865 /**
3866  * xe_vm_unlock() - Unlock the vm's dma_resv object
3867  * @vm: The struct xe_vm whose lock is to be released.
3868  *
3869  * Unlock the vm's dma_resv object that was locked by xe_vm_lock().
3870  */
3871 void xe_vm_unlock(struct xe_vm *vm)
3872 {
3873 	dma_resv_unlock(xe_vm_resv(vm));
3874 }
3875 
3876 /**
3877  * xe_vm_range_tilemask_tlb_inval - Issue a TLB invalidation on this tilemask for an
3878  * address range
3879  * @vm: The VM
3880  * @start: start address
3881  * @end: end address
3882  * @tile_mask: mask of tiles whose GTs should receive the TLB invalidation
3883  *
3884  * Issue a range-based TLB invalidation for the GTs in @tile_mask.
3885  *
3886  * Returns 0 for success, negative error code otherwise.
3887  */
3888 int xe_vm_range_tilemask_tlb_inval(struct xe_vm *vm, u64 start,
3889 				   u64 end, u8 tile_mask)
3890 {
3891 	struct xe_tlb_inval_fence
3892 		fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
3893 	struct xe_tile *tile;
3894 	u32 fence_id = 0;
3895 	u8 id;
3896 	int err;
3897 
3898 	if (!tile_mask)
3899 		return 0;
3900 
3901 	for_each_tile(tile, vm->xe, id) {
3902 		if (!(tile_mask & BIT(id)))
3903 			continue;
3904 
3905 		xe_tlb_inval_fence_init(&tile->primary_gt->tlb_inval,
3906 					&fence[fence_id], true);
3907 
3908 		err = xe_tlb_inval_range(&tile->primary_gt->tlb_inval,
3909 					 &fence[fence_id], start, end,
3910 					 vm->usm.asid);
3911 		if (err)
3912 			goto wait;
3913 		++fence_id;
3914 
3915 		if (!tile->media_gt)
3916 			continue;
3917 
3918 		xe_tlb_inval_fence_init(&tile->media_gt->tlb_inval,
3919 					&fence[fence_id], true);
3920 
3921 		err = xe_tlb_inval_range(&tile->media_gt->tlb_inval,
3922 					 &fence[fence_id], start, end,
3923 					 vm->usm.asid);
3924 		if (err)
3925 			goto wait;
3926 		++fence_id;
3927 	}
3928 
3929 wait:
3930 	for (id = 0; id < fence_id; ++id)
3931 		xe_tlb_inval_fence_wait(&fence[id]);
3932 
3933 	return err;
3934 }
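
/*
 * Illustrative sketch (not part of the driver): building a tile mask that
 * covers every tile of the device and issuing a ranged TLB invalidation
 * with it. The start/end addresses are hypothetical.
 *
 *        struct xe_tile *tile;
 *        u8 tile_mask = 0;
 *        u8 id;
 *        int err;
 *
 *        for_each_tile(tile, vm->xe, id)
 *                tile_mask |= BIT(id);
 *
 *        err = xe_vm_range_tilemask_tlb_inval(vm, start, end, tile_mask);
 */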
3935 
3936 /**
3937  * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
3938  * @vma: VMA to invalidate
3939  *
3940  * Walks a list of page table leaves, zeroing (memset) the entries owned by
3941  * this VMA, issues a TLB invalidation, and blocks until the TLB invalidation
3942  * has completed.
3943  *
3944  * Returns 0 for success, negative error code otherwise.
3945  */
3946 int xe_vm_invalidate_vma(struct xe_vma *vma)
3947 {
3948 	struct xe_device *xe = xe_vma_vm(vma)->xe;
3949 	struct xe_vm *vm = xe_vma_vm(vma);
3950 	struct xe_tile *tile;
3951 	u8 tile_mask = 0;
3952 	int ret = 0;
3953 	u8 id;
3954 
3955 	xe_assert(xe, !xe_vma_is_null(vma));
3956 	xe_assert(xe, !xe_vma_is_cpu_addr_mirror(vma));
3957 	trace_xe_vma_invalidate(vma);
3958 
3959 	vm_dbg(&vm->xe->drm,
3960 	       "INVALIDATE: addr=0x%016llx, range=0x%016llx",
3961 		xe_vma_start(vma), xe_vma_size(vma));
3962 
3963 	/*
3964 	 * Check that we don't race with page-table updates; the tile_invalidated
3965 	 * update is safe.
3966 	 */
3967 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
3968 		if (xe_vma_is_userptr(vma)) {
3969 			lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 0) ||
3970 				       (lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
3971 					lockdep_is_held(&xe_vm_resv(vm)->lock.base)));
3972 
3973 			WARN_ON_ONCE(!mmu_interval_check_retry
3974 				     (&to_userptr_vma(vma)->userptr.notifier,
3975 				      to_userptr_vma(vma)->userptr.pages.notifier_seq));
3976 			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(vm),
3977 							     DMA_RESV_USAGE_BOOKKEEP));
3978 
3979 		} else {
3980 			xe_bo_assert_held(xe_vma_bo(vma));
3981 		}
3982 	}
3983 
3984 	for_each_tile(tile, xe, id)
3985 		if (xe_pt_zap_ptes(tile, vma))
3986 			tile_mask |= BIT(id);
3987 
3988 	xe_device_wmb(xe);
3989 
3990 	ret = xe_vm_range_tilemask_tlb_inval(xe_vma_vm(vma), xe_vma_start(vma),
3991 					     xe_vma_end(vma), tile_mask);
3992 
3993 	/* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
3994 	WRITE_ONCE(vma->tile_invalidated, vma->tile_mask);
3995 
3996 	return ret;
3997 }
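
/*
 * Illustrative sketch (not part of the driver): invalidating the GPU
 * mappings of a BO-backed VMA. Per the lockdep checks above, the BO's
 * dma_resv is expected to be held in this case; the calling context and
 * the use of xe_bo_lock()/xe_bo_unlock() are assumptions of this sketch.
 *
 *        int err;
 *
 *        xe_bo_lock(bo, false);
 *        err = xe_vm_invalidate_vma(vma);
 *        xe_bo_unlock(bo);
 */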
3998 
3999 int xe_vm_validate_protected(struct xe_vm *vm)
4000 {
4001 	struct drm_gpuva *gpuva;
4002 	int err = 0;
4003 
4004 	if (!vm)
4005 		return -ENODEV;
4006 
4007 	mutex_lock(&vm->snap_mutex);
4008 
4009 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
4010 		struct xe_vma *vma = gpuva_to_vma(gpuva);
4011 		struct xe_bo *bo = vma->gpuva.gem.obj ?
4012 			gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
4013 
4014 		if (!bo)
4015 			continue;
4016 
4017 		if (xe_bo_is_protected(bo)) {
4018 			err = xe_pxp_bo_key_check(vm->xe->pxp, bo);
4019 			if (err)
4020 				break;
4021 		}
4022 	}
4023 
4024 	mutex_unlock(&vm->snap_mutex);
4025 	return err;
4026 }
4027 
4028 struct xe_vm_snapshot {
4029 	unsigned long num_snaps;
4030 	struct {
4031 		u64 ofs, bo_ofs;
4032 		unsigned long len;
4033 		struct xe_bo *bo;
4034 		void *data;
4035 		struct mm_struct *mm;
4036 	} snap[];
4037 };
4038 
4039 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
4040 {
4041 	unsigned long num_snaps = 0, i;
4042 	struct xe_vm_snapshot *snap = NULL;
4043 	struct drm_gpuva *gpuva;
4044 
4045 	if (!vm)
4046 		return NULL;
4047 
4048 	mutex_lock(&vm->snap_mutex);
4049 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
4050 		if (gpuva->flags & XE_VMA_DUMPABLE)
4051 			num_snaps++;
4052 	}
4053 
4054 	if (num_snaps)
4055 		snap = kvzalloc(offsetof(struct xe_vm_snapshot, snap[num_snaps]), GFP_NOWAIT);
4056 	if (!snap) {
4057 		snap = num_snaps ? ERR_PTR(-ENOMEM) : ERR_PTR(-ENODEV);
4058 		goto out_unlock;
4059 	}
4060 
4061 	snap->num_snaps = num_snaps;
4062 	i = 0;
4063 	drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
4064 		struct xe_vma *vma = gpuva_to_vma(gpuva);
4065 		struct xe_bo *bo = vma->gpuva.gem.obj ?
4066 			gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
4067 
4068 		if (!(gpuva->flags & XE_VMA_DUMPABLE))
4069 			continue;
4070 
4071 		snap->snap[i].ofs = xe_vma_start(vma);
4072 		snap->snap[i].len = xe_vma_size(vma);
4073 		if (bo) {
4074 			snap->snap[i].bo = xe_bo_get(bo);
4075 			snap->snap[i].bo_ofs = xe_vma_bo_offset(vma);
4076 		} else if (xe_vma_is_userptr(vma)) {
4077 			struct mm_struct *mm =
4078 				to_userptr_vma(vma)->userptr.notifier.mm;
4079 
4080 			if (mmget_not_zero(mm))
4081 				snap->snap[i].mm = mm;
4082 			else
4083 				snap->snap[i].data = ERR_PTR(-EFAULT);
4084 
4085 			snap->snap[i].bo_ofs = xe_vma_userptr(vma);
4086 		} else {
4087 			snap->snap[i].data = ERR_PTR(-ENOENT);
4088 		}
4089 		i++;
4090 	}
4091 
4092 out_unlock:
4093 	mutex_unlock(&vm->snap_mutex);
4094 	return snap;
4095 }
4096 
4097 void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
4098 {
4099 	if (IS_ERR_OR_NULL(snap))
4100 		return;
4101 
4102 	for (int i = 0; i < snap->num_snaps; i++) {
4103 		struct xe_bo *bo = snap->snap[i].bo;
4104 		int err;
4105 
4106 		if (IS_ERR(snap->snap[i].data))
4107 			continue;
4108 
4109 		snap->snap[i].data = kvmalloc(snap->snap[i].len, GFP_USER);
4110 		if (!snap->snap[i].data) {
4111 			snap->snap[i].data = ERR_PTR(-ENOMEM);
4112 			goto cleanup_bo;
4113 		}
4114 
4115 		if (bo) {
4116 			err = xe_bo_read(bo, snap->snap[i].bo_ofs,
4117 					 snap->snap[i].data, snap->snap[i].len);
4118 		} else {
4119 			void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
4120 
4121 			kthread_use_mm(snap->snap[i].mm);
4122 			if (!copy_from_user(snap->snap[i].data, userptr, snap->snap[i].len))
4123 				err = 0;
4124 			else
4125 				err = -EFAULT;
4126 			kthread_unuse_mm(snap->snap[i].mm);
4127 
4128 			mmput(snap->snap[i].mm);
4129 			snap->snap[i].mm = NULL;
4130 		}
4131 
4132 		if (err) {
4133 			kvfree(snap->snap[i].data);
4134 			snap->snap[i].data = ERR_PTR(err);
4135 		}
4136 
4137 cleanup_bo:
4138 		xe_bo_put(bo);
4139 		snap->snap[i].bo = NULL;
4140 	}
4141 }
4142 
4143 void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
4144 {
4145 	unsigned long i, j;
4146 
4147 	if (IS_ERR_OR_NULL(snap)) {
4148 		drm_printf(p, "[0].error: %li\n", PTR_ERR(snap));
4149 		return;
4150 	}
4151 
4152 	for (i = 0; i < snap->num_snaps; i++) {
4153 		drm_printf(p, "[%llx].length: 0x%lx\n", snap->snap[i].ofs, snap->snap[i].len);
4154 
4155 		if (IS_ERR(snap->snap[i].data)) {
4156 			drm_printf(p, "[%llx].error: %li\n", snap->snap[i].ofs,
4157 				   PTR_ERR(snap->snap[i].data));
4158 			continue;
4159 		}
4160 
4161 		drm_printf(p, "[%llx].data: ", snap->snap[i].ofs);
4162 
4163 		for (j = 0; j < snap->snap[i].len; j += sizeof(u32)) {
4164 			u32 *val = snap->snap[i].data + j;
4165 			char dumped[ASCII85_BUFSZ];
4166 
4167 			drm_puts(p, ascii85_encode(*val, dumped));
4168 		}
4169 
4170 		drm_puts(p, "\n");
4171 
4172 		if (drm_coredump_printer_is_full(p))
4173 			return;
4174 	}
4175 }
4176 
4177 void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
4178 {
4179 	unsigned long i;
4180 
4181 	if (IS_ERR_OR_NULL(snap))
4182 		return;
4183 
4184 	for (i = 0; i < snap->num_snaps; i++) {
4185 		if (!IS_ERR(snap->snap[i].data))
4186 			kvfree(snap->snap[i].data);
4187 		xe_bo_put(snap->snap[i].bo);
4188 		if (snap->snap[i].mm)
4189 			mmput(snap->snap[i].mm);
4190 	}
4191 	kvfree(snap);
4192 }
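
/*
 * Illustrative sketch (not part of the driver): the capture/print/free
 * life cycle of a VM snapshot as used for coredump-style dumps. Capture is
 * split in two steps so xe_vm_snapshot_capture() can run in a constrained
 * context (GFP_NOWAIT, no memory copies), while the actual memory reads
 * happen later in xe_vm_snapshot_capture_delayed() from a context that may
 * sleep. All helpers tolerate an ERR_PTR()/NULL snapshot. The printer "p"
 * is hypothetical.
 *
 *        struct xe_vm_snapshot *snap;
 *
 *        snap = xe_vm_snapshot_capture(vm);
 *
 *        (later, from a sleepable context)
 *        xe_vm_snapshot_capture_delayed(snap);
 *        xe_vm_snapshot_print(snap, p);
 *        xe_vm_snapshot_free(snap);
 */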
4193 
4194 /**
4195  * xe_vma_need_vram_for_atomic - Check if VMA needs VRAM migration for atomic operations
4196  * @xe: Pointer to the Xe device structure
4197  * @vma: Pointer to the virtual memory area (VMA) structure
4198  * @is_atomic: True when called from the page-fault path for an atomic operation
4199  *
4200  * This function determines whether the given VMA needs to be migrated to
4201  * VRAM in order to perform an atomic GPU operation.
4202  *
4203  * Return:
4204  *   1        - Migration to VRAM is required
4205  *   0        - Migration is not required
4206  *   -EACCES  - Atomic access not allowed by the memory attributes
4207  *
4208  */
4209 int xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma, bool is_atomic)
4210 {
4211 	u32 atomic_access = xe_vma_bo(vma) ? xe_vma_bo(vma)->attr.atomic_access :
4212 					     vma->attr.atomic_access;
4213 
4214 	if (!IS_DGFX(xe) || !is_atomic)
4215 		return false;
4216 
4217 	/*
4218 	 * NOTE: The checks implemented here are platform-specific. For
4219 	 * instance, on a device supporting CXL atomics, these would ideally
4220 	 * work universally without additional handling.
4221 	 */
4222 	switch (atomic_access) {
4223 	case DRM_XE_ATOMIC_DEVICE:
4224 		return !xe->info.has_device_atomics_on_smem;
4225 
4226 	case DRM_XE_ATOMIC_CPU:
4227 		return -EACCES;
4228 
4229 	case DRM_XE_ATOMIC_UNDEFINED:
4230 	case DRM_XE_ATOMIC_GLOBAL:
4231 	default:
4232 		return 1;
4233 	}
4234 }
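
/*
 * Illustrative sketch (not part of the driver): how a page-fault style
 * caller might act on the tri-state return value documented above. A
 * negative value (-EACCES for DRM_XE_ATOMIC_CPU) is propagated as an error;
 * the surrounding fault-handling context and the migrate_to_vram flag are
 * hypothetical.
 *
 *        int need_vram = xe_vma_need_vram_for_atomic(xe, vma, is_atomic);
 *
 *        if (need_vram < 0)
 *                return need_vram;
 *        else if (need_vram)
 *                migrate_to_vram = true;
 */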
4235 
4236 static int xe_vm_alloc_vma(struct xe_vm *vm,
4237 			   struct drm_gpuvm_map_req *map_req,
4238 			   bool is_madvise)
4239 {
4240 	struct xe_vma_ops vops;
4241 	struct drm_gpuva_ops *ops = NULL;
4242 	struct drm_gpuva_op *__op;
4243 	unsigned int vma_flags = 0;
4244 	bool remap_op = false;
4245 	struct xe_vma_mem_attr tmp_attr;
4246 	u16 default_pat;
4247 	int err;
4248 
4249 	lockdep_assert_held_write(&vm->lock);
4250 
4251 	if (is_madvise)
4252 		ops = drm_gpuvm_madvise_ops_create(&vm->gpuvm, map_req);
4253 	else
4254 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, map_req);
4255 
4256 	if (IS_ERR(ops))
4257 		return PTR_ERR(ops);
4258 
4259 	if (list_empty(&ops->list)) {
4260 		err = 0;
4261 		goto free_ops;
4262 	}
4263 
4264 	drm_gpuva_for_each_op(__op, ops) {
4265 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
4266 		struct xe_vma *vma = NULL;
4267 
4268 		if (!is_madvise) {
4269 			if (__op->op == DRM_GPUVA_OP_UNMAP) {
4270 				vma = gpuva_to_vma(op->base.unmap.va);
4271 				XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma));
4272 				default_pat = vma->attr.default_pat_index;
4273 				vma_flags = vma->gpuva.flags;
4274 			}
4275 
4276 			if (__op->op == DRM_GPUVA_OP_REMAP) {
4277 				vma = gpuva_to_vma(op->base.remap.unmap->va);
4278 				default_pat = vma->attr.default_pat_index;
4279 				vma_flags = vma->gpuva.flags;
4280 			}
4281 
4282 			if (__op->op == DRM_GPUVA_OP_MAP) {
4283 				op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
4284 				op->map.pat_index = default_pat;
4285 			}
4286 		} else {
4287 			if (__op->op == DRM_GPUVA_OP_REMAP) {
4288 				vma = gpuva_to_vma(op->base.remap.unmap->va);
4289 				xe_assert(vm->xe, !remap_op);
4290 				xe_assert(vm->xe, xe_vma_has_no_bo(vma));
4291 				remap_op = true;
4292 				vma_flags = vma->gpuva.flags;
4293 			}
4294 
4295 			if (__op->op == DRM_GPUVA_OP_MAP) {
4296 				xe_assert(vm->xe, remap_op);
4297 				remap_op = false;
4298 				/*
4299 				 * For madvise ops, DRM_GPUVA_OP_MAP always
4300 				 * follows DRM_GPUVA_OP_REMAP, so propagate
4301 				 * the flags from the VMA we're unmapping to
4302 				 * the new mapping.
4303 				 */
4304 				op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
4305 			}
4306 		}
4307 		print_op(vm->xe, __op);
4308 	}
4309 
4310 	xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
4311 
4312 	if (is_madvise)
4313 		vops.flags |= XE_VMA_OPS_FLAG_MADVISE;
4314 
4315 	err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
4316 	if (err)
4317 		goto unwind_ops;
4318 
4319 	xe_vm_lock(vm, false);
4320 
4321 	drm_gpuva_for_each_op(__op, ops) {
4322 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
4323 		struct xe_vma *vma;
4324 
4325 		if (__op->op == DRM_GPUVA_OP_UNMAP) {
4326 			vma = gpuva_to_vma(op->base.unmap.va);
4327 			/* There should be no unmap for madvise */
4328 			if (is_madvise)
4329 				XE_WARN_ON("UNEXPECTED UNMAP");
4330 
4331 			xe_vma_destroy(vma, NULL);
4332 		} else if (__op->op == DRM_GPUVA_OP_REMAP) {
4333 			vma = gpuva_to_vma(op->base.remap.unmap->va);
4334 			/* For madvise ops, store the attributes of the VMA unmapped
4335 			 * by the REMAP so they can be assigned to the new MAP VMA.
4336 			 */
4337 			if (is_madvise)
4338 				tmp_attr = vma->attr;
4339 
4340 			xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), NULL);
4341 		} else if (__op->op == DRM_GPUVA_OP_MAP) {
4342 			vma = op->map.vma;
4343 			/* For a madvise call, a MAP is always preceded by a REMAP.
4344 			 * Therefore tmp_attr already holds sane values, making it safe
4345 			 * to copy them to the new VMA.
4346 			 */
4347 			if (is_madvise)
4348 				vma->attr = tmp_attr;
4349 		}
4350 	}
4351 
4352 	xe_vm_unlock(vm);
4353 	drm_gpuva_ops_free(&vm->gpuvm, ops);
4354 	return 0;
4355 
4356 unwind_ops:
4357 	vm_bind_ioctl_ops_unwind(vm, &ops, 1);
4358 free_ops:
4359 	drm_gpuva_ops_free(&vm->gpuvm, ops);
4360 	return err;
4361 }
4362 
4363 /**
4364  * xe_vm_alloc_madvise_vma - Allocate VMAs with madvise ops
4365  * @vm: Pointer to the xe_vm structure
4366  * @start: Starting input address
4367  * @range: Size of the input range
4368  *
4369  * This function splits existing VMAs to create new VMAs for the user-provided input range
4370  *
4371  * Return: 0 on success, negative error code on failure
4372  */
4373 int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
4374 {
4375 	struct drm_gpuvm_map_req map_req = {
4376 		.map.va.addr = start,
4377 		.map.va.range = range,
4378 	};
4379 
4380 	lockdep_assert_held_write(&vm->lock);
4381 
4382 	vm_dbg(&vm->xe->drm, "MADVISE_OPS_CREATE: addr=0x%016llx, size=0x%016llx", start, range);
4383 
4384 	return xe_vm_alloc_vma(vm, &map_req, true);
4385 }
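
/*
 * Illustrative sketch (not part of the driver): splitting VMAs ahead of a
 * madvise-style attribute update. Per the lockdep assertion, vm->lock must
 * already be held for write; the calling context is hypothetical.
 *
 *        int err;
 *
 *        down_write(&vm->lock);
 *        err = xe_vm_alloc_madvise_vma(vm, start, range);
 *        up_write(&vm->lock);
 */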
4386 
4387 /**
4388  * xe_vm_alloc_cpu_addr_mirror_vma - Allocate CPU addr mirror vma
4389  * @vm: Pointer to the xe_vm structure
4390  * @start: Starting input address
4391  * @range: Size of the input range
4392  *
4393  * This function splits/merges existing VMAs to create new VMAs for the user-provided input range
4394  *
4395  * Return: 0 on success, negative error code on failure
4396  */
4397 int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
4398 {
4399 	struct drm_gpuvm_map_req map_req = {
4400 		.map.va.addr = start,
4401 		.map.va.range = range,
4402 	};
4403 
4404 	lockdep_assert_held_write(&vm->lock);
4405 
4406 	vm_dbg(&vm->xe->drm, "CPU_ADDR_MIRROR_VMA_OPS_CREATE: addr=0x%016llx, size=0x%016llx",
4407 	       start, range);
4408 
4409 	return xe_vm_alloc_vma(vm, &map_req, false);
4410 }
4411