Lines Matching full:vm

46 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)  in xe_vm_obj()  argument
48 return vm->gpuvm.r_obj; in xe_vm_obj()
52 * xe_vm_drm_exec_lock() - Lock the vm's resv with a drm_exec transaction
53 * @vm: The vm whose resv is to be locked.
56 * Helper to lock the vm's resv as part of a drm_exec transaction.
60 int xe_vm_drm_exec_lock(struct xe_vm *vm, struct drm_exec *exec) in xe_vm_drm_exec_lock() argument
62 return drm_exec_lock_obj(exec, xe_vm_obj(vm)); in xe_vm_drm_exec_lock()
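
As a rough illustration of how the helper above might be driven, here is a hedged sketch of a caller locking the VM's resv inside a drm_exec transaction; the function name and placeholder body are assumptions, while xe_vm_drm_exec_lock() and the drm_exec calls are taken as shown.

	#include <drm/drm_exec.h>

	#include "xe_vm.h"

	/* Hypothetical caller: lock the vm's resv, retrying on contention. */
	static int example_lock_vm_resv(struct xe_vm *vm)
	{
		struct drm_exec exec;
		int err = 0;

		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
		drm_exec_until_all_locked(&exec) {
			err = xe_vm_drm_exec_lock(vm, &exec);
			drm_exec_retry_on_contention(&exec);
			if (err)
				break;
		}

		if (!err) {
			/* ... touch state protected by the vm's resv ... */
		}

		drm_exec_fini(&exec);
		return err;
	}
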
65 static bool preempt_fences_waiting(struct xe_vm *vm) in preempt_fences_waiting() argument
69 lockdep_assert_held(&vm->lock); in preempt_fences_waiting()
70 xe_vm_assert_held(vm); in preempt_fences_waiting()
72 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in preempt_fences_waiting()
91 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list, in alloc_preempt_fences() argument
94 lockdep_assert_held(&vm->lock); in alloc_preempt_fences()
95 xe_vm_assert_held(vm); in alloc_preempt_fences()
97 if (*count >= vm->preempt.num_exec_queues) in alloc_preempt_fences()
100 for (; *count < vm->preempt.num_exec_queues; ++(*count)) { in alloc_preempt_fences()
112 static int wait_for_existing_preempt_fences(struct xe_vm *vm) in wait_for_existing_preempt_fences() argument
115 bool vf_migration = IS_SRIOV_VF(vm->xe) && in wait_for_existing_preempt_fences()
116 xe_sriov_vf_migration_supported(vm->xe); in wait_for_existing_preempt_fences()
119 xe_vm_assert_held(vm); in wait_for_existing_preempt_fences()
121 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in wait_for_existing_preempt_fences()
128 xe_assert(vm->xe, vf_migration); in wait_for_existing_preempt_fences()
132 /* Only -ETIME on fence indicates VM needs to be killed */ in wait_for_existing_preempt_fences()
144 static bool xe_vm_is_idle(struct xe_vm *vm) in xe_vm_is_idle() argument
148 xe_vm_assert_held(vm); in xe_vm_is_idle()
149 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in xe_vm_is_idle()
157 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list) in arm_preempt_fences() argument
162 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in arm_preempt_fences()
166 xe_assert(vm->xe, link != list); in arm_preempt_fences()
176 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo) in add_preempt_fences() argument
183 if (!vm->preempt.num_exec_queues) in add_preempt_fences()
186 err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues); in add_preempt_fences()
190 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) in add_preempt_fences()
200 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm, in resume_and_reinstall_preempt_fences() argument
205 lockdep_assert_held(&vm->lock); in resume_and_reinstall_preempt_fences()
206 xe_vm_assert_held(vm); in resume_and_reinstall_preempt_fences()
208 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) { in resume_and_reinstall_preempt_fences()
211 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence, in resume_and_reinstall_preempt_fences()
216 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in xe_vm_add_compute_exec_queue() argument
219 .vm = &vm->gpuvm, in xe_vm_add_compute_exec_queue()
229 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); in xe_vm_add_compute_exec_queue()
231 down_write(&vm->lock); in xe_vm_add_compute_exec_queue()
232 err = xe_validation_exec_lock(&ctx, &vm_exec, &vm->xe->val); in xe_vm_add_compute_exec_queue()
243 list_add(&q->lr.link, &vm->preempt.exec_queues); in xe_vm_add_compute_exec_queue()
244 ++vm->preempt.num_exec_queues; in xe_vm_add_compute_exec_queue()
247 xe_svm_notifier_lock(vm); in xe_vm_add_compute_exec_queue()
249 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence, in xe_vm_add_compute_exec_queue()
253 * Check to see if a preemption on VM is in flight or userptr in xe_vm_add_compute_exec_queue()
255 * other preempt fences on the VM. in xe_vm_add_compute_exec_queue()
257 wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm); in xe_vm_add_compute_exec_queue()
261 xe_svm_notifier_unlock(vm); in xe_vm_add_compute_exec_queue()
266 up_write(&vm->lock); in xe_vm_add_compute_exec_queue()
273 * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
274 * @vm: The VM.
279 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in xe_vm_remove_compute_exec_queue() argument
281 if (!xe_vm_in_preempt_fence_mode(vm)) in xe_vm_remove_compute_exec_queue()
284 down_write(&vm->lock); in xe_vm_remove_compute_exec_queue()
287 --vm->preempt.num_exec_queues; in xe_vm_remove_compute_exec_queue()
294 up_write(&vm->lock); in xe_vm_remove_compute_exec_queue()
300 * xe_vm_kill() - VM Kill
301 * @vm: The VM.
302 * @unlocked: Flag indicating the VM's dma-resv is not held
304 * Kill the VM by setting the banned flag, indicating the VM is no longer available for
305 * use. If in preempt fence mode, also kill all exec queues attached to the VM.
307 void xe_vm_kill(struct xe_vm *vm, bool unlocked) in xe_vm_kill() argument
311 lockdep_assert_held(&vm->lock); in xe_vm_kill()
314 xe_vm_lock(vm, false); in xe_vm_kill()
316 vm->flags |= XE_VM_FLAG_BANNED; in xe_vm_kill()
317 trace_xe_vm_kill(vm); in xe_vm_kill()
319 list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) in xe_vm_kill()
323 xe_vm_unlock(vm); in xe_vm_kill()
325 /* TODO: Inform user the VM is banned */ in xe_vm_kill()
330 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); in xe_gpuvm_validate() local
335 lockdep_assert_held(&vm->lock); in xe_gpuvm_validate()
338 &vm->rebind_list); in xe_gpuvm_validate()
346 if (!try_wait_for_completion(&vm->xe->pm_block)) in xe_gpuvm_validate()
349 ret = xe_bo_validate(bo, vm, false, exec); in xe_gpuvm_validate()
359 * @vm: The vm for which we are rebinding.
372 int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec, in xe_vm_validate_rebind() argument
380 ret = drm_gpuvm_validate(&vm->gpuvm, exec); in xe_vm_validate_rebind()
384 ret = xe_vm_rebind(vm, false); in xe_vm_validate_rebind()
387 } while (!list_empty(&vm->gpuvm.evict.list)); in xe_vm_validate_rebind()
398 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm, in xe_preempt_work_begin() argument
403 err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 0); in xe_preempt_work_begin()
407 if (xe_vm_is_idle(vm)) { in xe_preempt_work_begin()
408 vm->preempt.rebind_deactivated = true; in xe_preempt_work_begin()
413 if (!preempt_fences_waiting(vm)) { in xe_preempt_work_begin()
418 err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, 0); in xe_preempt_work_begin()
422 err = wait_for_existing_preempt_fences(vm); in xe_preempt_work_begin()
432 return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues); in xe_preempt_work_begin()
435 static bool vm_suspend_rebind_worker(struct xe_vm *vm) in vm_suspend_rebind_worker() argument
437 struct xe_device *xe = vm->xe; in vm_suspend_rebind_worker()
441 if (!try_wait_for_completion(&vm->xe->pm_block)) { in vm_suspend_rebind_worker()
443 list_move_tail(&vm->preempt.pm_activate_link, &xe->rebind_resume_list); in vm_suspend_rebind_worker()
452 * @vm: The vm whose preempt worker to resume.
457 void xe_vm_resume_rebind_worker(struct xe_vm *vm) in xe_vm_resume_rebind_worker() argument
459 queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work); in xe_vm_resume_rebind_worker()
464 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work); in preempt_rebind_work_func() local
473 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); in preempt_rebind_work_func()
474 trace_xe_vm_rebind_worker_enter(vm); in preempt_rebind_work_func()
476 down_write(&vm->lock); in preempt_rebind_work_func()
478 if (xe_vm_is_closed_or_banned(vm)) { in preempt_rebind_work_func()
479 up_write(&vm->lock); in preempt_rebind_work_func()
480 trace_xe_vm_rebind_worker_exit(vm); in preempt_rebind_work_func()
485 if (!try_wait_for_completion(&vm->xe->pm_block) && vm_suspend_rebind_worker(vm)) { in preempt_rebind_work_func()
486 up_write(&vm->lock); in preempt_rebind_work_func()
492 if (xe_vm_userptr_check_repin(vm)) { in preempt_rebind_work_func()
493 err = xe_vm_userptr_pin(vm); in preempt_rebind_work_func()
498 err = xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, in preempt_rebind_work_func()
506 err = xe_preempt_work_begin(&exec, vm, &done); in preempt_rebind_work_func()
515 err = alloc_preempt_fences(vm, &preempt_fences, &fence_count); in preempt_rebind_work_func()
519 xe_vm_set_validation_exec(vm, &exec); in preempt_rebind_work_func()
520 err = xe_vm_rebind(vm, true); in preempt_rebind_work_func()
521 xe_vm_set_validation_exec(vm, NULL); in preempt_rebind_work_func()
525 /* Wait on rebinds and munmap style VM unbinds */ in preempt_rebind_work_func()
526 wait = dma_resv_wait_timeout(xe_vm_resv(vm), in preempt_rebind_work_func()
539 xe_svm_notifier_lock(vm); in preempt_rebind_work_func()
540 if (retry_required(tries, vm)) { in preempt_rebind_work_func()
541 xe_svm_notifier_unlock(vm); in preempt_rebind_work_func()
548 spin_lock(&vm->xe->ttm.lru_lock); in preempt_rebind_work_func()
549 ttm_lru_bulk_move_tail(&vm->lru_bulk_move); in preempt_rebind_work_func()
550 spin_unlock(&vm->xe->ttm.lru_lock); in preempt_rebind_work_func()
553 arm_preempt_fences(vm, &preempt_fences); in preempt_rebind_work_func()
554 resume_and_reinstall_preempt_fences(vm, &exec); in preempt_rebind_work_func()
555 xe_svm_notifier_unlock(vm); in preempt_rebind_work_func()
561 trace_xe_vm_rebind_worker_retry(vm); in preempt_rebind_work_func()
568 if (IS_SRIOV_VF(vm->xe) && in preempt_rebind_work_func()
569 xe_sriov_vf_migration_supported(vm->xe)) { in preempt_rebind_work_func()
570 up_write(&vm->lock); in preempt_rebind_work_func()
571 xe_vm_queue_rebind_worker(vm); in preempt_rebind_work_func()
579 drm_warn(&vm->xe->drm, "VM worker error: %d\n", err); in preempt_rebind_work_func()
580 xe_vm_kill(vm, true); in preempt_rebind_work_func()
582 up_write(&vm->lock); in preempt_rebind_work_func()
586 trace_xe_vm_rebind_worker_exit(vm); in preempt_rebind_work_func()
590 * xe_vm_add_fault_entry_pf() - Add pagefault to vm fault list
591 * @vm: The VM.
594 * This function takes the data from the pagefault @pf and saves it to @vm->faults.list.
599 void xe_vm_add_fault_entry_pf(struct xe_vm *vm, struct xe_pagefault *pf) in xe_vm_add_fault_entry_pf() argument
612 drm_warn(&vm->xe->drm, in xe_vm_add_fault_entry_pf()
617 guard(spinlock)(&vm->faults.lock); in xe_vm_add_fault_entry_pf()
623 if (vm->faults.len >= MAX_FAULTS_SAVED_PER_VM) { in xe_vm_add_fault_entry_pf()
641 list_add_tail(&e->list, &vm->faults.list); in xe_vm_add_fault_entry_pf()
642 vm->faults.len++; in xe_vm_add_fault_entry_pf()
645 static void xe_vm_clear_fault_entries(struct xe_vm *vm) in xe_vm_clear_fault_entries() argument
649 guard(spinlock)(&vm->faults.lock); in xe_vm_clear_fault_entries()
650 list_for_each_entry_safe(e, tmp, &vm->faults.list, list) { in xe_vm_clear_fault_entries()
654 vm->faults.len = 0; in xe_vm_clear_fault_entries()
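
The two fault-list helpers above rely on the scoped-lock idiom from <linux/cleanup.h>: guard(spinlock) acquires the lock at the declaration and releases it automatically when the scope ends. A minimal sketch, assuming a hypothetical helper that only reads the counter:

	#include <linux/spinlock.h>

	static unsigned int example_fault_count(struct xe_vm *vm)
	{
		guard(spinlock)(&vm->faults.lock);

		return vm->faults.len;	/* lock dropped automatically on return */
	}
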
758 static struct dma_fence *ops_execute(struct xe_vm *vm,
760 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
764 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) in xe_vm_rebind() argument
772 lockdep_assert_held(&vm->lock); in xe_vm_rebind()
773 if ((xe_vm_in_lr_mode(vm) && !rebind_worker) || in xe_vm_rebind()
774 list_empty(&vm->rebind_list)) in xe_vm_rebind()
777 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_rebind()
781 xe_vm_assert_held(vm); in xe_vm_rebind()
782 list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) { in xe_vm_rebind()
783 xe_assert(vm->xe, vma->tile_present); in xe_vm_rebind()
800 fence = ops_execute(vm, &vops); in xe_vm_rebind()
805 list_for_each_entry_safe(vma, next, &vm->rebind_list, in xe_vm_rebind()
819 struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_mask) in xe_vma_rebind() argument
828 lockdep_assert_held(&vm->lock); in xe_vma_rebind()
829 xe_vm_assert_held(vm); in xe_vma_rebind()
830 xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); in xe_vma_rebind()
832 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vma_rebind()
834 for_each_tile(tile, vm->xe, id) { in xe_vma_rebind()
850 fence = ops_execute(vm, &vops); in xe_vma_rebind()
895 * xe_vm_range_rebind() - VM range (re)bind
896 * @vm: The VM which the range belongs to.
906 struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm, in xe_vm_range_rebind() argument
918 lockdep_assert_held(&vm->lock); in xe_vm_range_rebind()
919 xe_vm_assert_held(vm); in xe_vm_range_rebind()
920 xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); in xe_vm_range_rebind()
921 xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma)); in xe_vm_range_rebind()
923 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_range_rebind()
925 for_each_tile(tile, vm->xe, id) { in xe_vm_range_rebind()
941 fence = ops_execute(vm, &vops); in xe_vm_range_rebind()
981 * xe_vm_range_unbind() - VM range unbind
982 * @vm: The VM which the range belongs to.
990 struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm, in xe_vm_range_unbind() argument
1000 lockdep_assert_held(&vm->lock); in xe_vm_range_unbind()
1001 xe_vm_assert_held(vm); in xe_vm_range_unbind()
1002 xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); in xe_vm_range_unbind()
1007 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_range_unbind()
1008 for_each_tile(tile, vm->xe, id) { in xe_vm_range_unbind()
1024 fence = ops_execute(vm, &vops); in xe_vm_range_unbind()
1067 static struct xe_vma *xe_vma_create(struct xe_vm *vm, in xe_vma_create() argument
1080 xe_assert(vm->xe, start < end); in xe_vma_create()
1081 xe_assert(vm->xe, end < vm->size); in xe_vma_create()
1106 vma->gpuva.vm = &vm->gpuvm; in xe_vma_create()
1111 for_each_tile(tile, vm->xe, id) in xe_vma_create()
1114 if (vm->xe->info.has_atomic_enable_pte_bit) in xe_vma_create()
1142 vm_bo = drm_gpuvm_bo_obtain_locked(vma->gpuva.vm, &bo->ttm.base); in xe_vma_create()
1172 xe_vm_get(vm); in xe_vma_create()
1180 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_destroy_late() local
1192 xe_vm_put(vm); in xe_vma_destroy_late()
1194 xe_vm_put(vm); in xe_vma_destroy_late()
1221 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_destroy() local
1224 lockdep_assert_held_write(&vm->lock); in xe_vma_destroy()
1225 xe_assert(vm->xe, list_empty(&vma->combined_links.destroy)); in xe_vma_destroy()
1228 xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED); in xe_vma_destroy()
1240 xe_vm_assert_held(vm); in xe_vma_destroy()
1257 * @vma: The vma for which we want to lock the vm resv and any attached
1266 struct xe_vm *vm = xe_vma_vm(vma); in xe_vm_lock_vma() local
1270 XE_WARN_ON(!vm); in xe_vm_lock_vma()
1272 err = drm_exec_lock_obj(exec, xe_vm_obj(vm)); in xe_vm_lock_vma()
1273 if (!err && bo && !bo->vm) in xe_vm_lock_vma()
1297 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range) in xe_vm_find_overlapping_vma() argument
1301 lockdep_assert_held(&vm->lock); in xe_vm_find_overlapping_vma()
1303 if (xe_vm_is_closed_or_banned(vm)) in xe_vm_find_overlapping_vma()
1306 xe_assert(vm->xe, start + range <= vm->size); in xe_vm_find_overlapping_vma()
1308 gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range); in xe_vm_find_overlapping_vma()
1313 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma) in xe_vm_insert_vma() argument
1317 xe_assert(vm->xe, xe_vma_vm(vma) == vm); in xe_vm_insert_vma()
1318 lockdep_assert_held(&vm->lock); in xe_vm_insert_vma()
1320 mutex_lock(&vm->snap_mutex); in xe_vm_insert_vma()
1321 err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva); in xe_vm_insert_vma()
1322 mutex_unlock(&vm->snap_mutex); in xe_vm_insert_vma()
1328 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma) in xe_vm_remove_vma() argument
1330 xe_assert(vm->xe, xe_vma_vm(vma) == vm); in xe_vm_remove_vma()
1331 lockdep_assert_held(&vm->lock); in xe_vm_remove_vma()
1333 mutex_lock(&vm->snap_mutex); in xe_vm_remove_vma()
1335 mutex_unlock(&vm->snap_mutex); in xe_vm_remove_vma()
1336 if (vm->usm.last_fault_vma == vma) in xe_vm_remove_vma()
1337 vm->usm.last_fault_vma = NULL; in xe_vm_remove_vma()
1468 struct xe_vm *vm = xe_vma_vm(vma); in xelp_pte_encode_vma() local
1484 (bo && xe_bo_is_purged(bo) && xe_vm_has_scratch(vm)))) in xelp_pte_encode_vma()
1523 * given tile and vm.
1526 * @vm: vm to set up for.
1527 * @exec: The struct drm_exec object used to lock the vm resv.
1537 struct xe_vm *vm, struct drm_exec *exec) in xe_vm_create_scratch() argument
1542 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) { in xe_vm_create_scratch()
1543 vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i, exec); in xe_vm_create_scratch()
1544 if (IS_ERR(vm->scratch_pt[id][i])) { in xe_vm_create_scratch()
1545 int err = PTR_ERR(vm->scratch_pt[id][i]); in xe_vm_create_scratch()
1547 vm->scratch_pt[id][i] = NULL; in xe_vm_create_scratch()
1550 xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]); in xe_vm_create_scratch()
1557 static void xe_vm_free_scratch(struct xe_vm *vm) in xe_vm_free_scratch() argument
1562 if (!xe_vm_has_scratch(vm)) in xe_vm_free_scratch()
1565 for_each_tile(tile, vm->xe, id) { in xe_vm_free_scratch()
1568 if (!vm->pt_root[id]) in xe_vm_free_scratch()
1571 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i) in xe_vm_free_scratch()
1572 if (vm->scratch_pt[id][i]) in xe_vm_free_scratch()
1573 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL); in xe_vm_free_scratch()
1577 static void xe_vm_pt_destroy(struct xe_vm *vm) in xe_vm_pt_destroy() argument
1582 xe_vm_assert_held(vm); in xe_vm_pt_destroy()
1584 for_each_tile(tile, vm->xe, id) { in xe_vm_pt_destroy()
1585 if (vm->pt_root[id]) { in xe_vm_pt_destroy()
1586 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL); in xe_vm_pt_destroy()
1587 vm->pt_root[id] = NULL; in xe_vm_pt_destroy()
1592 static void xe_vm_init_prove_locking(struct xe_device *xe, struct xe_vm *vm) in xe_vm_init_prove_locking() argument
1598 might_lock(&vm->exec_queues.lock); in xe_vm_init_prove_locking()
1601 down_read(&vm->exec_queues.lock); in xe_vm_init_prove_locking()
1603 up_read(&vm->exec_queues.lock); in xe_vm_init_prove_locking()
1611 struct xe_vm *vm; in xe_vm_create() local
1617 * Since the GSCCS is not user-accessible, we don't expect a GSC VM to in xe_vm_create()
1622 vm = kzalloc(sizeof(*vm), GFP_KERNEL); in xe_vm_create()
1623 if (!vm) in xe_vm_create()
1626 vm->xe = xe; in xe_vm_create()
1628 vm->size = 1ull << xe->info.va_bits; in xe_vm_create()
1629 vm->flags = flags; in xe_vm_create()
1632 vm->xef = xe_file_get(xef); in xe_vm_create()
1636 * under a user-VM lock when the PXP session is started at exec_queue in xe_vm_create()
1644 __init_rwsem(&vm->lock, "gsc_vm", &gsc_vm_key); in xe_vm_create()
1646 init_rwsem(&vm->lock); in xe_vm_create()
1648 mutex_init(&vm->snap_mutex); in xe_vm_create()
1650 INIT_LIST_HEAD(&vm->rebind_list); in xe_vm_create()
1652 INIT_LIST_HEAD(&vm->userptr.repin_list); in xe_vm_create()
1653 INIT_LIST_HEAD(&vm->userptr.invalidated); in xe_vm_create()
1654 spin_lock_init(&vm->userptr.invalidated_lock); in xe_vm_create()
1656 INIT_LIST_HEAD(&vm->faults.list); in xe_vm_create()
1657 spin_lock_init(&vm->faults.lock); in xe_vm_create()
1659 ttm_lru_bulk_move_init(&vm->lru_bulk_move); in xe_vm_create()
1661 INIT_WORK(&vm->destroy_work, vm_destroy_work_func); in xe_vm_create()
1663 INIT_LIST_HEAD(&vm->preempt.exec_queues); in xe_vm_create()
1665 INIT_LIST_HEAD(&vm->exec_queues.list[id]); in xe_vm_create()
1667 vm->preempt.min_run_period_ms = xe->min_run_period_pf_ms; in xe_vm_create()
1669 vm->preempt.min_run_period_ms = xe->min_run_period_lr_ms; in xe_vm_create()
1671 init_rwsem(&vm->exec_queues.lock); in xe_vm_create()
1672 xe_vm_init_prove_locking(xe, vm); in xe_vm_create()
1675 xe_range_fence_tree_init(&vm->rftree[id]); in xe_vm_create()
1677 vm->pt_ops = &xelp_pt_ops; in xe_vm_create()
1682 * scheduler drops all the references to it, hence protecting the VM in xe_vm_create()
1686 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func); in xe_vm_create()
1688 INIT_LIST_HEAD(&vm->preempt.pm_activate_link); in xe_vm_create()
1691 err = xe_svm_init(vm); in xe_vm_create()
1701 drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm, in xe_vm_create()
1702 vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops); in xe_vm_create()
1709 err = xe_vm_drm_exec_lock(vm, &exec); in xe_vm_create()
1713 vm->flags |= XE_VM_FLAG_64K; in xe_vm_create()
1720 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level, in xe_vm_create()
1722 if (IS_ERR(vm->pt_root[id])) { in xe_vm_create()
1723 err = PTR_ERR(vm->pt_root[id]); in xe_vm_create()
1724 vm->pt_root[id] = NULL; in xe_vm_create()
1725 xe_vm_pt_destroy(vm); in xe_vm_create()
1734 if (xe_vm_has_scratch(vm)) { in xe_vm_create()
1736 if (!vm->pt_root[id]) in xe_vm_create()
1739 err = xe_vm_create_scratch(xe, tile, vm, &exec); in xe_vm_create()
1741 xe_vm_free_scratch(vm); in xe_vm_create()
1742 xe_vm_pt_destroy(vm); in xe_vm_create()
1750 vm->batch_invalidate_tlb = true; in xe_vm_create()
1753 if (vm->flags & XE_VM_FLAG_LR_MODE) { in xe_vm_create()
1754 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func); in xe_vm_create()
1755 vm->batch_invalidate_tlb = false; in xe_vm_create()
1760 if (!vm->pt_root[id]) in xe_vm_create()
1763 xe_pt_populate_empty(tile, vm, vm->pt_root[id]); in xe_vm_create()
1769 /* Kernel migration VM shouldn't have a circular loop.. */ in xe_vm_create()
1775 if (!vm->pt_root[id]) in xe_vm_create()
1781 q = xe_exec_queue_create_bind(xe, tile, vm, create_flags, 0); in xe_vm_create()
1786 vm->q[id] = q; in xe_vm_create()
1794 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm, in xe_vm_create()
1801 vm->usm.asid = asid; in xe_vm_create()
1804 trace_xe_vm_create(vm); in xe_vm_create()
1806 return vm; in xe_vm_create()
1809 xe_vm_close_and_put(vm); in xe_vm_create()
1814 vm->size = 0; /* close the vm */ in xe_vm_create()
1815 xe_svm_fini(vm); in xe_vm_create()
1818 mutex_destroy(&vm->snap_mutex); in xe_vm_create()
1820 xe_range_fence_tree_fini(&vm->rftree[id]); in xe_vm_create()
1821 ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move); in xe_vm_create()
1822 if (vm->xef) in xe_vm_create()
1823 xe_file_put(vm->xef); in xe_vm_create()
1824 kfree(vm); in xe_vm_create()
1830 static void xe_vm_close(struct xe_vm *vm) in xe_vm_close() argument
1832 struct xe_device *xe = vm->xe; in xe_vm_close()
1838 down_write(&vm->lock); in xe_vm_close()
1839 if (xe_vm_in_fault_mode(vm)) in xe_vm_close()
1840 xe_svm_notifier_lock(vm); in xe_vm_close()
1842 vm->size = 0; in xe_vm_close()
1844 if (!((vm->flags & XE_VM_FLAG_MIGRATION))) { in xe_vm_close()
1850 dma_resv_wait_timeout(xe_vm_resv(vm), in xe_vm_close()
1856 if (vm->pt_root[id]) in xe_vm_close()
1857 xe_pt_clear(xe, vm->pt_root[id]); in xe_vm_close()
1860 xe_tlb_inval_vm(&gt->tlb_inval, vm); in xe_vm_close()
1864 if (xe_vm_in_fault_mode(vm)) in xe_vm_close()
1865 xe_svm_notifier_unlock(vm); in xe_vm_close()
1866 up_write(&vm->lock); in xe_vm_close()
1872 void xe_vm_close_and_put(struct xe_vm *vm) in xe_vm_close_and_put() argument
1875 struct xe_device *xe = vm->xe; in xe_vm_close_and_put()
1881 xe_assert(xe, !vm->preempt.num_exec_queues); in xe_vm_close_and_put()
1883 xe_vm_close(vm); in xe_vm_close_and_put()
1884 if (xe_vm_in_preempt_fence_mode(vm)) { in xe_vm_close_and_put()
1886 list_del_init(&vm->preempt.pm_activate_link); in xe_vm_close_and_put()
1888 flush_work(&vm->preempt.rebind_work); in xe_vm_close_and_put()
1890 if (xe_vm_in_fault_mode(vm)) in xe_vm_close_and_put()
1891 xe_svm_close(vm); in xe_vm_close_and_put()
1893 down_write(&vm->lock); in xe_vm_close_and_put()
1895 if (vm->q[id]) { in xe_vm_close_and_put()
1898 xe_exec_queue_last_fence_put(vm->q[id], vm); in xe_vm_close_and_put()
1900 xe_exec_queue_tlb_inval_last_fence_put(vm->q[id], vm, i); in xe_vm_close_and_put()
1903 up_write(&vm->lock); in xe_vm_close_and_put()
1906 if (vm->q[id]) { in xe_vm_close_and_put()
1907 xe_exec_queue_kill(vm->q[id]); in xe_vm_close_and_put()
1908 xe_exec_queue_put(vm->q[id]); in xe_vm_close_and_put()
1909 vm->q[id] = NULL; in xe_vm_close_and_put()
1913 down_write(&vm->lock); in xe_vm_close_and_put()
1914 xe_vm_lock(vm, false); in xe_vm_close_and_put()
1915 drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) { in xe_vm_close_and_put()
1919 xe_svm_notifier_lock(vm); in xe_vm_close_and_put()
1921 xe_svm_notifier_unlock(vm); in xe_vm_close_and_put()
1924 xe_vm_remove_vma(vm, vma); in xe_vm_close_and_put()
1927 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) { in xe_vm_close_and_put()
1938 * All vm operations will add shared fences to resv. in xe_vm_close_and_put()
1944 xe_vm_free_scratch(vm); in xe_vm_close_and_put()
1945 xe_vm_pt_destroy(vm); in xe_vm_close_and_put()
1946 xe_vm_unlock(vm); in xe_vm_close_and_put()
1949 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL in xe_vm_close_and_put()
1959 xe_svm_fini(vm); in xe_vm_close_and_put()
1961 up_write(&vm->lock); in xe_vm_close_and_put()
1964 if (vm->usm.asid) { in xe_vm_close_and_put()
1968 xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION)); in xe_vm_close_and_put()
1970 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid); in xe_vm_close_and_put()
1971 xe_assert(xe, lookup == vm); in xe_vm_close_and_put()
1975 xe_vm_clear_fault_entries(vm); in xe_vm_close_and_put()
1978 xe_range_fence_tree_fini(&vm->rftree[id]); in xe_vm_close_and_put()
1980 xe_vm_put(vm); in xe_vm_close_and_put()
1985 struct xe_vm *vm = in vm_destroy_work_func() local
1987 struct xe_device *xe = vm->xe; in vm_destroy_work_func()
1992 xe_assert(xe, !vm->size); in vm_destroy_work_func()
1994 if (xe_vm_in_preempt_fence_mode(vm)) in vm_destroy_work_func()
1995 flush_work(&vm->preempt.rebind_work); in vm_destroy_work_func()
1997 mutex_destroy(&vm->snap_mutex); in vm_destroy_work_func()
1999 if (vm->flags & XE_VM_FLAG_LR_MODE) in vm_destroy_work_func()
2003 XE_WARN_ON(vm->pt_root[id]); in vm_destroy_work_func()
2005 trace_xe_vm_free(vm); in vm_destroy_work_func()
2007 ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move); in vm_destroy_work_func()
2009 if (vm->xef) in vm_destroy_work_func()
2010 xe_file_put(vm->xef); in vm_destroy_work_func()
2012 kfree(vm); in vm_destroy_work_func()
2017 struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm); in xe_vm_free() local
2019 /* To destroy the VM we need to be able to sleep */ in xe_vm_free()
2020 queue_work(system_dfl_wq, &vm->destroy_work); in xe_vm_free()
2025 struct xe_vm *vm; in xe_vm_lookup() local
2027 mutex_lock(&xef->vm.lock); in xe_vm_lookup()
2028 vm = xa_load(&xef->vm.xa, id); in xe_vm_lookup()
2029 if (vm) in xe_vm_lookup()
2030 xe_vm_get(vm); in xe_vm_lookup()
2031 mutex_unlock(&xef->vm.lock); in xe_vm_lookup()
2033 return vm; in xe_vm_lookup()
2036 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile) in xe_vm_pdp4_descriptor() argument
2038 return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0); in xe_vm_pdp4_descriptor()
2042 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in to_wait_exec_queue() argument
2044 return q ? q : vm->q[0]; in to_wait_exec_queue()
2074 struct xe_vm *vm; in xe_vm_create_ioctl() local
2117 vm = xe_vm_create(xe, flags, xef); in xe_vm_create_ioctl()
2118 if (IS_ERR(vm)) in xe_vm_create_ioctl()
2119 return PTR_ERR(vm); in xe_vm_create_ioctl()
2123 args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE); in xe_vm_create_ioctl()
2127 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL); in xe_vm_create_ioctl()
2136 xe_vm_close_and_put(vm); in xe_vm_create_ioctl()
2147 struct xe_vm *vm; in xe_vm_destroy_ioctl() local
2154 mutex_lock(&xef->vm.lock); in xe_vm_destroy_ioctl()
2155 vm = xa_load(&xef->vm.xa, args->vm_id); in xe_vm_destroy_ioctl()
2156 if (XE_IOCTL_DBG(xe, !vm)) in xe_vm_destroy_ioctl()
2158 else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues)) in xe_vm_destroy_ioctl()
2161 xa_erase(&xef->vm.xa, args->vm_id); in xe_vm_destroy_ioctl()
2162 mutex_unlock(&xef->vm.lock); in xe_vm_destroy_ioctl()
2165 xe_vm_close_and_put(vm); in xe_vm_destroy_ioctl()
2170 static int xe_vm_query_vmas(struct xe_vm *vm, u64 start, u64 end) in xe_vm_query_vmas() argument
2175 lockdep_assert_held(&vm->lock); in xe_vm_query_vmas()
2176 drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) in xe_vm_query_vmas()
2182 static int get_mem_attrs(struct xe_vm *vm, u32 *num_vmas, u64 start, in get_mem_attrs() argument
2188 lockdep_assert_held(&vm->lock); in get_mem_attrs()
2190 drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) { in get_mem_attrs()
2218 struct xe_vm *vm; in xe_vm_query_vmas_attrs_ioctl() local
2230 vm = xe_vm_lookup(xef, args->vm_id); in xe_vm_query_vmas_attrs_ioctl()
2231 if (XE_IOCTL_DBG(xe, !vm)) in xe_vm_query_vmas_attrs_ioctl()
2234 err = down_read_interruptible(&vm->lock); in xe_vm_query_vmas_attrs_ioctl()
2241 args->num_mem_ranges = xe_vm_query_vmas(vm, args->start, args->start + args->range); in xe_vm_query_vmas_attrs_ioctl()
2255 err = get_mem_attrs(vm, &args->num_mem_ranges, args->start, in xe_vm_query_vmas_attrs_ioctl()
2268 up_read(&vm->lock); in xe_vm_query_vmas_attrs_ioctl()
2270 xe_vm_put(vm); in xe_vm_query_vmas_attrs_ioctl()
2286 * @vm: the xe_vm the vma belongs to
2289 struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr) in xe_vm_find_vma_by_addr() argument
2293 if (vm->usm.last_fault_vma) { /* Fast lookup */ in xe_vm_find_vma_by_addr()
2294 if (vma_matches(vm->usm.last_fault_vma, page_addr)) in xe_vm_find_vma_by_addr()
2295 vma = vm->usm.last_fault_vma; in xe_vm_find_vma_by_addr()
2298 vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K); in xe_vm_find_vma_by_addr()
2309 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma, in prep_vma_destroy() argument
2312 xe_svm_notifier_lock(vm); in prep_vma_destroy()
2314 xe_svm_notifier_unlock(vm); in prep_vma_destroy()
2316 xe_vm_remove_vma(vm, vma); in prep_vma_destroy()
2369 static bool __xe_vm_needs_clear_scratch_pages(struct xe_vm *vm, u32 bind_flags) in __xe_vm_needs_clear_scratch_pages() argument
2371 if (!xe_vm_in_fault_mode(vm)) in __xe_vm_needs_clear_scratch_pages()
2374 if (!xe_vm_has_scratch(vm)) in __xe_vm_needs_clear_scratch_pages()
2399 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops, in vm_bind_ioctl_ops_create() argument
2413 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_create()
2415 vm_dbg(&vm->xe->drm, in vm_bind_ioctl_ops_create()
2423 xe_vm_find_cpu_addr_mirror_vma_range(vm, &range_start, &range_end); in vm_bind_ioctl_ops_create()
2436 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req); in vm_bind_ioctl_ops_create()
2440 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range); in vm_bind_ioctl_ops_create()
2443 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range); in vm_bind_ioctl_ops_create()
2446 xe_assert(vm->xe, bo); in vm_bind_ioctl_ops_create()
2452 vm_bo = drm_gpuvm_bo_obtain_locked(&vm->gpuvm, obj); in vm_bind_ioctl_ops_create()
2463 drm_warn(&vm->xe->drm, "NOT POSSIBLE\n"); in vm_bind_ioctl_ops_create()
2488 __xe_vm_needs_clear_scratch_pages(vm, flags); in vm_bind_ioctl_ops_create()
2504 ctx.devmem_possible = IS_DGFX(vm->xe) && in vm_bind_ioctl_ops_create()
2507 for_each_tile(tile, vm->xe, id) in vm_bind_ioctl_ops_create()
2515 xe_device_get_root_tile(vm->xe)); in vm_bind_ioctl_ops_create()
2517 tile = &vm->xe->tiles[region_to_mem_type[prefetch_region] - in vm_bind_ioctl_ops_create()
2524 svm_range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx); in vm_bind_ioctl_ops_create()
2527 u64 ret = xe_svm_find_vma_start(vm, addr, range_end, vma); in vm_bind_ioctl_ops_create()
2541 if (xe_svm_range_validate(vm, svm_range, tile_mask, dpagemap)) { in vm_bind_ioctl_ops_create()
2564 print_op(vm->xe, __op); in vm_bind_ioctl_ops_create()
2571 drm_gpuva_ops_free(&vm->gpuvm, ops); in vm_bind_ioctl_ops_create()
2577 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, in new_vma() argument
2586 lockdep_assert_held_write(&vm->lock); in new_vma()
2590 xe_validation_guard(&ctx, &vm->xe->val, &exec, in new_vma()
2592 if (!bo->vm) { in new_vma()
2593 err = drm_exec_lock_obj(&exec, xe_vm_obj(vm)); in new_vma()
2603 vma = xe_vma_create(vm, bo, op->gem.offset, in new_vma()
2609 if (!bo->vm) { in new_vma()
2610 err = add_preempt_fences(vm, bo); in new_vma()
2612 prep_vma_destroy(vm, vma, false); in new_vma()
2620 vma = xe_vma_create(vm, NULL, op->gem.offset, in new_vma()
2639 prep_vma_destroy(vm, vma, false); in new_vma()
2679 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op) in xe_vma_op_commit() argument
2683 lockdep_assert_held_write(&vm->lock); in xe_vma_op_commit()
2687 err |= xe_vm_insert_vma(vm, op->map.vma); in xe_vma_op_commit()
2696 prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va), in xe_vma_op_commit()
2701 err |= xe_vm_insert_vma(vm, op->remap.prev); in xe_vma_op_commit()
2710 err |= xe_vm_insert_vma(vm, op->remap.next); in xe_vma_op_commit()
2720 * Adjust for partial unbind after removing VMA from VM. In case in xe_vma_op_commit()
2730 prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true); in xe_vma_op_commit()
2737 drm_warn(&vm->xe->drm, "NOT POSSIBLE\n"); in xe_vma_op_commit()
2766 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, in vm_bind_ioctl_ops_parse() argument
2769 struct xe_device *xe = vm->xe; in vm_bind_ioctl_ops_parse()
2775 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_parse()
2777 for_each_tile(tile, vm->xe, id) in vm_bind_ioctl_ops_parse()
2805 vma = new_vma(vm, &op->base.map, &default_attr, in vm_bind_ioctl_ops_parse()
2811 if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) && in vm_bind_ioctl_ops_parse()
2833 xe_svm_has_mapping(vm, start, end)) { in vm_bind_ioctl_ops_parse()
2835 xe_svm_unmap_address_range(vm, start, end); in vm_bind_ioctl_ops_parse()
2847 vma = new_vma(vm, op->base.remap.prev, in vm_bind_ioctl_ops_parse()
2877 vma = new_vma(vm, op->base.remap.next, in vm_bind_ioctl_ops_parse()
2914 xe_svm_has_mapping(vm, xe_vma_start(vma), in vm_bind_ioctl_ops_parse()
2939 drm_warn(&vm->xe->drm, "NOT POSSIBLE\n"); in vm_bind_ioctl_ops_parse()
2942 err = xe_vma_op_commit(vm, op); in vm_bind_ioctl_ops_parse()
2950 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op, in xe_vma_op_unwind() argument
2954 lockdep_assert_held_write(&vm->lock); in xe_vma_op_unwind()
2959 prep_vma_destroy(vm, op->map.vma, post_commit); in xe_vma_op_unwind()
2968 xe_svm_notifier_lock(vm); in xe_vma_op_unwind()
2970 xe_svm_notifier_unlock(vm); in xe_vma_op_unwind()
2972 xe_vm_insert_vma(vm, vma); in xe_vma_op_unwind()
2981 prep_vma_destroy(vm, op->remap.prev, prev_post_commit); in xe_vma_op_unwind()
2985 prep_vma_destroy(vm, op->remap.next, next_post_commit); in xe_vma_op_unwind()
2989 xe_svm_notifier_lock(vm); in xe_vma_op_unwind()
2991 xe_svm_notifier_unlock(vm); in xe_vma_op_unwind()
3003 xe_vm_insert_vma(vm, vma); in xe_vma_op_unwind()
3012 drm_warn(&vm->xe->drm, "NOT POSSIBLE\n"); in xe_vma_op_unwind()
3016 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm, in vm_bind_ioctl_ops_unwind() argument
3032 xe_vma_op_unwind(vm, op, in vm_bind_ioctl_ops_unwind()
3058 struct xe_vm *vm = xe_vma_vm(vma); in vma_lock_and_validate() local
3063 if (!bo->vm) in vma_lock_and_validate()
3079 err = xe_bo_validate(bo, vm, in vma_lock_and_validate()
3080 xe_vm_allow_vm_eviction(vm) && in vma_lock_and_validate()
3108 static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op) in prefetch_ranges() argument
3110 bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP); in prefetch_ranges()
3125 ctx.device_private_page_owner = xe_svm_private_page_owner(vm, !dpagemap); in prefetch_ranges()
3130 xe_svm_range_migrate_to_smem(vm, svm_range); in prefetch_ranges()
3133 drm_dbg(&vm->xe->drm, in prefetch_ranges()
3142 …drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe… in prefetch_ranges()
3143 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); in prefetch_ranges()
3149 err = xe_svm_range_get_pages(vm, svm_range, &ctx); in prefetch_ranges()
3151 drm_dbg(&vm->xe->drm, "Get pages failed, asid=%u, gpusvm=%p, errno=%pe\n", in prefetch_ranges()
3152 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); in prefetch_ranges()
3163 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm, in op_lock_and_prep() argument
3170 * We only allow evicting a BO within the VM if it is not part of an in op_lock_and_prep()
3182 .validate = !xe_vm_in_fault_mode(vm) || in op_lock_and_prep()
3240 xe_assert(vm->xe, region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC || in op_lock_and_prep()
3267 drm_warn(&vm->xe->drm, "NOT POSSIBLE\n"); in op_lock_and_prep()
3273 static int vm_bind_ioctl_ops_prefetch_ranges(struct xe_vm *vm, struct xe_vma_ops *vops) in vm_bind_ioctl_ops_prefetch_ranges() argument
3283 err = prefetch_ranges(vm, op); in vm_bind_ioctl_ops_prefetch_ranges()
3293 struct xe_vm *vm, in vm_bind_ioctl_ops_lock_and_prep() argument
3299 err = drm_exec_lock_obj(exec, xe_vm_obj(vm)); in vm_bind_ioctl_ops_lock_and_prep()
3304 err = op_lock_and_prep(exec, vm, vops, op); in vm_bind_ioctl_ops_lock_and_prep()
3311 vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK) in vm_bind_ioctl_ops_lock_and_prep()
3352 static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops) in vm_ops_setup_tile_args() argument
3359 for_each_tile(tile, vm->xe, id) { in vm_ops_setup_tile_args()
3368 if (vm->pt_root[id] && !list_empty(&q->multi_gt_list)) in vm_ops_setup_tile_args()
3371 vops->pt_update_ops[id].q = vm->q[id]; in vm_ops_setup_tile_args()
3378 static struct dma_fence *ops_execute(struct xe_vm *vm, in ops_execute() argument
3388 number_tiles = vm_ops_setup_tile_args(vm, vops); in ops_execute()
3392 for_each_tile(tile, vm->xe, id) { in ops_execute()
3412 for_each_tile(tile, vm->xe, id) { in ops_execute()
3425 for_each_tile(tile, vm->xe, id) { in ops_execute()
3444 xe_exec_queue_tlb_inval_last_fence_get(q, vm, i); in ops_execute()
3448 xe_assert(vm->xe, current_fence == n_fence); in ops_execute()
3453 for_each_tile(tile, vm->xe, id) { in ops_execute()
3463 for_each_tile(tile, vm->xe, id) { in ops_execute()
3475 trace_xe_vm_ops_fail(vm); in ops_execute()
3486 static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op, in op_add_ufence() argument
3506 drm_warn(&vm->xe->drm, "NOT POSSIBLE\n"); in op_add_ufence()
3510 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops, in vm_bind_ioctl_ops_fini() argument
3520 op_add_ufence(vm, op, ufence); in vm_bind_ioctl_ops_fini()
3536 static struct dma_fence *vm_bind_ioctl_ops_execute(struct xe_vm *vm, in vm_bind_ioctl_ops_execute() argument
3544 lockdep_assert_held_write(&vm->lock); in vm_bind_ioctl_ops_execute()
3546 xe_validation_guard(&ctx, &vm->xe->val, &exec, in vm_bind_ioctl_ops_execute()
3551 err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops); in vm_bind_ioctl_ops_execute()
3557 xe_vm_set_validation_exec(vm, &exec); in vm_bind_ioctl_ops_execute()
3558 fence = ops_execute(vm, vops); in vm_bind_ioctl_ops_execute()
3559 xe_vm_set_validation_exec(vm, NULL); in vm_bind_ioctl_ops_execute()
3562 vm_bind_ioctl_ops_fini(vm, vops, NULL); in vm_bind_ioctl_ops_execute()
3566 vm_bind_ioctl_ops_fini(vm, vops, fence); in vm_bind_ioctl_ops_execute()
3592 static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm, in vm_bind_ioctl_check_args() argument
3647 (!xe_vm_in_fault_mode(vm) || in vm_bind_ioctl_check_args()
3746 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm, in vm_bind_ioctl_signal_fences() argument
3756 to_wait_exec_queue(vm, q), vm); in vm_bind_ioctl_signal_fences()
3769 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm, in xe_vma_ops_init() argument
3775 vops->vm = vm; in xe_vma_ops_init()
3862 struct xe_vm *vm; in xe_vm_bind_ioctl() local
3872 vm = xe_vm_lookup(xef, args->vm_id); in xe_vm_bind_ioctl()
3873 if (XE_IOCTL_DBG(xe, !vm)) in xe_vm_bind_ioctl()
3876 err = vm_bind_ioctl_check_args(xe, vm, args, &bind_ops); in xe_vm_bind_ioctl()
3893 if (XE_IOCTL_DBG(xe, q && vm != q->user_vm)) { in xe_vm_bind_ioctl()
3899 xe_svm_flush(vm); in xe_vm_bind_ioctl()
3901 err = down_write_killable(&vm->lock); in xe_vm_bind_ioctl()
3905 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) { in xe_vm_bind_ioctl()
3914 if (XE_IOCTL_DBG(xe, range > vm->size) || in xe_vm_bind_ioctl()
3915 XE_IOCTL_DBG(xe, addr > vm->size - range)) { in xe_vm_bind_ioctl()
3974 struct xe_exec_queue *__q = q ?: vm->q[0]; in xe_vm_bind_ioctl()
3980 (xe_vm_in_lr_mode(vm) ? in xe_vm_bind_ioctl()
4001 xe_vma_ops_init(&vops, vm, q, syncs, num_syncs); in xe_vm_bind_ioctl()
4013 ops[i] = vm_bind_ioctl_ops_create(vm, &vops, bos[i], obj_offset, in xe_vm_bind_ioctl()
4022 err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops); in xe_vm_bind_ioctl()
4029 vm->xe->vm_inject_error_position = in xe_vm_bind_ioctl()
4030 (vm->xe->vm_inject_error_position + 1) % in xe_vm_bind_ioctl()
4046 err = vm_bind_ioctl_ops_prefetch_ranges(vm, &vops); in xe_vm_bind_ioctl()
4050 fence = vm_bind_ioctl_ops_execute(vm, &vops); in xe_vm_bind_ioctl()
4058 vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds); in xe_vm_bind_ioctl()
4062 drm_gpuva_ops_free(&vm->gpuvm, ops[i]); in xe_vm_bind_ioctl()
4065 err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs); in xe_vm_bind_ioctl()
4078 up_write(&vm->lock); in xe_vm_bind_ioctl()
4086 xe_vm_put(vm); in xe_vm_bind_ioctl()
4112 static int fill_faults(struct xe_vm *vm, in fill_faults() argument
4127 spin_lock(&vm->faults.lock); in fill_faults()
4128 list_for_each_entry(entry, &vm->faults.list, list) { in fill_faults()
4132 fault_entry.address = xe_device_canonicalize_addr(vm->xe, entry->address); in fill_faults()
4143 spin_unlock(&vm->faults.lock); in fill_faults()
4151 static int xe_vm_get_property_helper(struct xe_vm *vm, in xe_vm_get_property_helper() argument
4158 spin_lock(&vm->faults.lock); in xe_vm_get_property_helper()
4159 size = size_mul(sizeof(struct xe_vm_fault), vm->faults.len); in xe_vm_get_property_helper()
4160 spin_unlock(&vm->faults.lock); in xe_vm_get_property_helper()
4171 * the number of faults in the VM fault array. in xe_vm_get_property_helper()
4179 return fill_faults(vm, args); in xe_vm_get_property_helper()
4190 struct xe_vm *vm; in xe_vm_get_property_ioctl() local
4198 vm = xe_vm_lookup(xef, args->vm_id); in xe_vm_get_property_ioctl()
4199 if (XE_IOCTL_DBG(xe, !vm)) in xe_vm_get_property_ioctl()
4202 ret = xe_vm_get_property_helper(vm, args); in xe_vm_get_property_ioctl()
4204 xe_vm_put(vm); in xe_vm_get_property_ioctl()
4209 * xe_vm_bind_kernel_bo - bind a kernel BO to a VM
4210 * @vm: VM to bind the BO to
4216 * Execute a VM bind map operation on a kernel-owned BO to bind it into a
4217 * kernel-owned VM.
4222 struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo, in xe_vm_bind_kernel_bo() argument
4232 xe_vm_get(vm); in xe_vm_bind_kernel_bo()
4236 down_write(&vm->lock); in xe_vm_bind_kernel_bo()
4238 xe_vma_ops_init(&vops, vm, q, NULL, 0); in xe_vm_bind_kernel_bo()
4240 ops = vm_bind_ioctl_ops_create(vm, &vops, bo, 0, addr, xe_bo_size(bo), in xe_vm_bind_kernel_bo()
4242 vm->xe->pat.idx[cache_lvl]); in xe_vm_bind_kernel_bo()
4248 err = vm_bind_ioctl_ops_parse(vm, ops, &vops); in xe_vm_bind_kernel_bo()
4252 xe_assert(vm->xe, !list_empty(&vops.list)); in xe_vm_bind_kernel_bo()
4258 fence = vm_bind_ioctl_ops_execute(vm, &vops); in xe_vm_bind_kernel_bo()
4264 vm_bind_ioctl_ops_unwind(vm, &ops, 1); in xe_vm_bind_kernel_bo()
4267 drm_gpuva_ops_free(&vm->gpuvm, ops); in xe_vm_bind_kernel_bo()
4270 up_write(&vm->lock); in xe_vm_bind_kernel_bo()
4274 xe_vm_put(vm); in xe_vm_bind_kernel_bo()
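
A hedged caller sketch for xe_vm_bind_kernel_bo(): bind a kernel-owned BO and wait for the returned fence. The parameter list (bind queue, GPU address, cache level) is inferred from the fragments above and is an assumption, as is the XE_CACHE_WB choice.

	#include <linux/dma-fence.h>

	#include "xe_bo.h"
	#include "xe_vm.h"

	static int example_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo, u64 addr)
	{
		struct dma_fence *fence;

		fence = xe_vm_bind_kernel_bo(vm, bo, NULL, addr, XE_CACHE_WB);
		if (IS_ERR(fence))
			return PTR_ERR(fence);

		dma_fence_wait(fence, false);	/* uninterruptible wait for the bind */
		dma_fence_put(fence);

		return 0;
	}
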
4284 * xe_vm_lock() - Lock the vm's dma_resv object
4285 * @vm: The struct xe_vm whose lock is to be locked
4292 int xe_vm_lock(struct xe_vm *vm, bool intr) in xe_vm_lock() argument
4297 ret = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL); in xe_vm_lock()
4299 ret = dma_resv_lock(xe_vm_resv(vm), NULL); in xe_vm_lock()
4305 * xe_vm_unlock() - Unlock the vm's dma_resv object
4306 * @vm: The struct xe_vm whose lock is to be released.
4310 void xe_vm_unlock(struct xe_vm *vm) in xe_vm_unlock() argument
4312 dma_resv_unlock(xe_vm_resv(vm)); in xe_vm_unlock()
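
A hedged usage sketch for the lock/unlock pair above, assuming a caller that only needs the vm's dma_resv held across a short critical section; the helper name and body are placeholders.

	#include "xe_vm.h"

	static int example_with_vm_locked(struct xe_vm *vm)
	{
		int err;

		err = xe_vm_lock(vm, true);	/* intr == true: interruptible wait */
		if (err)
			return err;

		/* ... access state protected by the vm's dma_resv ... */

		xe_vm_unlock(vm);
		return 0;
	}
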
4332 struct xe_vm *vm = xe_vma_vm(vma); in xe_vm_invalidate_vma_submit() local
4342 vm_dbg(&vm->xe->drm, in xe_vm_invalidate_vma_submit()
4352 lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 0) || in xe_vm_invalidate_vma_submit()
4353 (lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) && in xe_vm_invalidate_vma_submit()
4354 lockdep_is_held(&xe_vm_resv(vm)->lock.base))); in xe_vm_invalidate_vma_submit()
4359 WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(vm), in xe_vm_invalidate_vma_submit()
4405 int xe_vm_validate_protected(struct xe_vm *vm) in xe_vm_validate_protected() argument
4410 if (!vm) in xe_vm_validate_protected()
4413 mutex_lock(&vm->snap_mutex); in xe_vm_validate_protected()
4415 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { in xe_vm_validate_protected()
4424 err = xe_pxp_bo_key_check(vm->xe->pxp, bo); in xe_vm_validate_protected()
4430 mutex_unlock(&vm->snap_mutex); in xe_vm_validate_protected()
4453 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm) in xe_vm_snapshot_capture() argument
4459 if (!vm) in xe_vm_snapshot_capture()
4462 mutex_lock(&vm->snap_mutex); in xe_vm_snapshot_capture()
4463 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { in xe_vm_snapshot_capture()
4475 if (vm->flags & XE_VM_FLAG_FAULT_MODE) in xe_vm_snapshot_capture()
4477 if (vm->flags & XE_VM_FLAG_LR_MODE) in xe_vm_snapshot_capture()
4479 if (vm->flags & XE_VM_FLAG_SCRATCH_PAGE) in xe_vm_snapshot_capture()
4484 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { in xe_vm_snapshot_capture()
4536 mutex_unlock(&vm->snap_mutex); in xe_vm_snapshot_capture()
4596 drm_printf(p, "VM.uapi_flags: 0x%x\n", snap->uapi_flags); in xe_vm_snapshot_print()
4697 static int xe_vm_alloc_vma(struct xe_vm *vm, in xe_vm_alloc_vma() argument
4710 lockdep_assert_held_write(&vm->lock); in xe_vm_alloc_vma()
4713 ops = drm_gpuvm_madvise_ops_create(&vm->gpuvm, map_req); in xe_vm_alloc_vma()
4715 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, map_req); in xe_vm_alloc_vma()
4750 xe_assert(vm->xe, !remap_op); in xe_vm_alloc_vma()
4751 xe_assert(vm->xe, xe_vma_has_no_bo(vma)); in xe_vm_alloc_vma()
4757 xe_assert(vm->xe, remap_op); in xe_vm_alloc_vma()
4768 print_op(vm->xe, __op); in xe_vm_alloc_vma()
4771 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_alloc_vma()
4778 err = vm_bind_ioctl_ops_parse(vm, ops, &vops); in xe_vm_alloc_vma()
4782 xe_vm_lock(vm, false); in xe_vm_alloc_vma()
4815 xe_vm_unlock(vm); in xe_vm_alloc_vma()
4816 drm_gpuva_ops_free(&vm->gpuvm, ops); in xe_vm_alloc_vma()
4821 vm_bind_ioctl_ops_unwind(vm, &ops, 1); in xe_vm_alloc_vma()
4823 drm_gpuva_ops_free(&vm->gpuvm, ops); in xe_vm_alloc_vma()
4829 * @vm: Pointer to the xe_vm structure
4837 int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t start, uint64_t range) in xe_vm_alloc_madvise_vma() argument
4844 lockdep_assert_held_write(&vm->lock); in xe_vm_alloc_madvise_vma()
4846 vm_dbg(&vm->xe->drm, "MADVISE_OPS_CREATE: addr=0x%016llx, size=0x%016llx", start, range); in xe_vm_alloc_madvise_vma()
4848 return xe_vm_alloc_vma(vm, &map_req, true); in xe_vm_alloc_madvise_vma()
4859 * @vm: VM to search within
4872 void xe_vm_find_cpu_addr_mirror_vma_range(struct xe_vm *vm, u64 *start, u64 *end) in xe_vm_find_cpu_addr_mirror_vma_range() argument
4876 lockdep_assert_held(&vm->lock); in xe_vm_find_cpu_addr_mirror_vma_range()
4879 prev = xe_vm_find_vma_by_addr(vm, *start - SZ_4K); in xe_vm_find_cpu_addr_mirror_vma_range()
4884 if (*end < vm->size) { in xe_vm_find_cpu_addr_mirror_vma_range()
4885 next = xe_vm_find_vma_by_addr(vm, *end + 1); in xe_vm_find_cpu_addr_mirror_vma_range()
4893 * @vm: Pointer to the xe_vm structure
4901 int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, uint64_t start, uint64_t range) in xe_vm_alloc_cpu_addr_mirror_vma() argument
4908 lockdep_assert_held_write(&vm->lock); in xe_vm_alloc_cpu_addr_mirror_vma()
4910 vm_dbg(&vm->xe->drm, "CPU_ADDR_MIRROR_VMA_OPS_CREATE: addr=0x%016llx, size=0x%016llx", in xe_vm_alloc_cpu_addr_mirror_vma()
4913 return xe_vm_alloc_vma(vm, &map_req, false); in xe_vm_alloc_cpu_addr_mirror_vma()
4917 * xe_vm_add_exec_queue() - Add exec queue to VM
4918 * @vm: The VM.
4921 * Add exec queue to VM, skipped if the device does not have context based TLB
4924 void xe_vm_add_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in xe_vm_add_exec_queue() argument
4926 struct xe_device *xe = vm->xe; in xe_vm_add_exec_queue()
4933 xe_assert(xe, vm->xef); in xe_vm_add_exec_queue()
4934 xe_assert(xe, vm == q->vm); in xe_vm_add_exec_queue()
4939 down_write(&vm->exec_queues.lock); in xe_vm_add_exec_queue()
4940 list_add(&q->vm_exec_queue_link, &vm->exec_queues.list[q->gt->info.id]); in xe_vm_add_exec_queue()
4941 ++vm->exec_queues.count[q->gt->info.id]; in xe_vm_add_exec_queue()
4942 up_write(&vm->exec_queues.lock); in xe_vm_add_exec_queue()
4946 * xe_vm_remove_exec_queue() - Remove exec queue from VM
4947 * @vm: The VM.
4950 * Remove exec queue from VM, skipped if the device does not have context based
4953 void xe_vm_remove_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in xe_vm_remove_exec_queue() argument
4955 if (!vm->xe->info.has_ctx_tlb_inval) in xe_vm_remove_exec_queue()
4958 down_write(&vm->exec_queues.lock); in xe_vm_remove_exec_queue()
4961 --vm->exec_queues.count[q->gt->info.id]; in xe_vm_remove_exec_queue()
4963 up_write(&vm->exec_queues.lock); in xe_vm_remove_exec_queue()
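
A hedged pairing sketch for the two helpers above: they are expected to bracket an exec queue's association with the VM, and both are skipped internally when the device lacks context-based TLB invalidation. The wrapper function is hypothetical.

	static void example_track_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
	{
		xe_vm_add_exec_queue(vm, q);

		/* ... q submits work against vm ... */

		xe_vm_remove_exec_queue(vm, q);
	}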