Lines Matching full:vm

45 	/** @vm: VM bound to this slot. NULL if no VM is bound. */
46 struct panthor_vm *vm; member
78 * We use this list to pick a VM to evict when all slots are
89 /** @vm: VM management fields */
91 /** @vm.lock: Lock protecting access to @vm.list. */
94 /** @vm.list: List containing all VMs. */
97 /** @vm.reset_in_progress: True if a reset is in progress. */
100 /** @vm.wq: Workqueue used for the VM_BIND queues. */
102 } vm; member
106 * struct panthor_vm_pool - VM pool object
109 /** @xa: Array used for VM handle tracking. */
134 * struct panthor_vm_op_ctx - VM operation context
136 * With VM operations potentially taking place in a dma-signaling path, we
156 * After a VM operation, there might be free pages left in this array.
174 /** @va: Virtual range targeted by the VM operation. */
184 * @returned_vmas: List of panthor_vma objects returned after a VM operation.
221 * struct panthor_vm - VM object
223 * A VM is an object representing a GPU (or MCU) virtual address space.
226 * the VM.
228 * Except for the MCU VM, which is managed by the kernel, all other VMs are
256 * There's currently one bind queue per VM. It doesn't make sense to
257 * allow more given the VM operations are serialized anyway.
274 * @op_lock: Lock used to serialize operations on a VM.
283 * @op_ctx: The context attached to the currently executing VM operation.
295 * For the MCU VM, this is managing the VA range that's used to map
318 * @as.id: ID of the address space this VM is bound to.
320 * A value of -1 means the VM is inactive/not bound.
324 /** @as.active_cnt: Number of active users of this VM. */
328 * @as.lru_node: Used to insert the VM in the panthor_mmu::as::lru_list.
340 * @heaps.pool: The heap pool attached to this VM.
342 * Will stay NULL until someone creates a heap context on this VM.
350 /** @node: Used to insert the VM in the panthor_mmu::vm::list. */
353 /** @for_mcu: True if this is the MCU VM. */
357 * @destroyed: True if the VM was destroyed.
359 * No further bind requests should be queued to a destroyed VM.
364 * @unusable: True if the VM has turned unusable because something
372 * Instead, we should just flag the VM as unusable, and fail any
373 * further request targeting this VM.
375 * We also provide a way to query a VM state, so userspace can destroy
393 * struct panthor_vm_bind_job - VM bind job
402 /** @cleanup_op_ctx_work: Work used to cleanup the VM operation context. */
405 /** @vm: VM targeted by the VM operation. */
406 struct panthor_vm *vm; member
431 * done to allow asynchronous VM operations.
438 struct panthor_vm *vm = cookie; in alloc_pt() local
442 if (unlikely(!vm->root_page_table)) { in alloc_pt()
445 drm_WARN_ON(&vm->ptdev->base, vm->op_ctx); in alloc_pt()
446 p = alloc_pages_node(dev_to_node(vm->ptdev->base.dev), in alloc_pt()
449 vm->root_page_table = page; in alloc_pt()
456 if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K)) in alloc_pt()
459 /* We must have some op_ctx attached to the VM and it must have at least one in alloc_pt()
462 if (drm_WARN_ON(&vm->ptdev->base, !vm->op_ctx) || in alloc_pt()
463 drm_WARN_ON(&vm->ptdev->base, in alloc_pt()
464 vm->op_ctx->rsvd_page_tables.ptr >= vm->op_ctx->rsvd_page_tables.count)) in alloc_pt()
467 page = vm->op_ctx->rsvd_page_tables.pages[vm->op_ctx->rsvd_page_tables.ptr++]; in alloc_pt()
490 struct panthor_vm *vm = cookie; in free_pt() local
492 if (unlikely(vm->root_page_table == data)) { in free_pt()
494 vm->root_page_table = NULL; in free_pt()
498 if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K)) in free_pt()
595 static int mmu_hw_do_operation(struct panthor_vm *vm, in mmu_hw_do_operation() argument
598 struct panthor_device *ptdev = vm->ptdev; in mmu_hw_do_operation()
602 ret = mmu_hw_do_operation_locked(ptdev, vm->as.id, iova, size, op); in mmu_hw_do_operation()
651 * panthor_vm_has_unhandled_faults() - Check if a VM has unhandled faults
652 * @vm: VM to check.
654 * Return: true if the VM has unhandled faults, false otherwise.
656 bool panthor_vm_has_unhandled_faults(struct panthor_vm *vm) in panthor_vm_has_unhandled_faults() argument
658 return vm->unhandled_fault; in panthor_vm_has_unhandled_faults()
662 * panthor_vm_is_unusable() - Check if the VM is still usable
663 * @vm: VM to check.
665 * Return: true if the VM is unusable, false otherwise.
667 bool panthor_vm_is_unusable(struct panthor_vm *vm) in panthor_vm_is_unusable() argument
669 return vm->unusable; in panthor_vm_is_unusable()
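
A minimal usage sketch for the two state queries above; the caller and error codes are hypothetical, only panthor_vm_has_unhandled_faults() and panthor_vm_is_unusable() are taken from this listing:

	/* Hypothetical helper: refuse new work on a VM that is no longer healthy. */
	static int check_vm_state(struct panthor_vm *vm)
	{
		if (panthor_vm_is_unusable(vm))
			return -EINVAL;	/* a failed async VM_BIND left the VM inconsistent */

		if (panthor_vm_has_unhandled_faults(vm))
			return -EIO;	/* fault recorded by the MMU IRQ handler */

		return 0;
	}
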
672 static void panthor_vm_release_as_locked(struct panthor_vm *vm) in panthor_vm_release_as_locked() argument
674 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_release_as_locked()
678 if (drm_WARN_ON(&ptdev->base, vm->as.id < 0)) in panthor_vm_release_as_locked()
681 ptdev->mmu->as.slots[vm->as.id].vm = NULL; in panthor_vm_release_as_locked()
682 clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask); in panthor_vm_release_as_locked()
683 refcount_set(&vm->as.active_cnt, 0); in panthor_vm_release_as_locked()
684 list_del_init(&vm->as.lru_node); in panthor_vm_release_as_locked()
685 vm->as.id = -1; in panthor_vm_release_as_locked()
689 * panthor_vm_active() - Flag a VM as active
690 * @vm: VM to flag as active.
692 * Assigns an address space to a VM so it can be used by the GPU/MCU.
696 int panthor_vm_active(struct panthor_vm *vm) in panthor_vm_active() argument
698 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_active()
700 struct io_pgtable_cfg *cfg = &io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg; in panthor_vm_active()
707 if (refcount_inc_not_zero(&vm->as.active_cnt)) in panthor_vm_active()
712 if (refcount_inc_not_zero(&vm->as.active_cnt)) in panthor_vm_active()
715 as = vm->as.id; in panthor_vm_active()
727 if (vm->for_mcu) { in panthor_vm_active()
751 vm->as.id = as; in panthor_vm_active()
753 ptdev->mmu->as.slots[as].vm = vm; in panthor_vm_active()
764 /* If the VM is re-activated, we clear the fault. */ in panthor_vm_active()
765 vm->unhandled_fault = false; in panthor_vm_active()
777 ret = panthor_mmu_as_enable(vm->ptdev, vm->as.id, transtab, transcfg, vm->memattr); in panthor_vm_active()
781 refcount_set(&vm->as.active_cnt, 1); in panthor_vm_active()
782 list_del_init(&vm->as.lru_node); in panthor_vm_active()
794 * panthor_vm_idle() - Flag a VM idle
795 * @vm: VM to flag as idle.
797 * When we know the GPU is done with the VM (no more jobs to process),
798 * we can relinquish the AS slot attached to this VM, if any.
800 * We don't release the slot immediately, but instead place the VM in
801 * the LRU list, so it can be evicted if another VM needs an AS slot.
806 void panthor_vm_idle(struct panthor_vm *vm) in panthor_vm_idle() argument
808 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_idle()
810 if (!refcount_dec_and_mutex_lock(&vm->as.active_cnt, &ptdev->mmu->as.slots_lock)) in panthor_vm_idle()
813 if (!drm_WARN_ON(&ptdev->base, vm->as.id == -1 || !list_empty(&vm->as.lru_node))) in panthor_vm_idle()
814 list_add_tail(&vm->as.lru_node, &ptdev->mmu->as.lru_list); in panthor_vm_idle()
816 refcount_set(&vm->as.active_cnt, 0); in panthor_vm_idle()
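
A sketch of the expected pairing between panthor_vm_active() and panthor_vm_idle() around GPU activity, as the kernel-doc above describes; the surrounding function is hypothetical and the listing only shows that panthor_vm_active() returns an int:

	/* Hypothetical submission path: grab an AS slot, use it, then drop it. */
	static int run_on_vm(struct panthor_vm *vm)
	{
		int ret;

		ret = panthor_vm_active(vm);	/* binds an AS slot, clears unhandled_fault */
		if (ret)
			return ret;

		/* ... queue GPU work against panthor_vm_as(vm) here ... */

		panthor_vm_idle(vm);		/* puts the VM back on the AS LRU list */
		return 0;
	}
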
820 u32 panthor_vm_page_size(struct panthor_vm *vm) in panthor_vm_page_size() argument
822 const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops); in panthor_vm_page_size()
828 static void panthor_vm_stop(struct panthor_vm *vm) in panthor_vm_stop() argument
830 drm_sched_stop(&vm->sched, NULL); in panthor_vm_stop()
833 static void panthor_vm_start(struct panthor_vm *vm) in panthor_vm_start() argument
835 drm_sched_start(&vm->sched, 0); in panthor_vm_start()
839 * panthor_vm_as() - Get the AS slot attached to a VM
840 * @vm: VM to get the AS slot of.
842 * Return: -1 if the VM is not assigned an AS slot yet, >= 0 otherwise.
844 int panthor_vm_as(struct panthor_vm *vm) in panthor_vm_as() argument
846 return vm->as.id; in panthor_vm_as()
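
As a small illustration of how the two accessors above are typically combined, a hedged sketch; the alignment check itself is an assumption, not lifted from the driver:

	/* Hypothetical validation of a user-supplied VA range. */
	static bool range_is_vm_aligned(struct panthor_vm *vm, u64 va, u64 size)
	{
		u32 pg_sz = panthor_vm_page_size(vm);	/* io-pgtable granule, e.g. SZ_4K */

		return !(va & (pg_sz - 1)) && !(size & (pg_sz - 1));
	}
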
870 static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size) in panthor_vm_flush_range() argument
872 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_flush_range()
875 if (vm->as.id < 0) in panthor_vm_flush_range()
882 ret = mmu_hw_do_operation(vm, iova, size, AS_COMMAND_FLUSH_PT); in panthor_vm_flush_range()
888 static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size) in panthor_vm_unmap_pages() argument
890 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_unmap_pages()
891 struct io_pgtable_ops *ops = vm->pgtbl_ops; in panthor_vm_unmap_pages()
894 drm_dbg(&ptdev->base, "unmap: as=%d, iova=%llx, len=%llx", vm->as.id, iova, size); in panthor_vm_unmap_pages()
907 panthor_vm_flush_range(vm, iova, offset + unmapped_sz); in panthor_vm_unmap_pages()
913 return panthor_vm_flush_range(vm, iova, size); in panthor_vm_unmap_pages()
917 panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot, in panthor_vm_map_pages() argument
920 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_map_pages()
923 struct io_pgtable_ops *ops = vm->pgtbl_ops; in panthor_vm_map_pages()
945 vm->as.id, iova, &paddr, len); in panthor_vm_map_pages()
965 panthor_vm_unmap_pages(vm, start_iova, in panthor_vm_map_pages()
977 return panthor_vm_flush_range(vm, start_iova, iova - start_iova); in panthor_vm_map_pages()
1000 * @vm: VM to allocate a region on.
1007 * need to be mapped to the userspace VM, in the region reserved for kernel
1015 panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size, in panthor_vm_alloc_va() argument
1018 ssize_t vm_pgsz = panthor_vm_page_size(vm); in panthor_vm_alloc_va()
1027 mutex_lock(&vm->mm_lock); in panthor_vm_alloc_va()
1031 ret = drm_mm_reserve_node(&vm->mm, va_node); in panthor_vm_alloc_va()
1033 ret = drm_mm_insert_node_in_range(&vm->mm, va_node, size, in panthor_vm_alloc_va()
1035 0, vm->kernel_auto_va.start, in panthor_vm_alloc_va()
1036 vm->kernel_auto_va.end, in panthor_vm_alloc_va()
1039 mutex_unlock(&vm->mm_lock); in panthor_vm_alloc_va()
1046 * @vm: VM to free the region on.
1049 void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node) in panthor_vm_free_va() argument
1051 mutex_lock(&vm->mm_lock); in panthor_vm_free_va()
1053 mutex_unlock(&vm->mm_lock); in panthor_vm_free_va()
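
A sketch of the kernel-internal VA reservation pattern built from the two helpers above; the trailing struct drm_mm_node * parameter of panthor_vm_alloc_va() is inferred from the drm_mm_reserve_node() call in its body and should be treated as an assumption:

	/* Hypothetical kernel-object VA reservation. */
	static int reserve_kernel_va(struct panthor_vm *vm, u64 va, u64 size,
				     struct drm_mm_node *va_node)
	{
		int ret;

		ret = panthor_vm_alloc_va(vm, va, size, va_node);	/* reserve [va, va+size) */
		if (ret)
			return ret;

		/* ... use the range starting at va_node->start ... */

		panthor_vm_free_va(vm, va_node);	/* release it when done */
		return 0;
	}
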
1059 struct drm_gpuvm *vm = vm_bo->vm; in panthor_vm_bo_put() local
1064 * Same goes for the VM, since we take the VM resv lock. in panthor_vm_bo_put()
1067 drm_gpuvm_get(vm); in panthor_vm_bo_put()
1076 dma_resv_lock(drm_gpuvm_resv(vm), NULL); in panthor_vm_bo_put()
1080 dma_resv_unlock(drm_gpuvm_resv(vm)); in panthor_vm_bo_put()
1088 drm_gpuvm_put(vm); in panthor_vm_bo_put()
1093 struct panthor_vm *vm) in panthor_vm_cleanup_op_ctx() argument
1179 struct panthor_vm *vm, in panthor_vm_prepare_map_op_ctx() argument
1201 /* If the BO has an exclusive VM attached, it can't be mapped to other VMs. */ in panthor_vm_prepare_map_op_ctx()
1203 bo->exclusive_vm_root_gem != panthor_vm_root_gem(vm)) in panthor_vm_prepare_map_op_ctx()
1236 preallocated_vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base); in panthor_vm_prepare_map_op_ctx()
1246 * pre-allocated BO if the <BO,VM> association exists. Given we in panthor_vm_prepare_map_op_ctx()
1248 * be called immediately, and we have to hold the VM resv lock when in panthor_vm_prepare_map_op_ctx()
1251 dma_resv_lock(panthor_vm_resv(vm), NULL); in panthor_vm_prepare_map_op_ctx()
1255 dma_resv_unlock(panthor_vm_resv(vm)); in panthor_vm_prepare_map_op_ctx()
1257 /* If a vm_bo for this <VM,BO> combination exists, it already in panthor_vm_prepare_map_op_ctx()
1295 dma_resv_lock(panthor_vm_resv(vm), NULL); in panthor_vm_prepare_map_op_ctx()
1297 dma_resv_unlock(panthor_vm_resv(vm)); in panthor_vm_prepare_map_op_ctx()
1302 panthor_vm_cleanup_op_ctx(op_ctx, vm); in panthor_vm_prepare_map_op_ctx()
1307 struct panthor_vm *vm, in panthor_vm_prepare_unmap_op_ctx() argument
1354 panthor_vm_cleanup_op_ctx(op_ctx, vm); in panthor_vm_prepare_unmap_op_ctx()
1359 struct panthor_vm *vm) in panthor_vm_prepare_sync_only_op_ctx() argument
1368 * @vm: VM to look into.
1381 panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset) in panthor_vm_get_bo_for_va() argument
1387 /* Take the VM lock to prevent concurrent map/unmap operations. */ in panthor_vm_get_bo_for_va()
1388 mutex_lock(&vm->op_lock); in panthor_vm_get_bo_for_va()
1389 gpuva = drm_gpuva_find_first(&vm->base, va, 1); in panthor_vm_get_bo_for_va()
1396 mutex_unlock(&vm->op_lock); in panthor_vm_get_bo_for_va()
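
A hedged sketch of a fault-decoding caller for the lookup above; the return type of panthor_vm_get_bo_for_va() is not visible in this listing, so it is assumed to hand back a refcounted struct panthor_gem_object * (or NULL/ERR_PTR when nothing is mapped at the address):

	/* Hypothetical: resolve a faulting VA back to its backing BO. */
	static void describe_va(struct panthor_vm *vm, u64 va)
	{
		struct panthor_gem_object *bo;
		u64 bo_offset;

		bo = panthor_vm_get_bo_for_va(vm, va, &bo_offset);
		if (IS_ERR_OR_NULL(bo))
			return;

		/* ... va lands bo_offset bytes into bo ... */

		drm_gem_object_put(&bo->base.base);	/* assumption: reference must be dropped */
	}
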
1417 /* If the task VM size is smaller than the GPU VA range, pick this in panthor_vm_create_get_user_va_range()
1423 /* If the GPU VA range is smaller than the task VM size, we in panthor_vm_create_get_user_va_range()
1474 * panthor_vm_pool_create_vm() - Create a VM
1476 * @pool: The VM pool to create this VM on.
1477 * @args: VM creation args.
1479 * Return: a positive VM ID on success, a negative error code otherwise.
1486 struct panthor_vm *vm; in panthor_vm_pool_create_vm() local
1494 vm = panthor_vm_create(ptdev, false, kernel_va_start, kernel_va_range, in panthor_vm_pool_create_vm()
1496 if (IS_ERR(vm)) in panthor_vm_pool_create_vm()
1497 return PTR_ERR(vm); in panthor_vm_pool_create_vm()
1499 ret = xa_alloc(&pool->xa, &id, vm, in panthor_vm_pool_create_vm()
1503 panthor_vm_put(vm); in panthor_vm_pool_create_vm()
1511 static void panthor_vm_destroy(struct panthor_vm *vm) in panthor_vm_destroy() argument
1513 if (!vm) in panthor_vm_destroy()
1516 vm->destroyed = true; in panthor_vm_destroy()
1518 mutex_lock(&vm->heaps.lock); in panthor_vm_destroy()
1519 panthor_heap_pool_destroy(vm->heaps.pool); in panthor_vm_destroy()
1520 vm->heaps.pool = NULL; in panthor_vm_destroy()
1521 mutex_unlock(&vm->heaps.lock); in panthor_vm_destroy()
1523 drm_WARN_ON(&vm->ptdev->base, in panthor_vm_destroy()
1524 panthor_vm_unmap_range(vm, vm->base.mm_start, vm->base.mm_range)); in panthor_vm_destroy()
1525 panthor_vm_put(vm); in panthor_vm_destroy()
1529 * panthor_vm_pool_destroy_vm() - Destroy a VM.
1530 * @pool: VM pool.
1531 * @handle: VM handle.
1533 * This function doesn't free the VM object or its resources; it just kills
1539 * The VM resources are freed when the last reference on the VM object is
1546 struct panthor_vm *vm; in panthor_vm_pool_destroy_vm() local
1548 vm = xa_erase(&pool->xa, handle); in panthor_vm_pool_destroy_vm()
1550 panthor_vm_destroy(vm); in panthor_vm_pool_destroy_vm()
1552 return vm ? 0 : -EINVAL; in panthor_vm_pool_destroy_vm()
1556 * panthor_vm_pool_get_vm() - Retrieve VM object bound to a VM handle
1557 * @pool: VM pool to check.
1558 * @handle: Handle of the VM to retrieve.
1560 * Return: A valid pointer if the VM exists, NULL otherwise.
1565 struct panthor_vm *vm; in panthor_vm_pool_get_vm() local
1568 vm = panthor_vm_get(xa_load(&pool->xa, handle)); in panthor_vm_pool_get_vm()
1571 return vm; in panthor_vm_pool_get_vm()
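
A sketch of the ioctl-side lookup built on the pool accessor above: translate a userspace VM handle into a VM reference and drop it when done. The handle type (u32) and the calling function are assumptions:

	/* Hypothetical handle-based access to a VM. */
	static int do_something_with_handle(struct panthor_vm_pool *pool, u32 handle)
	{
		struct panthor_vm *vm = panthor_vm_pool_get_vm(pool, handle);

		if (!vm)
			return -EINVAL;

		/* ... the reference keeps the VM alive even if the handle is destroyed ... */

		panthor_vm_put(vm);
		return 0;
	}
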
1575 * panthor_vm_pool_destroy() - Destroy a VM pool.
1585 struct panthor_vm *vm; in panthor_vm_pool_destroy() local
1591 xa_for_each(&pfile->vms->xa, i, vm) in panthor_vm_pool_destroy()
1592 panthor_vm_destroy(vm); in panthor_vm_pool_destroy()
1599 * panthor_vm_pool_create() - Create a VM pool
1690 /* We don't handle VM faults at the moment, so let's just clear the in panthor_mmu_irq_handler()
1702 if (ptdev->mmu->as.slots[as].vm) in panthor_mmu_irq_handler()
1703 ptdev->mmu->as.slots[as].vm->unhandled_fault = true; in panthor_mmu_irq_handler()
1732 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; in panthor_mmu_suspend() local
1734 if (vm) { in panthor_mmu_suspend()
1736 panthor_vm_release_as_locked(vm); in panthor_mmu_suspend()
1768 * don't get asked to do a VM operation while the GPU is down.
1775 struct panthor_vm *vm; in panthor_mmu_pre_reset() local
1779 mutex_lock(&ptdev->mmu->vm.lock); in panthor_mmu_pre_reset()
1780 ptdev->mmu->vm.reset_in_progress = true; in panthor_mmu_pre_reset()
1781 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) in panthor_mmu_pre_reset()
1782 panthor_vm_stop(vm); in panthor_mmu_pre_reset()
1783 mutex_unlock(&ptdev->mmu->vm.lock); in panthor_mmu_pre_reset()
1795 struct panthor_vm *vm; in panthor_mmu_post_reset() local
1806 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; in panthor_mmu_post_reset() local
1808 if (vm) in panthor_mmu_post_reset()
1809 panthor_vm_release_as_locked(vm); in panthor_mmu_post_reset()
1817 mutex_lock(&ptdev->mmu->vm.lock); in panthor_mmu_post_reset()
1818 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) { in panthor_mmu_post_reset()
1819 panthor_vm_start(vm); in panthor_mmu_post_reset()
1821 ptdev->mmu->vm.reset_in_progress = false; in panthor_mmu_post_reset()
1822 mutex_unlock(&ptdev->mmu->vm.lock); in panthor_mmu_post_reset()
1827 struct panthor_vm *vm = container_of(gpuvm, struct panthor_vm, base); in panthor_vm_free() local
1828 struct panthor_device *ptdev = vm->ptdev; in panthor_vm_free()
1830 mutex_lock(&vm->heaps.lock); in panthor_vm_free()
1831 if (drm_WARN_ON(&ptdev->base, vm->heaps.pool)) in panthor_vm_free()
1832 panthor_heap_pool_destroy(vm->heaps.pool); in panthor_vm_free()
1833 mutex_unlock(&vm->heaps.lock); in panthor_vm_free()
1834 mutex_destroy(&vm->heaps.lock); in panthor_vm_free()
1836 mutex_lock(&ptdev->mmu->vm.lock); in panthor_vm_free()
1837 list_del(&vm->node); in panthor_vm_free()
1843 if (ptdev->mmu->vm.reset_in_progress) in panthor_vm_free()
1844 panthor_vm_start(vm); in panthor_vm_free()
1845 mutex_unlock(&ptdev->mmu->vm.lock); in panthor_vm_free()
1847 drm_sched_entity_destroy(&vm->entity); in panthor_vm_free()
1848 drm_sched_fini(&vm->sched); in panthor_vm_free()
1851 if (vm->as.id >= 0) { in panthor_vm_free()
1855 panthor_mmu_as_disable(ptdev, vm->as.id); in panthor_vm_free()
1859 ptdev->mmu->as.slots[vm->as.id].vm = NULL; in panthor_vm_free()
1860 clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask); in panthor_vm_free()
1861 list_del(&vm->as.lru_node); in panthor_vm_free()
1865 free_io_pgtable_ops(vm->pgtbl_ops); in panthor_vm_free()
1867 drm_mm_takedown(&vm->mm); in panthor_vm_free()
1868 kfree(vm); in panthor_vm_free()
1872 * panthor_vm_put() - Release a reference on a VM
1873 * @vm: VM to release the reference on. Can be NULL.
1875 void panthor_vm_put(struct panthor_vm *vm) in panthor_vm_put() argument
1877 drm_gpuvm_put(vm ? &vm->base : NULL); in panthor_vm_put()
1881 * panthor_vm_get() - Get a VM reference
1882 * @vm: VM to get the reference on. Can be NULL.
1884 * Return: @vm value.
1886 struct panthor_vm *panthor_vm_get(struct panthor_vm *vm) in panthor_vm_get() argument
1888 if (vm) in panthor_vm_get()
1889 drm_gpuvm_get(&vm->base); in panthor_vm_get()
1891 return vm; in panthor_vm_get()
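
The get/put pair above is NULL-tolerant and follows the usual long-lived-reference convention, which this listing itself uses (job->vm = panthor_vm_get(vm), later panthor_vm_put(job->vm)); a condensed sketch with a hypothetical owner object:

	/* Hypothetical object that keeps a VM alive for its own lifetime. */
	struct my_ctx {
		struct panthor_vm *vm;
	};

	static void my_ctx_init(struct my_ctx *ctx, struct panthor_vm *vm)
	{
		ctx->vm = panthor_vm_get(vm);	/* takes a drm_gpuvm reference */
	}

	static void my_ctx_release(struct my_ctx *ctx)
	{
		panthor_vm_put(ctx->vm);	/* NULL-safe */
	}
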
1895 * panthor_vm_get_heap_pool() - Get the heap pool attached to a VM
1896 * @vm: VM to query the heap pool on.
1899 * Heap pools are per-VM. This function allows one to retrieve the heap pool
1900 * attached to a VM.
1908 struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create) in panthor_vm_get_heap_pool() argument
1912 mutex_lock(&vm->heaps.lock); in panthor_vm_get_heap_pool()
1913 if (!vm->heaps.pool && create) { in panthor_vm_get_heap_pool()
1914 if (vm->destroyed) in panthor_vm_get_heap_pool()
1917 pool = panthor_heap_pool_create(vm->ptdev, vm); in panthor_vm_get_heap_pool()
1920 vm->heaps.pool = panthor_heap_pool_get(pool); in panthor_vm_get_heap_pool()
1922 pool = panthor_heap_pool_get(vm->heaps.pool); in panthor_vm_get_heap_pool()
1926 mutex_unlock(&vm->heaps.lock); in panthor_vm_get_heap_pool()
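
A hedged sketch of a read-only caller of panthor_vm_get_heap_pool(); whether the non-create path returns NULL or an ERR_PTR is not visible here, so both are treated as "no pool", and the matching panthor_heap_pool_put() is an assumption:

	/* Hypothetical query of the per-VM heap pool without creating one. */
	static void query_heap_pool(struct panthor_vm *vm)
	{
		struct panthor_heap_pool *pool = panthor_vm_get_heap_pool(vm, false);

		if (IS_ERR_OR_NULL(pool))
			return;

		/* ... the pool is refcounted via panthor_heap_pool_get() ... */

		panthor_heap_pool_put(pool);	/* assumption: matching put exists */
	}
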
1933 * heaps over all the heap pools in a VM
1937 * Calculate all heap chunk sizes in all heap pools bound to a VM. If the VM
1942 struct panthor_vm *vm; in panthor_vm_heaps_sizes() local
1949 xa_for_each(&pfile->vms->xa, i, vm) { in panthor_vm_heaps_sizes()
1950 size_t size = panthor_heap_pool_size(vm->heaps.pool); in panthor_vm_heaps_sizes()
1952 if (vm->as.id >= 0) in panthor_vm_heaps_sizes()
2000 static void panthor_vma_link(struct panthor_vm *vm, in panthor_vma_link() argument
2008 drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo)); in panthor_vma_link()
2012 static void panthor_vma_unlink(struct panthor_vm *vm, in panthor_vma_unlink() argument
2027 list_add_tail(&vma->node, &vm->op_ctx->returned_vmas); in panthor_vma_unlink()
2043 struct panthor_vm *vm = priv; in panthor_gpuva_sm_step_map() local
2044 struct panthor_vm_op_ctx *op_ctx = vm->op_ctx; in panthor_gpuva_sm_step_map()
2053 ret = panthor_vm_map_pages(vm, op->map.va.addr, flags_to_prot(vma->flags), in panthor_gpuva_sm_step_map()
2062 drm_gpuva_map(&vm->base, &vma->base, &op->map); in panthor_gpuva_sm_step_map()
2063 panthor_vma_link(vm, vma, op_ctx->map.vm_bo); in panthor_gpuva_sm_step_map()
2072 struct panthor_vm *vm = priv; in panthor_gpuva_sm_step_remap() local
2073 struct panthor_vm_op_ctx *op_ctx = vm->op_ctx; in panthor_gpuva_sm_step_remap()
2079 ret = panthor_vm_unmap_pages(vm, unmap_start, unmap_range); in panthor_gpuva_sm_step_remap()
2103 panthor_vma_link(vm, prev_vma, in panthor_gpuva_sm_step_remap()
2108 panthor_vma_link(vm, next_vma, in panthor_gpuva_sm_step_remap()
2112 panthor_vma_unlink(vm, unmap_vma); in panthor_gpuva_sm_step_remap()
2120 struct panthor_vm *vm = priv; in panthor_gpuva_sm_step_unmap() local
2123 ret = panthor_vm_unmap_pages(vm, unmap_vma->base.va.addr, in panthor_gpuva_sm_step_unmap()
2125 if (drm_WARN_ON(&vm->ptdev->base, ret)) in panthor_gpuva_sm_step_unmap()
2129 panthor_vma_unlink(vm, unmap_vma); in panthor_gpuva_sm_step_unmap()
2141 * panthor_vm_resv() - Get the dma_resv object attached to a VM.
2142 * @vm: VM to get the dma_resv of.
2146 struct dma_resv *panthor_vm_resv(struct panthor_vm *vm) in panthor_vm_resv() argument
2148 return drm_gpuvm_resv(&vm->base); in panthor_vm_resv()
2151 struct drm_gem_object *panthor_vm_root_gem(struct panthor_vm *vm) in panthor_vm_root_gem() argument
2153 if (!vm) in panthor_vm_root_gem()
2156 return vm->base.r_obj; in panthor_vm_root_gem()
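
The resv accessor above is what the map-context code in this listing already relies on; a condensed sketch of that locking pattern, mirroring the dma_resv_lock(panthor_vm_resv(vm), NULL) calls shown in panthor_vm_prepare_map_op_ctx():

	/* Lock the VM's shared reservation object around vm_bo manipulation. */
	static void with_vm_resv_locked(struct panthor_vm *vm)
	{
		dma_resv_lock(panthor_vm_resv(vm), NULL);
		/* ... drm_gpuvm_bo_put() / vm_bo list updates go here ... */
		dma_resv_unlock(panthor_vm_resv(vm));
	}
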
2160 panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op, in panthor_vm_exec_op() argument
2169 mutex_lock(&vm->op_lock); in panthor_vm_exec_op()
2170 vm->op_ctx = op; in panthor_vm_exec_op()
2173 if (vm->unusable) { in panthor_vm_exec_op()
2178 ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range, in panthor_vm_exec_op()
2183 ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range); in panthor_vm_exec_op()
2192 vm->unusable = true; in panthor_vm_exec_op()
2194 vm->op_ctx = NULL; in panthor_vm_exec_op()
2195 mutex_unlock(&vm->op_lock); in panthor_vm_exec_op()
2208 * drm_sched finished fence, but we also flag the VM as unusable, because in panthor_vm_bind_run_job()
2209 * a failure in the async VM_BIND results in an inconsistent state. VM needs in panthor_vm_bind_run_job()
2213 ret = panthor_vm_exec_op(job->vm, &job->ctx, true); in panthor_vm_bind_run_job()
2226 panthor_vm_cleanup_op_ctx(&job->ctx, job->vm); in panthor_vm_bind_job_release()
2227 panthor_vm_put(job->vm); in panthor_vm_bind_job_release()
2272 * panthor_vm_create() - Create a VM
2274 * @for_mcu: True if this is the FW MCU VM.
2294 .submit_wq = ptdev->mmu->vm.wq, in panthor_vm_create()
2299 .name = "panthor-vm-bind", in panthor_vm_create()
2304 struct panthor_vm *vm; in panthor_vm_create() local
2307 vm = kzalloc(sizeof(*vm), GFP_KERNEL); in panthor_vm_create()
2308 if (!vm) in panthor_vm_create()
2311 /* We allocate a dummy GEM for the VM. */ in panthor_vm_create()
2318 mutex_init(&vm->heaps.lock); in panthor_vm_create()
2319 vm->for_mcu = for_mcu; in panthor_vm_create()
2320 vm->ptdev = ptdev; in panthor_vm_create()
2321 mutex_init(&vm->op_lock); in panthor_vm_create()
2332 mutex_init(&vm->mm_lock); in panthor_vm_create()
2333 drm_mm_init(&vm->mm, kernel_va_start, kernel_va_size); in panthor_vm_create()
2334 vm->kernel_auto_va.start = auto_kernel_va_start; in panthor_vm_create()
2335 vm->kernel_auto_va.end = vm->kernel_auto_va.start + auto_kernel_va_size - 1; in panthor_vm_create()
2337 INIT_LIST_HEAD(&vm->node); in panthor_vm_create()
2338 INIT_LIST_HEAD(&vm->as.lru_node); in panthor_vm_create()
2339 vm->as.id = -1; in panthor_vm_create()
2340 refcount_set(&vm->as.active_cnt, 0); in panthor_vm_create()
2353 vm->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &pgtbl_cfg, vm); in panthor_vm_create()
2354 if (!vm->pgtbl_ops) { in panthor_vm_create()
2359 ret = drm_sched_init(&vm->sched, &sched_args); in panthor_vm_create()
2363 sched = &vm->sched; in panthor_vm_create()
2364 ret = drm_sched_entity_init(&vm->entity, 0, &sched, 1, NULL); in panthor_vm_create()
2368 mair = io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg.arm_lpae_s1_cfg.mair; in panthor_vm_create()
2369 vm->memattr = mair_to_memattr(mair, ptdev->coherent); in panthor_vm_create()
2371 mutex_lock(&ptdev->mmu->vm.lock); in panthor_vm_create()
2372 list_add_tail(&vm->node, &ptdev->mmu->vm.list); in panthor_vm_create()
2375 if (ptdev->mmu->vm.reset_in_progress) in panthor_vm_create()
2376 panthor_vm_stop(vm); in panthor_vm_create()
2377 mutex_unlock(&ptdev->mmu->vm.lock); in panthor_vm_create()
2382 drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM", in panthor_vm_create()
2386 return vm; in panthor_vm_create()
2389 drm_sched_fini(&vm->sched); in panthor_vm_create()
2392 free_io_pgtable_ops(vm->pgtbl_ops); in panthor_vm_create()
2395 drm_mm_takedown(&vm->mm); in panthor_vm_create()
2399 kfree(vm); in panthor_vm_create()
2405 struct panthor_vm *vm, in panthor_vm_bind_prepare_op_ctx() argument
2409 ssize_t vm_pgsz = panthor_vm_page_size(vm); in panthor_vm_bind_prepare_op_ctx()
2420 ret = panthor_vm_prepare_map_op_ctx(op_ctx, vm, in panthor_vm_bind_prepare_op_ctx()
2436 return panthor_vm_prepare_unmap_op_ctx(op_ctx, vm, op->va, op->size); in panthor_vm_bind_prepare_op_ctx()
2451 panthor_vm_prepare_sync_only_op_ctx(op_ctx, vm); in panthor_vm_bind_prepare_op_ctx()
2470 * @vm: VM targeted by the VM_BIND job.
2471 * @op: VM operation data.
2477 struct panthor_vm *vm, in panthor_vm_bind_job_create() argument
2483 if (!vm) in panthor_vm_bind_job_create()
2486 if (vm->destroyed || vm->unusable) in panthor_vm_bind_job_create()
2493 ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &job->ctx); in panthor_vm_bind_job_create()
2501 job->vm = panthor_vm_get(vm); in panthor_vm_bind_job_create()
2503 ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm, file->client_id); in panthor_vm_bind_job_create()
2519 * Locks and prepare the VM resv.
2531 /* Acquire the VM lock and reserve a slot for this VM bind job. */ in panthor_vm_bind_job_prepare_resvs()
2532 ret = drm_gpuvm_prepare_vm(&job->vm->base, exec, 1); in panthor_vm_bind_job_prepare_resvs()
2557 drm_gpuvm_resv_add_fence(&job->vm->base, exec, in panthor_vm_bind_job_update_resvs()
2563 void panthor_vm_update_resvs(struct panthor_vm *vm, struct drm_exec *exec, in panthor_vm_update_resvs() argument
2568 drm_gpuvm_resv_add_fence(&vm->base, exec, fence, private_usage, extobj_usage); in panthor_vm_update_resvs()
2574 * @vm: VM targeted by the VM operation.
2575 * @op: Data describing the VM operation.
2580 struct panthor_vm *vm, in panthor_vm_bind_exec_sync_op() argument
2593 ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &op_ctx); in panthor_vm_bind_exec_sync_op()
2597 ret = panthor_vm_exec_op(vm, &op_ctx, false); in panthor_vm_bind_exec_sync_op()
2598 panthor_vm_cleanup_op_ctx(&op_ctx, vm); in panthor_vm_bind_exec_sync_op()
2604 * panthor_vm_map_bo_range() - Map a GEM object range to a VM
2605 * @vm: VM to map the GEM to.
2618 int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo, in panthor_vm_map_bo_range() argument
2624 ret = panthor_vm_prepare_map_op_ctx(&op_ctx, vm, bo, offset, size, va, flags); in panthor_vm_map_bo_range()
2628 ret = panthor_vm_exec_op(vm, &op_ctx, false); in panthor_vm_map_bo_range()
2629 panthor_vm_cleanup_op_ctx(&op_ctx, vm); in panthor_vm_map_bo_range()
2636 * @vm: VM to unmap the region from.
2645 int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size) in panthor_vm_unmap_range() argument
2650 ret = panthor_vm_prepare_unmap_op_ctx(&op_ctx, vm, va, size); in panthor_vm_unmap_range()
2654 ret = panthor_vm_exec_op(vm, &op_ctx, false); in panthor_vm_unmap_range()
2655 panthor_vm_cleanup_op_ctx(&op_ctx, vm); in panthor_vm_unmap_range()
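
A sketch of the synchronous kernel-side pairing of the two helpers above; the exact parameter types for offset/size/va/flags are inferred from the call sites in this listing and should be treated as assumptions:

	/* Hypothetical: map a BO range into the VM, then tear it down again. */
	static int map_then_unmap(struct panthor_vm *vm, struct panthor_gem_object *bo,
				  u64 offset, u64 size, u64 va, u32 flags)
	{
		int ret;

		ret = panthor_vm_map_bo_range(vm, bo, offset, size, va, flags);
		if (ret)
			return ret;

		/* ... object lifetime ends ... */
		return panthor_vm_unmap_range(vm, va, size);
	}
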
2661 * panthor_vm_prepare_mapped_bos_resvs() - Prepare resvs on VM BOs.
2663 * @vm: VM targeted by the GPU job.
2666 * GPU jobs assume all BOs bound to the VM at the time the job is submitted
2668 * need to reserve a slot on all BOs mapped to a VM and update this slot with
2673 int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec, struct panthor_vm *vm, in panthor_vm_prepare_mapped_bos_resvs() argument
2678 /* Acquire the VM lock and reserve a slot for this GPU job. */ in panthor_vm_prepare_mapped_bos_resvs()
2679 ret = drm_gpuvm_prepare_vm(&vm->base, exec, slot_count); in panthor_vm_prepare_mapped_bos_resvs()
2683 return drm_gpuvm_prepare_objects(&vm->base, exec, slot_count); in panthor_vm_prepare_mapped_bos_resvs()
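
A sketch of how a GPU-job submission path might drive the two resv helpers above inside a drm_exec retry loop; the drm_exec calls, the fence type and the DMA_RESV_USAGE_BOOKKEEP values are assumptions about the surrounding code, not taken from this listing:

	/* Hypothetical: lock the VM and all mapped BOs, then attach a job fence. */
	static int attach_job_fence(struct panthor_vm *vm, struct dma_fence *fence)
	{
		struct drm_exec exec;
		int ret;

		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
		drm_exec_until_all_locked(&exec) {
			ret = panthor_vm_prepare_mapped_bos_resvs(&exec, vm, 1);
			drm_exec_retry_on_contention(&exec);
			if (ret)
				goto out;
		}

		panthor_vm_update_resvs(vm, &exec, fence,
					DMA_RESV_USAGE_BOOKKEEP,
					DMA_RESV_USAGE_BOOKKEEP);
	out:
		drm_exec_fini(&exec);
		return ret;
	}
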
2700 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; in panthor_mmu_unplug() local
2702 if (vm) { in panthor_mmu_unplug()
2704 panthor_vm_release_as_locked(vm); in panthor_mmu_unplug()
2737 INIT_LIST_HEAD(&mmu->vm.list); in panthor_mmu_init()
2738 ret = drmm_mutex_init(&ptdev->base, &mmu->vm.lock); in panthor_mmu_init()
2753 mmu->vm.wq = alloc_workqueue("panthor-vm-bind", WQ_UNBOUND, 0); in panthor_mmu_init()
2754 if (!mmu->vm.wq) in panthor_mmu_init()
2766 return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq); in panthor_mmu_init()
2770 static int show_vm_gpuvas(struct panthor_vm *vm, struct seq_file *m) in show_vm_gpuvas() argument
2774 mutex_lock(&vm->op_lock); in show_vm_gpuvas()
2775 ret = drm_debugfs_gpuva_info(m, &vm->base); in show_vm_gpuvas()
2776 mutex_unlock(&vm->op_lock); in show_vm_gpuvas()
2787 struct panthor_vm *vm; in show_each_vm() local
2790 mutex_lock(&ptdev->mmu->vm.lock); in show_each_vm()
2791 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) { in show_each_vm()
2792 ret = show(vm, m); in show_each_vm()
2798 mutex_unlock(&ptdev->mmu->vm.lock); in show_each_vm()