Lines matching refs: xe_vm
Each entry: <file line number> <matching source line> in <enclosing function>(); file-scope matches (e.g. the forward declarations at 793 and 795) carry no function suffix.
43 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm) in xe_vm_obj()
70 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_userptr_pin_pages()
79 static bool preempt_fences_waiting(struct xe_vm *vm) in preempt_fences_waiting()
105 static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list, in alloc_preempt_fences()
126 static int wait_for_existing_preempt_fences(struct xe_vm *vm) in wait_for_existing_preempt_fences()
148 static bool xe_vm_is_idle(struct xe_vm *vm) in xe_vm_is_idle()
161 static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list) in arm_preempt_fences()
180 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo) in add_preempt_fences()
204 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm, in resume_and_reinstall_preempt_fences()
220 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in xe_vm_add_compute_exec_queue()
281 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in xe_vm_remove_compute_exec_queue()
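Note: the preempt-fence helpers above (lines 79 through 204) are file-local, but the add/remove pair at lines 220 and 281 is the entry point for attaching a long-running compute exec queue to a VM. A minimal sketch of the pairing, using only the signatures shown above; that the add path returns 0 or a negative errno is an assumption here:

/*
 * Sketch only: pairs the add/remove entry points listed above.
 * Assumptions: vm and q are valid, the header names match the
 * in-tree driver layout, and xe_vm_add_compute_exec_queue()
 * returns 0 on success or a negative errno.
 */
#include "xe_vm.h"
#include "xe_exec_queue_types.h"

static int run_on_compute_queue(struct xe_vm *vm, struct xe_exec_queue *q)
{
        int err;

        err = xe_vm_add_compute_exec_queue(vm, q);
        if (err)
                return err;     /* preempt-fence setup failed */

        /* ... submit long-running work on q ... */

        xe_vm_remove_compute_exec_queue(vm, q);
        return 0;
}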
310 int __xe_vm_userptr_needs_repin(struct xe_vm *vm) in __xe_vm_userptr_needs_repin()
328 void xe_vm_kill(struct xe_vm *vm, bool unlocked) in xe_vm_kill()
384 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); in xe_gpuvm_validate()
416 int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec, in xe_vm_validate_rebind()
442 static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm, in xe_preempt_work_begin()
481 struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work); in preempt_rebind_work_func()
590 struct xe_vm *vm = xe_vma_vm(vma); in vma_userptr_invalidate()
659 int xe_vm_userptr_pin(struct xe_vm *vm) in xe_vm_userptr_pin()
719 int xe_vm_userptr_check_repin(struct xe_vm *vm) in xe_vm_userptr_check_repin()
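The userptr helpers (__xe_vm_userptr_needs_repin() at line 310, xe_vm_userptr_pin() at line 659, xe_vm_userptr_check_repin() at line 719) form a check-then-pin pattern for userptr VMAs invalidated via the path at line 590. A hedged caller sketch; the return convention of the check (nonzero meaning a repin is needed) and the lock it expects held are assumptions:

/*
 * Sketch: re-pin userptr pages after a possible invalidation.
 * Assumptions: xe_vm_userptr_check_repin() returns nonzero
 * (e.g. -EAGAIN) when a repin is required, and the caller holds
 * whatever VM-level lock xe_vm.c documents for these helpers.
 */
#include "xe_vm.h"

static int maybe_repin_userptrs(struct xe_vm *vm)
{
        if (!xe_vm_userptr_check_repin(vm))
                return 0;       /* nothing was invalidated */

        return xe_vm_userptr_pin(vm);
}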
793 static struct dma_fence *ops_execute(struct xe_vm *vm,
795 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
799 int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) in xe_vm_rebind()
854 struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_mask) in xe_vma_rebind()
908 static struct xe_vma *xe_vma_create(struct xe_vm *vm, in xe_vma_create()
1011 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_destroy_late()
1060 struct xe_vm *vm = xe_vma_vm(vma); in xe_vma_destroy()
1103 struct xe_vm *vm = xe_vma_vm(vma); in xe_vm_lock_vma()
1135 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range) in xe_vm_find_overlapping_vma()
1151 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma) in xe_vm_insert_vma()
1166 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma) in xe_vm_remove_vma()
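Of the VMA-management block above, only xe_vm_find_overlapping_vma() (line 1135) is non-static; create, insert, and remove are file-local. A small overlap-probe sketch, assuming the return type elided by the wrapped signature line is struct xe_vma * and that NULL means no overlap:

/*
 * Sketch: probe whether [addr, addr + size) is already mapped.
 * Assumptions: xe_vm_find_overlapping_vma() returns a
 * struct xe_vma * (the return type sits on the source line
 * above the match) and NULL when nothing overlaps.
 */
#include <linux/types.h>
#include "xe_vm.h"

static bool range_is_free(struct xe_vm *vm, u64 addr, u64 size)
{
        return !xe_vm_find_overlapping_vma(vm, addr, size);
}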
1340 struct xe_vm *vm) in xe_vm_create_scratch()
1356 static void xe_vm_free_scratch(struct xe_vm *vm) in xe_vm_free_scratch()
1376 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) in xe_vm_create()
1379 struct xe_vm *vm; in xe_vm_create()
1525 static void xe_vm_close(struct xe_vm *vm) in xe_vm_close()
1532 void xe_vm_close_and_put(struct xe_vm *vm) in xe_vm_close_and_put()
1636 struct xe_vm *vm = in vm_destroy_work_func()
1637 container_of(w, struct xe_vm, destroy_work); in vm_destroy_work_func()
1668 struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm); in xe_vm_free()
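Lines 1376 through 1668 cover the VM lifecycle: xe_vm_create() builds the VM, xe_vm_close_and_put() tears it down (the static xe_vm_close() at line 1525 is its internal half), and the final freeing is deferred through destroy_work into xe_vm_free(). A caller-side lifecycle sketch; the listing shows only the create signature, so the ERR_PTR() failure convention is an assumption, albeit the standard kernel one:

/*
 * Sketch: create a VM, use it, then drop it.
 * Assumption: xe_vm_create() returns an ERR_PTR() on failure,
 * per the usual kernel convention.
 */
#include <linux/err.h>
#include "xe_vm.h"

static int vm_lifecycle(struct xe_device *xe, u32 flags)
{
        struct xe_vm *vm = xe_vm_create(xe, flags);

        if (IS_ERR(vm))
                return PTR_ERR(vm);

        /* ... bind and use the VM ... */

        xe_vm_close_and_put(vm);
        return 0;
}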
1674 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id) in xe_vm_lookup()
1676 struct xe_vm *vm; in xe_vm_lookup()
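xe_vm_lookup() (line 1674) resolves a per-file VM id to the VM object. Hedged sketch; that a failed lookup returns NULL, and that the reference taken on success is dropped with xe_vm_put() (a helper not present in this listing), are both assumptions:

/*
 * Sketch: resolve a VM id, e.g. from an ioctl payload.
 * Assumptions: NULL on a failed lookup; xe_vm_put() (not shown
 * above) drops the reference the lookup takes.
 */
#include <linux/errno.h>
#include "xe_vm.h"

static int with_vm_by_id(struct xe_file *xef, u32 id)
{
        struct xe_vm *vm = xe_vm_lookup(xef, id);

        if (!vm)
                return -ENOENT;

        /* ... operate on vm ... */

        xe_vm_put(vm);
        return 0;
}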
1687 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile) in xe_vm_pdp4_descriptor()
1694 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) in to_wait_exec_queue()
1725 struct xe_vm *vm; in xe_vm_create_ioctl()
1813 struct xe_vm *vm; in xe_vm_destroy_ioctl()
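xe_vm_create_ioctl() (line 1725) and xe_vm_destroy_ioctl() (line 1813) back the VM uAPI. A userspace-side sketch of the round trip, assuming the drm_xe_vm_create/drm_xe_vm_destroy layouts and ioctl numbers from the kernel's <drm/xe_drm.h>; verify against your installed headers:

/*
 * Userspace sketch for the two ioctl handlers above.
 * Assumes the <drm/xe_drm.h> struct and ioctl names.
 */
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

static int vm_roundtrip(int fd)
{
        struct drm_xe_vm_create create = {};
        struct drm_xe_vm_destroy destroy = {};

        if (ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create) < 0)
                return -1;

        destroy.vm_id = create.vm_id;
        return ioctl(fd, DRM_IOCTL_XE_VM_DESTROY, &destroy);
}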
1842 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma, in prep_vma_destroy()
1907 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, in vm_bind_ioctl_ops_create()
1982 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, in new_vma()
2066 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op) in xe_vma_op_commit()
2129 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, in vm_bind_ioctl_ops_parse()
2277 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op, in xe_vma_op_unwind()
2332 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm, in vm_bind_ioctl_ops_unwind()
2387 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm, in op_lock_and_prep()
2443 struct xe_vm *vm, in vm_bind_ioctl_ops_lock_and_prep()
2500 static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops) in vm_ops_setup_tile_args()
2526 static struct dma_fence *ops_execute(struct xe_vm *vm, in ops_execute()
2620 static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op, in op_add_ufence()
2643 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops, in vm_bind_ioctl_ops_fini()
2670 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm, in vm_bind_ioctl_ops_execute()
2830 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm, in vm_bind_ioctl_signal_fences()
2853 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm, in xe_vma_ops_init()
2922 struct xe_vm *vm; in xe_vm_bind_ioctl()
3133 int xe_vm_lock(struct xe_vm *vm, bool intr) in xe_vm_lock()
3147 void xe_vm_unlock(struct xe_vm *vm) in xe_vm_unlock()
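xe_vm_lock()/xe_vm_unlock() (lines 3133 and 3147) bracket access to VM state. The bool parameter reads as an interruptible-wait flag, which would make a failure path on the lock mandatory; the sketch below assumes exactly that:

/*
 * Sketch: the usual lock/unlock bracket.
 * Assumption: intr == true requests interruptible waiting, so
 * xe_vm_lock() can fail and the error must be propagated.
 */
#include "xe_vm.h"

static int with_vm_locked(struct xe_vm *vm)
{
        int err;

        err = xe_vm_lock(vm, true);
        if (err)
                return err;

        /* ... touch state protected by the VM lock ... */

        xe_vm_unlock(vm);
        return 0;
}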
3241 struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm) in xe_vm_snapshot_capture()