Lines matching references to "vm":

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}

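A minimal usage sketch of the reference-counting pair above, assuming the caller already holds a valid pointer; example_use_vm() and the deferred-work scenario are hypothetical.

static void example_use_vm(struct xe_vm *vm)
{
	/* Take an extra drm_gpuvm reference while the VM is handed off. */
	vm = xe_vm_get(vm);

	/* ... pass 'vm' to deferred work that runs later ... */

	/* Drop the reference once the VM is no longer needed here. */
	xe_vm_put(vm);
}
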
int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);

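A hedged sketch of the lock/unlock pair, assuming xe_vm_lock() takes the VM's reservation lock and returns 0 on success or a negative errno when an interruptible acquisition is interrupted; example_locked_op() is hypothetical.

static int example_locked_op(struct xe_vm *vm)
{
	int err;

	err = xe_vm_lock(vm, true);	/* interruptible acquisition */
	if (err)
		return err;

	/* ... operate on state protected by the VM reservation lock ... */

	xe_vm_unlock(vm);
	return 0;
}
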
static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}

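A sketch of checking VM state before starting new work, assuming vm->lock is the rw_semaphore the lockdep assertion above refers to; example_submit() and the chosen errno are hypothetical.

static int example_submit(struct xe_vm *vm)
{
	int err = 0;

	down_read(&vm->lock);
	if (xe_vm_is_closed_or_banned(vm)) {
		/* VM has been closed or banned; refuse new work. */
		err = -ECANCELED;
		goto out_unlock;
	}

	/* ... queue work against a live VM ... */

out_unlock:
	up_read(&vm->lock);
	return err;
}
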
struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

void xe_vm_close_and_put(struct xe_vm *vm);

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}

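A hedged sketch of how a caller might branch on these mode helpers; the dispatch function and the comments describing each mode are assumptions based on the flag names, not a definitive description of the driver's behaviour.

static void example_dispatch(struct xe_vm *vm)
{
	if (xe_vm_in_fault_mode(vm)) {
		/* Bindings may be populated on demand via GPU page faults. */
	} else if (xe_vm_in_preempt_fence_mode(vm)) {
		/* Long-running VM without faults: preempt fences get jobs
		 * off the hardware while rebinding. */
	} else {
		/* Regular dma-fence based execution. */
	}
}
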
int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask);

static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}

int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);

static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

void xe_vm_kill(struct xe_vm *vm, bool unlocked);

#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))

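A brief sketch of the assertion macro in use, assuming helpers that require the VM reservation lock document that requirement with xe_vm_assert_held(); example_update_locked() is hypothetical.

static void example_update_locked(struct xe_vm *vm)
{
	/* Turns a missed xe_vm_lock() in the caller into a lockdep splat. */
	xe_vm_assert_held(vm);

	/* ... touch state protected by xe_vm_resv(vm) ... */
}
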
struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);