/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_printer;
struct drm_file;

struct ttm_buffer_object;
struct ttm_validate_buffer;

struct xe_engine;
struct xe_file;
struct xe_sync_entry;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);
void xe_vm_free(struct kref *ref);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

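/**
 * xe_vm_get() - Take a reference on a vm
 * @vm: The vm to reference.
 *
 * Increments the vm's reference count.
 *
 * Return: The same vm pointer, for call-chaining convenience.
 */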
static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	kref_get(&vm->refcount);
	return vm;
}

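/**
 * xe_vm_put() - Drop a reference on a vm
 * @vm: The vm to unreference.
 *
 * Drops the vm's reference count, freeing the vm via xe_vm_free()
 * when the count reaches zero.
 */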
static inline void xe_vm_put(struct xe_vm *vm)
{
	kref_put(&vm->refcount, xe_vm_free);
}

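/*
 * xe_vm_lock() and xe_vm_unlock() take and release the vm's reservation
 * lock under a ww_acquire_ctx; @intr selects an interruptible wait, and
 * @num_resv presumably reserves that many fence slots (see xe_vm.c for
 * the definitive semantics). A minimal usage sketch:
 *
 *	struct ww_acquire_ctx ww;
 *	int err = xe_vm_lock(vm, &ww, 0, true);
 *
 *	if (err)
 *		return err;
 *	... the vm's reservation is held ...
 *	xe_vm_unlock(vm, &ww);
 */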
int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww,
	       int num_resv, bool intr);

void xe_vm_unlock(struct xe_vm *vm, struct ww_acquire_ctx *ww);

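/**
 * xe_vm_is_closed() - Whether the vm has been closed
 * @vm: The vm.
 *
 * The vm's size is presumably cleared on close (see
 * xe_vm_close_and_put()), so a zero size marks a closed vm.
 *
 * Return: true if the vm is closed, false otherwise.
 */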
static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->resv is held */
	return !vm->size;
}

struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, const struct xe_vma *vma);

#define xe_vm_assert_held(vm) dma_resv_assert_held(&(vm)->resv)

u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);

void xe_vm_close_and_put(struct xe_vm *vm);

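/**
 * xe_vm_in_compute_mode() - Whether the vm was created in compute mode
 * @vm: The vm.
 *
 * Compute-mode vms rely on the preempt/rebind-worker machinery (see
 * xe_vm_reactivate_rebind()) rather than on dma-fence based scheduling.
 *
 * Return: true if XE_VM_FLAG_COMPUTE_MODE is set, false otherwise.
 */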
static inline bool xe_vm_in_compute_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_COMPUTE_MODE;
}

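/**
 * xe_vm_in_fault_mode() - Whether the vm was created in fault mode
 * @vm: The vm.
 *
 * Fault-mode vms have their memory bound on demand from GPU page faults
 * rather than up front at bind time.
 *
 * Return: true if XE_VM_FLAG_FAULT_MODE is set, false otherwise.
 */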
static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

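/**
 * xe_vm_no_dma_fences() - Whether the vm must not rely on dma-fences
 * @vm: The vm.
 *
 * Compute- and fault-mode vms are long-running, so completion of their
 * jobs cannot be tracked with dma-fences.
 *
 * Return: true for compute- or fault-mode vms, false otherwise.
 */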
static inline bool xe_vm_no_dma_fences(struct xe_vm *vm)
{
	return xe_vm_in_compute_mode(vm) || xe_vm_in_fault_mode(vm);
}

int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);

int xe_vm_invalidate_vma(struct xe_vma *vma);

int xe_vm_async_fence_wait_start(struct dma_fence *fence);

extern struct ttm_device_funcs xe_ttm_funcs;

struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm);

/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on
 * compute vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there
 * was nothing to execute, reactivate it and queue the rebind worker.
 * This function should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_compute_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		queue_work(system_unbound_wq, &vm->preempt.rebind_work);
	}
}

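/**
 * xe_vma_is_userptr() - Whether the vma is a userptr mapping
 * @vma: The vma.
 *
 * Userptr vmas map user memory directly and have no backing buffer
 * object, so a NULL bo identifies them.
 *
 * Return: true if the vma is a userptr vma, false otherwise.
 */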
static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return !vma->bo;
}

int xe_vma_userptr_pin_pages(struct xe_vma *vma);

int xe_vma_userptr_check_repin(struct xe_vma *vma);

/*
 * XE_ONSTACK_TV is used to size the tv_onstack array that is input
 * to xe_vm_lock_dma_resv() and xe_vm_unlock_dma_resv().
 */
#define XE_ONSTACK_TV 20
int xe_vm_lock_dma_resv(struct xe_vm *vm, struct ww_acquire_ctx *ww,
			struct ttm_validate_buffer *tv_onstack,
			struct ttm_validate_buffer **tv,
			struct list_head *objs,
			bool intr,
			unsigned int num_shared);

void xe_vm_unlock_dma_resv(struct xe_vm *vm,
			   struct ttm_validate_buffer *tv_onstack,
			   struct ttm_validate_buffer *tv,
			   struct ww_acquire_ctx *ww,
			   struct list_head *objs);

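/*
 * A minimal sketch of the intended lock/unlock pairing, assuming the
 * caller wants interruptible waits and one shared fence slot (the
 * variable names below are illustrative, not part of the API):
 *
 *	struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
 *	struct ttm_validate_buffer *tv;
 *	struct ww_acquire_ctx ww;
 *	LIST_HEAD(objs);
 *	int err;
 *
 *	err = xe_vm_lock_dma_resv(vm, &ww, tv_onstack, &tv, &objs,
 *				  true, 1);
 *	if (err)
 *		return err;
 *	... the vm's and external objects' reservations are now held ...
 *	xe_vm_unlock_dma_resv(vm, tv_onstack, tv, &ww, &objs);
 */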
void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
			     enum dma_resv_usage usage);

int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id);

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif
#endif