/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_gt;
struct xe_tile;
struct xe_vm;
struct xe_vma;
struct xe_vram_region;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into VM's garbage collect SVM range
	 * list. Protected by VM's garbage collect lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Tile mask of bindings present for this range.
	 * Protected by GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Tile mask of bindings invalidated for this
	 * range. Protected by GPU SVM notifier lock.
	 */
	u8 tile_invalidated;
};
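
/*
 * Illustrative sketch only, not code taken from the driver: @tile_present and
 * @tile_invalidated are per-tile bitmasks, so a caller holding the GPU SVM
 * notifier lock could test whether tile "tile" has a valid binding roughly as
 *
 *	bool valid = (range->tile_present & BIT(tile->id)) &&
 *		     !(range->tile_invalidated & BIT(tile->id));
 */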

/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic);

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);

int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
		      struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx);

struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);

int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx);

bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					bool preferred_region_is_vram);

void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);

bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred);

u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);

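/*
 * Illustrative sketch only, with assumed calling code: the real fault
 * servicing lives in xe_svm_handle_pagefault() in xe_svm.c. Roughly, a
 * caller that has already resolved the faulting VMA might do:
 *
 *	struct drm_gpusvm_ctx ctx = {};		(context flags elided)
 *	struct xe_svm_range *range;
 *	int err;
 *
 *	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
 *	if (IS_ERR(range))
 *		return PTR_ERR(range);
 *
 *	if (xe_svm_range_needs_migrate_to_vram(range, vma, want_vram))
 *		err = xe_svm_alloc_vram(vm, tile, range, &ctx);
 *
 *	err = xe_svm_range_get_pages(vm, range, &ctx);
 *
 * followed by binding the range in the GPU page tables; "want_vram" above is
 * a stand-in for the driver's placement policy, not a real variable.
 */
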
/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.flags.has_dma_mapping;
}

/**
 * to_xe_range - Convert a drm_gpusvm_range pointer to a xe_svm_range
 * @r: Pointer to the drm_gpusvm_range structure
 *
 * This function takes a pointer to a drm_gpusvm_range structure and
 * converts it to a pointer to the containing xe_svm_range structure.
 *
 * Return: Pointer to the xe_svm_range structure
 */
static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return container_of(r, struct xe_svm_range, base);
}
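
/*
 * Assumed usage example, not taken from the driver: code called back from the
 * generic GPU SVM layer with a struct drm_gpusvm_range, such as a notifier or
 * eviction hook, can recover the driver-side range with
 *
 *	struct xe_svm_range *range = to_xe_range(r);
 */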

/**
 * xe_svm_range_start() - SVM range start address
 * @range: SVM range
 *
 * Return: start address of range.
 */
static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return drm_gpusvm_range_start(&range->base);
}

/**
 * xe_svm_range_end() - SVM range end address
 * @range: SVM range
 *
 * Return: end address of range.
 */
static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return drm_gpusvm_range_end(&range->base);
}

/**
 * xe_svm_range_size() - SVM range size
 * @range: SVM range
 *
 * Return: Size of range.
 */
static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return drm_gpusvm_range_size(&range->base);
}

#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__)	\
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_unlock(vm__)	\
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
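
/*
 * Sketch of the assumed locking pattern (illustrative, not copied from the
 * driver): state documented above as protected by the GPU SVM notifier lock,
 * e.g. tile_present, tile_invalidated and the DMA mapping flag, is sampled
 * under these wrappers:
 *
 *	xe_svm_notifier_lock(vm);
 *	valid = xe_svm_range_has_dma_mapping(range);
 *	xe_svm_notifier_unlock(vm);
 */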

void xe_svm_flush(struct xe_vm *vm);

#else
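/* CONFIG_DRM_XE_GPUSVM disabled: no-op stubs so callers build unchanged. */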
#include <linux/interval_tree.h>

struct drm_pagemap_device_addr;
struct drm_gpusvm_ctx;
struct drm_gpusvm_range;
struct xe_bo;
struct xe_gt;
struct xe_vm;
struct xe_vma;
struct xe_tile;
struct xe_vram_region;

#define XE_INTERCONNECT_VRAM 1

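/*
 * Minimal stand-in for the real struct xe_svm_range so that code touching
 * these members still compiles (assumed intent) when SVM support is disabled.
 */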
struct xe_svm_range {
	struct {
		struct interval_tree_node itree;
		const struct drm_pagemap_device_addr *dma_addr;
	} base;
	u32 tile_present;
	u32 tile_invalidated;
};

static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
	return 0;
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}

static inline
int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
		      struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx)
{
	return -EOPNOTSUPP;
}

static inline
struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
{
	return ERR_PTR(-EINVAL);
}

static inline
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx)
{
	return -EINVAL;
}

static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return NULL;
}

static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return 0;
}

static inline
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					bool preferred_region_is_vram)
{
	return false;
}

static inline
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
{
}

static inline
bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred)
{
	return false;
}

static inline
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma)
{
	return ULONG_MAX;
}

#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false

static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}

static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}

static inline void xe_svm_flush(struct xe_vm *vm)
{
}
#endif /* IS_ENABLED(CONFIG_DRM_XE_GPUSVM) */
#endif /* _XE_SVM_H_ */