/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_gt;
struct xe_tile;
struct xe_vm;
struct xe_vma;
struct xe_vram_region;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into VM's garbage collect SVM range
	 * list. Protected by VM's garbage collect lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Tile mask of tiles for which a binding is present
	 * for this range. Protected by GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Tile mask of tiles for which the binding has
	 * been invalidated for this range. Protected by GPU SVM notifier
	 * lock.
	 */
	u8 tile_invalidated;
};
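
/*
 * Example (illustrative sketch, not part of the driver API; the helper name
 * is hypothetical): with the GPU SVM notifier lock held, the two tile masks
 * above can be combined to test whether a given tile still has a valid
 * binding for the range:
 *
 *	static bool example_tile_binding_valid(struct xe_svm_range *range,
 *					       u8 tile_id)
 *	{
 *		return range->tile_present & ~range->tile_invalidated &
 *			BIT(tile_id);
 *	}
 */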

/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}
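
/*
 * Example (illustrative only, assuming the check is made under the GPU SVM
 * notifier lock, as with xe_svm_range_has_dma_mapping() below):
 *
 *	xe_svm_notifier_lock(vm);
 *	valid = xe_svm_range_pages_valid(range);
 *	xe_svm_notifier_unlock(vm);
 */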

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic);

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);

int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx);

struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);

int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx);

bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					bool preferred_region_is_vram);

void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);

bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred);

u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);

void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end);

u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);

struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.pages.flags.has_dma_mapping;
}
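
/*
 * Example (illustrative only): the lockdep assertion above means callers
 * are expected to hold the GPU SVM notifier lock, e.g.:
 *
 *	xe_svm_notifier_lock(vm);
 *	mapped = xe_svm_range_has_dma_mapping(range);
 *	xe_svm_notifier_unlock(vm);
 */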

/**
 * to_xe_range - Convert a drm_gpusvm_range pointer to a xe_svm_range
 * @r: Pointer to the drm_gpusvm_range structure
 *
 * This function takes a pointer to a drm_gpusvm_range structure and
 * converts it to a pointer to the containing xe_svm_range structure.
 *
 * Return: Pointer to the xe_svm_range structure
 */
static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return container_of(r, struct xe_svm_range, base);
}
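
/*
 * Example (illustrative sketch; the function name is hypothetical): code
 * that is handed a struct drm_gpusvm_range, e.g. from a GPU SVM callback,
 * can recover the driver-side range with to_xe_range() and then use the
 * accessors below:
 *
 *	static void example_dump_range(struct drm_gpusvm_range *r)
 *	{
 *		struct xe_svm_range *range = to_xe_range(r);
 *
 *		pr_debug("SVM range [0x%016lx, 0x%016lx)\n",
 *			 xe_svm_range_start(range), xe_svm_range_end(range));
 *	}
 */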

/**
 * xe_svm_range_start() - SVM range start address
 * @range: SVM range
 *
 * Return: start address of range.
 */
static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return drm_gpusvm_range_start(&range->base);
}

/**
 * xe_svm_range_end() - SVM range end address
 * @range: SVM range
 *
 * Return: end address of range.
 */
static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return drm_gpusvm_range_end(&range->base);
}

/**
 * xe_svm_range_size() - SVM range size
 * @range: SVM range
 *
 * Return: Size of range.
 */
static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return drm_gpusvm_range_size(&range->base);
}
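
/*
 * Example (illustrative only, assuming the usual relationship between the
 * drm_gpusvm accessors these helpers wrap): the size equals end minus
 * start, so the number of backing pages can be derived as:
 *
 *	unsigned long npages = xe_svm_range_size(range) >> PAGE_SHIFT;
 */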

void xe_svm_flush(struct xe_vm *vm);

#else
#include <linux/interval_tree.h>
#include "xe_vm.h"

struct drm_pagemap_addr;
struct drm_gpusvm_ctx;
struct drm_gpusvm_range;
struct xe_bo;
struct xe_gt;
struct xe_vm;
struct xe_vma;
struct xe_tile;
struct xe_vram_region;

#define XE_INTERCONNECT_VRAM 1

struct xe_svm_range {
	struct {
		struct interval_tree_node itree;
		struct {
			const struct drm_pagemap_addr *dma_addr;
		} pages;
	} base;
	u32 tile_present;
	u32 tile_invalidated;
};

static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
#if IS_ENABLED(CONFIG_DRM_GPUSVM)
	return drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)", &vm->xe->drm,
			       NULL, NULL, 0, 0, 0, NULL, NULL, 0);
#else
	return 0;
#endif
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
#if IS_ENABLED(CONFIG_DRM_GPUSVM)
	xe_assert(vm->xe, xe_vm_is_closed(vm));
	drm_gpusvm_fini(&vm->svm.gpusvm);
#endif
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}

static inline int
xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
		  const struct drm_gpusvm_ctx *ctx)
{
	return -EOPNOTSUPP;
}

static inline
struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
{
	return ERR_PTR(-EINVAL);
}

static inline
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx)
{
	return -EINVAL;
}

static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return NULL;
}

static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return 0;
}

static inline
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					u32 region)
{
	return false;
}

static inline
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
{
}

static inline
bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred)
{
	return false;
}

static inline
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma)
{
	return ULONG_MAX;
}

static inline
void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
{
}

static inline
u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
{
	return 0;
}

static inline
struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
{
	return NULL;
}

static inline void xe_svm_flush(struct xe_vm *vm)
{
}
#define xe_svm_range_has_dma_mapping(...) false
#endif /* CONFIG_DRM_XE_GPUSVM */

#if IS_ENABLED(CONFIG_DRM_GPUSVM) /* Need to support userptr without XE_GPUSVM */
#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_assert_held_read(vm__) \
	lockdep_assert_held_read(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__)	\
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_lock_interruptible(vm__)	\
	down_read_interruptible(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_unlock(vm__)	\
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
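
/*
 * Example (illustrative only): xe_svm_notifier_lock_interruptible() returns
 * 0 on success and a negative error code if interrupted, so a typical
 * caller looks like:
 *
 *	err = xe_svm_notifier_lock_interruptible(vm);
 *	if (err)
 *		return err;
 *	... inspect or validate SVM range state ...
 *	xe_svm_notifier_unlock(vm);
 */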

#else
#define xe_svm_assert_in_notifier(...) do {} while (0)

static inline void xe_svm_assert_held_read(struct xe_vm *vm)
{
}

static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}

static inline int xe_svm_notifier_lock_interruptible(struct xe_vm *vm)
{
	return 0;
}

static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}
#endif /* CONFIG_DRM_GPUSVM */

#endif