/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_gt;
struct xe_tile;
struct xe_vm;
struct xe_vma;
struct xe_vram_region;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into VM's garbage collect SVM range
	 * list. Protected by VM's garbage collect lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Tile mask of tiles for which a binding is present
	 * for this range. Protected by GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Tile mask of tiles for which the binding has
	 * been invalidated for this range. Protected by GPU SVM notifier
	 * lock.
	 */
	u8 tile_invalidated;
};
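
/*
 * Illustrative sketch (not part of the API): the two tile masks are
 * consulted under the GPU SVM notifier lock, e.g. to decide whether a
 * given tile still holds a valid binding for the range. tile->id follows
 * the xe driver's struct xe_tile; the snippet itself is hypothetical.
 *
 *	xe_svm_notifier_lock(vm);
 *	valid = (range->tile_present & ~range->tile_invalidated) &
 *		BIT(tile->id);
 *	xe_svm_notifier_unlock(vm);
 */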

/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic);
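
/*
 * Illustrative sketch (hypothetical caller, not part of this header): the
 * GT page-fault path resolves the faulting VMA and, for CPU address mirror
 * (system allocator) VMAs, forwards the fault here.
 * xe_vma_is_cpu_addr_mirror() is assumed from the xe VM code; the pf->
 * fields are placeholders for the decoded fault message.
 *
 *	if (xe_vma_is_cpu_addr_mirror(vma))
 *		err = xe_svm_handle_pagefault(vm, vma, gt, pf->page_addr,
 *					      pf->access_is_atomic);
 */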

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);

int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx);

struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);

int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx);
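
/*
 * Illustrative sketch of how the lookup/migrate/map primitives above chain
 * together when servicing a fault (assumption: this mirrors what
 * xe_svm_handle_pagefault() does internally; retry and error handling
 * omitted):
 *
 *	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
 *	if (IS_ERR(range))
 *		return PTR_ERR(range);
 *	if (xe_svm_range_needs_migrate_to_vram(range, vma, region_is_vram))
 *		err = xe_svm_alloc_vram(tile, range, &ctx);
 *	err = xe_svm_range_get_pages(vm, range, &ctx);
 */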

bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					bool preferred_region_is_vram);

void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);

bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred);

u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);

void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end);

u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);

struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.flags.has_dma_mapping;
}

/**
 * to_xe_range - Convert a drm_gpusvm_range pointer to a xe_svm_range
 * @r: Pointer to the drm_gpusvm_range structure
 *
 * This function takes a pointer to a drm_gpusvm_range structure and
 * converts it to a pointer to the containing xe_svm_range structure.
 *
 * Return: Pointer to the xe_svm_range structure
 */
static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return container_of(r, struct xe_svm_range, base);
}
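
/*
 * Typical use (sketch): drm_gpusvm invokes driver callbacks with the base
 * struct drm_gpusvm_range, and the xe side recovers its container via
 * container_of(). The callback name below is illustrative only.
 *
 *	static void xe_svm_range_notifier_event(struct drm_gpusvm_range *r)
 *	{
 *		struct xe_svm_range *range = to_xe_range(r);
 *
 *		// operate on range->tile_present, etc.
 *	}
 */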

/**
 * xe_svm_range_start() - SVM range start address
 * @range: SVM range
 *
 * Return: start address of range.
 */
static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return drm_gpusvm_range_start(&range->base);
}

/**
 * xe_svm_range_end() - SVM range end address
 * @range: SVM range
 *
 * Return: end address of range.
 */
static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return drm_gpusvm_range_end(&range->base);
}

/**
 * xe_svm_range_size() - SVM range size
 * @range: SVM range
 *
 * Return: Size of range.
 */
static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return drm_gpusvm_range_size(&range->base);
}
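
/*
 * The three helpers above expose the range geometry without callers having
 * to reach into range->base. A minimal sketch (hypothetical debug print):
 *
 *	drm_dbg(&vm->xe->drm, "range [0x%lx-0x%lx) size 0x%lx\n",
 *		xe_svm_range_start(range), xe_svm_range_end(range),
 *		xe_svm_range_size(range));
 */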

#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__)	\
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_unlock(vm__)	\
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
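
/*
 * Sketch of the intended locking pattern: state protected by the notifier
 * lock (the tile masks, DMA mapping state) is sampled inside a lock/unlock
 * pair, while code already running in the invalidation path documents its
 * context with xe_svm_assert_in_notifier(). mapped_mask is a hypothetical
 * local used only for illustration.
 *
 *	xe_svm_notifier_lock(vm);
 *	if (xe_svm_range_has_dma_mapping(range))
 *		mapped_mask |= range->tile_present;
 *	xe_svm_notifier_unlock(vm);
 */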

void xe_svm_flush(struct xe_vm *vm);

#else /* !CONFIG_DRM_XE_GPUSVM */
#include <linux/err.h>
#include <linux/interval_tree.h>

struct drm_pagemap_addr;
struct drm_gpusvm_ctx;
struct drm_gpusvm_range;
struct xe_bo;
struct xe_gt;
struct xe_vm;
struct xe_vma;
struct xe_tile;
struct xe_vram_region;

#define XE_INTERCONNECT_VRAM 1

struct xe_svm_range {
	struct {
		struct interval_tree_node itree;
		const struct drm_pagemap_addr *dma_addr;
	} base;
	u8 tile_present;
	u8 tile_invalidated;
};

static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
	return 0;
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}

static inline int
xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
		  const struct drm_gpusvm_ctx *ctx)
{
	return -EOPNOTSUPP;
}

static inline
struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
{
	return ERR_PTR(-EINVAL);
}

static inline
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx)
{
	return -EINVAL;
}

static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return NULL;
}

static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return 0;
}

static inline
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					bool preferred_region_is_vram)
{
	return false;
}

static inline
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
{
}

static inline
bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred)
{
	return false;
}

static inline
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma)
{
	return ULONG_MAX;
}

static inline
void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
{
}

static inline
u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
{
	return 0;
}

static inline
struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
{
	return NULL;
}

#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false

static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}

static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}

static inline void xe_svm_flush(struct xe_vm *vm)
{
}
#endif /* CONFIG_DRM_XE_GPUSVM */
#endif /* _XE_SVM_H_ */