/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>
#include <drm/drm_pagemap_util.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER
#define XE_INTERCONNECT_P2P (XE_INTERCONNECT_VRAM + 1)

struct drm_device;
struct drm_file;

struct xe_bo;
struct xe_gt;
struct xe_device;
struct xe_vram_region;
struct xe_tile;
struct xe_vm;
struct xe_vma;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into VM's garbage collect SVM range
	 * list. Protected by VM's garbage collect lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Tile mask of tiles for which a binding is present
	 * for this range. Protected by GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Tile mask of tiles for which the binding has
	 * been invalidated for this range. Protected by GPU SVM notifier
	 * lock.
	 */
	u8 tile_invalidated;
};

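/*
 * Both masks are indexed by tile id and must only be inspected under the
 * GPU SVM notifier lock. A minimal sketch of a per-tile validity check
 * (illustrative only; @tile_id is a hypothetical local):
 *
 *	xe_svm_notifier_lock(vm);
 *	valid = range->tile_present & ~range->tile_invalidated & BIT(tile_id);
 *	xe_svm_notifier_unlock(vm);
 */
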
/**
 * struct xe_pagemap - Manages xe device_private memory for SVM.
 * @pagemap: The struct dev_pagemap providing the struct pages.
 * @dpagemap: The drm_pagemap managing allocation and migration.
 * @destroy_work: Handles asynchronous destruction and caching.
 * @peer: Used for pagemap owner computation.
 * @hpa_base: The host physical address base for the managed memory.
 * @vr: Backpointer to the xe_vram region.
 */
struct xe_pagemap {
	struct dev_pagemap pagemap;
	struct drm_pagemap dpagemap;
	struct work_struct destroy_work;
	struct drm_pagemap_peer peer;
	resource_size_t hpa_base;
	struct xe_vram_region *vr;
};

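/*
 * The embedded @pagemap and @dpagemap members double as lookup handles:
 * when the core hands one of them back, the owning xe_pagemap is
 * recovered with container_of(). A minimal sketch, assuming a helper
 * along these lines exists in the implementation:
 *
 *	static struct xe_pagemap *to_xe_pagemap(struct drm_pagemap *dpagemap)
 *	{
 *		return container_of(dpagemap, struct xe_pagemap, dpagemap);
 *	}
 */
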
/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}

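/*
 * Typical use is a cheap revalidation check before taking the slow path,
 * e.g. (illustrative sketch only; error handling elided):
 *
 *	xe_svm_notifier_lock(vm);
 *	valid = xe_svm_range_pages_valid(range);
 *	xe_svm_notifier_unlock(vm);
 *	if (!valid)
 *		err = xe_svm_range_get_pages(vm, range, &ctx);
 */
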
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic);

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);

int xe_svm_alloc_vram(struct xe_svm_range *range, const struct drm_gpusvm_ctx *ctx,
		      struct drm_pagemap *dpagemap);

struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);

int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx);

bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					const struct drm_pagemap *dpagemap);

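/*
 * A GPU pagefault is expected to chain the helpers above roughly as
 * follows (illustrative sketch only; locking, retry loops and the
 * migration policy are omitted):
 *
 *	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
 *	if (IS_ERR(range))
 *		return PTR_ERR(range);
 *	if (xe_svm_range_needs_migrate_to_vram(range, vma, dpagemap))
 *		err = xe_svm_alloc_vram(range, &ctx, dpagemap);
 *	err = xe_svm_range_get_pages(vm, range, &ctx);
 */
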
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);

bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, const struct drm_pagemap *dpagemap);

u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);

void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end);

u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);

struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);

void *xe_svm_private_page_owner(struct xe_vm *vm, bool force_smem);

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.pages.flags.has_dma_mapping;
}

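/*
 * The lockdep assertion above encodes the calling convention: hold the
 * GPU SVM notifier lock across the check and any use of the result
 * (illustrative sketch):
 *
 *	xe_svm_notifier_lock(vm);
 *	mapped = xe_svm_range_has_dma_mapping(range);
 *	xe_svm_notifier_unlock(vm);
 */
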
/**
 * to_xe_range - Convert a drm_gpusvm_range pointer to a xe_svm_range
 * @r: Pointer to the drm_gpusvm_range structure
 *
 * This function takes a pointer to a drm_gpusvm_range structure and
 * converts it to a pointer to the containing xe_svm_range structure.
 *
 * Return: Pointer to the xe_svm_range structure
 */
static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return container_of(r, struct xe_svm_range, base);
}

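/*
 * A minimal sketch of the intended use, e.g. in a callback that only
 * receives the embedded base range (the callback and its body are
 * hypothetical):
 *
 *	static void example_invalidate(struct drm_gpusvm_range *r)
 *	{
 *		struct xe_svm_range *range = to_xe_range(r);
 *
 *		range->tile_invalidated = range->tile_present;
 *	}
 */
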
/**
 * xe_svm_range_start() - SVM range start address
 * @range: SVM range
 *
 * Return: start address of range.
 */
static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return drm_gpusvm_range_start(&range->base);
}

/**
 * xe_svm_range_end() - SVM range end address
 * @range: SVM range
 *
 * Return: end address of range.
 */
static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return drm_gpusvm_range_end(&range->base);
}

/**
 * xe_svm_range_size() - SVM range size
 * @range: SVM range
 *
 * Return: Size of range.
 */
static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return drm_gpusvm_range_size(&range->base);
}

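/*
 * All three accessors forward to the embedded drm_gpusvm_range, so the
 * usual identity is expected to hold (illustrative):
 *
 *	xe_svm_range_size(range) ==
 *		xe_svm_range_end(range) - xe_svm_range_start(range)
 */
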
void xe_svm_flush(struct xe_vm *vm);

int xe_pagemap_shrinker_create(struct xe_device *xe);

int xe_pagemap_cache_create(struct xe_tile *tile);

struct drm_pagemap *xe_drm_pagemap_from_fd(int fd, u32 region_instance);

#else
#include <linux/interval_tree.h>
#include "xe_vm.h"

struct drm_pagemap_addr;
struct drm_gpusvm_ctx;
struct drm_gpusvm_range;
struct xe_bo;
struct xe_device;
struct xe_vm;
struct xe_vma;
struct xe_tile;
struct xe_vram_region;

#define XE_INTERCONNECT_VRAM 1
#define XE_INTERCONNECT_P2P (XE_INTERCONNECT_VRAM + 1)

struct xe_svm_range {
	struct {
		struct interval_tree_node itree;
		struct {
			const struct drm_pagemap_addr *dma_addr;
		} pages;
	} base;
	u32 tile_present;
	u32 tile_invalidated;
};

static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
#if IS_ENABLED(CONFIG_DRM_GPUSVM)
	return drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)", &vm->xe->drm,
			       NULL, 0, 0, 0, NULL, NULL, 0);
#else
	return 0;
#endif
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
#if IS_ENABLED(CONFIG_DRM_GPUSVM)
	xe_assert(vm->xe, xe_vm_is_closed(vm));
	drm_gpusvm_fini(&vm->svm.gpusvm);
#endif
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}

static inline int
xe_svm_alloc_vram(struct xe_svm_range *range, const struct drm_gpusvm_ctx *ctx,
		  struct drm_pagemap *dpagemap)
{
	return -EOPNOTSUPP;
}

static inline
struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
{
	return ERR_PTR(-EINVAL);
}

static inline
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx)
{
	return -EINVAL;
}

static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return NULL;
}

static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return 0;
}

static inline
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					const struct drm_pagemap *dpagemap)
{
	return false;
}

static inline
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
{
}

static inline
bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, const struct drm_pagemap *dpagemap)
{
	return false;
}

static inline
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma)
{
	return ULONG_MAX;
}

static inline
void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
{
}

static inline
u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
{
	return 0;
}

static inline
struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
{
	return NULL;
}

static inline void *xe_svm_private_page_owner(struct xe_vm *vm, bool force_smem)
{
	return NULL;
}

static inline void xe_svm_flush(struct xe_vm *vm)
{
}

static inline int xe_pagemap_shrinker_create(struct xe_device *xe)
{
	return 0;
}

static inline int xe_pagemap_cache_create(struct xe_tile *tile)
{
	return 0;
}

static inline struct drm_pagemap *xe_drm_pagemap_from_fd(int fd, u32 region_instance)
{
	return ERR_PTR(-ENOENT);
}

#define xe_svm_range_has_dma_mapping(...) false
#endif /* CONFIG_DRM_XE_GPUSVM */

#if IS_ENABLED(CONFIG_DRM_GPUSVM) /* Need to support userptr without XE_GPUSVM */
#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_assert_held_read(vm__) \
	lockdep_assert_held_read(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__)	\
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_lock_interruptible(vm__)	\
	down_read_interruptible(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_unlock(vm__)	\
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)

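/*
 * Typical locking pattern (illustrative sketch; assumes a fully
 * initialized @vm and CONFIG_DRM_GPUSVM enabled):
 *
 *	err = xe_svm_notifier_lock_interruptible(vm);
 *	if (err)
 *		return err;
 *	xe_svm_assert_held_read(vm);
 *	xe_svm_notifier_unlock(vm);
 */
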
#else
#define xe_svm_assert_in_notifier(...) do {} while (0)

static inline void xe_svm_assert_held_read(struct xe_vm *vm)
{
}

static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}

static inline int xe_svm_notifier_lock_interruptible(struct xe_vm *vm)
{
	return 0;
}

static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}
#endif /* CONFIG_DRM_GPUSVM */

#endif /* _XE_SVM_H_ */