/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_gt;
struct xe_tile;
struct xe_vm;
struct xe_vma;
struct xe_vram_region;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into the VM's garbage collect SVM
	 * range list. Protected by the VM's garbage collect lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Tile mask of tiles on which a binding is present for
	 * this range. Protected by the GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Tile mask of tiles on which the binding has been
	 * invalidated for this range. Protected by the GPU SVM notifier lock.
	 */
	u8 tile_invalidated;
};
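
/*
 * Illustrative sketch, not upstream code: the tile masks above are meant to
 * be tested bit-per-tile under the GPU SVM notifier lock. Assuming the usual
 * xe convention of indexing by tile->id, a binding would typically be treated
 * as valid on a tile when its bit is set in @tile_present and clear in
 * @tile_invalidated, e.g.:
 *
 *	xe_svm_notifier_lock(vm);
 *	valid = range->tile_present & ~range->tile_invalidated & BIT(tile->id);
 *	xe_svm_notifier_unlock(vm);
 */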

/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic);

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);

int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx);

struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);

int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx);

bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					bool preferred_region_is_vram);

void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);

bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred);

u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
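
/*
 * Rough composition sketch (assumptions, not a definitive flow): a fault or
 * prefetch path is expected to look up the backing range with
 * xe_svm_range_find_or_insert(), migrate it with xe_svm_alloc_vram() when
 * xe_svm_range_needs_migrate_to_vram() says so, and then populate it via
 * xe_svm_range_get_pages() before (re)binding, e.g.:
 *
 *	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
 *	if (IS_ERR(range))
 *		return PTR_ERR(range);
 *	if (xe_svm_range_needs_migrate_to_vram(range, vma, preferred_region_is_vram))
 *		err = xe_svm_alloc_vram(tile, range, &ctx);
 *	err = xe_svm_range_get_pages(vm, range, &ctx);
 *
 * xe_svm_handle_pagefault() is the entry point expected to drive this kind of
 * flow for GPU page faults.
 */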

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.flags.has_dma_mapping;
}

/**
 * to_xe_range - Convert a drm_gpusvm_range pointer to a xe_svm_range
 * @r: Pointer to the drm_gpusvm_range structure
 *
 * This function takes a pointer to a drm_gpusvm_range structure and
 * converts it to a pointer to the containing xe_svm_range structure.
 *
 * Return: Pointer to the xe_svm_range structure
 */
static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return container_of(r, struct xe_svm_range, base);
}
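
/*
 * Illustrative usage, assuming the GPU SVM core hands back &struct
 * drm_gpusvm_range pointers in its ops and notifier callbacks: the embedding
 * xe_svm_range is recovered with
 * "struct xe_svm_range *range = to_xe_range(r);" before touching the
 * xe-specific fields such as the tile masks.
 */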

/**
 * xe_svm_range_start() - SVM range start address
 * @range: SVM range
 *
 * Return: start address of range.
 */
static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return drm_gpusvm_range_start(&range->base);
}

/**
 * xe_svm_range_end() - SVM range end address
 * @range: SVM range
 *
 * Return: end address of range.
 */
static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return drm_gpusvm_range_end(&range->base);
}

/**
 * xe_svm_range_size() - SVM range size
 * @range: SVM range
 *
 * Return: Size of range.
 */
static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return drm_gpusvm_range_size(&range->base);
}
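
/*
 * Illustrative only, assuming drm_gpusvm's exclusive-end convention: with the
 * accessors above, an address containment check is a half-open interval test,
 * e.g. "addr >= xe_svm_range_start(range) && addr < xe_svm_range_end(range)",
 * and xe_svm_range_size(range) equals end minus start.
 */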

#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__) \
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_unlock(vm__) \
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
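
/*
 * Expected locking pattern (a sketch based on the lockdep annotations above,
 * not a normative description): readers of notifier-protected state such as
 * xe_svm_range_has_dma_mapping() take the notifier lock around the access,
 *
 *	xe_svm_notifier_lock(vm);
 *	mapped = xe_svm_range_has_dma_mapping(range);
 *	xe_svm_notifier_unlock(vm);
 *
 * while xe_svm_assert_in_notifier() marks paths that must already run with
 * the notifier lock held for write, i.e. from within an invalidation
 * notifier callback.
 */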

void xe_svm_flush(struct xe_vm *vm);

#else
#include <linux/interval_tree.h>

struct drm_pagemap_device_addr;
struct drm_gpusvm_ctx;
struct drm_gpusvm_range;
struct xe_bo;
struct xe_gt;
struct xe_vm;
struct xe_vma;
struct xe_tile;
struct xe_vram_region;

#define XE_INTERCONNECT_VRAM 1

struct xe_svm_range {
	struct {
		struct interval_tree_node itree;
		const struct drm_pagemap_device_addr *dma_addr;
	} base;
	u32 tile_present;
	u32 tile_invalidated;
};

static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
	return 0;
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}

static inline int
xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
		  const struct drm_gpusvm_ctx *ctx)
{
	return -EOPNOTSUPP;
}

static inline
struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
{
	return ERR_PTR(-EINVAL);
}

static inline
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx)
{
	return -EINVAL;
}

static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return NULL;
}

static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return 0;
}

static inline
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					u32 region)
{
	return false;
}

static inline
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
{
}

static inline
bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred)
{
	return false;
}

static inline
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma)
{
	return ULONG_MAX;
}

#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false

static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}

static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}

static inline void xe_svm_flush(struct xe_vm *vm)
{
}
#endif
#endif