/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef __DRM_GPUSVM_H__
#define __DRM_GPUSVM_H__

#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/mmu_notifier.h>

struct dev_pagemap_ops;
struct drm_device;
struct drm_gpusvm;
struct drm_gpusvm_notifier;
struct drm_gpusvm_ops;
struct drm_gpusvm_range;
struct drm_pagemap;
struct drm_pagemap_addr;

/**
 * struct drm_gpusvm_ops - Operations structure for GPU SVM
 *
 * This structure defines the operations for GPU Shared Virtual Memory (SVM).
 * These operations are provided by the GPU driver to manage SVM ranges and
 * notifiers.
 */
struct drm_gpusvm_ops {
	/**
	 * @notifier_alloc: Allocate a GPU SVM notifier (optional)
	 *
	 * Allocate a GPU SVM notifier.
	 *
	 * Return: Pointer to the allocated GPU SVM notifier on success, NULL on failure.
	 */
	struct drm_gpusvm_notifier *(*notifier_alloc)(void);

	/**
	 * @notifier_free: Free a GPU SVM notifier (optional)
	 * @notifier: Pointer to the GPU SVM notifier to be freed
	 *
	 * Free a GPU SVM notifier.
	 */
	void (*notifier_free)(struct drm_gpusvm_notifier *notifier);

	/**
	 * @range_alloc: Allocate a GPU SVM range (optional)
	 * @gpusvm: Pointer to the GPU SVM
	 *
	 * Allocate a GPU SVM range.
	 *
	 * Return: Pointer to the allocated GPU SVM range on success, NULL on failure.
	 */
	struct drm_gpusvm_range *(*range_alloc)(struct drm_gpusvm *gpusvm);

	/**
	 * @range_free: Free a GPU SVM range (optional)
	 * @range: Pointer to the GPU SVM range to be freed
	 *
	 * Free a GPU SVM range.
	 */
	void (*range_free)(struct drm_gpusvm_range *range);

	/**
	 * @invalidate: Invalidate GPU SVM notifier (required)
	 * @gpusvm: Pointer to the GPU SVM
	 * @notifier: Pointer to the GPU SVM notifier
	 * @mmu_range: Pointer to the mmu_notifier_range structure
	 *
	 * Invalidate the GPU page tables. The driver can safely walk the
	 * notifier's range RB tree/list in this callback. Called while
	 * holding the notifier lock.
	 */
	void (*invalidate)(struct drm_gpusvm *gpusvm,
			   struct drm_gpusvm_notifier *notifier,
			   const struct mmu_notifier_range *mmu_range);
};
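
/*
 * Example: a minimal driver-side ops table; a sketch only, not taken from a
 * real driver. Only @invalidate is required. All my_driver_* names are
 * hypothetical, standing in for whatever GPU page-table zap and TLB
 * invalidation the driver implements.
 *
 *	static void my_driver_invalidate(struct drm_gpusvm *gpusvm,
 *					 struct drm_gpusvm_notifier *notifier,
 *					 const struct mmu_notifier_range *mmu_range)
 *	{
 *		struct drm_gpusvm_range *range = NULL;
 *
 *		drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
 *					  mmu_range->end)
 *			my_driver_zap_gpu_ptes(range);	// hypothetical helper
 *		my_driver_tlb_inval(gpusvm);		// hypothetical helper
 *	}
 *
 *	static const struct drm_gpusvm_ops my_driver_gpusvm_ops = {
 *		.invalidate = my_driver_invalidate,
 *	};
 */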

/**
 * struct drm_gpusvm_notifier - Structure representing a GPU SVM notifier
 *
 * @gpusvm: Pointer to the GPU SVM structure
 * @notifier: MMU interval notifier
 * @itree: Interval tree node for the notifier (inserted in GPU SVM)
 * @entry: List entry for fast interval tree traversal
 * @root: Cached root node of the RB tree containing ranges
 * @range_list: List head of ranges in the same order they appear in the
 *              interval tree. This is useful to keep iterating ranges while
 *              doing modifications to the RB tree.
 * @flags: Flags for notifier
 * @flags.removed: Flag indicating whether the MMU interval notifier has been
 *                 removed
 *
 * This structure represents a GPU SVM notifier.
 */
struct drm_gpusvm_notifier {
	struct drm_gpusvm *gpusvm;
	struct mmu_interval_notifier notifier;
	struct interval_tree_node itree;
	struct list_head entry;
	struct rb_root_cached root;
	struct list_head range_list;
	struct {
		u32 removed : 1;
	} flags;
};

/**
 * struct drm_gpusvm_pages_flags - Structure representing GPU SVM pages flags
 *
 * @migrate_devmem: Flag indicating whether the pages can be migrated to device memory
 * @unmapped: Flag indicating if the pages have been unmapped
 * @partial_unmap: Flag indicating if the pages have been partially unmapped
 * @has_devmem_pages: Flag indicating if the pages have devmem pages
 * @has_dma_mapping: Flag indicating if the pages have a DMA mapping
 * @__flags: Flags for pages in u16 form (used for READ_ONCE)
 */
struct drm_gpusvm_pages_flags {
	union {
		struct {
			/* All flags below must be set upon creation */
			u16 migrate_devmem : 1;
			/* All flags below must be set / cleared under notifier lock */
			u16 unmapped : 1;
			u16 partial_unmap : 1;
			u16 has_devmem_pages : 1;
			u16 has_dma_mapping : 1;
		};
		u16 __flags;
	};
};

/**
 * struct drm_gpusvm_pages - Structure representing GPU SVM mapped pages
 *
 * @dma_addr: Device address array
 * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping.
 *            Note this is assuming only one drm_pagemap per range is allowed.
 * @notifier_seq: Notifier sequence number of the range's pages
 * @flags: Flags for the pages
 * @flags.migrate_devmem: Flag indicating whether the pages can be migrated to device memory
 * @flags.unmapped: Flag indicating if the pages have been unmapped
 * @flags.partial_unmap: Flag indicating if the pages have been partially unmapped
 * @flags.has_devmem_pages: Flag indicating if the pages have devmem pages
 * @flags.has_dma_mapping: Flag indicating if the pages have a DMA mapping
 */
struct drm_gpusvm_pages {
	struct drm_pagemap_addr *dma_addr;
	struct drm_pagemap *dpagemap;
	unsigned long notifier_seq;
	struct drm_gpusvm_pages_flags flags;
};

/**
 * struct drm_gpusvm_range - Structure representing a GPU SVM range
 *
 * @gpusvm: Pointer to the GPU SVM structure
 * @notifier: Pointer to the GPU SVM notifier
 * @refcount: Reference count for the range
 * @itree: Interval tree node for the range (inserted in GPU SVM notifier)
 * @entry: List entry for fast interval tree traversal
 * @pages: The pages for this range.
 *
 * This structure represents a GPU SVM range used for tracking memory ranges
 * mapped in a DRM device.
 */
struct drm_gpusvm_range {
	struct drm_gpusvm *gpusvm;
	struct drm_gpusvm_notifier *notifier;
	struct kref refcount;
	struct interval_tree_node itree;
	struct list_head entry;
	struct drm_gpusvm_pages pages;
};

/**
 * struct drm_gpusvm - GPU SVM structure
 *
 * @name: Name of the GPU SVM
 * @drm: Pointer to the DRM device structure
 * @mm: Pointer to the mm_struct for the address space
 * @mm_start: Start address of GPU SVM
 * @mm_range: Range of the GPU SVM
 * @notifier_size: Size of individual notifiers
 * @ops: Pointer to the operations structure for GPU SVM
 * @chunk_sizes: Pointer to the array of chunk sizes used in range allocation.
 *               Entries should be powers of 2 in descending order.
 * @num_chunks: Number of chunks
 * @notifier_lock: Read-write semaphore for protecting notifier operations
 * @root: Cached root node of the Red-Black tree containing GPU SVM notifiers
 * @notifier_list: List head of notifiers in the same order they appear in the
 *                 interval tree. This is useful to keep iterating notifiers
 *                 while doing modifications to the RB tree.
 *
 * This structure represents a GPU SVM (Shared Virtual Memory) used for tracking
 * memory ranges mapped in a DRM (Direct Rendering Manager) device.
 *
 * No reference counting is provided, as this is expected to be embedded in the
 * driver VM structure along with the struct drm_gpuvm, which handles reference
 * counting.
 */
struct drm_gpusvm {
	const char *name;
	struct drm_device *drm;
	struct mm_struct *mm;
	unsigned long mm_start;
	unsigned long mm_range;
	unsigned long notifier_size;
	const struct drm_gpusvm_ops *ops;
	const unsigned long *chunk_sizes;
	int num_chunks;
	struct rw_semaphore notifier_lock;
	struct rb_root_cached root;
	struct list_head notifier_list;
#ifdef CONFIG_LOCKDEP
	/**
	 * @lock_dep_map: Annotates drm_gpusvm_range_find_or_insert and
	 * drm_gpusvm_range_remove with a driver-provided lock.
	 */
	struct lockdep_map *lock_dep_map;
#endif
};

/**
 * struct drm_gpusvm_ctx - DRM GPU SVM context
 *
 * @device_private_page_owner: The device-private page owner to use for
 *                             this operation
 * @check_pages_threshold: Check CPU pages for presence if the chunk is less
 *                         than or equal to this threshold. If not present,
 *                         reduce the chunk size.
 * @timeslice_ms: The time slice, in ms: the minimum time a piece of memory
 *                remains with either exclusive GPU or CPU access.
 * @in_notifier: entering from an MMU notifier
 * @read_only: operating on read-only memory
 * @devmem_possible: possible to use device memory
 * @devmem_only: use only device memory
 *
 * Context in which DRM GPU SVM is operating (i.e. user arguments).
 */
struct drm_gpusvm_ctx {
	void *device_private_page_owner;
	unsigned long check_pages_threshold;
	unsigned long timeslice_ms;
	unsigned int in_notifier :1;
	unsigned int read_only :1;
	unsigned int devmem_possible :1;
	unsigned int devmem_only :1;
};
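
/*
 * Example: filling out a context for a fault that may migrate to device
 * memory. A sketch only; the owner pointer and the threshold/timeslice
 * values are illustrative, not recommendations.
 *
 *	struct drm_gpusvm_ctx ctx = {
 *		.device_private_page_owner = my_owner,	// hypothetical cookie
 *		.check_pages_threshold = SZ_64K,
 *		.timeslice_ms = 5,
 *		.devmem_possible = true,
 *	};
 */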

int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
		    const char *name, struct drm_device *drm,
		    struct mm_struct *mm,
		    unsigned long mm_start, unsigned long mm_range,
		    unsigned long notifier_size,
		    const struct drm_gpusvm_ops *ops,
		    const unsigned long *chunk_sizes, int num_chunks);
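
/*
 * Example: initializing a GPU SVM instance embedded in a hypothetical driver
 * VM object (my_vm). Per the field documentation above, the chunk sizes are
 * powers of 2 in descending order; the 512M notifier size and the address
 * range are illustrative.
 *
 *	static const unsigned long my_chunk_sizes[] = { SZ_2M, SZ_64K, SZ_4K };
 *
 *	err = drm_gpusvm_init(&my_vm->gpusvm, "my-gpusvm", my_vm->drm,
 *			      current->mm, 0, my_vm->size, SZ_512M,
 *			      &my_driver_gpusvm_ops, my_chunk_sizes,
 *			      ARRAY_SIZE(my_chunk_sizes));
 */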

void drm_gpusvm_fini(struct drm_gpusvm *gpusvm);

void drm_gpusvm_free(struct drm_gpusvm *gpusvm);

unsigned long
drm_gpusvm_find_vma_start(struct drm_gpusvm *gpusvm,
			  unsigned long start,
			  unsigned long end);

struct drm_gpusvm_range *
drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
				unsigned long fault_addr,
				unsigned long gpuva_start,
				unsigned long gpuva_end,
				const struct drm_gpusvm_ctx *ctx);

void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
			     struct drm_gpusvm_range *range);

int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm,
			   struct drm_gpusvm_range *range);

struct drm_gpusvm_range *
drm_gpusvm_range_get(struct drm_gpusvm_range *range);

void drm_gpusvm_range_put(struct drm_gpusvm_range *range);

bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm,
				  struct drm_gpusvm_range *range);

int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
			       struct drm_gpusvm_range *range,
			       const struct drm_gpusvm_ctx *ctx);
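
/*
 * Example: the usual GPU page-fault flow, sketched with a hypothetical bind
 * helper. Pages are grabbed outside the notifier lock, then revalidated
 * under it before the GPU mapping is committed; on failure the fault is
 * typically retried.
 *
 *	range = drm_gpusvm_range_find_or_insert(gpusvm, fault_addr,
 *						gpuva_start, gpuva_end, &ctx);
 *	if (IS_ERR(range))
 *		return PTR_ERR(range);
 *
 *	err = drm_gpusvm_range_get_pages(gpusvm, range, &ctx);
 *	if (err)
 *		goto retry;
 *
 *	drm_gpusvm_notifier_lock(gpusvm);
 *	if (!drm_gpusvm_range_pages_valid(gpusvm, range)) {
 *		drm_gpusvm_notifier_unlock(gpusvm);
 *		goto retry;	// raced with an invalidation
 *	}
 *	my_driver_program_gpu_ptes(range);	// hypothetical bind step
 *	drm_gpusvm_notifier_unlock(gpusvm);
 */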

void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
				  struct drm_gpusvm_range *range,
				  const struct drm_gpusvm_ctx *ctx);

bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
			    unsigned long end);

struct drm_gpusvm_notifier *
drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start,
			 unsigned long end);

struct drm_gpusvm_range *
drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
		      unsigned long end);

void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
				   const struct mmu_notifier_range *mmu_range);

int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
			 struct drm_gpusvm_pages *svm_pages,
			 struct mm_struct *mm,
			 struct mmu_interval_notifier *notifier,
			 unsigned long pages_start, unsigned long pages_end,
			 const struct drm_gpusvm_ctx *ctx);

void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
			    struct drm_gpusvm_pages *svm_pages,
			    unsigned long npages,
			    const struct drm_gpusvm_ctx *ctx);

void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
			   struct drm_gpusvm_pages *svm_pages,
			   unsigned long npages);
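
/*
 * Example: using the lower-level pages API directly against a driver-managed
 * struct mmu_interval_notifier, without a drm_gpusvm_range. A sketch under
 * that assumption; all my_* names are hypothetical.
 *
 *	err = drm_gpusvm_get_pages(gpusvm, &my_obj->svm_pages, my_obj->mm,
 *				   &my_obj->notifier, start, end, &ctx);
 *	...
 *	npages = (end - start) >> PAGE_SHIFT;
 *	drm_gpusvm_unmap_pages(gpusvm, &my_obj->svm_pages, npages, &ctx);
 *	drm_gpusvm_free_pages(gpusvm, &my_obj->svm_pages, npages);
 */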

#ifdef CONFIG_LOCKDEP
/**
 * drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM
 * @gpusvm: Pointer to the GPU SVM structure.
 * @lock: the lock used to protect the gpuva list. The locking primitive
 *        must contain a dep_map field.
 *
 * Call this to annotate drm_gpusvm_range_find_or_insert and
 * drm_gpusvm_range_remove.
 */
#define drm_gpusvm_driver_set_lock(gpusvm, lock) \
	do { \
		if (!WARN((gpusvm)->lock_dep_map, \
			  "GPUSVM range lock should be set only once.")) \
			(gpusvm)->lock_dep_map = &(lock)->dep_map; \
	} while (0)
#else
#define drm_gpusvm_driver_set_lock(gpusvm, lock) do {} while (0)
#endif
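
/*
 * Example usage, assuming the driver serializes range insertion and removal
 * with a hypothetical mutex embedded in its VM object:
 *
 *	mutex_init(&my_vm->lock);
 *	drm_gpusvm_driver_set_lock(&my_vm->gpusvm, &my_vm->lock);
 */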

/**
 * drm_gpusvm_notifier_lock() - Lock GPU SVM notifier
 * @gpusvm__: Pointer to the GPU SVM structure.
 *
 * Abstracts the client's use of the GPU SVM notifier lock: takes the lock.
 */
#define drm_gpusvm_notifier_lock(gpusvm__) \
	down_read(&(gpusvm__)->notifier_lock)

/**
 * drm_gpusvm_notifier_unlock() - Unlock GPU SVM notifier
 * @gpusvm__: Pointer to the GPU SVM structure.
 *
 * Abstracts the client's use of the GPU SVM notifier lock: drops the lock.
 */
#define drm_gpusvm_notifier_unlock(gpusvm__) \
	up_read(&(gpusvm__)->notifier_lock)

/**
 * drm_gpusvm_range_start() - GPU SVM range start address
 * @range: Pointer to the GPU SVM range
 *
 * Return: GPU SVM range start address
 */
static inline unsigned long
drm_gpusvm_range_start(struct drm_gpusvm_range *range)
{
	return range->itree.start;
}

/**
 * drm_gpusvm_range_end() - GPU SVM range end address
 * @range: Pointer to the GPU SVM range
 *
 * Return: GPU SVM range end address
 */
static inline unsigned long
drm_gpusvm_range_end(struct drm_gpusvm_range *range)
{
	return range->itree.last + 1;
}

/**
 * drm_gpusvm_range_size() - GPU SVM range size
 * @range: Pointer to the GPU SVM range
 *
 * Return: GPU SVM range size
 */
static inline unsigned long
drm_gpusvm_range_size(struct drm_gpusvm_range *range)
{
	return drm_gpusvm_range_end(range) - drm_gpusvm_range_start(range);
}

/**
 * drm_gpusvm_notifier_start() - GPU SVM notifier start address
 * @notifier: Pointer to the GPU SVM notifier
 *
 * Return: GPU SVM notifier start address
 */
static inline unsigned long
drm_gpusvm_notifier_start(struct drm_gpusvm_notifier *notifier)
{
	return notifier->itree.start;
}

/**
 * drm_gpusvm_notifier_end() - GPU SVM notifier end address
 * @notifier: Pointer to the GPU SVM notifier
 *
 * Return: GPU SVM notifier end address
 */
static inline unsigned long
drm_gpusvm_notifier_end(struct drm_gpusvm_notifier *notifier)
{
	return notifier->itree.last + 1;
}

/**
 * drm_gpusvm_notifier_size() - GPU SVM notifier size
 * @notifier: Pointer to the GPU SVM notifier
 *
 * Return: GPU SVM notifier size
 */
static inline unsigned long
drm_gpusvm_notifier_size(struct drm_gpusvm_notifier *notifier)
{
	return drm_gpusvm_notifier_end(notifier) -
	       drm_gpusvm_notifier_start(notifier);
}
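
/*
 * Example: a common idiom built on the helpers above, e.g. to size the
 * npages argument of drm_gpusvm_unmap_pages(); a sketch, not a dedicated
 * helper in this API:
 *
 *	npages = drm_gpusvm_range_size(range) >> PAGE_SHIFT;
 */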

/**
 * __drm_gpusvm_range_next() - Get the next GPU SVM range in the list
 * @range: a pointer to the current GPU SVM range
 *
 * Return: A pointer to the next drm_gpusvm_range if available, or NULL if the
 *         current range is the last one or if the input range is NULL.
 */
static inline struct drm_gpusvm_range *
__drm_gpusvm_range_next(struct drm_gpusvm_range *range)
{
	if (range && !list_is_last(&range->entry,
				   &range->notifier->range_list))
		return list_next_entry(range, entry);

	return NULL;
}

/**
 * drm_gpusvm_for_each_range() - Iterate over GPU SVM ranges in a notifier
 * @range__: Iterator variable for the ranges. If set, it indicates the start of
 *           the iterator. If NULL, call drm_gpusvm_range_find() to get the range.
 * @notifier__: Pointer to the GPU SVM notifier
 * @start__: Start address of the range
 * @end__: End address of the range
 *
 * This macro is used to iterate over GPU SVM ranges in a notifier. It is safe
 * to use while holding the driver SVM lock or the notifier lock.
 */
#define drm_gpusvm_for_each_range(range__, notifier__, start__, end__) \
	for ((range__) = (range__) ?: \
	     drm_gpusvm_range_find((notifier__), (start__), (end__)); \
	     (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
	     (range__) = __drm_gpusvm_range_next(range__))

/**
 * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
 * @range__: Iterator variable for the ranges
 * @next__: Iterator variable for the ranges temporary storage
 * @notifier__: Pointer to the GPU SVM notifier
 * @start__: Start address of the range
 * @end__: End address of the range
 *
 * This macro is used to iterate over GPU SVM ranges in a notifier while
 * removing ranges from it.
 */
#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
	for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
	     (next__) = __drm_gpusvm_range_next(range__); \
	     (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
	     (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
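
/*
 * Example: tearing down every range in a notifier. The safe variant is
 * needed because drm_gpusvm_range_remove() unlinks the range being visited;
 * a sketch only, with 0/ULONG_MAX simply covering the whole address space:
 *
 *	drm_gpusvm_for_each_range_safe(range, next, notifier, 0, ULONG_MAX)
 *		drm_gpusvm_range_remove(gpusvm, range);
 */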

/**
 * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
 * @notifier: a pointer to the current drm_gpusvm_notifier
 *
 * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
 *         the current notifier is the last one or if the input notifier is
 *         NULL.
 */
static inline struct drm_gpusvm_notifier *
__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
{
	if (notifier && !list_is_last(&notifier->entry,
				      &notifier->gpusvm->notifier_list))
		return list_next_entry(notifier, entry);

	return NULL;
}

/**
 * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
 * @notifier__: Iterator variable for the notifiers
 * @gpusvm__: Pointer to the GPU SVM structure
 * @start__: Start address of the notifier
 * @end__: End address of the notifier
 *
 * This macro is used to iterate over GPU SVM notifiers in a gpusvm.
 */
#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
	for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)); \
	     (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
	     (notifier__) = __drm_gpusvm_notifier_next(notifier__))

/**
 * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
 * @notifier__: Iterator variable for the notifiers
 * @next__: Iterator variable for the notifiers temporary storage
 * @gpusvm__: Pointer to the GPU SVM structure
 * @start__: Start address of the notifier
 * @end__: End address of the notifier
 *
 * This macro is used to iterate over GPU SVM notifiers in a gpusvm while
 * removing notifiers from it.
 */
#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
	for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)), \
	     (next__) = __drm_gpusvm_notifier_next(notifier__); \
	     (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
	     (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))

#endif /* __DRM_GPUSVM_H__ */