/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef __DRM_GPUSVM_H__
#define __DRM_GPUSVM_H__

#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/mmu_notifier.h>

struct dev_pagemap_ops;
struct drm_device;
struct drm_gpusvm;
struct drm_gpusvm_notifier;
struct drm_gpusvm_ops;
struct drm_gpusvm_range;
struct drm_gpusvm_devmem;
struct drm_pagemap;
struct drm_pagemap_device_addr;

/**
 * struct drm_gpusvm_devmem_ops - Operations structure for GPU SVM device memory
 *
 * This structure defines the operations for GPU Shared Virtual Memory (SVM)
 * device memory. These operations are provided by the GPU driver to manage
 * device memory allocations and perform operations such as migration between
 * device memory and system RAM.
 */
struct drm_gpusvm_devmem_ops {
	/**
	 * @devmem_release: Release device memory allocation (optional)
	 * @devmem_allocation: device memory allocation
	 *
	 * Release device memory allocation and drop a reference to device
	 * memory allocation.
	 */
	void (*devmem_release)(struct drm_gpusvm_devmem *devmem_allocation);

	/**
	 * @populate_devmem_pfn: Populate device memory PFN (required for migration)
	 * @devmem_allocation: device memory allocation
	 * @npages: Number of pages to populate
	 * @pfn: Array of page frame numbers to populate
	 *
	 * Populate device memory page frame numbers (PFN).
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*populate_devmem_pfn)(struct drm_gpusvm_devmem *devmem_allocation,
				   unsigned long npages, unsigned long *pfn);

	/**
	 * @copy_to_devmem: Copy to device memory (required for migration)
	 * @pages: Pointer to array of device memory pages (destination)
	 * @dma_addr: Pointer to array of DMA addresses (source)
	 * @npages: Number of pages to copy
	 *
	 * Copy pages to device memory.
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*copy_to_devmem)(struct page **pages,
			      dma_addr_t *dma_addr,
			      unsigned long npages);

	/**
	 * @copy_to_ram: Copy to system RAM (required for migration)
	 * @pages: Pointer to array of device memory pages (source)
	 * @dma_addr: Pointer to array of DMA addresses (destination)
	 * @npages: Number of pages to copy
	 *
	 * Copy pages to system RAM.
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*copy_to_ram)(struct page **pages,
			   dma_addr_t *dma_addr,
			   unsigned long npages);
};

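/*
 * Example (illustrative only): a driver might wire these operations up as
 * follows; the my_devmem_* helpers are hypothetical.
 *
 *	static const struct drm_gpusvm_devmem_ops my_devmem_ops = {
 *		.devmem_release = my_devmem_release,
 *		.populate_devmem_pfn = my_devmem_populate_pfn,
 *		.copy_to_devmem = my_copy_to_devmem,
 *		.copy_to_ram = my_copy_to_ram,
 *	};
 */
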
/**
 * struct drm_gpusvm_devmem - Structure representing a GPU SVM device memory allocation
 *
 * @dev: Pointer to the device structure to which the device memory allocation belongs
 * @mm: Pointer to the mm_struct for the address space
 * @detached: Completion signaled once the device memory allocation is detached
 *            from device pages
 * @ops: Pointer to the operations structure for GPU SVM device memory
 * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
 * @size: Size of device memory allocation
 * @timeslice_expiration: Timeslice expiration in jiffies
 */
struct drm_gpusvm_devmem {
	struct device *dev;
	struct mm_struct *mm;
	struct completion detached;
	const struct drm_gpusvm_devmem_ops *ops;
	struct drm_pagemap *dpagemap;
	size_t size;
	u64 timeslice_expiration;
};

/**
 * struct drm_gpusvm_ops - Operations structure for GPU SVM
 *
 * This structure defines the operations for GPU Shared Virtual Memory (SVM).
 * These operations are provided by the GPU driver to manage SVM ranges and
 * notifiers.
 */
struct drm_gpusvm_ops {
	/**
	 * @notifier_alloc: Allocate a GPU SVM notifier (optional)
	 *
	 * Allocate a GPU SVM notifier.
	 *
	 * Return: Pointer to the allocated GPU SVM notifier on success, NULL on failure.
	 */
	struct drm_gpusvm_notifier *(*notifier_alloc)(void);

	/**
	 * @notifier_free: Free a GPU SVM notifier (optional)
	 * @notifier: Pointer to the GPU SVM notifier to be freed
	 *
	 * Free a GPU SVM notifier.
	 */
	void (*notifier_free)(struct drm_gpusvm_notifier *notifier);

	/**
	 * @range_alloc: Allocate a GPU SVM range (optional)
	 * @gpusvm: Pointer to the GPU SVM
	 *
	 * Allocate a GPU SVM range.
	 *
	 * Return: Pointer to the allocated GPU SVM range on success, NULL on failure.
	 */
	struct drm_gpusvm_range *(*range_alloc)(struct drm_gpusvm *gpusvm);

	/**
	 * @range_free: Free a GPU SVM range (optional)
	 * @range: Pointer to the GPU SVM range to be freed
	 *
	 * Free a GPU SVM range.
	 */
	void (*range_free)(struct drm_gpusvm_range *range);

	/**
	 * @invalidate: Invalidate GPU SVM notifier (required)
	 * @gpusvm: Pointer to the GPU SVM
	 * @notifier: Pointer to the GPU SVM notifier
	 * @mmu_range: Pointer to the mmu_notifier_range structure
	 *
	 * Invalidate the GPU page tables. The driver can safely walk the
	 * notifier's range RB tree/list in this function. Called while holding
	 * the notifier lock.
	 */
	void (*invalidate)(struct drm_gpusvm *gpusvm,
			   struct drm_gpusvm_notifier *notifier,
			   const struct mmu_notifier_range *mmu_range);
};

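/*
 * Example (illustrative only): a minimal &drm_gpusvm_ops.invalidate
 * implementation. The my_gpu_tlb_inval() helper, which would zap the GPU
 * page tables for the invalidated interval, is hypothetical.
 *
 *	static void my_invalidate(struct drm_gpusvm *gpusvm,
 *				  struct drm_gpusvm_notifier *notifier,
 *				  const struct mmu_notifier_range *mmu_range)
 *	{
 *		struct drm_gpusvm_range *range = NULL;
 *
 *		drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
 *					  mmu_range->end)
 *			drm_gpusvm_range_set_unmapped(range, mmu_range);
 *		my_gpu_tlb_inval(gpusvm, mmu_range->start, mmu_range->end);
 *	}
 */
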
/**
 * struct drm_gpusvm_notifier - Structure representing a GPU SVM notifier
 *
 * @gpusvm: Pointer to the GPU SVM structure
 * @notifier: MMU interval notifier
 * @itree: Interval tree node for the notifier (inserted in GPU SVM)
 * @entry: List entry for fast interval tree traversal
 * @root: Cached root node of the RB tree containing ranges
 * @range_list: List head of ranges in the same order they appear in the
 *              interval tree. This is useful to keep iterating over ranges
 *              while modifying the RB tree.
 * @flags: Flags for notifier
 * @flags.removed: Flag indicating whether the MMU interval notifier has been
 *                 removed
 *
 * This structure represents a GPU SVM notifier.
 */
struct drm_gpusvm_notifier {
	struct drm_gpusvm *gpusvm;
	struct mmu_interval_notifier notifier;
	struct interval_tree_node itree;
	struct list_head entry;
	struct rb_root_cached root;
	struct list_head range_list;
	struct {
		u32 removed : 1;
	} flags;
};

/**
 * struct drm_gpusvm_range_flags - Structure representing GPU SVM range flags
 *
 * @migrate_devmem: Flag indicating whether the range can be migrated to device memory
 * @unmapped: Flag indicating if the range has been unmapped
 * @partial_unmap: Flag indicating if the range has been partially unmapped
 * @has_devmem_pages: Flag indicating if the range has devmem pages
 * @has_dma_mapping: Flag indicating if the range has a DMA mapping
 * @__flags: Flags for range in u16 form (used for READ_ONCE)
 */
struct drm_gpusvm_range_flags {
	union {
		struct {
			/* All flags below must be set upon creation */
			u16 migrate_devmem : 1;
			/* All flags below must be set / cleared under notifier lock */
			u16 unmapped : 1;
			u16 partial_unmap : 1;
			u16 has_devmem_pages : 1;
			u16 has_dma_mapping : 1;
		};
		u16 __flags;
	};
};

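/*
 * Example (illustrative only): the union above allows a lockless snapshot of
 * all flags at once via the u16 view:
 *
 *	u16 flags = READ_ONCE(range->flags.__flags);
 */
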
/**
 * struct drm_gpusvm_range - Structure representing a GPU SVM range
 *
 * @gpusvm: Pointer to the GPU SVM structure
 * @notifier: Pointer to the GPU SVM notifier
 * @refcount: Reference count for the range
 * @itree: Interval tree node for the range (inserted in GPU SVM notifier)
 * @entry: List entry for fast interval tree traversal
 * @notifier_seq: Notifier sequence number of the range's pages
 * @dma_addr: Device address array
 * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping.
 *            Note this is assuming only one drm_pagemap per range is allowed.
 * @flags: Flags for range
 *
 * This structure represents a GPU SVM range used for tracking memory ranges
 * mapped in a DRM device.
 */
struct drm_gpusvm_range {
	struct drm_gpusvm *gpusvm;
	struct drm_gpusvm_notifier *notifier;
	struct kref refcount;
	struct interval_tree_node itree;
	struct list_head entry;
	unsigned long notifier_seq;
	struct drm_pagemap_device_addr *dma_addr;
	struct drm_pagemap *dpagemap;
	struct drm_gpusvm_range_flags flags;
};

/**
 * struct drm_gpusvm - GPU SVM structure
 *
 * @name: Name of the GPU SVM
 * @drm: Pointer to the DRM device structure
 * @mm: Pointer to the mm_struct for the address space
 * @device_private_page_owner: Device private pages owner
 * @mm_start: Start address of GPU SVM
 * @mm_range: Range of the GPU SVM
 * @notifier_size: Size of individual notifiers
 * @ops: Pointer to the operations structure for GPU SVM
 * @chunk_sizes: Pointer to the array of chunk sizes used in range allocation.
 *               Entries should be powers of 2 in descending order.
 * @num_chunks: Number of chunks
 * @notifier_lock: Read-write semaphore for protecting notifier operations
 * @root: Cached root node of the Red-Black tree containing GPU SVM notifiers
 * @notifier_list: List head of notifiers in the same order they appear in the
 *                 interval tree. This is useful to keep iterating over
 *                 notifiers while modifying the RB tree.
 *
 * This structure represents a GPU SVM (Shared Virtual Memory) used for tracking
 * memory ranges mapped in a DRM (Direct Rendering Manager) device.
 *
 * No reference counting is provided, as this is expected to be embedded in the
 * driver VM structure along with the struct drm_gpuvm, which handles reference
 * counting.
 */
struct drm_gpusvm {
	const char *name;
	struct drm_device *drm;
	struct mm_struct *mm;
	void *device_private_page_owner;
	unsigned long mm_start;
	unsigned long mm_range;
	unsigned long notifier_size;
	const struct drm_gpusvm_ops *ops;
	const unsigned long *chunk_sizes;
	int num_chunks;
	struct rw_semaphore notifier_lock;
	struct rb_root_cached root;
	struct list_head notifier_list;
#ifdef CONFIG_LOCKDEP
	/**
	 * @lock_dep_map: Annotates drm_gpusvm_range_find_or_insert and
	 * drm_gpusvm_range_remove with a driver provided lock.
	 */
	struct lockdep_map *lock_dep_map;
#endif
};

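/*
 * Example (illustrative only): as noted above, struct drm_gpusvm is expected
 * to be embedded in a driver VM structure; struct my_vm is hypothetical.
 *
 *	struct my_vm {
 *		struct drm_gpuvm gpuvm;
 *		struct drm_gpusvm gpusvm;
 *	};
 */
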
/**
 * struct drm_gpusvm_ctx - DRM GPU SVM context
 *
 * @check_pages_threshold: Check CPU pages for present if chunk is less than or
 *                         equal to threshold. If not present, reduce chunk
 *                         size.
 * @timeslice_ms: The minimum time, in milliseconds, that a piece of memory
 *		  remains with either exclusive GPU or CPU access.
 * @in_notifier: entering from a MMU notifier
 * @read_only: operating on read-only memory
 * @devmem_possible: possible to use device memory
 * @devmem_only: use only device memory
 *
 * Context that DRM GPU SVM is operating in (i.e. user arguments).
 */
struct drm_gpusvm_ctx {
	unsigned long check_pages_threshold;
	unsigned long timeslice_ms;
	unsigned int in_notifier :1;
	unsigned int read_only :1;
	unsigned int devmem_possible :1;
	unsigned int devmem_only :1;
};

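/*
 * Example (illustrative only): a context for a fault handler that may use
 * device memory; the values below are arbitrary.
 *
 *	struct drm_gpusvm_ctx ctx = {
 *		.check_pages_threshold = SZ_64K,
 *		.timeslice_ms = 5,
 *		.devmem_possible = true,
 *	};
 */
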
int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
		    const char *name, struct drm_device *drm,
		    struct mm_struct *mm, void *device_private_page_owner,
		    unsigned long mm_start, unsigned long mm_range,
		    unsigned long notifier_size,
		    const struct drm_gpusvm_ops *ops,
		    const unsigned long *chunk_sizes, int num_chunks);

void drm_gpusvm_fini(struct drm_gpusvm *gpusvm);

void drm_gpusvm_free(struct drm_gpusvm *gpusvm);

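/*
 * Example (illustrative only): initializing a GPU SVM covering the user
 * address space. my_vm and my_ops are hypothetical; chunk sizes must be
 * powers of 2 in descending order.
 *
 *	static const unsigned long my_chunk_sizes[] = { SZ_2M, SZ_64K, SZ_4K };
 *
 *	err = drm_gpusvm_init(&my_vm->gpusvm, "my-gpusvm", drm, current->mm,
 *			      NULL, 0, TASK_SIZE, SZ_512M, &my_ops,
 *			      my_chunk_sizes, ARRAY_SIZE(my_chunk_sizes));
 */
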
struct drm_gpusvm_range *
drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
				unsigned long fault_addr,
				unsigned long gpuva_start,
				unsigned long gpuva_end,
				const struct drm_gpusvm_ctx *ctx);

void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
			     struct drm_gpusvm_range *range);

int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm,
			   struct drm_gpusvm_range *range);

struct drm_gpusvm_range *
drm_gpusvm_range_get(struct drm_gpusvm_range *range);

void drm_gpusvm_range_put(struct drm_gpusvm_range *range);

bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm,
				  struct drm_gpusvm_range *range);

int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
			       struct drm_gpusvm_range *range,
			       const struct drm_gpusvm_ctx *ctx);

void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
				  struct drm_gpusvm_range *range,
				  const struct drm_gpusvm_ctx *ctx);

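/*
 * Example (illustrative only): a simplified GPU page-fault flow built from
 * the functions above. my_bind_range(), which would program the GPU page
 * tables, is hypothetical; a real driver would also retry when the pages
 * are invalidated between get_pages and the locked validity check.
 *
 *	range = drm_gpusvm_range_find_or_insert(gpusvm, fault_addr,
 *						gpuva_start, gpuva_end, &ctx);
 *	if (IS_ERR(range))
 *		return PTR_ERR(range);
 *
 *	err = drm_gpusvm_range_get_pages(gpusvm, range, &ctx);
 *	if (err)
 *		return err;
 *
 *	drm_gpusvm_notifier_lock(gpusvm);
 *	if (drm_gpusvm_range_pages_valid(gpusvm, range))
 *		err = my_bind_range(gpusvm, range);
 *	drm_gpusvm_notifier_unlock(gpusvm);
 */
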
int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
				 struct drm_gpusvm_range *range,
				 struct drm_gpusvm_devmem *devmem_allocation,
				 const struct drm_gpusvm_ctx *ctx);

int drm_gpusvm_evict_to_ram(struct drm_gpusvm_devmem *devmem_allocation);

const struct dev_pagemap_ops *drm_gpusvm_pagemap_ops_get(void);

bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
			    unsigned long end);

struct drm_gpusvm_range *
drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
		      unsigned long end);

void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
				   const struct mmu_notifier_range *mmu_range);

void drm_gpusvm_devmem_init(struct drm_gpusvm_devmem *devmem_allocation,
			    struct device *dev, struct mm_struct *mm,
			    const struct drm_gpusvm_devmem_ops *ops,
			    struct drm_pagemap *dpagemap, size_t size);

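/*
 * Example (illustrative only): migrating a range to device memory. The
 * my_alloc object embedding struct drm_gpusvm_devmem, my_devmem_ops, and
 * my_dpagemap are hypothetical.
 *
 *	drm_gpusvm_devmem_init(&my_alloc->devmem, drm->dev, gpusvm->mm,
 *			       &my_devmem_ops, my_dpagemap, size);
 *	err = drm_gpusvm_migrate_to_devmem(gpusvm, range, &my_alloc->devmem,
 *					   &ctx);
 */
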
#ifdef CONFIG_LOCKDEP
/**
 * drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM
 * @gpusvm: Pointer to the GPU SVM structure.
 * @lock: the lock used to protect the gpuva list. The locking primitive
 * must contain a dep_map field.
 *
 * Call this to annotate drm_gpusvm_range_find_or_insert and
 * drm_gpusvm_range_remove.
 */
#define drm_gpusvm_driver_set_lock(gpusvm, lock) \
	do { \
		if (!WARN((gpusvm)->lock_dep_map, \
			  "GPUSVM range lock should be set only once."))\
			(gpusvm)->lock_dep_map = &(lock)->dep_map;	\
	} while (0)
#else
#define drm_gpusvm_driver_set_lock(gpusvm, lock) do {} while (0)
#endif

/**
 * drm_gpusvm_notifier_lock() - Lock GPU SVM notifier
 * @gpusvm__: Pointer to the GPU SVM structure.
 *
 * Abstracts client usage of the GPU SVM notifier lock; takes the lock.
 */
#define drm_gpusvm_notifier_lock(gpusvm__)	\
	down_read(&(gpusvm__)->notifier_lock)

/**
 * drm_gpusvm_notifier_unlock() - Unlock GPU SVM notifier
 * @gpusvm__: Pointer to the GPU SVM structure.
 *
 * Abstracts client usage of the GPU SVM notifier lock; drops the lock.
 */
#define drm_gpusvm_notifier_unlock(gpusvm__)	\
	up_read(&(gpusvm__)->notifier_lock)

/**
 * drm_gpusvm_range_start() - GPU SVM range start address
 * @range: Pointer to the GPU SVM range
 *
 * Return: GPU SVM range start address
 */
static inline unsigned long
drm_gpusvm_range_start(struct drm_gpusvm_range *range)
{
	return range->itree.start;
}

/**
 * drm_gpusvm_range_end() - GPU SVM range end address
 * @range: Pointer to the GPU SVM range
 *
 * Return: GPU SVM range end address
 */
static inline unsigned long
drm_gpusvm_range_end(struct drm_gpusvm_range *range)
{
	return range->itree.last + 1;
}

/**
 * drm_gpusvm_range_size() - GPU SVM range size
 * @range: Pointer to the GPU SVM range
 *
 * Return: GPU SVM range size
 */
static inline unsigned long
drm_gpusvm_range_size(struct drm_gpusvm_range *range)
{
	return drm_gpusvm_range_end(range) - drm_gpusvm_range_start(range);
}

/**
 * drm_gpusvm_notifier_start() - GPU SVM notifier start address
 * @notifier: Pointer to the GPU SVM notifier
 *
 * Return: GPU SVM notifier start address
 */
static inline unsigned long
drm_gpusvm_notifier_start(struct drm_gpusvm_notifier *notifier)
{
	return notifier->itree.start;
}

/**
 * drm_gpusvm_notifier_end() - GPU SVM notifier end address
 * @notifier: Pointer to the GPU SVM notifier
 *
 * Return: GPU SVM notifier end address
 */
static inline unsigned long
drm_gpusvm_notifier_end(struct drm_gpusvm_notifier *notifier)
{
	return notifier->itree.last + 1;
}

/**
 * drm_gpusvm_notifier_size() - GPU SVM notifier size
 * @notifier: Pointer to the GPU SVM notifier
 *
 * Return: GPU SVM notifier size
 */
static inline unsigned long
drm_gpusvm_notifier_size(struct drm_gpusvm_notifier *notifier)
{
	return drm_gpusvm_notifier_end(notifier) -
		drm_gpusvm_notifier_start(notifier);
}

/**
 * __drm_gpusvm_range_next() - Get the next GPU SVM range in the list
 * @range: a pointer to the current GPU SVM range
 *
 * Return: A pointer to the next drm_gpusvm_range if available, or NULL if the
 *         current range is the last one or if the input range is NULL.
 */
static inline struct drm_gpusvm_range *
__drm_gpusvm_range_next(struct drm_gpusvm_range *range)
{
	if (range && !list_is_last(&range->entry,
				   &range->notifier->range_list))
		return list_next_entry(range, entry);

	return NULL;
}

/**
 * drm_gpusvm_for_each_range() - Iterate over GPU SVM ranges in a notifier
 * @range__: Iterator variable for the ranges. If set, it indicates the start
 *	     of the iteration. If NULL, drm_gpusvm_range_find() is called to
 *	     find the first range.
 * @notifier__: Pointer to the GPU SVM notifier
 * @start__: Start address of the range
 * @end__: End address of the range
 *
 * This macro is used to iterate over GPU SVM ranges in a notifier. It is safe
 * to use while holding the driver SVM lock or the notifier lock.
 */
#define drm_gpusvm_for_each_range(range__, notifier__, start__, end__)	\
	for ((range__) = (range__) ?:					\
	     drm_gpusvm_range_find((notifier__), (start__), (end__));	\
	     (range__) && (drm_gpusvm_range_start(range__) < (end__));	\
	     (range__) = __drm_gpusvm_range_next(range__))

#endif /* __DRM_GPUSVM_H__ */