/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef __DRM_GPUSVM_H__
#define __DRM_GPUSVM_H__

#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/mmu_notifier.h>

struct dev_pagemap_ops;
struct drm_device;
struct drm_gpusvm;
struct drm_gpusvm_notifier;
struct drm_gpusvm_ops;
struct drm_gpusvm_range;
struct drm_pagemap;
struct drm_pagemap_addr;

/**
 * struct drm_gpusvm_ops - Operations structure for GPU SVM
 *
 * This structure defines the operations for GPU Shared Virtual Memory (SVM).
 * These operations are provided by the GPU driver to manage SVM ranges and
 * notifiers.
 */
struct drm_gpusvm_ops {
	/**
	 * @notifier_alloc: Allocate a GPU SVM notifier (optional)
	 *
	 * Allocate a GPU SVM notifier.
	 *
	 * Return: Pointer to the allocated GPU SVM notifier on success, NULL on failure.
	 */
	struct drm_gpusvm_notifier *(*notifier_alloc)(void);

	/**
	 * @notifier_free: Free a GPU SVM notifier (optional)
	 * @notifier: Pointer to the GPU SVM notifier to be freed
	 *
	 * Free a GPU SVM notifier.
	 */
	void (*notifier_free)(struct drm_gpusvm_notifier *notifier);

	/**
	 * @range_alloc: Allocate a GPU SVM range (optional)
	 * @gpusvm: Pointer to the GPU SVM
	 *
	 * Allocate a GPU SVM range.
	 *
	 * Return: Pointer to the allocated GPU SVM range on success, NULL on failure.
	 */
	struct drm_gpusvm_range *(*range_alloc)(struct drm_gpusvm *gpusvm);

	/**
	 * @range_free: Free a GPU SVM range (optional)
	 * @range: Pointer to the GPU SVM range to be freed
	 *
	 * Free a GPU SVM range.
	 */
	void (*range_free)(struct drm_gpusvm_range *range);

	/**
	 * @invalidate: Invalidate GPU SVM notifier (required)
	 * @gpusvm: Pointer to the GPU SVM
	 * @notifier: Pointer to the GPU SVM notifier
	 * @mmu_range: Pointer to the mmu_notifier_range structure
	 *
	 * Invalidate the GPU page tables. The driver may safely walk the
	 * notifier's range RB tree/list in this function. Called while holding
	 * the notifier lock.
	 */
	void (*invalidate)(struct drm_gpusvm *gpusvm,
			   struct drm_gpusvm_notifier *notifier,
			   const struct mmu_notifier_range *mmu_range);
};
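
/*
 * Example (illustrative sketch): a driver ops table. Only @invalidate is
 * required; the allocation hooks are optional and typically used when the
 * driver embeds the GPU SVM structures in larger ones. All driver_*
 * functions here are hypothetical.
 *
 *	static const struct drm_gpusvm_ops driver_gpusvm_ops = {
 *		.range_alloc = driver_range_alloc,
 *		.range_free = driver_range_free,
 *		.invalidate = driver_invalidate,
 *	};
 */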

/**
 * struct drm_gpusvm_notifier - Structure representing a GPU SVM notifier
 *
 * @gpusvm: Pointer to the GPU SVM structure
 * @notifier: MMU interval notifier
 * @itree: Interval tree node for the notifier (inserted in GPU SVM)
 * @entry: List entry for fast interval tree traversal
 * @root: Cached root node of the RB tree containing ranges
 * @range_list: List head of ranges in the same order they appear in the
 *              interval tree. This is useful for iterating over ranges while
 *              modifying the RB tree.
 * @flags: Flags for notifier
 * @flags.removed: Flag indicating whether the MMU interval notifier has been
 *                 removed
 *
 * This structure represents a GPU SVM notifier.
 */
struct drm_gpusvm_notifier {
	struct drm_gpusvm *gpusvm;
	struct mmu_interval_notifier notifier;
	struct interval_tree_node itree;
	struct list_head entry;
	struct rb_root_cached root;
	struct list_head range_list;
	struct {
		u32 removed : 1;
	} flags;
};

/**
 * struct drm_gpusvm_pages_flags - Structure representing the flags for GPU SVM pages
 *
 * @migrate_devmem: Flag indicating whether the pages can be migrated to device memory
 * @unmapped: Flag indicating if the pages have been unmapped
 * @partial_unmap: Flag indicating if the pages have been partially unmapped
 * @has_devmem_pages: Flag indicating if the pages have devmem pages
 * @has_dma_mapping: Flag indicating if the pages have a DMA mapping
 * @__flags: Flags for pages in u16 form (used for READ_ONCE)
 */
struct drm_gpusvm_pages_flags {
	union {
		struct {
			/* All flags below must be set upon creation */
			u16 migrate_devmem : 1;
			/* All flags below must be set / cleared under notifier lock */
			u16 unmapped : 1;
			u16 partial_unmap : 1;
			u16 has_devmem_pages : 1;
			u16 has_dma_mapping : 1;
		};
		u16 __flags;
	};
};
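
/*
 * Example (illustrative): taking a lockless snapshot of the flags through
 * @__flags with READ_ONCE() and then testing bits via the struct view. A
 * sketch only; outside of such snapshots the flags are read and written
 * under the notifier lock.
 *
 *	struct drm_gpusvm_pages_flags flags = {
 *		.__flags = READ_ONCE(svm_pages->flags.__flags),
 *	};
 *
 *	if (flags.has_dma_mapping)
 *		...
 */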

/**
 * struct drm_gpusvm_pages - Structure representing GPU SVM mapped pages
 *
 * @dma_addr: Device address array
 * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping.
 *            Note this is assuming only one drm_pagemap per range is allowed.
 * @notifier_seq: Notifier sequence number of the range's pages
 * @flags: Flags for the pages
 * @flags.migrate_devmem: Flag indicating whether the pages can be migrated to device memory
 * @flags.unmapped: Flag indicating if the pages have been unmapped
 * @flags.partial_unmap: Flag indicating if the pages have been partially unmapped
 * @flags.has_devmem_pages: Flag indicating if the pages have devmem pages
 * @flags.has_dma_mapping: Flag indicating if the pages have a DMA mapping
 */
struct drm_gpusvm_pages {
	struct drm_pagemap_addr *dma_addr;
	struct drm_pagemap *dpagemap;
	unsigned long notifier_seq;
	struct drm_gpusvm_pages_flags flags;
};

/**
 * struct drm_gpusvm_range - Structure representing a GPU SVM range
 *
 * @gpusvm: Pointer to the GPU SVM structure
 * @notifier: Pointer to the GPU SVM notifier
 * @refcount: Reference count for the range
 * @itree: Interval tree node for the range (inserted in GPU SVM notifier)
 * @entry: List entry for fast interval tree traversal
 * @pages: The pages for this range.
 *
 * This structure represents a GPU SVM range used for tracking memory ranges
 * mapped in a DRM device.
 */
struct drm_gpusvm_range {
	struct drm_gpusvm *gpusvm;
	struct drm_gpusvm_notifier *notifier;
	struct kref refcount;
	struct interval_tree_node itree;
	struct list_head entry;
	struct drm_gpusvm_pages pages;
};

/**
 * struct drm_gpusvm - GPU SVM structure
 *
 * @name: Name of the GPU SVM
 * @drm: Pointer to the DRM device structure
 * @mm: Pointer to the mm_struct for the address space
 * @mm_start: Start address of GPU SVM
 * @mm_range: Range of the GPU SVM
 * @notifier_size: Size of individual notifiers
 * @ops: Pointer to the operations structure for GPU SVM
 * @chunk_sizes: Pointer to the array of chunk sizes used in range allocation.
 *               Entries should be powers of 2 in descending order.
 * @num_chunks: Number of chunks
 * @notifier_lock: Read-write semaphore for protecting notifier operations
 * @root: Cached root node of the Red-Black tree containing GPU SVM notifiers
 * @notifier_list: List head of notifiers in the same order they appear in the
 *                 interval tree. This is useful for iterating over notifiers
 *                 while modifying the RB tree.
 *
 * This structure represents a GPU SVM (Shared Virtual Memory) used for tracking
 * memory ranges mapped in a DRM (Direct Rendering Manager) device.
 *
 * No reference counting is provided, as this is expected to be embedded in the
 * driver VM structure along with the struct drm_gpuvm, which handles reference
 * counting.
 */
struct drm_gpusvm {
	const char *name;
	struct drm_device *drm;
	struct mm_struct *mm;
	unsigned long mm_start;
	unsigned long mm_range;
	unsigned long notifier_size;
	const struct drm_gpusvm_ops *ops;
	const unsigned long *chunk_sizes;
	int num_chunks;
	struct rw_semaphore notifier_lock;
	struct rb_root_cached root;
	struct list_head notifier_list;
#ifdef CONFIG_LOCKDEP
	/**
	 * @lock_dep_map: Annotates drm_gpusvm_range_find_or_insert and
	 * drm_gpusvm_range_remove with a driver provided lock.
	 */
	struct lockdep_map *lock_dep_map;
#endif
};
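
/*
 * Example (illustrative): embedding in a driver VM structure, as the
 * kernel-doc above suggests. struct driver_vm and its members are
 * hypothetical.
 *
 *	struct driver_vm {
 *		struct drm_gpuvm gpuvm;
 *		struct drm_gpusvm svm;
 *		struct mutex lock;	\/\* serializes range insert/remove \*\/
 *	};
 */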

/**
 * struct drm_gpusvm_ctx - DRM GPU SVM context
 *
 * @device_private_page_owner: The device-private page owner to use for
 * this operation
 * @check_pages_threshold: Check CPU pages for present if chunk is less than or
 *                         equal to threshold. If not present, reduce chunk
 *                         size.
 * @timeslice_ms: The minimum time, in milliseconds, that a piece of memory
 *		  remains with either exclusive GPU or CPU access.
 * @in_notifier: entering from an MMU notifier
 * @read_only: operating on read-only memory
 * @devmem_possible: possible to use device memory
 * @devmem_only: use only device memory
 * @allow_mixed: Allow mixed mappings in get pages. Mixing system memory with a
 *               single dpagemap is supported; mixing multiple dpagemaps is
 *               unsupported.
 *
 * Context in which DRM GPUSVM is operating (i.e. user arguments).
 */
struct drm_gpusvm_ctx {
	void *device_private_page_owner;
	unsigned long check_pages_threshold;
	unsigned long timeslice_ms;
	unsigned int in_notifier :1;
	unsigned int read_only :1;
	unsigned int devmem_possible :1;
	unsigned int devmem_only :1;
	unsigned int allow_mixed :1;
};
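
/*
 * Example (illustrative): a context for a fault that may use device memory.
 * The field values are assumptions for the sketch, not recommendations.
 *
 *	struct drm_gpusvm_ctx ctx = {
 *		.devmem_possible = true,
 *		.check_pages_threshold = SZ_64K,
 *		.timeslice_ms = 5,
 *	};
 */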

int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
		    const char *name, struct drm_device *drm,
		    struct mm_struct *mm,
		    unsigned long mm_start, unsigned long mm_range,
		    unsigned long notifier_size,
		    const struct drm_gpusvm_ops *ops,
		    const unsigned long *chunk_sizes, int num_chunks);
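
/*
 * Example (illustrative sketch): initializing a GPU SVM embedded in a
 * hypothetical driver VM. Chunk sizes must be powers of 2 in descending
 * order; the specific values and the SZ_512M notifier size below are
 * assumptions, not API requirements.
 *
 *	static const unsigned long chunk_sizes[] = { SZ_2M, SZ_64K, SZ_4K };
 *
 *	err = drm_gpusvm_init(&vm->svm, "driver-svm", vm->drm, current->mm,
 *			      0, vm->size, SZ_512M, &driver_gpusvm_ops,
 *			      chunk_sizes, ARRAY_SIZE(chunk_sizes));
 */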

void drm_gpusvm_fini(struct drm_gpusvm *gpusvm);

void drm_gpusvm_free(struct drm_gpusvm *gpusvm);

unsigned long
drm_gpusvm_find_vma_start(struct drm_gpusvm *gpusvm,
			  unsigned long start,
			  unsigned long end);

struct drm_gpusvm_range *
drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
				unsigned long fault_addr,
				unsigned long gpuva_start,
				unsigned long gpuva_end,
				const struct drm_gpusvm_ctx *ctx);

void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
			     struct drm_gpusvm_range *range);

int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm,
			   struct drm_gpusvm_range *range);

struct drm_gpusvm_range *
drm_gpusvm_range_get(struct drm_gpusvm_range *range);

void drm_gpusvm_range_put(struct drm_gpusvm_range *range);

bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm,
				  struct drm_gpusvm_range *range);

int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
			       struct drm_gpusvm_range *range,
			       const struct drm_gpusvm_ctx *ctx);
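
/*
 * Example (illustrative sketch): the typical GPU fault-handler flow. Pages
 * are grabbed outside the notifier lock, then validated under it before the
 * bind is committed; driver_commit_bind() and the error handling details are
 * hypothetical, only the drm_gpusvm_* calls are from this API.
 *
 *	range = drm_gpusvm_range_find_or_insert(gpusvm, fault_addr,
 *						gpuva_start, gpuva_end, &ctx);
 *	if (IS_ERR(range))
 *		return PTR_ERR(range);
 *
 * retry:
 *	err = drm_gpusvm_range_get_pages(gpusvm, range, &ctx);
 *	if (err)
 *		goto err_out;
 *
 *	drm_gpusvm_notifier_lock(gpusvm);
 *	if (!drm_gpusvm_range_pages_valid(gpusvm, range)) {
 *		drm_gpusvm_notifier_unlock(gpusvm);
 *		goto retry;
 *	}
 *	err = driver_commit_bind(gpusvm, range);
 *	drm_gpusvm_notifier_unlock(gpusvm);
 */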

void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
				  struct drm_gpusvm_range *range,
				  const struct drm_gpusvm_ctx *ctx);

bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
			    unsigned long end);

struct drm_gpusvm_notifier *
drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start,
			 unsigned long end);

struct drm_gpusvm_range *
drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
		      unsigned long end);

void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
				   const struct mmu_notifier_range *mmu_range);

int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
			 struct drm_gpusvm_pages *svm_pages,
			 struct mm_struct *mm,
			 struct mmu_interval_notifier *notifier,
			 unsigned long pages_start, unsigned long pages_end,
			 const struct drm_gpusvm_ctx *ctx);

void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
			    struct drm_gpusvm_pages *svm_pages,
			    unsigned long npages,
			    const struct drm_gpusvm_ctx *ctx);

void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
			   struct drm_gpusvm_pages *svm_pages,
			   unsigned long npages);

/**
 * enum drm_gpusvm_scan_result - Scan result from the drm_gpusvm_scan_mm() function.
 * @DRM_GPUSVM_SCAN_UNPOPULATED: At least one page was not present or inaccessible.
 * @DRM_GPUSVM_SCAN_EQUAL: All pages belong to the struct dev_pagemap indicated as
 * the @pagemap argument to the drm_gpusvm_scan_mm() function.
 * @DRM_GPUSVM_SCAN_OTHER: All pages belong to exactly one dev_pagemap, which is
 * *NOT* the @pagemap argument to drm_gpusvm_scan_mm(). All pages belong to the
 * same device private owner.
 * @DRM_GPUSVM_SCAN_SYSTEM: All pages are present and are system pages.
 * @DRM_GPUSVM_SCAN_MIXED_DEVICE: All pages are device pages and belong to at least
 * two different struct dev_pagemaps. All pages belong to the same device private
 * owner.
 * @DRM_GPUSVM_SCAN_MIXED: Pages are present and are a mix of system pages
 * and device-private pages. All device-private pages belong to the same device
 * private owner.
 */
enum drm_gpusvm_scan_result {
	DRM_GPUSVM_SCAN_UNPOPULATED,
	DRM_GPUSVM_SCAN_EQUAL,
	DRM_GPUSVM_SCAN_OTHER,
	DRM_GPUSVM_SCAN_SYSTEM,
	DRM_GPUSVM_SCAN_MIXED_DEVICE,
	DRM_GPUSVM_SCAN_MIXED,
};

enum drm_gpusvm_scan_result drm_gpusvm_scan_mm(struct drm_gpusvm_range *range,
					       void *dev_private_owner,
					       const struct dev_pagemap *pagemap);
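
/*
 * Example (illustrative): acting on the scan result. The migration policy
 * and driver_migrate_to_devmem() are hypothetical.
 *
 *	switch (drm_gpusvm_scan_mm(range, owner, pagemap)) {
 *	case DRM_GPUSVM_SCAN_EQUAL:
 *		break;	\/\* already backed by @pagemap \*\/
 *	case DRM_GPUSVM_SCAN_SYSTEM:
 *	case DRM_GPUSVM_SCAN_UNPOPULATED:
 *		err = driver_migrate_to_devmem(range);
 *		break;
 *	default:
 *		break;	\/\* mixed or foreign placement \*\/
 *	}
 */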

#ifdef CONFIG_LOCKDEP
/**
 * drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM
 * @gpusvm: Pointer to the GPU SVM structure.
 * @lock: the lock used to protect the gpuva list. The locking primitive
 * must contain a dep_map field.
 *
 * Call this to annotate drm_gpusvm_range_find_or_insert and
 * drm_gpusvm_range_remove.
 */
#define drm_gpusvm_driver_set_lock(gpusvm, lock) \
	do { \
		if (!WARN((gpusvm)->lock_dep_map, \
			  "GPUSVM range lock should be set only once."))\
			(gpusvm)->lock_dep_map = &(lock)->dep_map;	\
	} while (0)
#else
#define drm_gpusvm_driver_set_lock(gpusvm, lock) do {} while (0)
#endif
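
/*
 * Example (illustrative): annotating GPU SVM with the driver lock that
 * serializes range insertion and removal. vm->lock is the hypothetical
 * driver mutex from the embedding example above.
 *
 *	mutex_init(&vm->lock);
 *	drm_gpusvm_driver_set_lock(&vm->svm, &vm->lock);
 */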

/**
 * drm_gpusvm_notifier_lock() - Lock GPU SVM notifier
 * @gpusvm__: Pointer to the GPU SVM structure.
 *
 * Abstracts the client's use of the GPU SVM notifier lock; takes the lock.
 */
#define drm_gpusvm_notifier_lock(gpusvm__)	\
	down_read(&(gpusvm__)->notifier_lock)

/**
 * drm_gpusvm_notifier_unlock() - Unlock GPU SVM notifier
 * @gpusvm__: Pointer to the GPU SVM structure.
 *
 * Abstracts the client's use of the GPU SVM notifier lock; drops the lock.
 */
#define drm_gpusvm_notifier_unlock(gpusvm__)	\
	up_read(&(gpusvm__)->notifier_lock)

/**
 * drm_gpusvm_range_start() - GPU SVM range start address
 * @range: Pointer to the GPU SVM range
 *
 * Return: GPU SVM range start address
 */
static inline unsigned long
drm_gpusvm_range_start(struct drm_gpusvm_range *range)
{
	return range->itree.start;
}

/**
 * drm_gpusvm_range_end() - GPU SVM range end address
 * @range: Pointer to the GPU SVM range
 *
 * Return: GPU SVM range end address
 */
static inline unsigned long
drm_gpusvm_range_end(struct drm_gpusvm_range *range)
{
	return range->itree.last + 1;
}

/**
 * drm_gpusvm_range_size() - GPU SVM range size
 * @range: Pointer to the GPU SVM range
 *
 * Return: GPU SVM range size
 */
static inline unsigned long
drm_gpusvm_range_size(struct drm_gpusvm_range *range)
{
	return drm_gpusvm_range_end(range) - drm_gpusvm_range_start(range);
}

/**
 * drm_gpusvm_notifier_start() - GPU SVM notifier start address
 * @notifier: Pointer to the GPU SVM notifier
 *
 * Return: GPU SVM notifier start address
 */
static inline unsigned long
drm_gpusvm_notifier_start(struct drm_gpusvm_notifier *notifier)
{
	return notifier->itree.start;
}

/**
 * drm_gpusvm_notifier_end() - GPU SVM notifier end address
 * @notifier: Pointer to the GPU SVM notifier
 *
 * Return: GPU SVM notifier end address
 */
static inline unsigned long
drm_gpusvm_notifier_end(struct drm_gpusvm_notifier *notifier)
{
	return notifier->itree.last + 1;
}

/**
 * drm_gpusvm_notifier_size() - GPU SVM notifier size
 * @notifier: Pointer to the GPU SVM notifier
 *
 * Return: GPU SVM notifier size
 */
static inline unsigned long
drm_gpusvm_notifier_size(struct drm_gpusvm_notifier *notifier)
{
	return drm_gpusvm_notifier_end(notifier) -
		drm_gpusvm_notifier_start(notifier);
}

/**
 * __drm_gpusvm_range_next() - Get the next GPU SVM range in the list
 * @range: a pointer to the current GPU SVM range
 *
 * Return: A pointer to the next drm_gpusvm_range if available, or NULL if the
 *         current range is the last one or if the input range is NULL.
 */
static inline struct drm_gpusvm_range *
__drm_gpusvm_range_next(struct drm_gpusvm_range *range)
{
	if (range && !list_is_last(&range->entry,
				   &range->notifier->range_list))
		return list_next_entry(range, entry);

	return NULL;
}

/**
 * drm_gpusvm_for_each_range() - Iterate over GPU SVM ranges in a notifier
 * @range__: Iterator variable for the ranges. If non-NULL, iteration starts
 *	     from this range; if NULL, drm_gpusvm_range_find() is called to
 *	     find the first range.
 * @notifier__: Pointer to the GPU SVM notifier
 * @start__: Start address of the range
 * @end__: End address of the range
 *
 * This macro is used to iterate over GPU SVM ranges in a notifier. It is safe
 * to use while holding the driver SVM lock or the notifier lock.
 */
#define drm_gpusvm_for_each_range(range__, notifier__, start__, end__)	\
	for ((range__) = (range__) ?:					\
	     drm_gpusvm_range_find((notifier__), (start__), (end__));	\
	     (range__) && (drm_gpusvm_range_start(range__) < (end__));	\
	     (range__) = __drm_gpusvm_range_next(range__))
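
/*
 * Example (illustrative sketch): walking the ranges touched by an
 * invalidation event from the required @invalidate hook. The hook is called
 * with the notifier lock held, so plain iteration is safe here;
 * driver_zap_gpu_pt() is a hypothetical driver helper.
 *
 *	static void driver_invalidate(struct drm_gpusvm *gpusvm,
 *				      struct drm_gpusvm_notifier *notifier,
 *				      const struct mmu_notifier_range *mmu_range)
 *	{
 *		struct drm_gpusvm_range *range = NULL;
 *
 *		drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
 *					  mmu_range->end)
 *			driver_zap_gpu_pt(range);
 *	}
 */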

/**
 * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
 * @range__: Iterator variable for the ranges
 * @next__: Iterator variable for temporary storage of the next range
 * @notifier__: Pointer to the GPU SVM notifier
 * @start__: Start address of the range
 * @end__: End address of the range
 *
 * This macro is used to iterate over GPU SVM ranges in a notifier while
 * removing ranges from it.
 */
#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__)	\
	for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)),	\
	     (next__) = __drm_gpusvm_range_next(range__);				\
	     (range__) && (drm_gpusvm_range_start(range__) < (end__));			\
	     (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))

/**
 * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
 * @notifier: a pointer to the current drm_gpusvm_notifier
 *
 * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
 *         the current notifier is the last one or if the input notifier is
 *         NULL.
 */
static inline struct drm_gpusvm_notifier *
__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
{
	if (notifier && !list_is_last(&notifier->entry,
				      &notifier->gpusvm->notifier_list))
		return list_next_entry(notifier, entry);

	return NULL;
}

/**
 * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
 * @notifier__: Iterator variable for the notifiers
 * @gpusvm__: Pointer to the GPU SVM structure
 * @start__: Start address of the notifier
 * @end__: End address of the notifier
 *
 * This macro is used to iterate over GPU SVM notifiers in a gpusvm.
 */
#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__)		\
	for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__));	\
	     (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__));		\
	     (notifier__) = __drm_gpusvm_notifier_next(notifier__))

/**
 * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
 * @notifier__: Iterator variable for the notifiers
 * @next__: Iterator variable for temporary storage of the next notifier
 * @gpusvm__: Pointer to the GPU SVM structure
 * @start__: Start address of the notifier
 * @end__: End address of the notifier
 *
 * This macro is used to iterate over GPU SVM notifiers in a gpusvm while
 * removing notifiers from it.
 */
#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__)	\
	for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)),	\
	     (next__) = __drm_gpusvm_notifier_next(notifier__);				\
	     (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__));		\
	     (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))

#endif /* __DRM_GPUSVM_H__ */