xref: /linux/drivers/gpu/drm/drm_gpusvm.c (revision 33b4e4fcd2980ee5fd754731ca9b0325f0344f04)
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /*
3  * Copyright © 2024 Intel Corporation
4  *
5  * Authors:
6  *     Matthew Brost <matthew.brost@intel.com>
7  */
8 
9 #include <linux/dma-mapping.h>
10 #include <linux/export.h>
11 #include <linux/hmm.h>
12 #include <linux/memremap.h>
13 #include <linux/migrate.h>
14 #include <linux/mm_types.h>
15 #include <linux/pagemap.h>
16 #include <linux/slab.h>
17 
18 #include <drm/drm_device.h>
19 #include <drm/drm_gpusvm.h>
20 #include <drm/drm_pagemap.h>
21 #include <drm/drm_print.h>
22 
23 /**
24  * DOC: Overview
25  *
26  * GPU Shared Virtual Memory (GPU SVM) layer for the Direct Rendering Manager (DRM)
27  * is a component of the DRM framework designed to manage shared virtual memory
28  * between the CPU and GPU. It enables efficient data exchange and processing
29  * for GPU-accelerated applications by allowing memory sharing and
30  * synchronization between the CPU's and GPU's virtual address spaces.
31  *
32  * Key GPU SVM Components:
33  *
34  * - Notifiers:
35  *	Used for tracking memory intervals and notifying the GPU of changes,
36  *	notifiers are sized based on a GPU SVM initialization parameter, with a
37  *	recommendation of 512M or larger. They maintain a Red-Black tree and a
38  *	list of ranges that fall within the notifier interval.  Notifiers are
39  *	tracked within a GPU SVM Red-Black tree and list and are dynamically
40  *	inserted or removed as ranges within the interval are created or
41  *	destroyed.
42  * - Ranges:
43  *	Represent memory ranges mapped in a DRM device and managed by GPU SVM.
44  *	They are sized based on an array of chunk sizes, which is a GPU SVM
45  *	initialization parameter, and the CPU address space.  Upon GPU fault,
46  *	the largest aligned chunk that fits within the faulting CPU address
47  *	space is chosen for the range size. Ranges are expected to be
48  *	dynamically allocated on GPU fault and removed on an MMU notifier UNMAP
49  *	event. As mentioned above, ranges are tracked in a notifier's Red-Black
50  *	tree.
51  *
52  * - Operations:
53  *	Define the interface for driver-specific GPU SVM operations such as
54  *	range allocation, notifier allocation, and invalidations.
55  *
56  * - Device Memory Allocations:
57  *	Embedded structure containing enough information for GPU SVM to migrate
58  *	to / from device memory.
59  *
60  * - Device Memory Operations:
61  *	Define the interface for driver-specific device memory operations, such
62  *	as releasing memory, populating pfns, and copying to / from device memory.
63  *
64  * This layer provides interfaces for allocating, mapping, migrating, and
65  * releasing memory ranges between the CPU and GPU. It handles all core memory
66  * management interactions (DMA mapping, HMM, and migration) and provides
67  * driver-specific virtual functions (vfuncs). This infrastructure is sufficient
68  * to build the expected driver components for an SVM implementation as detailed
69  * below.
70  *
71  * Expected Driver Components:
72  *
73  * - GPU page fault handler:
74  *	Used to create ranges and notifiers based on the fault address,
75  *	optionally migrate the range to device memory, and create GPU bindings.
76  *
77  * - Garbage collector:
78  *	Used to unmap and destroy GPU bindings for ranges.  Ranges are expected
79  *	to be added to the garbage collector upon a MMU_NOTIFY_UNMAP event in
80  *	notifier callback.
81  *
82  * - Notifier callback:
83  *	Used to invalidate and DMA unmap GPU bindings for ranges.
84  */
85 
86 /**
87  * DOC: Locking
88  *
89  * GPU SVM handles locking for core MM interactions, i.e., it locks/unlocks the
90  * mmap lock as needed.
91  *
92  * GPU SVM introduces a global notifier lock, which safeguards the notifier's
93  * range RB tree and list, as well as the range's DMA mappings and sequence
94  * number. GPU SVM manages all necessary locking and unlocking operations,
95  * except for rechecking that a range's pages are valid
96  * (drm_gpusvm_range_pages_valid) when the driver is committing GPU bindings.
97  * This lock corresponds to the ``driver->update`` lock mentioned in
98  * Documentation/mm/hmm.rst. Future revisions may transition from a GPU SVM
99  * global lock to a per-notifier lock if finer-grained locking is deemed
100  * necessary.
101  *
102  * In addition to the locking mentioned above, the driver should implement a
103  * lock to safeguard core GPU SVM function calls that modify state, such as
104  * drm_gpusvm_range_find_or_insert and drm_gpusvm_range_remove. This lock is
105  * denoted as 'driver_svm_lock' in code examples. Finer-grained driver-side
106  * locking should also be possible for concurrent GPU fault processing within
107  * a single GPU SVM. The 'driver_svm_lock' can be registered via
108  * drm_gpusvm_driver_set_lock to add lockdep annotations to GPU SVM.
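 *
 * A minimal sketch of the driver-side locking described above; the
 * 'driver_svm' wrapper, its mutex, and the fault-handler call site are
 * illustrative assumptions rather than part of the GPU SVM API:
 *
 * .. code-block:: c
 *
 *	struct driver_svm {
 *		struct drm_gpusvm gpusvm;
 *		struct mutex driver_svm_lock;
 *	};
 *
 *	// After a successful drm_gpusvm_init()
 *	mutex_init(&svm->driver_svm_lock);
 *	drm_gpusvm_driver_set_lock(&svm->gpusvm, &svm->driver_svm_lock);
 *
 *	// Around state-modifying GPU SVM calls, e.g. in the GPU fault handler
 *	mutex_lock(&svm->driver_svm_lock);
 *	range = drm_gpusvm_range_find_or_insert(&svm->gpusvm, fault_addr,
 *						gpuva_start, gpuva_end, &ctx);
 *	mutex_unlock(&svm->driver_svm_lock);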
109  */
110 
111 /**
112  * DOC: Migration
113  *
114  * The migration support is quite simple, allowing migration between RAM and
115  * device memory at the range granularity. For example, GPU SVM currently does
116  * not support mixing RAM and device memory pages within a range. This means
117  * that upon GPU fault, the entire range can be migrated to device memory, and
118  * upon CPU fault, the entire range is migrated to RAM. Mixed RAM and device
119  * memory storage within a range could be added in the future if required.
120  *
121  * The reasoning for only supporting range granularity is as follows: it
122  * simplifies the implementation, and range sizes are driver-defined and should
123  * be relatively small.
124  */
125 
126 /**
127  * DOC: Partial Unmapping of Ranges
128  *
129  * Partial unmapping of ranges (e.g., 1M out of 2M is unmapped by CPU resulting
130  * in MMU_NOTIFY_UNMAP event) presents several challenges, with the main one
131  * being that a subset of the range still has CPU and GPU mappings. If the
132  * backing store for the range is in device memory, a subset of the backing
133  * store has references. One option would be to split the range and device
134  * memory backing store, but the implementation for this would be quite
135  * complicated. Given that partial unmappings are rare and driver-defined range
136  * sizes are relatively small, GPU SVM does not support splitting of ranges.
137  *
138  * With no support for range splitting, upon partial unmapping of a range, the
139  * driver is expected to invalidate and destroy the entire range. If the range
140  * has device memory as its backing, the driver is also expected to migrate any
141  * remaining pages back to RAM.
142  */
143 
144 /**
145  * DOC: Examples
146  *
147  * This section provides three examples of how to build the expected driver
148  * components: the GPU page fault handler, the garbage collector, and the
149  * notifier callback.
150  *
151  * The generic code provided does not include logic for complex migration
152  * policies, optimized invalidations, fine-grained driver locking, or other
153  * potentially required driver locking (e.g., DMA-resv locks).
154  *
155  * 1) GPU page fault handler
156  *
157  * .. code-block:: c
158  *
159  *	int driver_bind_range(struct drm_gpusvm *gpusvm, struct drm_gpusvm_range *range)
160  *	{
161  *		int err = 0;
162  *
163  *		driver_alloc_and_setup_memory_for_bind(gpusvm, range);
164  *
165  *		drm_gpusvm_notifier_lock(gpusvm);
166  *		if (drm_gpusvm_range_pages_valid(range))
167  *			driver_commit_bind(gpusvm, range);
168  *		else
169  *			err = -EAGAIN;
170  *		drm_gpusvm_notifier_unlock(gpusvm);
171  *
172  *		return err;
173  *	}
174  *
175  *	int driver_gpu_fault(struct drm_gpusvm *gpusvm, unsigned long fault_addr,
176  *			     unsigned long gpuva_start, unsigned long gpuva_end)
177  *	{
178  *		struct drm_gpusvm_ctx ctx = {};
 *		struct drm_gpusvm_range *range;
 *		struct drm_gpusvm_devmem *devmem;
 *		struct mm_struct *mm = gpusvm->mm;
179  *		int err;
180  *
181  *		driver_svm_lock();
182  *	retry:
183  *		// Always process UNMAPs first so view of GPU SVM ranges is current
184  *		driver_garbage_collector(gpusvm);
185  *
186  *		range = drm_gpusvm_range_find_or_insert(gpusvm, fault_addr,
187  *							gpuva_start, gpuva_end,
188  *						        &ctx);
189  *		if (IS_ERR(range)) {
190  *			err = PTR_ERR(range);
191  *			goto unlock;
192  *		}
193  *
194  *		if (driver_migration_policy(range)) {
195  *			mmap_read_lock(mm);
196  *			devmem = driver_alloc_devmem();
197  *			err = drm_gpusvm_migrate_to_devmem(gpusvm, range,
198  *							   devmem,
199  *							   &ctx);
200  *			mmap_read_unlock(mm);
201  *			if (err)	// CPU mappings may have changed
202  *				goto retry;
203  *		}
204  *
205  *		err = drm_gpusvm_range_get_pages(gpusvm, range, &ctx);
206  *		if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {	// CPU mappings changed
207  *			if (err == -EOPNOTSUPP)
208  *				drm_gpusvm_range_evict(gpusvm, range);
209  *			goto retry;
210  *		} else if (err) {
211  *			goto unlock;
212  *		}
213  *
214  *		err = driver_bind_range(gpusvm, range);
215  *		if (err == -EAGAIN)	// CPU mappings changed
216  *			goto retry;
217  *
218  *	unlock:
219  *		driver_svm_unlock();
220  *		return err;
221  *	}
222  *
223  * 2) Garbage Collector
224  *
225  * .. code-block:: c
226  *
227  *	void __driver_garbage_collector(struct drm_gpusvm *gpusvm,
228  *					struct drm_gpusvm_range *range)
229  *	{
230  *		assert_driver_svm_locked(gpusvm);
231  *
232  *		// Partial unmap, migrate any remaining device memory pages back to RAM
233  *		if (range->flags.partial_unmap)
234  *			drm_gpusvm_range_evict(gpusvm, range);
235  *
236  *		driver_unbind_range(range);
237  *		drm_gpusvm_range_remove(gpusvm, range);
238  *	}
239  *
240  *	void driver_garbage_collector(struct drm_gpusvm *gpusvm)
241  *	{
242  *		assert_driver_svm_locked(gpusvm);
243  *
244  *		for_each_range_in_garbage_collector(gpusvm, range)
245  *			__driver_garbage_collector(gpusvm, range);
246  *	}
247  *
248  * 3) Notifier callback
249  *
250  * .. code-block:: c
251  *
252  *	void driver_invalidation(struct drm_gpusvm *gpusvm,
253  *				 struct drm_gpusvm_notifier *notifier,
254  *				 const struct mmu_notifier_range *mmu_range)
255  *	{
256  *		struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
257  *		struct drm_gpusvm_range *range = NULL;
258  *
259  *		driver_invalidate_device_pages(gpusvm, mmu_range->start, mmu_range->end);
260  *
261  *		drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
262  *					  mmu_range->end) {
263  *			drm_gpusvm_range_unmap_pages(gpusvm, range, &ctx);
264  *
265  *			if (mmu_range->event != MMU_NOTIFY_UNMAP)
266  *				continue;
267  *
268  *			drm_gpusvm_range_set_unmapped(range, mmu_range);
269  *			driver_garbage_collector_add(gpusvm, range);
270  *		}
271  *	}
272  */
273 
274 /**
275  * npages_in_range() - Calculate the number of pages in a given range
276  * @start: The start address of the range
277  * @end: The end address of the range
278  *
279  * This function calculates the number of pages in a given memory range,
280  * specified by the start and end addresses. It divides the difference
281  * between the end and start addresses by the page size (PAGE_SIZE) to
282  * determine the number of pages in the range.
283  *
284  * Return: The number of pages in the specified range.
285  */
286 static unsigned long
287 npages_in_range(unsigned long start, unsigned long end)
288 {
289 	return (end - start) >> PAGE_SHIFT;
290 }
291 
292 /**
293  * struct drm_gpusvm_zdd - GPU SVM zone device data
294  *
295  * @refcount: Reference count for the zdd
296  * @devmem_allocation: device memory allocation
297  * @device_private_page_owner: Device private pages owner
298  *
299  * This structure serves as a generic wrapper installed in
300  * page->zone_device_data. It provides infrastructure for looking up a device
301  * memory allocation upon CPU page fault and asynchronously releasing device
302  * memory once the CPU has no page references. Asynchronous release is useful
303  * because CPU page references can be dropped in IRQ contexts, while releasing
304  * device memory likely requires sleeping locks.
305  */
306 struct drm_gpusvm_zdd {
307 	struct kref refcount;
308 	struct drm_gpusvm_devmem *devmem_allocation;
309 	void *device_private_page_owner;
310 };
311 
312 /**
313  * drm_gpusvm_zdd_alloc() - Allocate a zdd structure.
314  * @device_private_page_owner: Device private pages owner
315  *
316  * This function allocates and initializes a new zdd structure. It sets up the
317  * reference count and initializes the remaining fields.
318  *
319  * Return: Pointer to the allocated zdd on success, NULL on failure.
320  */
321 static struct drm_gpusvm_zdd *
322 drm_gpusvm_zdd_alloc(void *device_private_page_owner)
323 {
324 	struct drm_gpusvm_zdd *zdd;
325 
326 	zdd = kmalloc(sizeof(*zdd), GFP_KERNEL);
327 	if (!zdd)
328 		return NULL;
329 
330 	kref_init(&zdd->refcount);
331 	zdd->devmem_allocation = NULL;
332 	zdd->device_private_page_owner = device_private_page_owner;
333 
334 	return zdd;
335 }
336 
337 /**
338  * drm_gpusvm_zdd_get() - Get a reference to a zdd structure.
339  * @zdd: Pointer to the zdd structure.
340  *
341  * This function increments the reference count of the provided zdd structure.
342  *
343  * Return: Pointer to the zdd structure.
344  */
345 static struct drm_gpusvm_zdd *drm_gpusvm_zdd_get(struct drm_gpusvm_zdd *zdd)
346 {
347 	kref_get(&zdd->refcount);
348 	return zdd;
349 }
350 
351 /**
352  * drm_gpusvm_zdd_destroy() - Destroy a zdd structure.
353  * @ref: Pointer to the reference count structure.
354  *
355  * This function releases the zdd's device memory allocation, if any, and frees the zdd.
356  */
357 static void drm_gpusvm_zdd_destroy(struct kref *ref)
358 {
359 	struct drm_gpusvm_zdd *zdd =
360 		container_of(ref, struct drm_gpusvm_zdd, refcount);
361 	struct drm_gpusvm_devmem *devmem = zdd->devmem_allocation;
362 
363 	if (devmem) {
364 		complete_all(&devmem->detached);
365 		if (devmem->ops->devmem_release)
366 			devmem->ops->devmem_release(devmem);
367 	}
368 	kfree(zdd);
369 }
370 
371 /**
372  * drm_gpusvm_zdd_put() - Put a zdd reference.
373  * @zdd: Pointer to the zdd structure.
374  *
375  * This function decrements the reference count of the provided zdd structure
376  * and destroys it if the count drops to zero.
377  */
378 static void drm_gpusvm_zdd_put(struct drm_gpusvm_zdd *zdd)
379 {
380 	kref_put(&zdd->refcount, drm_gpusvm_zdd_destroy);
381 }
382 
383 /**
384  * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
385  * @notifier: Pointer to the GPU SVM notifier structure.
386  * @start: Start address of the range
387  * @end: End address of the range
388  *
389  * Return: A pointer to the drm_gpusvm_range if found or NULL
390  */
391 struct drm_gpusvm_range *
392 drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
393 		      unsigned long end)
394 {
395 	struct interval_tree_node *itree;
396 
397 	itree = interval_tree_iter_first(&notifier->root, start, end - 1);
398 
399 	if (itree)
400 		return container_of(itree, struct drm_gpusvm_range, itree);
401 	else
402 		return NULL;
403 }
404 EXPORT_SYMBOL_GPL(drm_gpusvm_range_find);
405 
406 /**
407  * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
408  * @range__: Iterator variable for the ranges
409  * @next__: Iterator variable for the ranges temporary storage
410  * @notifier__: Pointer to the GPU SVM notifier
411  * @start__: Start address of the range
412  * @end__: End address of the range
413  *
414  * This macro is used to iterate over GPU SVM ranges in a notifier while
415  * removing ranges from it.
416  */
417 #define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__)	\
418 	for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)),	\
419 	     (next__) = __drm_gpusvm_range_next(range__);				\
420 	     (range__) && (drm_gpusvm_range_start(range__) < (end__));			\
421 	     (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
422 
423 /**
424  * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
425  * @notifier: a pointer to the current drm_gpusvm_notifier
426  *
427  * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
428  *         the current notifier is the last one or if the input notifier is
429  *         NULL.
430  */
431 static struct drm_gpusvm_notifier *
432 __drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
433 {
434 	if (notifier && !list_is_last(&notifier->entry,
435 				      &notifier->gpusvm->notifier_list))
436 		return list_next_entry(notifier, entry);
437 
438 	return NULL;
439 }
440 
441 static struct drm_gpusvm_notifier *
442 notifier_iter_first(struct rb_root_cached *root, unsigned long start,
443 		    unsigned long last)
444 {
445 	struct interval_tree_node *itree;
446 
447 	itree = interval_tree_iter_first(root, start, last);
448 
449 	if (itree)
450 		return container_of(itree, struct drm_gpusvm_notifier, itree);
451 	else
452 		return NULL;
453 }
454 
455 /**
456  * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
457  * @notifier__: Iterator variable for the notifiers
458  * @gpusvm__: Pointer to the GPU SVM structure
459  * @start__: Start address of the notifier
460  * @end__: End address of the notifier
461  *
462  * This macro is used to iterate over GPU SVM notifiers in a gpusvm.
463  */
464 #define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__)		\
465 	for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1);	\
466 	     (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__));		\
467 	     (notifier__) = __drm_gpusvm_notifier_next(notifier__))
468 
469 /**
470  * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
471  * @notifier__: Iterator variable for the notifiers
472  * @next__: Iterator variable for the notifiers temporary storage
473  * @gpusvm__: Pointer to the GPU SVM structure
474  * @start__: Start address of the notifier
475  * @end__: End address of the notifier
476  *
477  * This macro is used to iterate over GPU SVM notifiers in a gpusvm while
478  * removing notifiers from it.
479  */
480 #define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__)	\
481 	for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1),	\
482 	     (next__) = __drm_gpusvm_notifier_next(notifier__);				\
483 	     (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__));		\
484 	     (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
485 
486 /**
487  * drm_gpusvm_notifier_invalidate() - Invalidate a GPU SVM notifier.
488  * @mni: Pointer to the mmu_interval_notifier structure.
489  * @mmu_range: Pointer to the mmu_notifier_range structure.
490  * @cur_seq: Current sequence number.
491  *
492  * This function serves as a generic MMU notifier for GPU SVM. It sets the MMU
493  * notifier sequence number and calls the driver invalidate vfunc under
494  * gpusvm->notifier_lock.
495  *
496  * Return: true if the operation succeeds, false otherwise.
497  */
498 static bool
499 drm_gpusvm_notifier_invalidate(struct mmu_interval_notifier *mni,
500 			       const struct mmu_notifier_range *mmu_range,
501 			       unsigned long cur_seq)
502 {
503 	struct drm_gpusvm_notifier *notifier =
504 		container_of(mni, typeof(*notifier), notifier);
505 	struct drm_gpusvm *gpusvm = notifier->gpusvm;
506 
507 	if (!mmu_notifier_range_blockable(mmu_range))
508 		return false;
509 
510 	down_write(&gpusvm->notifier_lock);
511 	mmu_interval_set_seq(mni, cur_seq);
512 	gpusvm->ops->invalidate(gpusvm, notifier, mmu_range);
513 	up_write(&gpusvm->notifier_lock);
514 
515 	return true;
516 }
517 
518 /*
519  * drm_gpusvm_notifier_ops - MMU interval notifier operations for GPU SVM
520  */
521 static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = {
522 	.invalidate = drm_gpusvm_notifier_invalidate,
523 };
524 
525 /**
526  * drm_gpusvm_init() - Initialize the GPU SVM.
527  * @gpusvm: Pointer to the GPU SVM structure.
528  * @name: Name of the GPU SVM.
529  * @drm: Pointer to the DRM device structure.
530  * @mm: Pointer to the mm_struct for the address space.
531  * @device_private_page_owner: Device private pages owner.
532  * @mm_start: Start address of GPU SVM.
533  * @mm_range: Range of the GPU SVM.
534  * @notifier_size: Size of individual notifiers.
535  * @ops: Pointer to the operations structure for GPU SVM.
536  * @chunk_sizes: Pointer to the array of chunk sizes used in range allocation.
537  *               Entries should be powers of 2 in descending order with last
538  *               entry being SZ_4K.
539  * @num_chunks: Number of chunks.
540  *
541  * This function initializes the GPU SVM.
542  *
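 * A minimal initialization sketch; the chunk sizes, notifier size, and the
 * driver_* names below are illustrative assumptions, not API requirements:
 *
 * .. code-block:: c
 *
 *	static const unsigned long driver_chunk_sizes[] = { SZ_2M, SZ_64K, SZ_4K };
 *
 *	err = drm_gpusvm_init(&driver->gpusvm, "driver-svm", &driver->drm,
 *			      current->mm, driver_device_private_owner,
 *			      0, TASK_SIZE, SZ_512M, &driver_gpusvm_ops,
 *			      driver_chunk_sizes, ARRAY_SIZE(driver_chunk_sizes));
 *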
543  * Return: 0 on success, a negative error code on failure.
544  */
545 int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
546 		    const char *name, struct drm_device *drm,
547 		    struct mm_struct *mm, void *device_private_page_owner,
548 		    unsigned long mm_start, unsigned long mm_range,
549 		    unsigned long notifier_size,
550 		    const struct drm_gpusvm_ops *ops,
551 		    const unsigned long *chunk_sizes, int num_chunks)
552 {
553 	if (!ops->invalidate || !num_chunks)
554 		return -EINVAL;
555 
556 	gpusvm->name = name;
557 	gpusvm->drm = drm;
558 	gpusvm->mm = mm;
559 	gpusvm->device_private_page_owner = device_private_page_owner;
560 	gpusvm->mm_start = mm_start;
561 	gpusvm->mm_range = mm_range;
562 	gpusvm->notifier_size = notifier_size;
563 	gpusvm->ops = ops;
564 	gpusvm->chunk_sizes = chunk_sizes;
565 	gpusvm->num_chunks = num_chunks;
566 
567 	mmgrab(mm);
568 	gpusvm->root = RB_ROOT_CACHED;
569 	INIT_LIST_HEAD(&gpusvm->notifier_list);
570 
571 	init_rwsem(&gpusvm->notifier_lock);
572 
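	/*
	 * The notifier lock is taken from the MMU notifier invalidation path,
	 * which can run in reclaim context; prime lockdep with that dependency
	 * so allocating memory while holding the notifier lock is flagged.
	 */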
573 	fs_reclaim_acquire(GFP_KERNEL);
574 	might_lock(&gpusvm->notifier_lock);
575 	fs_reclaim_release(GFP_KERNEL);
576 
577 #ifdef CONFIG_LOCKDEP
578 	gpusvm->lock_dep_map = NULL;
579 #endif
580 
581 	return 0;
582 }
583 EXPORT_SYMBOL_GPL(drm_gpusvm_init);
584 
585 /**
586  * drm_gpusvm_notifier_find() - Find GPU SVM notifier
587  * @gpusvm: Pointer to the GPU SVM structure
588  * @fault_addr: Fault address
589  *
590  * This function finds the GPU SVM notifier associated with the fault address.
591  *
592  * Return: Pointer to the GPU SVM notifier on success, NULL otherwise.
593  */
594 static struct drm_gpusvm_notifier *
595 drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm,
596 			 unsigned long fault_addr)
597 {
598 	return notifier_iter_first(&gpusvm->root, fault_addr, fault_addr + 1);
599 }
600 
601 /**
602  * to_drm_gpusvm_notifier() - retrieve the container struct for a given rbtree node
603  * @node: a pointer to the rbtree node embedded within a drm_gpusvm_notifier struct
604  *
605  * Return: A pointer to the containing drm_gpusvm_notifier structure.
606  */
607 static struct drm_gpusvm_notifier *to_drm_gpusvm_notifier(struct rb_node *node)
608 {
609 	return container_of(node, struct drm_gpusvm_notifier, itree.rb);
610 }
611 
612 /**
613  * drm_gpusvm_notifier_insert() - Insert GPU SVM notifier
614  * @gpusvm: Pointer to the GPU SVM structure
615  * @notifier: Pointer to the GPU SVM notifier structure
616  *
617  * This function inserts the GPU SVM notifier into the GPU SVM RB tree and list.
618  */
619 static void drm_gpusvm_notifier_insert(struct drm_gpusvm *gpusvm,
620 				       struct drm_gpusvm_notifier *notifier)
621 {
622 	struct rb_node *node;
623 	struct list_head *head;
624 
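	/*
	 * Keep the notifier list in the same order as the interval tree: link
	 * the new notifier after its tree predecessor, or at the head of the
	 * list if it has no predecessor.
	 */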
625 	interval_tree_insert(&notifier->itree, &gpusvm->root);
626 
627 	node = rb_prev(&notifier->itree.rb);
628 	if (node)
629 		head = &(to_drm_gpusvm_notifier(node))->entry;
630 	else
631 		head = &gpusvm->notifier_list;
632 
633 	list_add(&notifier->entry, head);
634 }
635 
636 /**
637  * drm_gpusvm_notifier_remove() - Remove GPU SVM notifier
638  * @gpusvm: Pointer to the GPU SVM structure
639  * @notifier: Pointer to the GPU SVM notifier structure
640  *
641  * This function removes the GPU SVM notifier from the GPU SVM RB tree and list.
642  */
643 static void drm_gpusvm_notifier_remove(struct drm_gpusvm *gpusvm,
644 				       struct drm_gpusvm_notifier *notifier)
645 {
646 	interval_tree_remove(&notifier->itree, &gpusvm->root);
647 	list_del(&notifier->entry);
648 }
649 
650 /**
651  * drm_gpusvm_fini() - Finalize the GPU SVM.
652  * @gpusvm: Pointer to the GPU SVM structure.
653  *
654  * This function finalizes the GPU SVM by cleaning up any remaining ranges and
655  * notifiers, and dropping a reference to struct MM.
656  */
657 void drm_gpusvm_fini(struct drm_gpusvm *gpusvm)
658 {
659 	struct drm_gpusvm_notifier *notifier, *next;
660 
661 	drm_gpusvm_for_each_notifier_safe(notifier, next, gpusvm, 0, LONG_MAX) {
662 		struct drm_gpusvm_range *range, *__next;
663 
664 		/*
665 		 * Remove notifier first to avoid racing with any invalidation
666 		 */
667 		mmu_interval_notifier_remove(&notifier->notifier);
668 		notifier->flags.removed = true;
669 
670 		drm_gpusvm_for_each_range_safe(range, __next, notifier, 0,
671 					       LONG_MAX)
672 			drm_gpusvm_range_remove(gpusvm, range);
673 	}
674 
675 	mmdrop(gpusvm->mm);
676 	WARN_ON(!RB_EMPTY_ROOT(&gpusvm->root.rb_root));
677 }
678 EXPORT_SYMBOL_GPL(drm_gpusvm_fini);
679 
680 /**
681  * drm_gpusvm_notifier_alloc() - Allocate GPU SVM notifier
682  * @gpusvm: Pointer to the GPU SVM structure
683  * @fault_addr: Fault address
684  *
685  * This function allocates and initializes the GPU SVM notifier structure.
686  *
687  * Return: Pointer to the allocated GPU SVM notifier on success, ERR_PTR() on failure.
688  */
689 static struct drm_gpusvm_notifier *
690 drm_gpusvm_notifier_alloc(struct drm_gpusvm *gpusvm, unsigned long fault_addr)
691 {
692 	struct drm_gpusvm_notifier *notifier;
693 
694 	if (gpusvm->ops->notifier_alloc)
695 		notifier = gpusvm->ops->notifier_alloc();
696 	else
697 		notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
698 
699 	if (!notifier)
700 		return ERR_PTR(-ENOMEM);
701 
702 	notifier->gpusvm = gpusvm;
703 	notifier->itree.start = ALIGN_DOWN(fault_addr, gpusvm->notifier_size);
704 	notifier->itree.last = ALIGN(fault_addr + 1, gpusvm->notifier_size) - 1;
705 	INIT_LIST_HEAD(&notifier->entry);
706 	notifier->root = RB_ROOT_CACHED;
707 	INIT_LIST_HEAD(&notifier->range_list);
708 
709 	return notifier;
710 }
711 
712 /**
713  * drm_gpusvm_notifier_free() - Free GPU SVM notifier
714  * @gpusvm: Pointer to the GPU SVM structure
715  * @notifier: Pointer to the GPU SVM notifier structure
716  *
717  * This function frees the GPU SVM notifier structure.
718  */
719 static void drm_gpusvm_notifier_free(struct drm_gpusvm *gpusvm,
720 				     struct drm_gpusvm_notifier *notifier)
721 {
722 	WARN_ON(!RB_EMPTY_ROOT(&notifier->root.rb_root));
723 
724 	if (gpusvm->ops->notifier_free)
725 		gpusvm->ops->notifier_free(notifier);
726 	else
727 		kfree(notifier);
728 }
729 
730 /**
731  * to_drm_gpusvm_range() - retrieve the container struct for a given rbtree node
732  * @node: a pointer to the rbtree node embedded within a drm_gpusvm_range struct
733  *
734  * Return: A pointer to the containing drm_gpusvm_range structure.
735  */
736 static struct drm_gpusvm_range *to_drm_gpusvm_range(struct rb_node *node)
737 {
738 	return container_of(node, struct drm_gpusvm_range, itree.rb);
739 }
740 
741 /**
742  * drm_gpusvm_range_insert() - Insert GPU SVM range
743  * @notifier: Pointer to the GPU SVM notifier structure
744  * @range: Pointer to the GPU SVM range structure
745  *
746  * This function inserts the GPU SVM range into the notifier RB tree and list.
747  */
748 static void drm_gpusvm_range_insert(struct drm_gpusvm_notifier *notifier,
749 				    struct drm_gpusvm_range *range)
750 {
751 	struct rb_node *node;
752 	struct list_head *head;
753 
754 	drm_gpusvm_notifier_lock(notifier->gpusvm);
755 	interval_tree_insert(&range->itree, &notifier->root);
756 
757 	node = rb_prev(&range->itree.rb);
758 	if (node)
759 		head = &(to_drm_gpusvm_range(node))->entry;
760 	else
761 		head = &notifier->range_list;
762 
763 	list_add(&range->entry, head);
764 	drm_gpusvm_notifier_unlock(notifier->gpusvm);
765 }
766 
767 /**
768  * __drm_gpusvm_range_remove() - Remove GPU SVM range
769  * @notifier: Pointer to the GPU SVM notifier structure
770  * @range: Pointer to the GPU SVM range structure
771  *
772  * This function removes the GPU SVM range from the notifier RB tree and list.
773  */
774 static void __drm_gpusvm_range_remove(struct drm_gpusvm_notifier *notifier,
775 				      struct drm_gpusvm_range *range)
776 {
777 	interval_tree_remove(&range->itree, &notifier->root);
778 	list_del(&range->entry);
779 }
780 
781 /**
782  * drm_gpusvm_range_alloc() - Allocate GPU SVM range
783  * @gpusvm: Pointer to the GPU SVM structure
784  * @notifier: Pointer to the GPU SVM notifier structure
785  * @fault_addr: Fault address
786  * @chunk_size: Chunk size
787  * @migrate_devmem: Flag indicating whether to migrate device memory
788  *
789  * This function allocates and initializes the GPU SVM range structure.
790  *
791  * Return: Pointer to the allocated GPU SVM range on success, ERR_PTR() on failure.
792  */
793 static struct drm_gpusvm_range *
794 drm_gpusvm_range_alloc(struct drm_gpusvm *gpusvm,
795 		       struct drm_gpusvm_notifier *notifier,
796 		       unsigned long fault_addr, unsigned long chunk_size,
797 		       bool migrate_devmem)
798 {
799 	struct drm_gpusvm_range *range;
800 
801 	if (gpusvm->ops->range_alloc)
802 		range = gpusvm->ops->range_alloc(gpusvm);
803 	else
804 		range = kzalloc(sizeof(*range), GFP_KERNEL);
805 
806 	if (!range)
807 		return ERR_PTR(-ENOMEM);
808 
809 	kref_init(&range->refcount);
810 	range->gpusvm = gpusvm;
811 	range->notifier = notifier;
812 	range->itree.start = ALIGN_DOWN(fault_addr, chunk_size);
813 	range->itree.last = ALIGN(fault_addr + 1, chunk_size) - 1;
814 	INIT_LIST_HEAD(&range->entry);
815 	range->notifier_seq = LONG_MAX;
816 	range->flags.migrate_devmem = migrate_devmem ? 1 : 0;
817 
818 	return range;
819 }
820 
821 /**
822  * drm_gpusvm_check_pages() - Check pages
823  * @gpusvm: Pointer to the GPU SVM structure
824  * @notifier: Pointer to the GPU SVM notifier structure
825  * @start: Start address
826  * @end: End address
827  *
828  * Check if pages between start and end have been faulted in on the CPU. Used to
829  * prevent migration of pages without CPU backing store.
830  *
831  * Return: True if pages have been faulted in on the CPU, False otherwise
832  */
833 static bool drm_gpusvm_check_pages(struct drm_gpusvm *gpusvm,
834 				   struct drm_gpusvm_notifier *notifier,
835 				   unsigned long start, unsigned long end)
836 {
837 	struct hmm_range hmm_range = {
838 		.default_flags = 0,
839 		.notifier = &notifier->notifier,
840 		.start = start,
841 		.end = end,
842 		.dev_private_owner = gpusvm->device_private_page_owner,
843 	};
844 	unsigned long timeout =
845 		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
846 	unsigned long *pfns;
847 	unsigned long npages = npages_in_range(start, end);
848 	int err, i;
849 
850 	mmap_assert_locked(gpusvm->mm);
851 
852 	pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
853 	if (!pfns)
854 		return false;
855 
856 	hmm_range.notifier_seq = mmu_interval_read_begin(&notifier->notifier);
857 	hmm_range.hmm_pfns = pfns;
858 
859 	while (true) {
860 		err = hmm_range_fault(&hmm_range);
861 		if (err == -EBUSY) {
862 			if (time_after(jiffies, timeout))
863 				break;
864 
865 			hmm_range.notifier_seq =
866 				mmu_interval_read_begin(&notifier->notifier);
867 			continue;
868 		}
869 		break;
870 	}
871 	if (err)
872 		goto err_free;
873 
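	/*
	 * Walk the pfn array at map-order granularity; a single valid
	 * higher-order entry covers all pages of that mapping in one step.
	 */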
874 	for (i = 0; i < npages;) {
875 		if (!(pfns[i] & HMM_PFN_VALID)) {
876 			err = -EFAULT;
877 			goto err_free;
878 		}
879 		i += 0x1 << hmm_pfn_to_map_order(pfns[i]);
880 	}
881 
882 err_free:
883 	kvfree(pfns);
884 	return err ? false : true;
885 }
886 
887 /**
888  * drm_gpusvm_range_chunk_size() - Determine chunk size for GPU SVM range
889  * @gpusvm: Pointer to the GPU SVM structure
890  * @notifier: Pointer to the GPU SVM notifier structure
891  * @vas: Pointer to the virtual memory area structure
892  * @fault_addr: Fault address
893  * @gpuva_start: Start address of GPUVA which mirrors CPU
894  * @gpuva_end: End address of GPUVA which mirrors CPU
895  * @check_pages_threshold: Check CPU pages for present threshold
896  *
897  * This function determines the chunk size for the GPU SVM range based on the
898  * fault address, GPU SVM chunk sizes, existing GPU SVM ranges, and the virtual
899  * memory area boundaries.
900  *
901  * Return: Chunk size on success, LONG_MAX on failure.
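 * For example, with chunk sizes {SZ_2M, SZ_64K, SZ_4K} (a hypothetical driver
 * configuration) and a fault at 0x201000 in a VMA spanning 0x200000-0x240000,
 * the 2M chunk (0x200000-0x400000) exceeds the VMA, so the 64K chunk
 * (0x200000-0x210000) is selected, provided it also lies within the notifier
 * and GPUVA bounds, does not overlap an existing range, and passes the
 * check-pages threshold.
 *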
902  */
903 static unsigned long
904 drm_gpusvm_range_chunk_size(struct drm_gpusvm *gpusvm,
905 			    struct drm_gpusvm_notifier *notifier,
906 			    struct vm_area_struct *vas,
907 			    unsigned long fault_addr,
908 			    unsigned long gpuva_start,
909 			    unsigned long gpuva_end,
910 			    unsigned long check_pages_threshold)
911 {
912 	unsigned long start, end;
913 	int i = 0;
914 
915 retry:
916 	for (; i < gpusvm->num_chunks; ++i) {
917 		start = ALIGN_DOWN(fault_addr, gpusvm->chunk_sizes[i]);
918 		end = ALIGN(fault_addr + 1, gpusvm->chunk_sizes[i]);
919 
920 		if (start >= vas->vm_start && end <= vas->vm_end &&
921 		    start >= drm_gpusvm_notifier_start(notifier) &&
922 		    end <= drm_gpusvm_notifier_end(notifier) &&
923 		    start >= gpuva_start && end <= gpuva_end)
924 			break;
925 	}
926 
927 	if (i == gpusvm->num_chunks)
928 		return LONG_MAX;
929 
930 	/*
931 	 * If the allocation is more than a page, ensure it does not overlap
932 	 * with existing ranges.
933 	 */
934 	if (end - start != SZ_4K) {
935 		struct drm_gpusvm_range *range;
936 
937 		range = drm_gpusvm_range_find(notifier, start, end);
938 		if (range) {
939 			++i;
940 			goto retry;
941 		}
942 
943 		/*
944 		 * XXX: Only create range on pages CPU has faulted in. Without
945 		 * this check, or prefault, on BMG 'xe_exec_system_allocator --r
946 		 * process-many-malloc' fails. In the failure case, each process
947 		 * mallocs 16k but the CPU VMA is ~128k which results in 64k SVM
948 		 * ranges. When migrating the SVM ranges, some processes fail in
949 		 * drm_gpusvm_migrate_to_devmem with 'migrate.cpages != npages'
950 		 * and then upon drm_gpusvm_range_get_pages device pages from
951 		 * other processes are collected + faulted in which creates all
952 	 * sorts of problems. Unsure exactly how this is happening; the problem
953 	 * also goes away if 'xe_exec_system_allocator --r
954 		 * process-many-malloc' mallocs at least 64k at a time.
955 		 */
956 		if (end - start <= check_pages_threshold &&
957 		    !drm_gpusvm_check_pages(gpusvm, notifier, start, end)) {
958 			++i;
959 			goto retry;
960 		}
961 	}
962 
963 	return end - start;
964 }
965 
966 #ifdef CONFIG_LOCKDEP
967 /**
968  * drm_gpusvm_driver_lock_held() - Assert GPU SVM driver lock is held
969  * @gpusvm: Pointer to the GPU SVM structure.
970  *
971  * Ensure driver lock is held.
972  */
973 static void drm_gpusvm_driver_lock_held(struct drm_gpusvm *gpusvm)
974 {
975 	if ((gpusvm)->lock_dep_map)
976 		lockdep_assert(lock_is_held_type((gpusvm)->lock_dep_map, 0));
977 }
978 #else
979 static void drm_gpusvm_driver_lock_held(struct drm_gpusvm *gpusvm)
980 {
981 }
982 #endif
983 
984 /**
985  * drm_gpusvm_range_find_or_insert() - Find or insert GPU SVM range
986  * @gpusvm: Pointer to the GPU SVM structure
987  * @fault_addr: Fault address
988  * @gpuva_start: Start address of GPUVA which mirrors CPU
989  * @gpuva_end: End address of GPUVA which mirrors CPU
990  * @ctx: GPU SVM context
991  *
992  * This function finds or inserts a newly allocated GPU SVM range based on the
993  * fault address. Caller must hold a lock to protect range lookup and insertion.
994  *
995  * Return: Pointer to the GPU SVM range on success, ERR_PTR() on failure.
996  */
997 struct drm_gpusvm_range *
998 drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
999 				unsigned long fault_addr,
1000 				unsigned long gpuva_start,
1001 				unsigned long gpuva_end,
1002 				const struct drm_gpusvm_ctx *ctx)
1003 {
1004 	struct drm_gpusvm_notifier *notifier;
1005 	struct drm_gpusvm_range *range;
1006 	struct mm_struct *mm = gpusvm->mm;
1007 	struct vm_area_struct *vas;
1008 	bool notifier_alloc = false;
1009 	unsigned long chunk_size;
1010 	int err;
1011 	bool migrate_devmem;
1012 
1013 	drm_gpusvm_driver_lock_held(gpusvm);
1014 
1015 	if (fault_addr < gpusvm->mm_start ||
1016 	    fault_addr > gpusvm->mm_start + gpusvm->mm_range)
1017 		return ERR_PTR(-EINVAL);
1018 
1019 	if (!mmget_not_zero(mm))
1020 		return ERR_PTR(-EFAULT);
1021 
1022 	notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr);
1023 	if (!notifier) {
1024 		notifier = drm_gpusvm_notifier_alloc(gpusvm, fault_addr);
1025 		if (IS_ERR(notifier)) {
1026 			err = PTR_ERR(notifier);
1027 			goto err_mmunlock;
1028 		}
1029 		notifier_alloc = true;
1030 		err = mmu_interval_notifier_insert(&notifier->notifier,
1031 						   mm,
1032 						   drm_gpusvm_notifier_start(notifier),
1033 						   drm_gpusvm_notifier_size(notifier),
1034 						   &drm_gpusvm_notifier_ops);
1035 		if (err)
1036 			goto err_notifier;
1037 	}
1038 
1039 	mmap_read_lock(mm);
1040 
1041 	vas = vma_lookup(mm, fault_addr);
1042 	if (!vas) {
1043 		err = -ENOENT;
1044 		goto err_notifier_remove;
1045 	}
1046 
1047 	if (!ctx->read_only && !(vas->vm_flags & VM_WRITE)) {
1048 		err = -EPERM;
1049 		goto err_notifier_remove;
1050 	}
1051 
1052 	range = drm_gpusvm_range_find(notifier, fault_addr, fault_addr + 1);
1053 	if (range)
1054 		goto out_mmunlock;
1055 	/*
1056 	 * XXX: Short-circuiting migration based on migrate_vma_* current
1057 	 * limitations. If/when migrate_vma_* add more support, this logic will
1058 	 * have to change.
1059 	 */
1060 	migrate_devmem = ctx->devmem_possible &&
1061 		vma_is_anonymous(vas) && !is_vm_hugetlb_page(vas);
1062 
1063 	chunk_size = drm_gpusvm_range_chunk_size(gpusvm, notifier, vas,
1064 						 fault_addr, gpuva_start,
1065 						 gpuva_end,
1066 						 ctx->check_pages_threshold);
1067 	if (chunk_size == LONG_MAX) {
1068 		err = -EINVAL;
1069 		goto err_notifier_remove;
1070 	}
1071 
1072 	range = drm_gpusvm_range_alloc(gpusvm, notifier, fault_addr, chunk_size,
1073 				       migrate_devmem);
1074 	if (IS_ERR(range)) {
1075 		err = PTR_ERR(range);
1076 		goto err_notifier_remove;
1077 	}
1078 
1079 	drm_gpusvm_range_insert(notifier, range);
1080 	if (notifier_alloc)
1081 		drm_gpusvm_notifier_insert(gpusvm, notifier);
1082 
1083 out_mmunlock:
1084 	mmap_read_unlock(mm);
1085 	mmput(mm);
1086 
1087 	return range;
1088 
1089 err_notifier_remove:
1090 	mmap_read_unlock(mm);
1091 	if (notifier_alloc)
1092 		mmu_interval_notifier_remove(&notifier->notifier);
1093 err_notifier:
1094 	if (notifier_alloc)
1095 		drm_gpusvm_notifier_free(gpusvm, notifier);
1096 err_mmunlock:
1097 	mmput(mm);
1098 	return ERR_PTR(err);
1099 }
1100 EXPORT_SYMBOL_GPL(drm_gpusvm_range_find_or_insert);
1101 
1102 /**
1103  * __drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range (internal)
1104  * @gpusvm: Pointer to the GPU SVM structure
1105  * @range: Pointer to the GPU SVM range structure
1106  * @npages: Number of pages to unmap
1107  *
1108  * This function unmaps pages associated with a GPU SVM range. Assumes and
1109  * asserts correct locking is in place when called.
1110  */
1111 static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
1112 					   struct drm_gpusvm_range *range,
1113 					   unsigned long npages)
1114 {
1115 	unsigned long i, j;
1116 	struct drm_pagemap *dpagemap = range->dpagemap;
1117 	struct device *dev = gpusvm->drm->dev;
1118 
1119 	lockdep_assert_held(&gpusvm->notifier_lock);
1120 
1121 	if (range->flags.has_dma_mapping) {
1122 		struct drm_gpusvm_range_flags flags = {
1123 			.__flags = range->flags.__flags,
1124 		};
1125 
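		/*
		 * i advances at map-order granularity while j indexes the
		 * dma_addr entries; system-interconnect mappings are unmapped
		 * directly, device mappings via the drm_pagemap ops.
		 */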
1126 		for (i = 0, j = 0; i < npages; j++) {
1127 			struct drm_pagemap_device_addr *addr = &range->dma_addr[j];
1128 
1129 			if (addr->proto == DRM_INTERCONNECT_SYSTEM)
1130 				dma_unmap_page(dev,
1131 					       addr->addr,
1132 					       PAGE_SIZE << addr->order,
1133 					       addr->dir);
1134 			else if (dpagemap && dpagemap->ops->device_unmap)
1135 				dpagemap->ops->device_unmap(dpagemap,
1136 							    dev, *addr);
1137 			i += 1 << addr->order;
1138 		}
1139 
1140 		/* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
1141 		flags.has_devmem_pages = false;
1142 		flags.has_dma_mapping = false;
1143 		WRITE_ONCE(range->flags.__flags, flags.__flags);
1144 
1145 		range->dpagemap = NULL;
1146 	}
1147 }
1148 
1149 /**
1150  * drm_gpusvm_range_free_pages() - Free pages associated with a GPU SVM range
1151  * @gpusvm: Pointer to the GPU SVM structure
1152  * @range: Pointer to the GPU SVM range structure
1153  *
1154  * This function frees the dma address array associated with a GPU SVM range.
1155  */
1156 static void drm_gpusvm_range_free_pages(struct drm_gpusvm *gpusvm,
1157 					struct drm_gpusvm_range *range)
1158 {
1159 	lockdep_assert_held(&gpusvm->notifier_lock);
1160 
1161 	if (range->dma_addr) {
1162 		kvfree(range->dma_addr);
1163 		range->dma_addr = NULL;
1164 	}
1165 }
1166 
1167 /**
1168  * drm_gpusvm_range_remove() - Remove GPU SVM range
1169  * @gpusvm: Pointer to the GPU SVM structure
1170  * @range: Pointer to the GPU SVM range to be removed
1171  *
1172  * This function removes the specified GPU SVM range and also removes the parent
1173  * GPU SVM notifier if no more ranges remain in the notifier. The caller must
1174  * hold a lock to protect range and notifier removal.
1175  */
1176 void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
1177 			     struct drm_gpusvm_range *range)
1178 {
1179 	unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
1180 					       drm_gpusvm_range_end(range));
1181 	struct drm_gpusvm_notifier *notifier;
1182 
1183 	drm_gpusvm_driver_lock_held(gpusvm);
1184 
1185 	notifier = drm_gpusvm_notifier_find(gpusvm,
1186 					    drm_gpusvm_range_start(range));
1187 	if (WARN_ON_ONCE(!notifier))
1188 		return;
1189 
1190 	drm_gpusvm_notifier_lock(gpusvm);
1191 	__drm_gpusvm_range_unmap_pages(gpusvm, range, npages);
1192 	drm_gpusvm_range_free_pages(gpusvm, range);
1193 	__drm_gpusvm_range_remove(notifier, range);
1194 	drm_gpusvm_notifier_unlock(gpusvm);
1195 
1196 	drm_gpusvm_range_put(range);
1197 
1198 	if (RB_EMPTY_ROOT(&notifier->root.rb_root)) {
1199 		if (!notifier->flags.removed)
1200 			mmu_interval_notifier_remove(&notifier->notifier);
1201 		drm_gpusvm_notifier_remove(gpusvm, notifier);
1202 		drm_gpusvm_notifier_free(gpusvm, notifier);
1203 	}
1204 }
1205 EXPORT_SYMBOL_GPL(drm_gpusvm_range_remove);
1206 
1207 /**
1208  * drm_gpusvm_range_get() - Get a reference to GPU SVM range
1209  * @range: Pointer to the GPU SVM range
1210  *
1211  * This function increments the reference count of the specified GPU SVM range.
1212  *
1213  * Return: Pointer to the GPU SVM range.
1214  */
1215 struct drm_gpusvm_range *
1216 drm_gpusvm_range_get(struct drm_gpusvm_range *range)
1217 {
1218 	kref_get(&range->refcount);
1219 
1220 	return range;
1221 }
1222 EXPORT_SYMBOL_GPL(drm_gpusvm_range_get);
1223 
1224 /**
1225  * drm_gpusvm_range_destroy() - Destroy GPU SVM range
1226  * @refcount: Pointer to the reference counter embedded in the GPU SVM range
1227  *
1228  * This function destroys the specified GPU SVM range when its reference count
1229  * reaches zero. If a custom range-free function is provided, it is invoked to
1230  * free the range; otherwise, the range is deallocated using kfree().
1231  */
1232 static void drm_gpusvm_range_destroy(struct kref *refcount)
1233 {
1234 	struct drm_gpusvm_range *range =
1235 		container_of(refcount, struct drm_gpusvm_range, refcount);
1236 	struct drm_gpusvm *gpusvm = range->gpusvm;
1237 
1238 	if (gpusvm->ops->range_free)
1239 		gpusvm->ops->range_free(range);
1240 	else
1241 		kfree(range);
1242 }
1243 
1244 /**
1245  * drm_gpusvm_range_put() - Put a reference to GPU SVM range
1246  * @range: Pointer to the GPU SVM range
1247  *
1248  * This function decrements the reference count of the specified GPU SVM range
1249  * and frees it when the count reaches zero.
1250  */
1251 void drm_gpusvm_range_put(struct drm_gpusvm_range *range)
1252 {
1253 	kref_put(&range->refcount, drm_gpusvm_range_destroy);
1254 }
1255 EXPORT_SYMBOL_GPL(drm_gpusvm_range_put);
1256 
1257 /**
1258  * drm_gpusvm_range_pages_valid() - GPU SVM range pages valid
1259  * @gpusvm: Pointer to the GPU SVM structure
1260  * @range: Pointer to the GPU SVM range structure
1261  *
1262  * This function determines if a GPU SVM range's pages are valid. Expected to be
1263  * called holding gpusvm->notifier_lock and as the last step before committing a
1264  * GPU binding. This is akin to a notifier seqno check in the HMM documentation
1265  * but due to wider notifiers (i.e., notifiers which span multiple ranges) this
1266  * function is required for finer grained checking (i.e., per range) if pages
1267  * are valid.
1268  *
1269  * Return: True if GPU SVM range has valid pages, False otherwise
1270  */
1271 bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm,
1272 				  struct drm_gpusvm_range *range)
1273 {
1274 	lockdep_assert_held(&gpusvm->notifier_lock);
1275 
1276 	return range->flags.has_devmem_pages || range->flags.has_dma_mapping;
1277 }
1278 EXPORT_SYMBOL_GPL(drm_gpusvm_range_pages_valid);
1279 
1280 /**
1281  * drm_gpusvm_range_pages_valid_unlocked() - GPU SVM range pages valid unlocked
1282  * @gpusvm: Pointer to the GPU SVM structure
1283  * @range: Pointer to the GPU SVM range structure
1284  *
1285  * This function determines if a GPU SVM range's pages are valid. Expected to be
1286  * called without holding gpusvm->notifier_lock.
1287  *
1288  * Return: True if GPU SVM range has valid pages, False otherwise
1289  */
1290 static bool
1291 drm_gpusvm_range_pages_valid_unlocked(struct drm_gpusvm *gpusvm,
1292 				      struct drm_gpusvm_range *range)
1293 {
1294 	bool pages_valid;
1295 
1296 	if (!range->dma_addr)
1297 		return false;
1298 
1299 	drm_gpusvm_notifier_lock(gpusvm);
1300 	pages_valid = drm_gpusvm_range_pages_valid(gpusvm, range);
1301 	if (!pages_valid)
1302 		drm_gpusvm_range_free_pages(gpusvm, range);
1303 	drm_gpusvm_notifier_unlock(gpusvm);
1304 
1305 	return pages_valid;
1306 }
1307 
1308 /**
1309  * drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
1310  * @gpusvm: Pointer to the GPU SVM structure
1311  * @range: Pointer to the GPU SVM range structure
1312  * @ctx: GPU SVM context
1313  *
1314  * This function gets pages for a GPU SVM range and ensures they are mapped for
1315  * DMA access.
1316  *
1317  * Return: 0 on success, negative error code on failure.
1318  */
1319 int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
1320 			       struct drm_gpusvm_range *range,
1321 			       const struct drm_gpusvm_ctx *ctx)
1322 {
1323 	struct mmu_interval_notifier *notifier = &range->notifier->notifier;
1324 	struct hmm_range hmm_range = {
1325 		.default_flags = HMM_PFN_REQ_FAULT | (ctx->read_only ? 0 :
1326 			HMM_PFN_REQ_WRITE),
1327 		.notifier = notifier,
1328 		.start = drm_gpusvm_range_start(range),
1329 		.end = drm_gpusvm_range_end(range),
1330 		.dev_private_owner = gpusvm->device_private_page_owner,
1331 	};
1332 	struct mm_struct *mm = gpusvm->mm;
1333 	struct drm_gpusvm_zdd *zdd;
1334 	unsigned long timeout =
1335 		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
1336 	unsigned long i, j;
1337 	unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
1338 					       drm_gpusvm_range_end(range));
1339 	unsigned long num_dma_mapped;
1340 	unsigned int order = 0;
1341 	unsigned long *pfns;
1342 	int err = 0;
1343 	struct dev_pagemap *pagemap = NULL;
1344 	struct drm_pagemap *dpagemap = NULL;
1345 	struct drm_gpusvm_range_flags flags;
1346 
1347 retry:
1348 	hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
1349 	if (drm_gpusvm_range_pages_valid_unlocked(gpusvm, range))
1350 		goto set_seqno;
1351 
1352 	pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
1353 	if (!pfns)
1354 		return -ENOMEM;
1355 
1356 	if (!mmget_not_zero(mm)) {
1357 		err = -EFAULT;
1358 		goto err_free;
1359 	}
1360 
1361 	hmm_range.hmm_pfns = pfns;
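	/*
	 * Fault in CPU pages; -EBUSY means the interval was invalidated while
	 * faulting, so restart with a fresh notifier sequence until timeout.
	 */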
1362 	while (true) {
1363 		mmap_read_lock(mm);
1364 		err = hmm_range_fault(&hmm_range);
1365 		mmap_read_unlock(mm);
1366 
1367 		if (err == -EBUSY) {
1368 			if (time_after(jiffies, timeout))
1369 				break;
1370 
1371 			hmm_range.notifier_seq =
1372 				mmu_interval_read_begin(notifier);
1373 			continue;
1374 		}
1375 		break;
1376 	}
1377 	mmput(mm);
1378 	if (err)
1379 		goto err_free;
1380 
1381 map_pages:
1382 	/*
1383 	 * Perform all dma mappings under the notifier lock to not
1384 	 * access freed pages. A notifier will either block on
1385 	 * the notifier lock or unmap dma.
1386 	 */
1387 	drm_gpusvm_notifier_lock(gpusvm);
1388 
1389 	flags.__flags = range->flags.__flags;
1390 	if (flags.unmapped) {
1391 		drm_gpusvm_notifier_unlock(gpusvm);
1392 		err = -EFAULT;
1393 		goto err_free;
1394 	}
1395 
1396 	if (mmu_interval_read_retry(notifier, hmm_range.notifier_seq)) {
1397 		drm_gpusvm_notifier_unlock(gpusvm);
1398 		kvfree(pfns);
1399 		goto retry;
1400 	}
1401 
1402 	if (!range->dma_addr) {
1403 		/* Unlock and restart mapping to allocate memory. */
1404 		drm_gpusvm_notifier_unlock(gpusvm);
1405 		range->dma_addr = kvmalloc_array(npages,
1406 						 sizeof(*range->dma_addr),
1407 						 GFP_KERNEL);
1408 		if (!range->dma_addr) {
1409 			err = -ENOMEM;
1410 			goto err_free;
1411 		}
1412 		goto map_pages;
1413 	}
1414 
1415 	zdd = NULL;
1416 	num_dma_mapped = 0;
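	/*
	 * i walks the pfn array at map-order granularity while j indexes the
	 * dma_addr entries. Mixing device pages from different zdds/pagemaps,
	 * or device and system pages, within one range is rejected with
	 * -EOPNOTSUPP.
	 */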
1417 	for (i = 0, j = 0; i < npages; ++j) {
1418 		struct page *page = hmm_pfn_to_page(pfns[i]);
1419 
1420 		order = hmm_pfn_to_map_order(pfns[i]);
1421 		if (is_device_private_page(page) ||
1422 		    is_device_coherent_page(page)) {
1423 			if (zdd != page->zone_device_data && i > 0) {
1424 				err = -EOPNOTSUPP;
1425 				goto err_unmap;
1426 			}
1427 			zdd = page->zone_device_data;
1428 			if (pagemap != page_pgmap(page)) {
1429 				if (i > 0) {
1430 					err = -EOPNOTSUPP;
1431 					goto err_unmap;
1432 				}
1433 
1434 				pagemap = page_pgmap(page);
1435 				dpagemap = zdd->devmem_allocation->dpagemap;
1436 				if (drm_WARN_ON(gpusvm->drm, !dpagemap)) {
1437 					/*
1438 					 * Raced. This is not supposed to happen
1439 					 * since hmm_range_fault() should've migrated
1440 					 * this page to system.
1441 					 */
1442 					err = -EAGAIN;
1443 					goto err_unmap;
1444 				}
1445 			}
1446 			range->dma_addr[j] =
1447 				dpagemap->ops->device_map(dpagemap,
1448 							  gpusvm->drm->dev,
1449 							  page, order,
1450 							  DMA_BIDIRECTIONAL);
1451 			if (dma_mapping_error(gpusvm->drm->dev,
1452 					      range->dma_addr[j].addr)) {
1453 				err = -EFAULT;
1454 				goto err_unmap;
1455 			}
1456 		} else {
1457 			dma_addr_t addr;
1458 
1459 			if (is_zone_device_page(page) || zdd) {
1460 				err = -EOPNOTSUPP;
1461 				goto err_unmap;
1462 			}
1463 
1464 			if (ctx->devmem_only) {
1465 				err = -EFAULT;
1466 				goto err_unmap;
1467 			}
1468 
1469 			addr = dma_map_page(gpusvm->drm->dev,
1470 					    page, 0,
1471 					    PAGE_SIZE << order,
1472 					    DMA_BIDIRECTIONAL);
1473 			if (dma_mapping_error(gpusvm->drm->dev, addr)) {
1474 				err = -EFAULT;
1475 				goto err_unmap;
1476 			}
1477 
1478 			range->dma_addr[j] = drm_pagemap_device_addr_encode
1479 				(addr, DRM_INTERCONNECT_SYSTEM, order,
1480 				 DMA_BIDIRECTIONAL);
1481 		}
1482 		i += 1 << order;
1483 		num_dma_mapped = i;
1484 		flags.has_dma_mapping = true;
1485 	}
1486 
1487 	if (zdd) {
1488 		flags.has_devmem_pages = true;
1489 		range->dpagemap = dpagemap;
1490 	}
1491 
1492 	/* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
1493 	WRITE_ONCE(range->flags.__flags, flags.__flags);
1494 
1495 	drm_gpusvm_notifier_unlock(gpusvm);
1496 	kvfree(pfns);
1497 set_seqno:
1498 	range->notifier_seq = hmm_range.notifier_seq;
1499 
1500 	return 0;
1501 
1502 err_unmap:
1503 	__drm_gpusvm_range_unmap_pages(gpusvm, range, num_dma_mapped);
1504 	drm_gpusvm_notifier_unlock(gpusvm);
1505 err_free:
1506 	kvfree(pfns);
1507 	if (err == -EAGAIN)
1508 		goto retry;
1509 	return err;
1510 }
1511 EXPORT_SYMBOL_GPL(drm_gpusvm_range_get_pages);
1512 
1513 /**
1514  * drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range
1515  * @gpusvm: Pointer to the GPU SVM structure
1516  * @range: Pointer to the GPU SVM range structure
1517  * @ctx: GPU SVM context
1518  *
1519  * This function unmaps pages associated with a GPU SVM range. If @in_notifier
1520  * is set, it is assumed that gpusvm->notifier_lock is held in write mode; if it
1521  * is clear, it acquires gpusvm->notifier_lock in read mode. Must be called on
1522  * each GPU SVM range attached to notifier in gpusvm->ops->invalidate for IOMMU
1523  * security model.
1524  */
1525 void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
1526 				  struct drm_gpusvm_range *range,
1527 				  const struct drm_gpusvm_ctx *ctx)
1528 {
1529 	unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
1530 					       drm_gpusvm_range_end(range));
1531 
1532 	if (ctx->in_notifier)
1533 		lockdep_assert_held_write(&gpusvm->notifier_lock);
1534 	else
1535 		drm_gpusvm_notifier_lock(gpusvm);
1536 
1537 	__drm_gpusvm_range_unmap_pages(gpusvm, range, npages);
1538 
1539 	if (!ctx->in_notifier)
1540 		drm_gpusvm_notifier_unlock(gpusvm);
1541 }
1542 EXPORT_SYMBOL_GPL(drm_gpusvm_range_unmap_pages);
1543 
1544 /**
1545  * drm_gpusvm_migration_unlock_put_page() - Put a migration page
1546  * @page: Pointer to the page to put
1547  *
1548  * This function unlocks and puts a page.
1549  */
1550 static void drm_gpusvm_migration_unlock_put_page(struct page *page)
1551 {
1552 	unlock_page(page);
1553 	put_page(page);
1554 }
1555 
1556 /**
1557  * drm_gpusvm_migration_unlock_put_pages() - Put migration pages
1558  * @npages: Number of pages
1559  * @migrate_pfn: Array of migrate page frame numbers
1560  *
1561  * This function unlocks and puts an array of pages.
1562  */
1563 static void drm_gpusvm_migration_unlock_put_pages(unsigned long npages,
1564 						  unsigned long *migrate_pfn)
1565 {
1566 	unsigned long i;
1567 
1568 	for (i = 0; i < npages; ++i) {
1569 		struct page *page;
1570 
1571 		if (!migrate_pfn[i])
1572 			continue;
1573 
1574 		page = migrate_pfn_to_page(migrate_pfn[i]);
1575 		drm_gpusvm_migration_unlock_put_page(page);
1576 		migrate_pfn[i] = 0;
1577 	}
1578 }
1579 
1580 /**
1581  * drm_gpusvm_get_devmem_page() - Get a reference to a device memory page
1582  * @page: Pointer to the page
1583  * @zdd: Pointer to the GPU SVM zone device data
1584  *
1585  * This function associates the given page with the specified GPU SVM zone
1586  * device data and initializes it for zone device usage.
1587  */
1588 static void drm_gpusvm_get_devmem_page(struct page *page,
1589 				       struct drm_gpusvm_zdd *zdd)
1590 {
1591 	page->zone_device_data = drm_gpusvm_zdd_get(zdd);
1592 	zone_device_page_init(page);
1593 }
1594 
1595 /**
1596  * drm_gpusvm_migrate_map_pages() - Map migration pages for GPU SVM migration
1597  * @dev: The device for which the pages are being mapped
1598  * @dma_addr: Array to store DMA addresses corresponding to mapped pages
1599  * @migrate_pfn: Array of migrate page frame numbers to map
1600  * @npages: Number of pages to map
1601  * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
1602  *
1603  * This function maps pages of memory for migration usage in GPU SVM. It
1604  * iterates over each page frame number provided in @migrate_pfn, maps the
1605  * corresponding page, and stores the DMA address in the provided @dma_addr
1606  * array.
1607  *
1608  * Return: 0 on success, -EFAULT if an error occurs during mapping.
1609  */
1610 static int drm_gpusvm_migrate_map_pages(struct device *dev,
1611 					dma_addr_t *dma_addr,
1612 					unsigned long *migrate_pfn,
1613 					unsigned long npages,
1614 					enum dma_data_direction dir)
1615 {
1616 	unsigned long i;
1617 
1618 	for (i = 0; i < npages; ++i) {
1619 		struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
1620 
1621 		if (!page)
1622 			continue;
1623 
1624 		if (WARN_ON_ONCE(is_zone_device_page(page)))
1625 			return -EFAULT;
1626 
1627 		dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
1628 		if (dma_mapping_error(dev, dma_addr[i]))
1629 			return -EFAULT;
1630 	}
1631 
1632 	return 0;
1633 }
1634 
1635 /**
1636  * drm_gpusvm_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
1637  * @dev: The device for which the pages were mapped
1638  * @dma_addr: Array of DMA addresses corresponding to mapped pages
1639  * @npages: Number of pages to unmap
1640  * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
1641  *
1642  * This function unmaps previously mapped pages of memory for GPU Shared Virtual
1643  * Memory (SVM). It iterates over each DMA address provided in @dma_addr, skips
1644  * entries that are unset or for which mapping failed, and unmaps the rest.
1645  */
1646 static void drm_gpusvm_migrate_unmap_pages(struct device *dev,
1647 					   dma_addr_t *dma_addr,
1648 					   unsigned long npages,
1649 					   enum dma_data_direction dir)
1650 {
1651 	unsigned long i;
1652 
1653 	for (i = 0; i < npages; ++i) {
1654 		if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
1655 			continue;
1656 
1657 		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
1658 	}
1659 }
1660 
1661 /**
1662  * drm_gpusvm_migrate_to_devmem() - Migrate GPU SVM range to device memory
1663  * @gpusvm: Pointer to the GPU SVM structure
1664  * @range: Pointer to the GPU SVM range structure
1665  * @devmem_allocation: Pointer to the device memory allocation. The caller
1666  *                     should hold a reference to the device memory allocation,
1667  *                     which should be dropped via ops->devmem_release or upon
1668  *                     the failure of this function.
1669  * @ctx: GPU SVM context
1670  *
1671  * This function migrates the specified GPU SVM range to device memory. It
1672  * performs the necessary setup and invokes the driver-specific operations for
1673  * migration to device memory. Upon successful return, @devmem_allocation can
1674  * safely reference @range until ops->devmem_release is called, which only
1675  * happens after this function has returned successfully. Expected to be called
1676  * while holding the mmap lock in read mode.
1677  *
1678  * Return: 0 on success, negative error code on failure.
1679  */
1680 int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
1681 				 struct drm_gpusvm_range *range,
1682 				 struct drm_gpusvm_devmem *devmem_allocation,
1683 				 const struct drm_gpusvm_ctx *ctx)
1684 {
1685 	const struct drm_gpusvm_devmem_ops *ops = devmem_allocation->ops;
1686 	unsigned long start = drm_gpusvm_range_start(range),
1687 		      end = drm_gpusvm_range_end(range);
1688 	struct migrate_vma migrate = {
1689 		.start		= start,
1690 		.end		= end,
1691 		.pgmap_owner	= gpusvm->device_private_page_owner,
1692 		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
1693 	};
1694 	struct mm_struct *mm = gpusvm->mm;
1695 	unsigned long i, npages = npages_in_range(start, end);
1696 	struct vm_area_struct *vas;
1697 	struct drm_gpusvm_zdd *zdd = NULL;
1698 	struct page **pages;
1699 	dma_addr_t *dma_addr;
1700 	void *buf;
1701 	int err;
1702 
1703 	mmap_assert_locked(gpusvm->mm);
1704 
1705 	if (!range->flags.migrate_devmem)
1706 		return -EINVAL;
1707 
1708 	if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
1709 	    !ops->copy_to_ram)
1710 		return -EOPNOTSUPP;
1711 
1712 	vas = vma_lookup(mm, start);
1713 	if (!vas) {
1714 		err = -ENOENT;
1715 		goto err_out;
1716 	}
1717 
1718 	if (end > vas->vm_end || start < vas->vm_start) {
1719 		err = -EINVAL;
1720 		goto err_out;
1721 	}
1722 
1723 	if (!vma_is_anonymous(vas)) {
1724 		err = -EBUSY;
1725 		goto err_out;
1726 	}
1727 
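	/*
	 * Single backing buffer laid out as four consecutive arrays of
	 * npages entries each: migrate.src, migrate.dst, dma_addr, pages.
	 */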
1728 	buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
1729 		       sizeof(*pages), GFP_KERNEL);
1730 	if (!buf) {
1731 		err = -ENOMEM;
1732 		goto err_out;
1733 	}
1734 	dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
1735 	pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
1736 
1737 	zdd = drm_gpusvm_zdd_alloc(gpusvm->device_private_page_owner);
1738 	if (!zdd) {
1739 		err = -ENOMEM;
1740 		goto err_free;
1741 	}
1742 
1743 	migrate.vma = vas;
1744 	migrate.src = buf;
1745 	migrate.dst = migrate.src + npages;
1746 
1747 	err = migrate_vma_setup(&migrate);
1748 	if (err)
1749 		goto err_free;
1750 
1751 	if (!migrate.cpages) {
1752 		err = -EFAULT;
1753 		goto err_free;
1754 	}
1755 
1756 	if (migrate.cpages != npages) {
1757 		err = -EBUSY;
1758 		goto err_finalize;
1759 	}
1760 
1761 	err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
1762 	if (err)
1763 		goto err_finalize;
1764 
1765 	err = drm_gpusvm_migrate_map_pages(devmem_allocation->dev, dma_addr,
1766 					   migrate.src, npages, DMA_TO_DEVICE);
1767 	if (err)
1768 		goto err_finalize;
1769 
1770 	for (i = 0; i < npages; ++i) {
1771 		struct page *page = pfn_to_page(migrate.dst[i]);
1772 
1773 		pages[i] = page;
1774 		migrate.dst[i] = migrate_pfn(migrate.dst[i]);
1775 		drm_gpusvm_get_devmem_page(page, zdd);
1776 	}
1777 
1778 	err = ops->copy_to_devmem(pages, dma_addr, npages);
1779 	if (err)
1780 		goto err_finalize;
1781 
1782 	/* Upon success bind devmem allocation to range and zdd */
1783 	devmem_allocation->timeslice_expiration = get_jiffies_64() +
1784 		msecs_to_jiffies(ctx->timeslice_ms);
1785 	zdd->devmem_allocation = devmem_allocation;	/* Owns ref */
1786 
1787 err_finalize:
1788 	if (err)
1789 		drm_gpusvm_migration_unlock_put_pages(npages, migrate.dst);
1790 	migrate_vma_pages(&migrate);
1791 	migrate_vma_finalize(&migrate);
1792 	drm_gpusvm_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
1793 				       DMA_TO_DEVICE);
1794 err_free:
1795 	if (zdd)
1796 		drm_gpusvm_zdd_put(zdd);
1797 	kvfree(buf);
1798 err_out:
1799 	return err;
1800 }
1801 EXPORT_SYMBOL_GPL(drm_gpusvm_migrate_to_devmem);
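
/*
 * Example (illustrative sketch): migration to device memory is typically
 * driven from the driver's GPU page fault handler once a range exists. The
 * my_driver_*() helpers, the drv->gpusvm member, and the embedding of
 * struct drm_gpusvm_devmem as alloc->base are hypothetical.
 *
 *	int my_driver_migrate_range_to_vram(struct my_driver *drv,
 *					    struct drm_gpusvm_range *range,
 *					    const struct drm_gpusvm_ctx *ctx)
 *	{
 *		struct my_driver_devmem *alloc;
 *		int err;
 *
 *		alloc = my_driver_devmem_alloc(drv,
 *					       drm_gpusvm_range_end(range) -
 *					       drm_gpusvm_range_start(range));
 *		if (IS_ERR(alloc))
 *			return PTR_ERR(alloc);
 *
 *		err = drm_gpusvm_migrate_to_devmem(&drv->gpusvm, range,
 *						   &alloc->base, ctx);
 *		if (err)
 *			my_driver_devmem_put(alloc);
 *		return err;
 *	}
 *
 * On failure the caller remains responsible for dropping its reference to the
 * device memory allocation; on success the reference is owned by the zone
 * device data and is eventually released through ops->devmem_release.
 */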
1802 
1803 /**
1804  * drm_gpusvm_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
1805  * @vas: Pointer to the VM area structure, can be NULL
1806  * @fault_page: Fault page
1807  * @npages: Number of pages to populate
1808  * @mpages: Pointer to the number of pages populated (output)
1809  * @src_mpfn: Source array of migrate PFNs
1810  * @mpfn: Array of migrate PFNs to populate
1811  * @addr: Start address for PFN allocation
1812  *
1813  * This function populates the RAM migrate page frame numbers (PFNs) for the
1814  * specified VM area structure. It allocates and locks pages in the VM area for
1815  * RAM usage. If @vas is non-NULL, alloc_page_vma() is used for allocation;
1816  * otherwise, alloc_page() is used.
1817  *
1818  * Return: 0 on success, negative error code on failure.
1819  */
1820 static int drm_gpusvm_migrate_populate_ram_pfn(struct vm_area_struct *vas,
1821 					       struct page *fault_page,
1822 					       unsigned long npages,
1823 					       unsigned long *mpages,
1824 					       unsigned long *src_mpfn,
1825 					       unsigned long *mpfn,
1826 					       unsigned long addr)
1827 {
1828 	unsigned long i;
1829 
1830 	for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
1831 		struct page *page, *src_page;
1832 
1833 		if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
1834 			continue;
1835 
1836 		src_page = migrate_pfn_to_page(src_mpfn[i]);
1837 		if (!src_page)
1838 			continue;
1839 
1840 		if (fault_page) {
1841 			if (src_page->zone_device_data !=
1842 			    fault_page->zone_device_data)
1843 				continue;
1844 		}
1845 
1846 		if (vas)
1847 			page = alloc_page_vma(GFP_HIGHUSER, vas, addr);
1848 		else
1849 			page = alloc_page(GFP_HIGHUSER);
1850 
1851 		if (!page)
1852 			goto free_pages;
1853 
1854 		mpfn[i] = migrate_pfn(page_to_pfn(page));
1855 	}
1856 
1857 	for (i = 0; i < npages; ++i) {
1858 		struct page *page = migrate_pfn_to_page(mpfn[i]);
1859 
1860 		if (!page)
1861 			continue;
1862 
1863 		WARN_ON_ONCE(!trylock_page(page));
1864 		++*mpages;
1865 	}
1866 
1867 	return 0;
1868 
1869 free_pages:
1870 	for (i = 0; i < npages; ++i) {
1871 		struct page *page = migrate_pfn_to_page(mpfn[i]);
1872 
1873 		if (!page)
1874 			continue;
1875 
1876 		put_page(page);
1877 		mpfn[i] = 0;
1878 	}
1879 	return -ENOMEM;
1880 }
1881 
1882 /**
1883  * drm_gpusvm_evict_to_ram() - Evict GPU SVM range to RAM
1884  * @devmem_allocation: Pointer to the device memory allocation
1885  *
1886  * Similar to __drm_gpusvm_migrate_to_ram() but does not require the mmap lock;
1887  * migration is done via the migrate_device_* functions.
1888  *
1889  * Return: 0 on success, negative error code on failure.
1890  */
1891 int drm_gpusvm_evict_to_ram(struct drm_gpusvm_devmem *devmem_allocation)
1892 {
1893 	const struct drm_gpusvm_devmem_ops *ops = devmem_allocation->ops;
1894 	unsigned long npages, mpages = 0;
1895 	struct page **pages;
1896 	unsigned long *src, *dst;
1897 	dma_addr_t *dma_addr;
1898 	void *buf;
1899 	int i, err = 0;
1900 	unsigned int retry_count = 2;
1901 
1902 	npages = devmem_allocation->size >> PAGE_SHIFT;
1903 
1904 retry:
1905 	if (!mmget_not_zero(devmem_allocation->mm))
1906 		return -EFAULT;
1907 
1908 	buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) +
1909 		       sizeof(*pages), GFP_KERNEL);
1910 	if (!buf) {
1911 		err = -ENOMEM;
1912 		goto err_out;
1913 	}
1914 	src = buf;
1915 	dst = buf + (sizeof(*src) * npages);
1916 	dma_addr = buf + (2 * sizeof(*src) * npages);
1917 	pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages;
1918 
1919 	err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
1920 	if (err)
1921 		goto err_free;
1922 
1923 	err = migrate_device_pfns(src, npages);
1924 	if (err)
1925 		goto err_free;
1926 
1927 	err = drm_gpusvm_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages,
1928 						  src, dst, 0);
1929 	if (err || !mpages)
1930 		goto err_finalize;
1931 
1932 	err = drm_gpusvm_migrate_map_pages(devmem_allocation->dev, dma_addr,
1933 					   dst, npages, DMA_FROM_DEVICE);
1934 	if (err)
1935 		goto err_finalize;
1936 
1937 	for (i = 0; i < npages; ++i)
1938 		pages[i] = migrate_pfn_to_page(src[i]);
1939 
1940 	err = ops->copy_to_ram(pages, dma_addr, npages);
1941 	if (err)
1942 		goto err_finalize;
1943 
1944 err_finalize:
1945 	if (err)
1946 		drm_gpusvm_migration_unlock_put_pages(npages, dst);
1947 	migrate_device_pages(src, dst, npages);
1948 	migrate_device_finalize(src, dst, npages);
1949 	drm_gpusvm_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
1950 				       DMA_FROM_DEVICE);
1951 err_free:
1952 	kvfree(buf);
1953 err_out:
1954 	mmput_async(devmem_allocation->mm);
1955 
1956 	if (completion_done(&devmem_allocation->detached))
1957 		return 0;
1958 
1959 	if (retry_count--) {
1960 		cond_resched();
1961 		goto retry;
1962 	}
1963 
1964 	return err ?: -EBUSY;
1965 }
1966 EXPORT_SYMBOL_GPL(drm_gpusvm_evict_to_ram);
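
/*
 * Example (illustrative sketch): eviction to RAM is normally initiated by the
 * driver when it wants to reclaim the device memory backing an allocation,
 * independent of any CPU fault. struct my_driver_devmem, which embeds
 * struct drm_gpusvm_devmem as its base member, and the my_driver_*() helper
 * are hypothetical.
 *
 *	int my_driver_reclaim_vram(struct my_driver_devmem *alloc)
 *	{
 *		int err;
 *
 *		err = drm_gpusvm_evict_to_ram(&alloc->base);
 *		if (err)
 *			return err;
 *
 *		my_driver_release_vram_backing(alloc);
 *		return 0;
 *	}
 *
 * A zero return means the allocation has been detached: all of its device
 * pages have been migrated back to RAM. -EBUSY is returned if that could not
 * be achieved within the internal retry limit.
 */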
1967 
1968 /**
1969  * __drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (internal)
1970  * @vas: Pointer to the VM area structure
1971  * @device_private_page_owner: Device private pages owner
1972  * @page: Pointer to the page for fault handling (can be NULL)
1973  * @fault_addr: Fault address
1974  * @size: Size of migration
1975  *
1976  * This internal function performs the migration of the specified GPU SVM range
1977  * to RAM. It sets up the migration, populates + dma maps RAM PFNs, and
1978  * invokes the driver-specific operations for migration to RAM.
1979  *
1980  * Return: 0 on success, negative error code on failure.
1981  */
1982 static int __drm_gpusvm_migrate_to_ram(struct vm_area_struct *vas,
1983 				       void *device_private_page_owner,
1984 				       struct page *page,
1985 				       unsigned long fault_addr,
1986 				       unsigned long size)
1987 {
1988 	struct migrate_vma migrate = {
1989 		.vma		= vas,
1990 		.pgmap_owner	= device_private_page_owner,
1991 		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
1992 			MIGRATE_VMA_SELECT_DEVICE_COHERENT,
1993 		.fault_page	= page,
1994 	};
1995 	struct drm_gpusvm_zdd *zdd;
1996 	const struct drm_gpusvm_devmem_ops *ops;
1997 	struct device *dev = NULL;
1998 	unsigned long npages, mpages = 0;
1999 	struct page **pages;
2000 	dma_addr_t *dma_addr;
2001 	unsigned long start, end;
2002 	void *buf;
2003 	int i, err = 0;
2004 
2005 	if (page) {
2006 		zdd = page->zone_device_data;
2007 		if (time_before64(get_jiffies_64(),
2008 				  zdd->devmem_allocation->timeslice_expiration))
2009 			return 0;
2010 	}
2011 
2012 	start = ALIGN_DOWN(fault_addr, size);
2013 	end = ALIGN(fault_addr + 1, size);
2014 
2015 	/* Corner case where the VMA has been partially unmapped */
2016 	if (start < vas->vm_start)
2017 		start = vas->vm_start;
2018 	if (end > vas->vm_end)
2019 		end = vas->vm_end;
2020 
2021 	migrate.start = start;
2022 	migrate.end = end;
2023 	npages = npages_in_range(start, end);
2024 
2025 	buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
2026 		       sizeof(*pages), GFP_KERNEL);
2027 	if (!buf) {
2028 		err = -ENOMEM;
2029 		goto err_out;
2030 	}
2031 	dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
2032 	pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
2033 
2034 	migrate.vma = vas;
2035 	migrate.src = buf;
2036 	migrate.dst = migrate.src + npages;
2037 
2038 	err = migrate_vma_setup(&migrate);
2039 	if (err)
2040 		goto err_free;
2041 
2042 	/* Raced with another CPU fault, nothing to do */
2043 	if (!migrate.cpages)
2044 		goto err_free;
2045 
2046 	if (!page) {
2047 		for (i = 0; i < npages; ++i) {
2048 			if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE))
2049 				continue;
2050 
2051 			page = migrate_pfn_to_page(migrate.src[i]);
2052 			break;
2053 		}
2054 
2055 		if (!page)
2056 			goto err_finalize;
2057 	}
2058 	zdd = page->zone_device_data;
2059 	ops = zdd->devmem_allocation->ops;
2060 	dev = zdd->devmem_allocation->dev;
2061 
2062 	err = drm_gpusvm_migrate_populate_ram_pfn(vas, page, npages, &mpages,
2063 						  migrate.src, migrate.dst,
2064 						  start);
2065 	if (err)
2066 		goto err_finalize;
2067 
2068 	err = drm_gpusvm_migrate_map_pages(dev, dma_addr, migrate.dst, npages,
2069 					   DMA_FROM_DEVICE);
2070 	if (err)
2071 		goto err_finalize;
2072 
2073 	for (i = 0; i < npages; ++i)
2074 		pages[i] = migrate_pfn_to_page(migrate.src[i]);
2075 
2076 	err = ops->copy_to_ram(pages, dma_addr, npages);
2077 	if (err)
2078 		goto err_finalize;
2079 
2080 err_finalize:
2081 	if (err)
2082 		drm_gpusvm_migration_unlock_put_pages(npages, migrate.dst);
2083 	migrate_vma_pages(&migrate);
2084 	migrate_vma_finalize(&migrate);
2085 	if (dev)
2086 		drm_gpusvm_migrate_unmap_pages(dev, dma_addr, npages,
2087 					       DMA_FROM_DEVICE);
2088 err_free:
2089 	kvfree(buf);
2090 err_out:
2091 
2092 	return err;
2093 }
2094 
2095 /**
2096  * drm_gpusvm_range_evict() - Evict GPU SVM range
2097  * @gpusvm: Pointer to the GPU SVM structure
2098  * @range: Pointer to the GPU SVM range to be evicted
2099  *
2100  * This function evicts the specified GPU SVM range. It will not evict coherent pages.
2101  *
2102  * Return: 0 on success, a negative error code on failure.
2103  */
2104 int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm,
2105 			   struct drm_gpusvm_range *range)
2106 {
2107 	struct mmu_interval_notifier *notifier = &range->notifier->notifier;
2108 	struct hmm_range hmm_range = {
2109 		.default_flags = HMM_PFN_REQ_FAULT,
2110 		.notifier = notifier,
2111 		.start = drm_gpusvm_range_start(range),
2112 		.end = drm_gpusvm_range_end(range),
2113 		.dev_private_owner = NULL,
2114 	};
2115 	unsigned long timeout =
2116 		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
2117 	unsigned long *pfns;
2118 	unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
2119 					       drm_gpusvm_range_end(range));
2120 	int err = 0;
2121 	struct mm_struct *mm = gpusvm->mm;
2122 
2123 	if (!mmget_not_zero(mm))
2124 		return -EFAULT;
2125 
2126 	pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
2127 	if (!pfns)
2128 		return -ENOMEM;
2129 
2130 	hmm_range.hmm_pfns = pfns;
2131 	while (!time_after(jiffies, timeout)) {
2132 		hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
2133 		if (time_after(jiffies, timeout)) {
2134 			err = -ETIME;
2135 			break;
2136 		}
2137 
2138 		mmap_read_lock(mm);
2139 		err = hmm_range_fault(&hmm_range);
2140 		mmap_read_unlock(mm);
2141 		if (err != -EBUSY)
2142 			break;
2143 	}
2144 
2145 	kvfree(pfns);
2146 	mmput(mm);
2147 
2148 	return err;
2149 }
2150 EXPORT_SYMBOL_GPL(drm_gpusvm_range_evict);
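
/*
 * Example (illustrative sketch): drm_gpusvm_range_evict() works by faulting
 * the whole range through hmm_range_fault() with a NULL dev_private_owner,
 * which causes any device-private pages in the range to be migrated back to
 * RAM via the pagemap's migrate_to_ram() handler. A driver might call it when
 * a range must be CPU-resident before some operation; the wrapper below and
 * the drv->gpusvm member are hypothetical.
 *
 *	int my_driver_force_range_to_ram(struct my_driver *drv,
 *					 struct drm_gpusvm_range *range)
 *	{
 *		return drm_gpusvm_range_evict(&drv->gpusvm, range);
 *	}
 */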
2151 
2152 /**
2153  * drm_gpusvm_page_free() - Put GPU SVM zone device data associated with a page
2154  * @page: Pointer to the page
2155  *
2156  * This function is a callback used to put the GPU SVM zone device data
2157  * associated with a page when it is being released.
2158  */
2159 static void drm_gpusvm_page_free(struct page *page)
2160 {
2161 	drm_gpusvm_zdd_put(page->zone_device_data);
2162 }
2163 
2164 /**
2165  * drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (page fault handler)
2166  * @vmf: Pointer to the fault information structure
2167  *
2168  * This function is a page fault handler used to migrate a GPU SVM range to RAM.
2169  * It retrieves the GPU SVM range information from the faulting page and invokes
2170  * the internal migration function to migrate the range back to RAM.
2171  *
2172  * Return: VM_FAULT_SIGBUS on failure, 0 on success.
2173  */
2174 static vm_fault_t drm_gpusvm_migrate_to_ram(struct vm_fault *vmf)
2175 {
2176 	struct drm_gpusvm_zdd *zdd = vmf->page->zone_device_data;
2177 	int err;
2178 
2179 	err = __drm_gpusvm_migrate_to_ram(vmf->vma,
2180 					  zdd->device_private_page_owner,
2181 					  vmf->page, vmf->address,
2182 					  zdd->devmem_allocation->size);
2183 
2184 	return err ? VM_FAULT_SIGBUS : 0;
2185 }
2186 
2187 /*
2188  * drm_gpusvm_pagemap_ops - Device page map operations for GPU SVM
2189  */
2190 static const struct dev_pagemap_ops drm_gpusvm_pagemap_ops = {
2191 	.page_free = drm_gpusvm_page_free,
2192 	.migrate_to_ram = drm_gpusvm_migrate_to_ram,
2193 };
2194 
2195 /**
2196  * drm_gpusvm_pagemap_ops_get() - Retrieve GPU SVM device page map operations
2197  *
2198  * Return: Pointer to the GPU SVM device page map operations structure.
2199  */
2200 const struct dev_pagemap_ops *drm_gpusvm_pagemap_ops_get(void)
2201 {
2202 	return &drm_gpusvm_pagemap_ops;
2203 }
2204 EXPORT_SYMBOL_GPL(drm_gpusvm_pagemap_ops_get);
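
/*
 * Example (illustrative sketch): a driver plugs these operations into the
 * struct dev_pagemap it registers for its device memory region. The
 * my_region, res_start, res_end, and my_owner names are placeholders.
 *
 *	struct dev_pagemap *pagemap = &my_region->pagemap;
 *	void *addr;
 *
 *	pagemap->type = MEMORY_DEVICE_PRIVATE;
 *	pagemap->range.start = res_start;
 *	pagemap->range.end = res_end;
 *	pagemap->nr_range = 1;
 *	pagemap->ops = drm_gpusvm_pagemap_ops_get();
 *	pagemap->owner = my_owner;
 *	addr = devm_memremap_pages(dev, pagemap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * The owner field should match the device_private_page_owner used by the GPU
 * SVM instance so that CPU faults on these pages are routed back to the
 * handlers above.
 */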
2205 
2206 /**
2207  * drm_gpusvm_has_mapping() - Check if GPU SVM has mapping for the given address range
2208  * @gpusvm: Pointer to the GPU SVM structure.
2209  * @start: Start address
2210  * @end: End address
2211  *
2212  * Return: True if GPU SVM has mapping, False otherwise
2213  */
2214 bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
2215 			    unsigned long end)
2216 {
2217 	struct drm_gpusvm_notifier *notifier;
2218 
2219 	drm_gpusvm_for_each_notifier(notifier, gpusvm, start, end) {
2220 		struct drm_gpusvm_range *range = NULL;
2221 
2222 		drm_gpusvm_for_each_range(range, notifier, start, end)
2223 			return true;
2224 	}
2225 
2226 	return false;
2227 }
2228 EXPORT_SYMBOL_GPL(drm_gpusvm_has_mapping);
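
/*
 * Example (illustrative sketch): a driver unbind or teardown path might use
 * this check to refuse an operation while GPU SVM ranges still exist within a
 * CPU address interval; the drv->gpusvm member is hypothetical.
 *
 *	if (drm_gpusvm_has_mapping(&drv->gpusvm, start, end))
 *		return -EBUSY;
 */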
2229 
2230 /**
2231  * drm_gpusvm_range_set_unmapped() - Mark a GPU SVM range as unmapped
2232  * @range: Pointer to the GPU SVM range structure.
2233  * @mmu_range: Pointer to the MMU notifier range structure.
2234  *
2235  * This function marks a GPU SVM range as unmapped and sets the partial_unmap
2236  * flag if the MMU notifier range only partially covers the GPU SVM range.
2237  */
2238 void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
2239 				   const struct mmu_notifier_range *mmu_range)
2240 {
2241 	lockdep_assert_held_write(&range->gpusvm->notifier_lock);
2242 
2243 	range->flags.unmapped = true;
2244 	if (drm_gpusvm_range_start(range) < mmu_range->start ||
2245 	    drm_gpusvm_range_end(range) > mmu_range->end)
2246 		range->flags.partial_unmap = true;
2247 }
2248 EXPORT_SYMBOL_GPL(drm_gpusvm_range_set_unmapped);
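
/*
 * Example (illustrative sketch): in the driver's MMU notifier invalidation
 * callback, an MMU_NOTIFY_UNMAP event is typically handled by marking the
 * affected ranges unmapped and deferring destruction of the GPU bindings.
 * The my_driver_queue_garbage_collection() helper is hypothetical.
 *
 *	if (mmu_range->event == MMU_NOTIFY_UNMAP) {
 *		drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
 *					  mmu_range->end) {
 *			drm_gpusvm_range_set_unmapped(range, mmu_range);
 *			my_driver_queue_garbage_collection(drv, range);
 *		}
 *	}
 *
 * drm_gpusvm_range_set_unmapped() asserts that the notifier lock is held for
 * write, which is expected to already be the case in the invalidation
 * callback.
 */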
2249 
2250 /**
2251  * drm_gpusvm_devmem_init() - Initialize a GPU SVM device memory allocation
2252  * @devmem_allocation: The struct drm_gpusvm_devmem to initialize
2253  * @dev: Pointer to the device structure to which the device memory allocation belongs
2254  * @mm: Pointer to the mm_struct for the address space
2255  * @ops: Pointer to the operations structure for GPU SVM device memory
2256  * @dpagemap: The struct drm_pagemap we're allocating from.
2257  * @size: Size of device memory allocation
2258  */
2259 void drm_gpusvm_devmem_init(struct drm_gpusvm_devmem *devmem_allocation,
2260 			    struct device *dev, struct mm_struct *mm,
2261 			    const struct drm_gpusvm_devmem_ops *ops,
2262 			    struct drm_pagemap *dpagemap, size_t size)
2263 {
2264 	init_completion(&devmem_allocation->detached);
2265 	devmem_allocation->dev = dev;
2266 	devmem_allocation->mm = mm;
2267 	devmem_allocation->ops = ops;
2268 	devmem_allocation->dpagemap = dpagemap;
2269 	devmem_allocation->size = size;
2270 }
2271 EXPORT_SYMBOL_GPL(drm_gpusvm_devmem_init);
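
/*
 * Example (illustrative sketch): drivers embed struct drm_gpusvm_devmem in
 * their own device memory allocation object and initialize it once the
 * backing store exists. The my_driver_*() names, the struct my_driver_devmem
 * layout, and the drv->drm.dev member are hypothetical.
 *
 *	struct my_driver_devmem {
 *		struct drm_gpusvm_devmem base;
 *		struct my_driver_bo *bo;
 *	};
 *
 *	static const struct drm_gpusvm_devmem_ops my_driver_devmem_ops = {
 *		.devmem_release = my_driver_devmem_release,
 *		.populate_devmem_pfn = my_driver_populate_devmem_pfn,
 *		.copy_to_devmem = my_driver_copy_to_devmem,
 *		.copy_to_ram = my_driver_copy_to_ram,
 *	};
 *
 *	drm_gpusvm_devmem_init(&alloc->base, drv->drm.dev, mm,
 *			       &my_driver_devmem_ops, dpagemap, size);
 */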
2272 
2273 MODULE_DESCRIPTION("DRM GPUSVM");
2274 MODULE_LICENSE("GPL");
2275