xref: /linux/mm/migrate_device.c (revision e2683c8868d03382da7e1ce8453b543a043066d1)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Device Memory Migration functionality.
4  *
5  * Originally written by Jérôme Glisse.
6  */
7 #include <linux/export.h>
8 #include <linux/memremap.h>
9 #include <linux/migrate.h>
10 #include <linux/mm.h>
11 #include <linux/mm_inline.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/oom.h>
14 #include <linux/pagewalk.h>
15 #include <linux/rmap.h>
16 #include <linux/leafops.h>
17 #include <linux/pgalloc.h>
18 #include <asm/tlbflush.h>
19 #include "internal.h"
20 
21 static int migrate_vma_collect_skip(unsigned long start,
22 				    unsigned long end,
23 				    struct mm_walk *walk)
24 {
25 	struct migrate_vma *migrate = walk->private;
26 	unsigned long addr;
27 
28 	for (addr = start; addr < end; addr += PAGE_SIZE) {
29 		migrate->dst[migrate->npages] = 0;
30 		migrate->src[migrate->npages++] = 0;
31 	}
32 
33 	return 0;
34 }
35 
36 static int migrate_vma_collect_hole(unsigned long start,
37 				    unsigned long end,
38 				    __always_unused int depth,
39 				    struct mm_walk *walk)
40 {
41 	struct migrate_vma *migrate = walk->private;
42 	unsigned long addr;
43 
44 	/* Only allow populating anonymous memory. */
45 	if (!vma_is_anonymous(walk->vma))
46 		return migrate_vma_collect_skip(start, end, walk);
47 
48 	if (thp_migration_supported() &&
49 		(migrate->flags & MIGRATE_VMA_SELECT_COMPOUND) &&
50 		(IS_ALIGNED(start, HPAGE_PMD_SIZE) &&
51 		 IS_ALIGNED(end, HPAGE_PMD_SIZE))) {
52 		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE |
53 						MIGRATE_PFN_COMPOUND;
54 		migrate->dst[migrate->npages] = 0;
55 		migrate->npages++;
56 		migrate->cpages++;
57 
58 		/*
59 		 * Collect the remaining entries as holes, in case we
60 		 * need to split later
61 		 */
62 		return migrate_vma_collect_skip(start + PAGE_SIZE, end, walk);
63 	}
64 
65 	for (addr = start; addr < end; addr += PAGE_SIZE) {
66 		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
67 		migrate->dst[migrate->npages] = 0;
68 		migrate->npages++;
69 		migrate->cpages++;
70 	}
71 
72 	return 0;
73 }
74 
75 /**
76  * migrate_vma_split_folio() - Helper function to split a THP folio
77  * @folio: the folio to split
78  * @fault_page: struct page associated with the fault if any
79  *
80  * Returns 0 on success
81  */
82 static int migrate_vma_split_folio(struct folio *folio,
83 				   struct page *fault_page)
84 {
85 	int ret;
86 	struct folio *fault_folio = fault_page ? page_folio(fault_page) : NULL;
87 	struct folio *new_fault_folio = NULL;
88 
89 	if (folio != fault_folio) {
90 		folio_get(folio);
91 		folio_lock(folio);
92 	}
93 
94 	ret = split_folio(folio);
95 	if (ret) {
96 		if (folio != fault_folio) {
97 			folio_unlock(folio);
98 			folio_put(folio);
99 		}
100 		return ret;
101 	}
102 
103 	new_fault_folio = fault_page ? page_folio(fault_page) : NULL;
104 
105 	/*
106 	 * Ensure the lock is held on the correct
107 	 * folio after the split
108 	 */
109 	if (!new_fault_folio) {
110 		folio_unlock(folio);
111 		folio_put(folio);
112 	} else if (folio != new_fault_folio) {
113 		if (new_fault_folio != fault_folio) {
114 			folio_get(new_fault_folio);
115 			folio_lock(new_fault_folio);
116 		}
117 		folio_unlock(folio);
118 		folio_put(folio);
119 	}
120 
121 	return 0;
122 }
123 
124 /**
 * migrate_vma_collect_huge_pmd - collect THP pages without splitting the
125  * folio for device private pages.
126  * @pmdp: pointer to pmd entry
127  * @start: start address of the range for migration
128  * @end: end address of the range for migration
129  * @walk: mm_walk callback structure
130  * @fault_folio: folio associated with the fault if any
131  *
132  * Collect the huge pmd entry at @pmdp for migration and set the
133  * MIGRATE_PFN_COMPOUND flag in the migrate src entry to indicate that
134  * migration will occur at HPAGE_PMD granularity
135  */
136 static int migrate_vma_collect_huge_pmd(pmd_t *pmdp, unsigned long start,
137 					unsigned long end, struct mm_walk *walk,
138 					struct folio *fault_folio)
139 {
140 	struct mm_struct *mm = walk->mm;
141 	struct folio *folio;
142 	struct migrate_vma *migrate = walk->private;
143 	spinlock_t *ptl;
144 	int ret;
145 	unsigned long write = 0;
146 
147 	ptl = pmd_lock(mm, pmdp);
148 	if (pmd_none(*pmdp)) {
149 		spin_unlock(ptl);
150 		return migrate_vma_collect_hole(start, end, -1, walk);
151 	}
152 
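	/*
	 * Classify the pmd: a present transparent huge pmd backed by system
	 * memory, a non-present device private entry, or anything else, in
	 * which case -EAGAIN makes the caller re-examine the pmd.
	 */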
153 	if (pmd_trans_huge(*pmdp)) {
154 		if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
155 			spin_unlock(ptl);
156 			return migrate_vma_collect_skip(start, end, walk);
157 		}
158 
159 		folio = pmd_folio(*pmdp);
160 		if (is_huge_zero_folio(folio)) {
161 			spin_unlock(ptl);
162 			return migrate_vma_collect_hole(start, end, -1, walk);
163 		}
164 		if (pmd_write(*pmdp))
165 			write = MIGRATE_PFN_WRITE;
166 	} else if (!pmd_present(*pmdp)) {
167 		const softleaf_t entry = softleaf_from_pmd(*pmdp);
168 
169 		folio = softleaf_to_folio(entry);
170 
171 		if (!softleaf_is_device_private(entry) ||
172 			!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
173 			(folio->pgmap->owner != migrate->pgmap_owner)) {
174 			spin_unlock(ptl);
175 			return migrate_vma_collect_skip(start, end, walk);
176 		}
177 
178 		if (softleaf_is_device_private_write(entry))
179 			write = MIGRATE_PFN_WRITE;
180 	} else {
181 		spin_unlock(ptl);
182 		return -EAGAIN;
183 	}
184 
185 	folio_get(folio);
186 	if (folio != fault_folio && unlikely(!folio_trylock(folio))) {
187 		spin_unlock(ptl);
188 		folio_put(folio);
189 		return migrate_vma_collect_skip(start, end, walk);
190 	}
191 
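	/*
	 * If THP migration is supported, the caller asked for compound pages
	 * and the range covers a fully aligned PMD, install a PMD migration
	 * entry and record a single compound src entry; otherwise fall back
	 * to splitting the folio below.
	 */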
192 	if (thp_migration_supported() &&
193 		(migrate->flags & MIGRATE_VMA_SELECT_COMPOUND) &&
194 		(IS_ALIGNED(start, HPAGE_PMD_SIZE) &&
195 		 IS_ALIGNED(end, HPAGE_PMD_SIZE))) {
196 
197 		struct page_vma_mapped_walk pvmw = {
198 			.ptl = ptl,
199 			.address = start,
200 			.pmd = pmdp,
201 			.vma = walk->vma,
202 		};
203 
204 		unsigned long pfn = page_to_pfn(folio_page(folio, 0));
205 
206 		migrate->src[migrate->npages] = migrate_pfn(pfn) | write
207 						| MIGRATE_PFN_MIGRATE
208 						| MIGRATE_PFN_COMPOUND;
209 		migrate->dst[migrate->npages++] = 0;
210 		migrate->cpages++;
211 		ret = set_pmd_migration_entry(&pvmw, folio_page(folio, 0));
212 		if (ret) {
213 			migrate->npages--;
214 			migrate->cpages--;
215 			migrate->src[migrate->npages] = 0;
216 			migrate->dst[migrate->npages] = 0;
217 			goto fallback;
218 		}
219 		migrate_vma_collect_skip(start + PAGE_SIZE, end, walk);
220 		spin_unlock(ptl);
221 		return 0;
222 	}
223 
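	/*
	 * Fall back to migrating at base-page granularity: split a large
	 * folio here and return -ENOENT so the caller continues with its
	 * per-pte walk of this range.
	 */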
224 fallback:
225 	spin_unlock(ptl);
226 	if (!folio_test_large(folio))
227 		goto done;
228 	ret = split_folio(folio);
229 	if (fault_folio != folio)
230 		folio_unlock(folio);
231 	folio_put(folio);
232 	if (ret)
233 		return migrate_vma_collect_skip(start, end, walk);
234 	if (pmd_none(pmdp_get_lockless(pmdp)))
235 		return migrate_vma_collect_hole(start, end, -1, walk);
236 
237 done:
238 	return -ENOENT;
239 }
240 
241 static int migrate_vma_collect_pmd(pmd_t *pmdp,
242 				   unsigned long start,
243 				   unsigned long end,
244 				   struct mm_walk *walk)
245 {
246 	struct migrate_vma *migrate = walk->private;
247 	struct vm_area_struct *vma = walk->vma;
248 	struct mm_struct *mm = vma->vm_mm;
249 	unsigned long addr = start, unmapped = 0;
250 	spinlock_t *ptl;
251 	struct folio *fault_folio = migrate->fault_page ?
252 		page_folio(migrate->fault_page) : NULL;
253 	pte_t *ptep;
254 
255 again:
256 	if (pmd_trans_huge(*pmdp) || !pmd_present(*pmdp)) {
257 		int ret = migrate_vma_collect_huge_pmd(pmdp, start, end, walk, fault_folio);
258 
259 		if (ret == -EAGAIN)
260 			goto again;
261 		if (ret == 0)
262 			return 0;
263 	}
264 
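	/*
	 * The huge pmd path did not handle this range, so walk the ptes one
	 * by one. pte_offset_map_lock() fails if the pmd no longer points to
	 * a page table, in which case we start over.
	 */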
265 	ptep = pte_offset_map_lock(mm, pmdp, start, &ptl);
266 	if (!ptep)
267 		goto again;
268 	lazy_mmu_mode_enable();
269 	ptep += (addr - start) / PAGE_SIZE;
270 
271 	for (; addr < end; addr += PAGE_SIZE, ptep++) {
272 		struct dev_pagemap *pgmap;
273 		unsigned long mpfn = 0, pfn;
274 		struct folio *folio;
275 		struct page *page;
276 		softleaf_t entry;
277 		pte_t pte;
278 
279 		pte = ptep_get(ptep);
280 
281 		if (pte_none(pte)) {
282 			if (vma_is_anonymous(vma)) {
283 				mpfn = MIGRATE_PFN_MIGRATE;
284 				migrate->cpages++;
285 			}
286 			goto next;
287 		}
288 
289 		if (!pte_present(pte)) {
290 			/*
291 			 * Only care about unaddressable device page special
292 			 * page table entries. Other special swap entries are not
293 			 * migratable, and we ignore regular swapped pages.
294 			 */
295 			entry = softleaf_from_pte(pte);
296 			if (!softleaf_is_device_private(entry))
297 				goto next;
298 
299 			page = softleaf_to_page(entry);
300 			pgmap = page_pgmap(page);
301 			if (!(migrate->flags &
302 				MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
303 			    pgmap->owner != migrate->pgmap_owner)
304 				goto next;
305 
306 			folio = page_folio(page);
307 			if (folio_test_large(folio)) {
308 				int ret;
309 
310 				lazy_mmu_mode_disable();
311 				pte_unmap_unlock(ptep, ptl);
312 				ret = migrate_vma_split_folio(folio,
313 							  migrate->fault_page);
314 
315 				if (ret) {
316 					if (unmapped)
317 						flush_tlb_range(walk->vma, start, end);
318 
319 					return migrate_vma_collect_skip(addr, end, walk);
320 				}
321 
322 				goto again;
323 			}
324 
325 			mpfn = migrate_pfn(page_to_pfn(page)) |
326 					MIGRATE_PFN_MIGRATE;
327 			if (softleaf_is_device_private_write(entry))
328 				mpfn |= MIGRATE_PFN_WRITE;
329 		} else {
330 			pfn = pte_pfn(pte);
331 			if (is_zero_pfn(pfn) &&
332 			    (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
333 				mpfn = MIGRATE_PFN_MIGRATE;
334 				migrate->cpages++;
335 				goto next;
336 			}
337 			page = vm_normal_page(migrate->vma, addr, pte);
338 			if (page && !is_zone_device_page(page) &&
339 			    !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
340 				goto next;
341 			} else if (page && is_device_coherent_page(page)) {
342 				pgmap = page_pgmap(page);
343 
344 				if (!(migrate->flags &
345 					MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
346 					pgmap->owner != migrate->pgmap_owner)
347 					goto next;
348 			}
349 			folio = page ? page_folio(page) : NULL;
350 			if (folio && folio_test_large(folio)) {
351 				int ret;
352 
353 				lazy_mmu_mode_disable();
354 				pte_unmap_unlock(ptep, ptl);
355 				ret = migrate_vma_split_folio(folio,
356 							  migrate->fault_page);
357 
358 				if (ret) {
359 					if (unmapped)
360 						flush_tlb_range(walk->vma, start, end);
361 
362 					return migrate_vma_collect_skip(addr, end, walk);
363 				}
364 
365 				goto again;
366 			}
367 			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
368 			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
369 		}
370 
371 		if (!page || !page->mapping) {
372 			mpfn = 0;
373 			goto next;
374 		}
375 
376 		/*
377 		 * By getting a reference on the folio we pin it and that blocks
378 		 * any kind of migration. Side effect is that it "freezes" the
379 		 * pte.
380 		 *
381 		 * We drop this reference after isolating the folio from the lru
382 		 * for non-device folios (device folios are not on the lru and thus
383 		 * can't be dropped from it).
384 		 */
385 		folio = page_folio(page);
386 		folio_get(folio);
387 
388 		/*
389 		 * We rely on folio_trylock() to avoid deadlock between
390 		 * concurrent migrations where each is waiting on the other's
391 		 * folio lock. If we can't immediately lock the folio we fail this
392 		 * migration as it is only best effort anyway.
393 		 *
394 		 * If we can lock the folio it's safe to set up a migration entry
395 		 * now. In the common case where the folio is mapped once in a
396 		 * single process setting up the migration entry now is an
397 		 * optimisation to avoid walking the rmap later with
398 		 * try_to_migrate().
399 		 */
400 		if (fault_folio == folio || folio_trylock(folio)) {
401 			bool anon_exclusive;
402 			pte_t swp_pte;
403 
404 			flush_cache_page(vma, addr, pte_pfn(pte));
405 			anon_exclusive = folio_test_anon(folio) &&
406 					  PageAnonExclusive(page);
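			/*
			 * Anon exclusive pages must have the exclusive flag
			 * cleared via folio_try_share_anon_rmap_pte() before a
			 * migration entry can be installed. If that fails,
			 * restore the pte and skip this page.
			 */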
407 			if (anon_exclusive) {
408 				pte = ptep_clear_flush(vma, addr, ptep);
409 
410 				if (folio_try_share_anon_rmap_pte(folio, page)) {
411 					set_pte_at(mm, addr, ptep, pte);
412 					if (fault_folio != folio)
413 						folio_unlock(folio);
414 					folio_put(folio);
415 					mpfn = 0;
416 					goto next;
417 				}
418 			} else {
419 				pte = ptep_get_and_clear(mm, addr, ptep);
420 			}
421 
422 			migrate->cpages++;
423 
424 			/* Set the dirty flag on the folio now the pte is gone. */
425 			if (pte_dirty(pte))
426 				folio_mark_dirty(folio);
427 
428 			/* Setup special migration page table entry */
429 			if (mpfn & MIGRATE_PFN_WRITE)
430 				entry = make_writable_migration_entry(
431 							page_to_pfn(page));
432 			else if (anon_exclusive)
433 				entry = make_readable_exclusive_migration_entry(
434 							page_to_pfn(page));
435 			else
436 				entry = make_readable_migration_entry(
437 							page_to_pfn(page));
438 			if (pte_present(pte)) {
439 				if (pte_young(pte))
440 					entry = make_migration_entry_young(entry);
441 				if (pte_dirty(pte))
442 					entry = make_migration_entry_dirty(entry);
443 			}
444 			swp_pte = swp_entry_to_pte(entry);
445 			if (pte_present(pte)) {
446 				if (pte_soft_dirty(pte))
447 					swp_pte = pte_swp_mksoft_dirty(swp_pte);
448 				if (pte_uffd_wp(pte))
449 					swp_pte = pte_swp_mkuffd_wp(swp_pte);
450 			} else {
451 				if (pte_swp_soft_dirty(pte))
452 					swp_pte = pte_swp_mksoft_dirty(swp_pte);
453 				if (pte_swp_uffd_wp(pte))
454 					swp_pte = pte_swp_mkuffd_wp(swp_pte);
455 			}
456 			set_pte_at(mm, addr, ptep, swp_pte);
457 
458 			/*
459 			 * This is like regular unmap: we remove the rmap and
460 			 * drop the folio refcount. The folio won't be freed, as
461 			 * we took a reference just above.
462 			 */
463 			folio_remove_rmap_pte(folio, page, vma);
464 			folio_put(folio);
465 
466 			if (pte_present(pte))
467 				unmapped++;
468 		} else {
469 			folio_put(folio);
470 			mpfn = 0;
471 		}
472 
473 next:
474 		migrate->dst[migrate->npages] = 0;
475 		migrate->src[migrate->npages++] = mpfn;
476 	}
477 
478 	/* Only flush the TLB if we actually modified any entries */
479 	if (unmapped)
480 		flush_tlb_range(walk->vma, start, end);
481 
482 	lazy_mmu_mode_disable();
483 	pte_unmap_unlock(ptep - 1, ptl);
484 
485 	return 0;
486 }
487 
488 static const struct mm_walk_ops migrate_vma_walk_ops = {
489 	.pmd_entry		= migrate_vma_collect_pmd,
490 	.pte_hole		= migrate_vma_collect_hole,
491 	.walk_lock		= PGWALK_RDLOCK,
492 };
493 
494 /*
495  * migrate_vma_collect() - collect pages over a range of virtual addresses
496  * @migrate: migrate struct containing all migration information
497  *
498  * This will walk the CPU page table. For each virtual address backed by a
499  * valid page, it updates the src array and takes a reference on the page, in
500  * order to pin the page until we lock it and unmap it.
501  */
502 static void migrate_vma_collect(struct migrate_vma *migrate)
503 {
504 	struct mmu_notifier_range range;
505 
506 	/*
507 	 * Note that the pgmap_owner is passed to the mmu notifier callback so
508 	 * that the registered device driver can skip invalidating device
509 	 * private page mappings that won't be migrated.
510 	 */
511 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
512 		migrate->vma->vm_mm, migrate->start, migrate->end,
513 		migrate->pgmap_owner);
514 	mmu_notifier_invalidate_range_start(&range);
515 
516 	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
517 			&migrate_vma_walk_ops, migrate);
518 
519 	mmu_notifier_invalidate_range_end(&range);
520 	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
521 }
522 
523 /*
524  * migrate_vma_check_page() - check if page is pinned or not
525  * @page: struct page to check
526  *
527  * Pinned pages cannot be migrated. This is the same test as in
528  * folio_migrate_mapping(), except that here we allow migration of a
529  * ZONE_DEVICE page.
530  */
531 static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
532 {
533 	struct folio *folio = page_folio(page);
534 
535 	/*
536 	 * One extra ref because caller holds an extra reference, either from
537 	 * folio_isolate_lru() for a regular folio, or migrate_vma_collect() for
538 	 * a device folio.
539 	 */
540 	int extra = 1 + (page == fault_page);
541 
542 	/* Pages from ZONE_DEVICE have one extra reference */
543 	if (folio_is_zone_device(folio))
544 		extra++;
545 
546 	/* For file-backed pages */
547 	if (folio_mapping(folio))
548 		extra += 1 + folio_has_private(folio);
549 
550 	if ((folio_ref_count(folio) - extra) > folio_mapcount(folio))
551 		return false;
552 
553 	return true;
554 }
555 
556 /*
557  * Unmaps pages for migration. Returns number of source pfns marked as
558  * migrating.
559  */
560 static unsigned long migrate_device_unmap(unsigned long *src_pfns,
561 					  unsigned long npages,
562 					  struct page *fault_page)
563 {
564 	struct folio *fault_folio = fault_page ?
565 		page_folio(fault_page) : NULL;
566 	unsigned long i, restore = 0;
567 	bool allow_drain = true;
568 	unsigned long unmapped = 0;
569 
570 	lru_add_drain();
571 
572 	for (i = 0; i < npages; ) {
573 		struct page *page = migrate_pfn_to_page(src_pfns[i]);
574 		struct folio *folio;
575 		unsigned int nr = 1;
576 
577 		if (!page) {
578 			if (src_pfns[i] & MIGRATE_PFN_MIGRATE)
579 				unmapped++;
580 			goto next;
581 		}
582 
583 		folio =	page_folio(page);
584 		nr = folio_nr_pages(folio);
585 
586 		if (nr > 1)
587 			src_pfns[i] |= MIGRATE_PFN_COMPOUND;
588 
589 
590 		/* ZONE_DEVICE folios are not on LRU */
591 		if (!folio_is_zone_device(folio)) {
592 			if (!folio_test_lru(folio) && allow_drain) {
593 				/* Drain CPU's lru cache */
594 				lru_add_drain_all();
595 				allow_drain = false;
596 			}
597 
598 			if (!folio_isolate_lru(folio)) {
599 				src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
600 				restore++;
601 				goto next;
602 			}
603 
604 			/* Drop the reference we took in collect */
605 			folio_put(folio);
606 		}
607 
608 		if (folio_mapped(folio))
609 			try_to_migrate(folio, 0);
610 
611 		if (folio_mapped(folio) ||
612 		    !migrate_vma_check_page(page, fault_page)) {
613 			if (!folio_is_zone_device(folio)) {
614 				folio_get(folio);
615 				folio_putback_lru(folio);
616 			}
617 
618 			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
619 			restore++;
620 			goto next;
621 		}
622 
623 		unmapped++;
624 next:
625 		i += nr;
626 	}
627 
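	/*
	 * Restore any folios that could not be isolated or unmapped (or that
	 * are still pinned): remap them, unlock them and drop the reference
	 * taken during collection.
	 */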
628 	for (i = 0; i < npages && restore; i++) {
629 		struct page *page = migrate_pfn_to_page(src_pfns[i]);
630 		struct folio *folio;
631 
632 		if (!page || (src_pfns[i] & MIGRATE_PFN_MIGRATE))
633 			continue;
634 
635 		folio = page_folio(page);
636 		remove_migration_ptes(folio, folio, 0);
637 
638 		src_pfns[i] = 0;
639 		if (fault_folio != folio)
640 			folio_unlock(folio);
641 		folio_put(folio);
642 		restore--;
643 	}
644 
645 	return unmapped;
646 }
647 
648 /*
649  * migrate_vma_unmap() - replace page mapping with special migration pte entry
650  * @migrate: migrate struct containing all migration information
651  *
652  * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
653  * special migration pte entry and check if it has been pinned. Pinned pages are
654  * restored because we cannot migrate them.
655  *
656  * This is the last step before we call the device driver callback to allocate
657  * destination memory and copy contents of original page over to new page.
658  */
659 static void migrate_vma_unmap(struct migrate_vma *migrate)
660 {
661 	migrate->cpages = migrate_device_unmap(migrate->src, migrate->npages,
662 					migrate->fault_page);
663 }
664 
665 /**
666  * migrate_vma_setup() - prepare to migrate a range of memory
667  * @args: contains the vma, start, and pfns arrays for the migration
668  *
669  * Returns: negative errno on failures, 0 when 0 or more pages were migrated
670  * without an error.
671  *
672  * Prepare to migrate a range of memory virtual address range by collecting all
673  * the pages backing each virtual address in the range, saving them inside the
674  * src array.  Then lock those pages and unmap them. Once the pages are locked
675  * and unmapped, check whether each page is pinned or not.  Pages that aren't
676  * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
677  * corresponding src array entry.  It then restores any pages that are pinned, by
678  * remapping and unlocking those pages.
679  *
680  * The caller should then allocate destination memory and copy source memory to
681  * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
682  * flag set).  Once these are allocated and copied, the caller must update each
683  * corresponding entry in the dst array with the pfn value of the destination
684  * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
685  * lock_page().
686  *
687  * Note that the caller does not have to migrate all the pages that are marked
688  * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
689  * device memory to system memory.  If the caller cannot migrate a device page
690  * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe
691  * consequences for the userspace process, so it must be avoided if at all
692  * possible.
693  *
694  * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we
695  * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array entry, thus
696  * allowing the caller to allocate device memory for those unbacked virtual
697  * addresses.  For this the caller simply has to allocate device memory and
698  * properly set the destination entry like for regular migration.  Note that
699  * this can still fail, and thus inside the device driver you must check if the
700  * migration was successful for those entries after calling migrate_vma_pages(),
701  * just like for regular migration.
702  *
703  * After that, the caller must call migrate_vma_pages() to go over each entry
704  * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
705  * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set,
706  * then migrate_vma_pages() migrates struct page information from the source
707  * struct page to the destination struct page.  If it fails to migrate the
708  * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
709  * src array.
710  *
711  * At this point all successfully migrated pages have an entry in the src
712  * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
713  * array entry with MIGRATE_PFN_VALID flag set.
714  *
715  * Once migrate_vma_pages() returns the caller may inspect which pages were
716  * successfully migrated, and which were not.  Successfully migrated pages will
717  * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
718  *
719  * It is safe to update device page table after migrate_vma_pages() because
720  * both destination and source page are still locked, and the mmap_lock is held
721  * in read mode (hence no one can unmap the range being migrated).
722  *
723  * Once the caller is done cleaning up things and updating its page table (if it
724  * chose to do so, this is not an obligation) it finally calls
725  * migrate_vma_finalize() to update the CPU page table to point to new pages
726  * for successfully migrated pages or otherwise restore the CPU page table to
727  * point to the original source pages.
728  */
729 int migrate_vma_setup(struct migrate_vma *args)
730 {
731 	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;
732 
733 	args->start &= PAGE_MASK;
734 	args->end &= PAGE_MASK;
735 	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
736 	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
737 		return -EINVAL;
738 	if (nr_pages <= 0)
739 		return -EINVAL;
740 	if (args->start < args->vma->vm_start ||
741 	    args->start >= args->vma->vm_end)
742 		return -EINVAL;
743 	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
744 		return -EINVAL;
745 	if (!args->src || !args->dst)
746 		return -EINVAL;
747 	if (args->fault_page && !is_device_private_page(args->fault_page))
748 		return -EINVAL;
749 	if (args->fault_page && !PageLocked(args->fault_page))
750 		return -EINVAL;
751 
752 	memset(args->src, 0, sizeof(*args->src) * nr_pages);
753 	args->cpages = 0;
754 	args->npages = 0;
755 
756 	migrate_vma_collect(args);
757 
758 	if (args->cpages)
759 		migrate_vma_unmap(args);
760 
761 	/*
762 	 * At this point pages are locked and unmapped, and thus they have
763 	 * stable content and can safely be copied to destination memory that
764 	 * is allocated by the drivers.
765 	 */
766 	return 0;
767 
768 }
769 EXPORT_SYMBOL(migrate_vma_setup);
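
/*
 * Sketch of a typical driver sequence built on the API above (illustrative
 * only; how destination memory is allocated and copied is driver specific):
 *
 *	args.vma = vma; args.start = start; args.end = end;
 *	args.src = src_pfns; args.dst = dst_pfns;
 *	args.pgmap_owner = owner; args.flags = MIGRATE_VMA_SELECT_SYSTEM;
 *	if (migrate_vma_setup(&args) < 0)
 *		return error;
 *	for each src entry with MIGRATE_PFN_MIGRATE set:
 *		allocate and lock a destination page, copy the source data,
 *		then set the dst entry to migrate_pfn(new_pfn);
 *	migrate_vma_pages(&args);
 *	update device page tables for entries still marked MIGRATE_PFN_MIGRATE;
 *	migrate_vma_finalize(&args);
 */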
770 
771 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
772 /**
773  * migrate_vma_insert_huge_pmd_page - Insert a huge folio into @migrate->vma->vm_mm
774  * at @addr. folio is already allocated as a part of the migration process with
775  * large page.
776  *
777  * @page needs to be initialized and set up after it's allocated. The code
778  * here closely follows __do_huge_pmd_anonymous_page(). This API does
779  * not support THP zero pages.
780  *
781  * @migrate: migrate_vma arguments
782  * @addr: address where the folio will be inserted
783  * @page: page to be inserted at @addr
784  * @src: src pfn which is being migrated
785  * @pmdp: pointer to the pmd
786  */
787 static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
788 					 unsigned long addr,
789 					 struct page *page,
790 					 unsigned long *src,
791 					 pmd_t *pmdp)
792 {
793 	struct vm_area_struct *vma = migrate->vma;
794 	gfp_t gfp = vma_thp_gfp_mask(vma);
795 	struct folio *folio = page_folio(page);
796 	int ret;
797 	vm_fault_t csa_ret;
798 	spinlock_t *ptl;
799 	pgtable_t pgtable;
800 	pmd_t entry;
801 	bool flush = false;
802 	unsigned long i;
803 
804 	VM_WARN_ON_FOLIO(!folio, folio);
805 	VM_WARN_ON_ONCE(!pmd_none(*pmdp) && !is_huge_zero_pmd(*pmdp));
806 
807 	if (!thp_vma_suitable_order(vma, addr, HPAGE_PMD_ORDER))
808 		return -EINVAL;
809 
810 	ret = anon_vma_prepare(vma);
811 	if (ret)
812 		return ret;
813 
814 	folio_set_order(folio, HPAGE_PMD_ORDER);
815 	folio_set_large_rmappable(folio);
816 
817 	if (mem_cgroup_charge(folio, migrate->vma->vm_mm, gfp)) {
818 		count_vm_event(THP_FAULT_FALLBACK);
819 		count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
820 		ret = -ENOMEM;
821 		goto abort;
822 	}
823 
824 	__folio_mark_uptodate(folio);
825 
826 	pgtable = pte_alloc_one(vma->vm_mm);
827 	if (unlikely(!pgtable))
828 		goto abort;
829 
830 	if (folio_is_device_private(folio)) {
831 		swp_entry_t swp_entry;
832 
833 		if (vma->vm_flags & VM_WRITE)
834 			swp_entry = make_writable_device_private_entry(
835 						page_to_pfn(page));
836 		else
837 			swp_entry = make_readable_device_private_entry(
838 						page_to_pfn(page));
839 		entry = swp_entry_to_pmd(swp_entry);
840 	} else {
841 		if (folio_is_zone_device(folio) &&
842 		    !folio_is_device_coherent(folio)) {
843 			goto abort;
844 		}
845 		entry = folio_mk_pmd(folio, vma->vm_page_prot);
846 		if (vma->vm_flags & VM_WRITE)
847 			entry = pmd_mkwrite(pmd_mkdirty(entry), vma);
848 	}
849 
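	/*
	 * With the pmd lock held, recheck that nothing raced with us before
	 * installing the new entry: the address space must still be stable,
	 * userfaultfd must not need the fault delivered, and the pmd must
	 * still be none (or the huge zero pmd, which gets replaced).
	 */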
850 	ptl = pmd_lock(vma->vm_mm, pmdp);
851 	csa_ret = check_stable_address_space(vma->vm_mm);
852 	if (csa_ret)
853 		goto abort;
854 
855 	/*
856 	 * Check for userfaultfd but do not deliver the fault. Instead,
857 	 * just back off.
858 	 */
859 	if (userfaultfd_missing(vma))
860 		goto unlock_abort;
861 
862 	if (!pmd_none(*pmdp)) {
863 		if (!is_huge_zero_pmd(*pmdp))
864 			goto unlock_abort;
865 		flush = true;
866 	} else if (!pmd_none(*pmdp))
867 		goto unlock_abort;
868 
869 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
870 	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
871 	if (!folio_is_zone_device(folio))
872 		folio_add_lru_vma(folio, vma);
873 	folio_get(folio);
874 
875 	if (flush) {
876 		pte_free(vma->vm_mm, pgtable);
877 		flush_cache_page(vma, addr, addr + HPAGE_PMD_SIZE);
878 		pmdp_invalidate(vma, addr, pmdp);
879 	} else {
880 		pgtable_trans_huge_deposit(vma->vm_mm, pmdp, pgtable);
881 		mm_inc_nr_ptes(vma->vm_mm);
882 	}
883 	set_pmd_at(vma->vm_mm, addr, pmdp, entry);
884 	update_mmu_cache_pmd(vma, addr, pmdp);
885 
886 	spin_unlock(ptl);
887 
888 	count_vm_event(THP_FAULT_ALLOC);
889 	count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
890 	count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
891 
892 	return 0;
893 
894 unlock_abort:
895 	spin_unlock(ptl);
896 abort:
897 	for (i = 0; i < HPAGE_PMD_NR; i++)
898 		src[i] &= ~MIGRATE_PFN_MIGRATE;
899 	return 0;
900 }
901 
902 static int migrate_vma_split_unmapped_folio(struct migrate_vma *migrate,
903 					    unsigned long idx, unsigned long addr,
904 					    struct folio *folio)
905 {
906 	unsigned long i;
907 	unsigned long pfn;
908 	unsigned long flags;
909 	int ret = 0;
910 
911 	/*
912 	 * take a reference, since split_huge_pmd_address() with freeze = true
913 	 * drops a reference at the end.
914 	 */
915 	folio_get(folio);
916 	split_huge_pmd_address(migrate->vma, addr, true);
917 	ret = folio_split_unmapped(folio, 0);
918 	if (ret)
919 		return ret;
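	/*
	 * The folio is now split: rewrite the src entries so that each one
	 * describes a single base page, carrying over the original flags.
	 */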
920 	migrate->src[idx] &= ~MIGRATE_PFN_COMPOUND;
921 	flags = migrate->src[idx] & ((1UL << MIGRATE_PFN_SHIFT) - 1);
922 	pfn = migrate->src[idx] >> MIGRATE_PFN_SHIFT;
923 	for (i = 1; i < HPAGE_PMD_NR; i++)
924 		migrate->src[i+idx] = migrate_pfn(pfn + i) | flags;
925 	return ret;
926 }
927 #else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
928 static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
929 					 unsigned long addr,
930 					 struct page *page,
931 					 unsigned long *src,
932 					 pmd_t *pmdp)
933 {
934 	return 0;
935 }
936 
937 static int migrate_vma_split_unmapped_folio(struct migrate_vma *migrate,
938 					    unsigned long idx, unsigned long addr,
939 					    struct folio *folio)
940 {
941 	return 0;
942 }
943 #endif
944 
945 static unsigned long migrate_vma_nr_pages(unsigned long *src)
946 {
947 	unsigned long nr = 1;
948 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
949 	if (*src & MIGRATE_PFN_COMPOUND)
950 		nr = HPAGE_PMD_NR;
951 #else
952 	if (*src & MIGRATE_PFN_COMPOUND)
953 		VM_WARN_ON_ONCE(true);
954 #endif
955 	return nr;
956 }
957 
958 /*
959  * This code closely matches the code in:
960  *   __handle_mm_fault()
961  *     handle_pte_fault()
962  *       do_anonymous_page()
963  * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
964  * private or coherent page.
965  */
966 static void migrate_vma_insert_page(struct migrate_vma *migrate,
967 				    unsigned long addr,
968 				    unsigned long *dst,
969 				    unsigned long *src)
970 {
971 	struct page *page = migrate_pfn_to_page(*dst);
972 	struct folio *folio = page_folio(page);
973 	struct vm_area_struct *vma = migrate->vma;
974 	struct mm_struct *mm = vma->vm_mm;
975 	bool flush = false;
976 	spinlock_t *ptl;
977 	pte_t entry;
978 	pgd_t *pgdp;
979 	p4d_t *p4dp;
980 	pud_t *pudp;
981 	pmd_t *pmdp;
982 	pte_t *ptep;
983 	pte_t orig_pte;
984 
985 	/* Only allow populating anonymous memory */
986 	if (!vma_is_anonymous(vma))
987 		goto abort;
988 
989 	pgdp = pgd_offset(mm, addr);
990 	p4dp = p4d_alloc(mm, pgdp, addr);
991 	if (!p4dp)
992 		goto abort;
993 	pudp = pud_alloc(mm, p4dp, addr);
994 	if (!pudp)
995 		goto abort;
996 	pmdp = pmd_alloc(mm, pudp, addr);
997 	if (!pmdp)
998 		goto abort;
999 
1000 	if (thp_migration_supported() && (*dst & MIGRATE_PFN_COMPOUND)) {
1001 		int ret = migrate_vma_insert_huge_pmd_page(migrate, addr, page,
1002 								src, pmdp);
1003 		if (ret)
1004 			goto abort;
1005 		return;
1006 	}
1007 
1008 	if (!pmd_none(*pmdp)) {
1009 		if (pmd_trans_huge(*pmdp)) {
1010 			if (!is_huge_zero_pmd(*pmdp))
1011 				goto abort;
1012 			split_huge_pmd(vma, pmdp, addr);
1013 		} else if (pmd_leaf(*pmdp))
1014 			goto abort;
1015 	}
1016 
1017 	if (pte_alloc(mm, pmdp))
1018 		goto abort;
1019 	if (unlikely(anon_vma_prepare(vma)))
1020 		goto abort;
1021 	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
1022 		goto abort;
1023 
1024 	/*
1025 	 * The memory barrier inside __folio_mark_uptodate makes sure that
1026 	 * preceding stores to the folio contents become visible before
1027 	 * the set_pte_at() write.
1028 	 */
1029 	__folio_mark_uptodate(folio);
1030 
1031 	if (folio_is_device_private(folio)) {
1032 		swp_entry_t swp_entry;
1033 
1034 		if (vma->vm_flags & VM_WRITE)
1035 			swp_entry = make_writable_device_private_entry(
1036 						page_to_pfn(page));
1037 		else
1038 			swp_entry = make_readable_device_private_entry(
1039 						page_to_pfn(page));
1040 		entry = swp_entry_to_pte(swp_entry);
1041 	} else {
1042 		if (folio_is_zone_device(folio) &&
1043 		    !folio_is_device_coherent(folio)) {
1044 			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
1045 			goto abort;
1046 		}
1047 		entry = mk_pte(page, vma->vm_page_prot);
1048 		if (vma->vm_flags & VM_WRITE)
1049 			entry = pte_mkwrite(pte_mkdirty(entry), vma);
1050 	}
1051 
1052 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
1053 	if (!ptep)
1054 		goto abort;
1055 	orig_pte = ptep_get(ptep);
1056 
1057 	if (check_stable_address_space(mm))
1058 		goto unlock_abort;
1059 
1060 	if (pte_present(orig_pte)) {
1061 		unsigned long pfn = pte_pfn(orig_pte);
1062 
1063 		if (!is_zero_pfn(pfn))
1064 			goto unlock_abort;
1065 		flush = true;
1066 	} else if (!pte_none(orig_pte))
1067 		goto unlock_abort;
1068 
1069 	/*
1070 	 * Check for userfaultfd but do not deliver the fault. Instead,
1071 	 * just back off.
1072 	 */
1073 	if (userfaultfd_missing(vma))
1074 		goto unlock_abort;
1075 
1076 	inc_mm_counter(mm, MM_ANONPAGES);
1077 	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
1078 	if (!folio_is_zone_device(folio))
1079 		folio_add_lru_vma(folio, vma);
1080 	folio_get(folio);
1081 
1082 	if (flush) {
1083 		flush_cache_page(vma, addr, pte_pfn(orig_pte));
1084 		ptep_clear_flush(vma, addr, ptep);
1085 	}
1086 	set_pte_at(mm, addr, ptep, entry);
1087 	update_mmu_cache(vma, addr, ptep);
1088 
1089 	pte_unmap_unlock(ptep, ptl);
1090 	*src = MIGRATE_PFN_MIGRATE;
1091 	return;
1092 
1093 unlock_abort:
1094 	pte_unmap_unlock(ptep, ptl);
1095 abort:
1096 	*src &= ~MIGRATE_PFN_MIGRATE;
1097 }
1098 
1099 static void __migrate_device_pages(unsigned long *src_pfns,
1100 				unsigned long *dst_pfns, unsigned long npages,
1101 				struct migrate_vma *migrate)
1102 {
1103 	struct mmu_notifier_range range;
1104 	unsigned long i, j;
1105 	bool notified = false;
1106 	unsigned long addr;
1107 
1108 	for (i = 0; i < npages; ) {
1109 		struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
1110 		struct page *page = migrate_pfn_to_page(src_pfns[i]);
1111 		struct address_space *mapping;
1112 		struct folio *newfolio, *folio;
1113 		int r, extra_cnt = 0;
1114 		unsigned long nr = 1;
1115 
1116 		if (!newpage) {
1117 			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
1118 			goto next;
1119 		}
1120 
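		/*
		 * No source page means this entry was collected as a hole
		 * (pte_none or the zero page), so a brand new anonymous page
		 * is inserted at the address instead of migrating one.
		 */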
1121 		if (!page) {
1122 			unsigned long addr;
1123 
1124 			if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
1125 				goto next;
1126 
1127 			/*
1128 			 * The only time there is no vma is when called from
1129 			 * migrate_device_coherent_folio(). However this isn't
1130 			 * called if the page could not be unmapped.
1131 			 */
1132 			VM_BUG_ON(!migrate);
1133 			addr = migrate->start + i*PAGE_SIZE;
1134 			if (!notified) {
1135 				notified = true;
1136 
1137 				mmu_notifier_range_init_owner(&range,
1138 					MMU_NOTIFY_MIGRATE, 0,
1139 					migrate->vma->vm_mm, addr, migrate->end,
1140 					migrate->pgmap_owner);
1141 				mmu_notifier_invalidate_range_start(&range);
1142 			}
1143 
1144 			if ((src_pfns[i] & MIGRATE_PFN_COMPOUND) &&
1145 				(!(dst_pfns[i] & MIGRATE_PFN_COMPOUND))) {
1146 				nr = migrate_vma_nr_pages(&src_pfns[i]);
1147 				src_pfns[i] &= ~MIGRATE_PFN_COMPOUND;
1148 			} else {
1149 				nr = 1;
1150 			}
1151 
1152 			for (j = 0; j < nr && i + j < npages; j++) {
1153 				src_pfns[i+j] |= MIGRATE_PFN_MIGRATE;
1154 				migrate_vma_insert_page(migrate,
1155 					addr + j * PAGE_SIZE,
1156 					&dst_pfns[i+j], &src_pfns[i+j]);
1157 			}
1158 			goto next;
1159 		}
1160 
1161 		newfolio = page_folio(newpage);
1162 		folio = page_folio(page);
1163 		mapping = folio_mapping(folio);
1164 
1165 		/*
1166 		 * If THP migration is enabled, check if both src and dst
1167 		 * can migrate large pages
1168 		 */
1169 		if (thp_migration_supported()) {
1170 			if ((src_pfns[i] & MIGRATE_PFN_MIGRATE) &&
1171 				(src_pfns[i] & MIGRATE_PFN_COMPOUND) &&
1172 				!(dst_pfns[i] & MIGRATE_PFN_COMPOUND)) {
1173 
1174 				if (!migrate) {
1175 					src_pfns[i] &= ~(MIGRATE_PFN_MIGRATE |
1176 							 MIGRATE_PFN_COMPOUND);
1177 					goto next;
1178 				}
1179 				nr = 1 << folio_order(folio);
1180 				addr = migrate->start + i * PAGE_SIZE;
1181 				if (migrate_vma_split_unmapped_folio(migrate, i, addr, folio)) {
1182 					src_pfns[i] &= ~(MIGRATE_PFN_MIGRATE |
1183 							 MIGRATE_PFN_COMPOUND);
1184 					goto next;
1185 				}
1186 			} else if ((src_pfns[i] & MIGRATE_PFN_MIGRATE) &&
1187 				(dst_pfns[i] & MIGRATE_PFN_COMPOUND) &&
1188 				!(src_pfns[i] & MIGRATE_PFN_COMPOUND)) {
1189 				src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
1190 			}
1191 		}
1192 
1193 
1194 		if (folio_is_device_private(newfolio) ||
1195 		    folio_is_device_coherent(newfolio)) {
1196 			if (mapping) {
1197 				/*
1198 				 * For now only support anonymous memory migrating to
1199 				 * device private or coherent memory.
1200 				 *
1201 				 * Try to get rid of swap cache if possible.
1202 				 */
1203 				if (!folio_test_anon(folio) ||
1204 				    !folio_free_swap(folio)) {
1205 					src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
1206 					goto next;
1207 				}
1208 			}
1209 		} else if (folio_is_zone_device(newfolio)) {
1210 			/*
1211 			 * Other types of ZONE_DEVICE page are not supported.
1212 			 */
1213 			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
1214 			goto next;
1215 		}
1216 
1217 		BUG_ON(folio_test_writeback(folio));
1218 
1219 		if (migrate && migrate->fault_page == page)
1220 			extra_cnt = 1;
1221 		for (j = 0; j < nr && i + j < npages; j++) {
1222 			folio = page_folio(migrate_pfn_to_page(src_pfns[i+j]));
1223 			newfolio = page_folio(migrate_pfn_to_page(dst_pfns[i+j]));
1224 
1225 			r = folio_migrate_mapping(mapping, newfolio, folio, extra_cnt);
1226 			if (r)
1227 				src_pfns[i+j] &= ~MIGRATE_PFN_MIGRATE;
1228 			else
1229 				folio_migrate_flags(newfolio, folio);
1230 		}
1231 next:
1232 		i += nr;
1233 	}
1234 
1235 	if (notified)
1236 		mmu_notifier_invalidate_range_end(&range);
1237 }
1238 
1239 /**
1240  * migrate_device_pages() - migrate meta-data from src page to dst page
1241  * @src_pfns: src_pfns returned from migrate_device_range()
1242  * @dst_pfns: array of pfns allocated by the driver to migrate memory to
1243  * @npages: number of pages in the range
1244  *
1245  * Equivalent to migrate_vma_pages(). This is called to migrate struct page
1246  * meta-data from source struct page to destination.
1247  */
1248 void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
1249 			unsigned long npages)
1250 {
1251 	__migrate_device_pages(src_pfns, dst_pfns, npages, NULL);
1252 }
1253 EXPORT_SYMBOL(migrate_device_pages);
1254 
1255 /**
1256  * migrate_vma_pages() - migrate meta-data from src page to dst page
1257  * @migrate: migrate struct containing all migration information
1258  *
1259  * This migrates struct page meta-data from source struct page to destination
1260  * struct page. This effectively finishes the migration from source page to the
1261  * destination page.
1262  */
1263 void migrate_vma_pages(struct migrate_vma *migrate)
1264 {
1265 	__migrate_device_pages(migrate->src, migrate->dst, migrate->npages, migrate);
1266 }
1267 EXPORT_SYMBOL(migrate_vma_pages);
1268 
1269 static void __migrate_device_finalize(unsigned long *src_pfns,
1270 				      unsigned long *dst_pfns,
1271 				      unsigned long npages,
1272 				      struct page *fault_page)
1273 {
1274 	struct folio *fault_folio = fault_page ?
1275 		page_folio(fault_page) : NULL;
1276 	unsigned long i;
1277 
1278 	for (i = 0; i < npages; i++) {
1279 		struct folio *dst = NULL, *src = NULL;
1280 		struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
1281 		struct page *page = migrate_pfn_to_page(src_pfns[i]);
1282 
1283 		if (newpage)
1284 			dst = page_folio(newpage);
1285 
1286 		if (!page) {
1287 			if (dst) {
1288 				WARN_ON_ONCE(fault_folio == dst);
1289 				folio_unlock(dst);
1290 				folio_put(dst);
1291 			}
1292 			continue;
1293 		}
1294 
1295 		src = page_folio(page);
1296 
1297 		if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !dst) {
1298 			if (dst) {
1299 				WARN_ON_ONCE(fault_folio == dst);
1300 				folio_unlock(dst);
1301 				folio_put(dst);
1302 			}
1303 			dst = src;
1304 		}
1305 
1306 		if (!folio_is_zone_device(dst))
1307 			folio_add_lru(dst);
1308 		remove_migration_ptes(src, dst, 0);
1309 		if (fault_folio != src)
1310 			folio_unlock(src);
1311 		folio_put(src);
1312 
1313 		if (dst != src) {
1314 			WARN_ON_ONCE(fault_folio == dst);
1315 			folio_unlock(dst);
1316 			folio_put(dst);
1317 		}
1318 	}
1319 }
1320 
1321 /*
1322  * migrate_device_finalize() - complete page migration
1323  * @src_pfns: src_pfns returned from migrate_device_range()
1324  * @dst_pfns: array of pfns allocated by the driver to migrate memory to
1325  * @npages: number of pages in the range
1326  *
1327  * Completes migration of the page by removing special migration entries.
1328  * Drivers must ensure copying of page data is complete and visible to the CPU
1329  * before calling this.
1330  */
1331 void migrate_device_finalize(unsigned long *src_pfns,
1332 			     unsigned long *dst_pfns, unsigned long npages)
1333 {
1334 	return __migrate_device_finalize(src_pfns, dst_pfns, npages, NULL);
1335 }
1336 EXPORT_SYMBOL(migrate_device_finalize);
1337 
1338 /**
1339  * migrate_vma_finalize() - restore CPU page table entry
1340  * @migrate: migrate struct containing all migration information
1341  *
1342  * This replaces the special migration pte entry with either a mapping to the
1343  * new page if migration was successful for that page, or to the original page
1344  * otherwise.
1345  *
1346  * This also unlocks the pages and puts them back on the lru, or drops the extra
1347  * refcount, for device pages.
1348  */
1349 void migrate_vma_finalize(struct migrate_vma *migrate)
1350 {
1351 	__migrate_device_finalize(migrate->src, migrate->dst, migrate->npages,
1352 				  migrate->fault_page);
1353 }
1354 EXPORT_SYMBOL(migrate_vma_finalize);
1355 
1356 static unsigned long migrate_device_pfn_lock(unsigned long pfn)
1357 {
1358 	struct folio *folio;
1359 
1360 	folio = folio_get_nontail_page(pfn_to_page(pfn));
1361 	if (!folio)
1362 		return 0;
1363 
1364 	if (!folio_trylock(folio)) {
1365 		folio_put(folio);
1366 		return 0;
1367 	}
1368 
1369 	return migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
1370 }
1371 
1372 /**
1373  * migrate_device_range() - migrate device private pfns to normal memory.
1374  * @src_pfns: array large enough to hold migrating source device private pfns.
1375  * @start: starting pfn in the range to migrate.
1376  * @npages: number of pages to migrate.
1377  *
1378  * This is similar in concept to migrate_vma_setup(), except that instead of
1379  * looking up pages based on virtual address mappings, a range of device pfns
1380  * that should be migrated to system memory is used.
1381  *
1382  * This is useful when a driver needs to free device memory but doesn't know the
1383  * virtual mappings of every page that may be in device memory. For example this
1384  * is often the case when a driver is being unloaded or unbound from a device.
1385  *
1386  * Like migrate_vma_setup() this function will take a reference and lock any
1387  * migrating pages that aren't free before unmapping them. Drivers may then
1388  * allocate destination pages and start copying data from the device to CPU
1389  * memory before calling migrate_device_pages().
1390  */
1391 int migrate_device_range(unsigned long *src_pfns, unsigned long start,
1392 			unsigned long npages)
1393 {
1394 	unsigned long i, j, pfn;
1395 
1396 	for (pfn = start, i = 0; i < npages; pfn++, i++) {
1397 		struct page *page = pfn_to_page(pfn);
1398 		struct folio *folio = page_folio(page);
1399 		unsigned int nr = 1;
1400 
1401 		src_pfns[i] = migrate_device_pfn_lock(pfn);
1402 		nr = folio_nr_pages(folio);
1403 		if (nr > 1) {
1404 			src_pfns[i] |= MIGRATE_PFN_COMPOUND;
1405 			for (j = 1; j < nr; j++)
1406 				src_pfns[i+j] = 0;
1407 			i += j - 1;
1408 			pfn += j - 1;
1409 		}
1410 	}
1411 
1412 	migrate_device_unmap(src_pfns, npages, NULL);
1413 
1414 	return 0;
1415 }
1416 EXPORT_SYMBOL(migrate_device_range);
1417 
1418 /**
1419  * migrate_device_pfns() - migrate device private pfns to normal memory.
1420  * @src_pfns: pre-populated array of source device private pfns to migrate.
1421  * @npages: number of pages to migrate.
1422  *
1423  * Similar to migrate_device_range() but supports non-contiguous pre-populated
1424  * array of device pages to migrate.
1425  */
1426 int migrate_device_pfns(unsigned long *src_pfns, unsigned long npages)
1427 {
1428 	unsigned long i, j;
1429 
1430 	for (i = 0; i < npages; i++) {
1431 		struct page *page = pfn_to_page(src_pfns[i]);
1432 		struct folio *folio = page_folio(page);
1433 		unsigned int nr = 1;
1434 
1435 		src_pfns[i] = migrate_device_pfn_lock(src_pfns[i]);
1436 		nr = folio_nr_pages(folio);
1437 		if (nr > 1) {
1438 			src_pfns[i] |= MIGRATE_PFN_COMPOUND;
1439 			for (j = 1; j < nr; j++)
1440 				src_pfns[i+j] = 0;
1441 			i += j - 1;
1442 		}
1443 	}
1444 
1445 	migrate_device_unmap(src_pfns, npages, NULL);
1446 
1447 	return 0;
1448 }
1449 EXPORT_SYMBOL(migrate_device_pfns);
1450 
1451 /*
1452  * Migrate a device coherent folio back to normal memory. The caller should have
1453  * a reference on the folio, which will be copied to the new folio if migration is
1454  * successful, or dropped on failure.
1455  */
1456 int migrate_device_coherent_folio(struct folio *folio)
1457 {
1458 	unsigned long src_pfn, dst_pfn = 0;
1459 	struct folio *dfolio;
1460 
1461 	WARN_ON_ONCE(folio_test_large(folio));
1462 
1463 	folio_lock(folio);
1464 	src_pfn = migrate_pfn(folio_pfn(folio)) | MIGRATE_PFN_MIGRATE;
1465 
1466 	/*
1467 	 * We don't have a VMA and don't need to walk the page tables to find
1468  * the source folio. So call migrate_device_unmap() directly to unmap the
1469 	 * folio as migrate_vma_setup() will fail if args.vma == NULL.
1470 	 */
1471 	migrate_device_unmap(&src_pfn, 1, NULL);
1472 	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
1473 		return -EBUSY;
1474 
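	/*
	 * Allocate a system memory folio as the destination. If this fails,
	 * dst_pfn stays zero and migrate_device_pages() will clear
	 * MIGRATE_PFN_MIGRATE, so the source folio gets restored.
	 */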
1475 	dfolio = folio_alloc(GFP_USER | __GFP_NOWARN, 0);
1476 	if (dfolio) {
1477 		folio_lock(dfolio);
1478 		dst_pfn = migrate_pfn(folio_pfn(dfolio));
1479 	}
1480 
1481 	migrate_device_pages(&src_pfn, &dst_pfn, 1);
1482 	if (src_pfn & MIGRATE_PFN_MIGRATE)
1483 		folio_copy(dfolio, folio);
1484 	migrate_device_finalize(&src_pfn, &dst_pfn, 1);
1485 
1486 	if (src_pfn & MIGRATE_PFN_MIGRATE)
1487 		return 0;
1488 	return -EBUSY;
1489 }
1490