xref: /linux/mm/migrate.c (revision 8e07e0e3964ca4e23ce7b68e2096fe660a888942)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Memory Migration functionality - linux/mm/migrate.c
4  *
5  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6  *
7  * Page migration was first developed in the context of the memory hotplug
8  * project. The main authors of the migration code are:
9  *
10  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11  * Hirokazu Takahashi <taka@valinux.co.jp>
12  * Dave Hansen <haveblue@us.ibm.com>
13  * Christoph Lameter
14  */
15 
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/nsproxy.h>
24 #include <linux/ksm.h>
25 #include <linux/rmap.h>
26 #include <linux/topology.h>
27 #include <linux/cpu.h>
28 #include <linux/cpuset.h>
29 #include <linux/writeback.h>
30 #include <linux/mempolicy.h>
31 #include <linux/vmalloc.h>
32 #include <linux/security.h>
33 #include <linux/backing-dev.h>
34 #include <linux/compaction.h>
35 #include <linux/syscalls.h>
36 #include <linux/compat.h>
37 #include <linux/hugetlb.h>
38 #include <linux/hugetlb_cgroup.h>
39 #include <linux/gfp.h>
40 #include <linux/pfn_t.h>
41 #include <linux/memremap.h>
42 #include <linux/userfaultfd_k.h>
43 #include <linux/balloon_compaction.h>
44 #include <linux/page_idle.h>
45 #include <linux/page_owner.h>
46 #include <linux/sched/mm.h>
47 #include <linux/ptrace.h>
48 #include <linux/oom.h>
49 #include <linux/memory.h>
50 #include <linux/random.h>
51 #include <linux/sched/sysctl.h>
52 #include <linux/memory-tiers.h>
53 
54 #include <asm/tlbflush.h>
55 
56 #include <trace/events/migrate.h>
57 
58 #include "internal.h"
59 
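/*
 * isolate_movable_page() - Try to isolate a non-LRU movable page for
 * migration.
 *
 * Descriptive summary of the code below: take a reference on the page,
 * re-check that it is still backed by movable_operations, lock it and call
 * the driver's ->isolate_page() callback.  Returns true with PG_isolated
 * set on success, false otherwise.
 */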
60 bool isolate_movable_page(struct page *page, isolate_mode_t mode)
61 {
62 	struct folio *folio = folio_get_nontail_page(page);
63 	const struct movable_operations *mops;
64 
65 	/*
66 	 * Avoid burning cycles with pages that are still under __free_pages(),
67 	 * or just got freed under us.
68 	 *
69 	 * In case we 'win' a race for a movable page being freed under us and
70 	 * raise its refcount, preventing __free_pages() from doing its job,
71 	 * the put_page() at the end of this block will take care of
72 	 * releasing this page, thus avoiding a nasty leakage.
73 	 */
74 	if (!folio)
75 		goto out;
76 
77 	if (unlikely(folio_test_slab(folio)))
78 		goto out_putfolio;
79 	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
80 	smp_rmb();
81 	/*
82 	 * Check the movable flag before taking the page lock because
83 	 * we use non-atomic bitops on newly allocated page flags, so
84 	 * unconditionally grabbing the lock could corrupt the owner's flag updates.
85 	 */
86 	if (unlikely(!__folio_test_movable(folio)))
87 		goto out_putfolio;
88 	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
89 	smp_rmb();
90 	if (unlikely(folio_test_slab(folio)))
91 		goto out_putfolio;
92 
93 	/*
94 	 * As movable pages are not isolated from LRU lists, concurrent
95 	 * compaction threads can race against page migration functions
96 	 * as well as against the release of a page.
97 	 *
98 	 * In order to avoid having an already isolated movable page
99 	 * being (wrongly) re-isolated while it is under migration,
100 	 * or to avoid attempting to isolate pages being released,
101 	 * let's be sure we have the page lock
102 	 * before proceeding with the movable page isolation steps.
103 	 */
104 	if (unlikely(!folio_trylock(folio)))
105 		goto out_putfolio;
106 
107 	if (!folio_test_movable(folio) || folio_test_isolated(folio))
108 		goto out_no_isolated;
109 
110 	mops = folio_movable_ops(folio);
111 	VM_BUG_ON_FOLIO(!mops, folio);
112 
113 	if (!mops->isolate_page(&folio->page, mode))
114 		goto out_no_isolated;
115 
116 	/* Driver shouldn't use PG_isolated bit of page->flags */
117 	WARN_ON_ONCE(folio_test_isolated(folio));
118 	folio_set_isolated(folio);
119 	folio_unlock(folio);
120 
121 	return true;
122 
123 out_no_isolated:
124 	folio_unlock(folio);
125 out_putfolio:
126 	folio_put(folio);
127 out:
128 	return false;
129 }
130 
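/*
 * Hand a non-LRU movable folio back to its driver via the ->putback_page()
 * callback and clear the PG_isolated flag that was set at isolation time.
 */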
131 static void putback_movable_folio(struct folio *folio)
132 {
133 	const struct movable_operations *mops = folio_movable_ops(folio);
134 
135 	mops->putback_page(&folio->page);
136 	folio_clear_isolated(folio);
137 }
138 
139 /*
140  * Put previously isolated pages back onto the appropriate lists
141  * from where they were once taken off for compaction/migration.
142  *
143  * This function shall be used whenever the isolated pageset has been
144  * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
145  * and isolate_hugetlb().
146  */
147 void putback_movable_pages(struct list_head *l)
148 {
149 	struct folio *folio;
150 	struct folio *folio2;
151 
152 	list_for_each_entry_safe(folio, folio2, l, lru) {
153 		if (unlikely(folio_test_hugetlb(folio))) {
154 			folio_putback_active_hugetlb(folio);
155 			continue;
156 		}
157 		list_del(&folio->lru);
158 		/*
159 		 * We isolated a non-LRU movable folio so here we can use
160 		 * __folio_test_movable because an LRU folio's mapping cannot
161 		 * have PAGE_MAPPING_MOVABLE.
162 		 */
163 		if (unlikely(__folio_test_movable(folio))) {
164 			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
165 			folio_lock(folio);
166 			if (folio_test_movable(folio))
167 				putback_movable_folio(folio);
168 			else
169 				folio_clear_isolated(folio);
170 			folio_unlock(folio);
171 			folio_put(folio);
172 		} else {
173 			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
174 					folio_is_file_lru(folio), -folio_nr_pages(folio));
175 			folio_putback_lru(folio);
176 		}
177 	}
178 }
179 
180 /*
181  * Restore a potential migration pte to a working pte entry
182  */
183 static bool remove_migration_pte(struct folio *folio,
184 		struct vm_area_struct *vma, unsigned long addr, void *old)
185 {
186 	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
187 
188 	while (page_vma_mapped_walk(&pvmw)) {
189 		rmap_t rmap_flags = RMAP_NONE;
190 		pte_t old_pte;
191 		pte_t pte;
192 		swp_entry_t entry;
193 		struct page *new;
194 		unsigned long idx = 0;
195 
196 		/* pgoff is invalid for ksm pages, but they are never large */
197 		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
198 			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
199 		new = folio_page(folio, idx);
200 
201 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
202 		/* PMD-mapped THP migration entry */
203 		if (!pvmw.pte) {
204 			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
205 					!folio_test_pmd_mappable(folio), folio);
206 			remove_migration_pmd(&pvmw, new);
207 			continue;
208 		}
209 #endif
210 
211 		folio_get(folio);
212 		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
213 		old_pte = ptep_get(pvmw.pte);
214 		if (pte_swp_soft_dirty(old_pte))
215 			pte = pte_mksoft_dirty(pte);
216 
217 		entry = pte_to_swp_entry(old_pte);
218 		if (!is_migration_entry_young(entry))
219 			pte = pte_mkold(pte);
220 		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
221 			pte = pte_mkdirty(pte);
222 		if (is_writable_migration_entry(entry))
223 			pte = pte_mkwrite(pte, vma);
224 		else if (pte_swp_uffd_wp(old_pte))
225 			pte = pte_mkuffd_wp(pte);
226 
227 		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
228 			rmap_flags |= RMAP_EXCLUSIVE;
229 
230 		if (unlikely(is_device_private_page(new))) {
231 			if (pte_write(pte))
232 				entry = make_writable_device_private_entry(
233 							page_to_pfn(new));
234 			else
235 				entry = make_readable_device_private_entry(
236 							page_to_pfn(new));
237 			pte = swp_entry_to_pte(entry);
238 			if (pte_swp_soft_dirty(old_pte))
239 				pte = pte_swp_mksoft_dirty(pte);
240 			if (pte_swp_uffd_wp(old_pte))
241 				pte = pte_swp_mkuffd_wp(pte);
242 		}
243 
244 #ifdef CONFIG_HUGETLB_PAGE
245 		if (folio_test_hugetlb(folio)) {
246 			struct hstate *h = hstate_vma(vma);
247 			unsigned int shift = huge_page_shift(h);
248 			unsigned long psize = huge_page_size(h);
249 
250 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
251 			if (folio_test_anon(folio))
252 				hugepage_add_anon_rmap(folio, vma, pvmw.address,
253 						       rmap_flags);
254 			else
255 				page_dup_file_rmap(new, true);
256 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
257 					psize);
258 		} else
259 #endif
260 		{
261 			if (folio_test_anon(folio))
262 				page_add_anon_rmap(new, vma, pvmw.address,
263 						   rmap_flags);
264 			else
265 				page_add_file_rmap(new, vma, false);
266 			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
267 		}
268 		if (vma->vm_flags & VM_LOCKED)
269 			mlock_drain_local();
270 
271 		trace_remove_migration_pte(pvmw.address, pte_val(pte),
272 					   compound_order(new));
273 
274 		/* No need to invalidate - it was non-present before */
275 		update_mmu_cache(vma, pvmw.address, pvmw.pte);
276 	}
277 
278 	return true;
279 }
280 
281 /*
282  * Get rid of all migration entries and replace them with
283  * references to the indicated page.
284  */
285 void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
286 {
287 	struct rmap_walk_control rwc = {
288 		.rmap_one = remove_migration_pte,
289 		.arg = src,
290 	};
291 
292 	if (locked)
293 		rmap_walk_locked(dst, &rwc);
294 	else
295 		rmap_walk(dst, &rwc);
296 }
297 
298 /*
299  * Something used the pte of a page under migration. We need to
300  * get to the page and wait until migration is finished.
301  * When we return from this function the fault will be retried.
302  */
303 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
304 			  unsigned long address)
305 {
306 	spinlock_t *ptl;
307 	pte_t *ptep;
308 	pte_t pte;
309 	swp_entry_t entry;
310 
311 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
312 	if (!ptep)
313 		return;
314 
315 	pte = ptep_get(ptep);
316 	pte_unmap(ptep);
317 
318 	if (!is_swap_pte(pte))
319 		goto out;
320 
321 	entry = pte_to_swp_entry(pte);
322 	if (!is_migration_entry(entry))
323 		goto out;
324 
325 	migration_entry_wait_on_locked(entry, ptl);
326 	return;
327 out:
328 	spin_unlock(ptl);
329 }
330 
331 #ifdef CONFIG_HUGETLB_PAGE
332 /*
333  * The vma read lock must be held upon entry. Holding that lock prevents either
334  * the pte or the ptl from being freed.
335  *
336  * This function will release the vma lock before returning.
337  */
338 void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
339 {
340 	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
341 	pte_t pte;
342 
343 	hugetlb_vma_assert_locked(vma);
344 	spin_lock(ptl);
345 	pte = huge_ptep_get(ptep);
346 
347 	if (unlikely(!is_hugetlb_entry_migration(pte))) {
348 		spin_unlock(ptl);
349 		hugetlb_vma_unlock_read(vma);
350 	} else {
351 		/*
352 		 * If a migration entry exists, it is safe to release the vma lock
353 		 * here because the pgtable page won't be freed until the
354 		 * pgtable lock is released.  See the comment right above the pgtable
355 		 * lock release in migration_entry_wait_on_locked().
356 		 */
357 		hugetlb_vma_unlock_read(vma);
358 		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
359 	}
360 }
361 #endif
362 
363 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
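/*
 * PMD-level counterpart of migration_entry_wait(): wait for a PMD migration
 * entry to be replaced before the fault is retried.
 */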
364 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
365 {
366 	spinlock_t *ptl;
367 
368 	ptl = pmd_lock(mm, pmd);
369 	if (!is_pmd_migration_entry(*pmd))
370 		goto unlock;
371 	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
372 	return;
373 unlock:
374 	spin_unlock(ptl);
375 }
376 #endif
377 
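/*
 * Number of references a migration candidate folio is expected to have:
 * one from the isolating caller, plus one per base page held by the page
 * cache or swap cache when a mapping exists, plus one more if private
 * data (e.g. buffer heads) is attached.
 */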
378 static int folio_expected_refs(struct address_space *mapping,
379 		struct folio *folio)
380 {
381 	int refs = 1;
382 	if (!mapping)
383 		return refs;
384 
385 	refs += folio_nr_pages(folio);
386 	if (folio_test_private(folio))
387 		refs++;
388 
389 	return refs;
390 }
391 
392 /*
393  * Replace the page in the mapping.
394  *
395  * The number of remaining references must be:
396  * 1 for anonymous pages without a mapping
397  * 2 for pages with a mapping
398  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
399  */
400 int folio_migrate_mapping(struct address_space *mapping,
401 		struct folio *newfolio, struct folio *folio, int extra_count)
402 {
403 	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
404 	struct zone *oldzone, *newzone;
405 	int dirty;
406 	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
407 	long nr = folio_nr_pages(folio);
408 	long entries, i;
409 
410 	if (!mapping) {
411 		/* Anonymous page without mapping */
412 		if (folio_ref_count(folio) != expected_count)
413 			return -EAGAIN;
414 
415 		/* No turning back from here */
416 		newfolio->index = folio->index;
417 		newfolio->mapping = folio->mapping;
418 		if (folio_test_swapbacked(folio))
419 			__folio_set_swapbacked(newfolio);
420 
421 		return MIGRATEPAGE_SUCCESS;
422 	}
423 
424 	oldzone = folio_zone(folio);
425 	newzone = folio_zone(newfolio);
426 
427 	xas_lock_irq(&xas);
428 	if (!folio_ref_freeze(folio, expected_count)) {
429 		xas_unlock_irq(&xas);
430 		return -EAGAIN;
431 	}
432 
433 	/*
434 	 * Now we know that no one else is looking at the folio:
435 	 * no turning back from here.
436 	 */
437 	newfolio->index = folio->index;
438 	newfolio->mapping = folio->mapping;
439 	folio_ref_add(newfolio, nr); /* add cache reference */
440 	if (folio_test_swapbacked(folio)) {
441 		__folio_set_swapbacked(newfolio);
442 		if (folio_test_swapcache(folio)) {
443 			folio_set_swapcache(newfolio);
444 			newfolio->private = folio_get_private(folio);
445 		}
446 		entries = nr;
447 	} else {
448 		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
449 		entries = 1;
450 	}
451 
452 	/* Move dirty while page refs frozen and newpage not yet exposed */
453 	dirty = folio_test_dirty(folio);
454 	if (dirty) {
455 		folio_clear_dirty(folio);
456 		folio_set_dirty(newfolio);
457 	}
458 
459 	/* Swap cache still stores N entries instead of a high-order entry */
460 	for (i = 0; i < entries; i++) {
461 		xas_store(&xas, newfolio);
462 		xas_next(&xas);
463 	}
464 
465 	/*
466 	 * Drop the cache references from the old folio by unfreezing
467 	 * to 'nr' fewer references (one per base page now held by the new folio).
468 	 * We know this isn't the last reference.
469 	 */
470 	folio_ref_unfreeze(folio, expected_count - nr);
471 
472 	xas_unlock(&xas);
473 	/* Leave irq disabled to prevent preemption while updating stats */
474 
475 	/*
476 	 * If moved to a different zone then also account
477 	 * the page for that zone. Other VM counters will be
478 	 * taken care of when we establish references to the
479 	 * new page and drop references to the old page.
480 	 *
481 	 * Note that anonymous pages are accounted for
482 	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
483 	 * are mapped to swap space.
484 	 */
485 	if (newzone != oldzone) {
486 		struct lruvec *old_lruvec, *new_lruvec;
487 		struct mem_cgroup *memcg;
488 
489 		memcg = folio_memcg(folio);
490 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
491 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
492 
493 		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
494 		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
495 		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
496 			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
497 			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
498 
499 			if (folio_test_pmd_mappable(folio)) {
500 				__mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
501 				__mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
502 			}
503 		}
504 #ifdef CONFIG_SWAP
505 		if (folio_test_swapcache(folio)) {
506 			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
507 			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
508 		}
509 #endif
510 		if (dirty && mapping_can_writeback(mapping)) {
511 			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
512 			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
513 			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
514 			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
515 		}
516 	}
517 	local_irq_enable();
518 
519 	return MIGRATEPAGE_SUCCESS;
520 }
521 EXPORT_SYMBOL(folio_migrate_mapping);
522 
523 /*
524  * The expected number of remaining references is the same as that
525  * of folio_migrate_mapping().
526  */
527 int migrate_huge_page_move_mapping(struct address_space *mapping,
528 				   struct folio *dst, struct folio *src)
529 {
530 	XA_STATE(xas, &mapping->i_pages, folio_index(src));
531 	int expected_count;
532 
533 	xas_lock_irq(&xas);
534 	expected_count = folio_expected_refs(mapping, src);
535 	if (!folio_ref_freeze(src, expected_count)) {
536 		xas_unlock_irq(&xas);
537 		return -EAGAIN;
538 	}
539 
540 	dst->index = src->index;
541 	dst->mapping = src->mapping;
542 
543 	folio_ref_add(dst, folio_nr_pages(dst));
544 
545 	xas_store(&xas, dst);
546 
547 	folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));
548 
549 	xas_unlock_irq(&xas);
550 
551 	return MIGRATEPAGE_SUCCESS;
552 }
553 
554 /*
555  * Copy the flags and some other ancillary information
556  */
557 void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
558 {
559 	int cpupid;
560 
561 	if (folio_test_error(folio))
562 		folio_set_error(newfolio);
563 	if (folio_test_referenced(folio))
564 		folio_set_referenced(newfolio);
565 	if (folio_test_uptodate(folio))
566 		folio_mark_uptodate(newfolio);
567 	if (folio_test_clear_active(folio)) {
568 		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
569 		folio_set_active(newfolio);
570 	} else if (folio_test_clear_unevictable(folio))
571 		folio_set_unevictable(newfolio);
572 	if (folio_test_workingset(folio))
573 		folio_set_workingset(newfolio);
574 	if (folio_test_checked(folio))
575 		folio_set_checked(newfolio);
576 	/*
577 	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
578 	 * migration entries. We can still have PG_anon_exclusive set on an
579 	 * effectively unmapped and unreferenced first sub-page of an
580 	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
581 	 */
582 	if (folio_test_mappedtodisk(folio))
583 		folio_set_mappedtodisk(newfolio);
584 
585 	/* Move dirty on pages not done by folio_migrate_mapping() */
586 	if (folio_test_dirty(folio))
587 		folio_set_dirty(newfolio);
588 
589 	if (folio_test_young(folio))
590 		folio_set_young(newfolio);
591 	if (folio_test_idle(folio))
592 		folio_set_idle(newfolio);
593 
594 	/*
595 	 * Copy NUMA information to the new page, to prevent over-eager
596 	 * future migrations of this same page.
597 	 */
598 	cpupid = folio_xchg_last_cpupid(folio, -1);
599 	/*
600 	 * For memory tiering mode, when migrating between slow and fast
601 	 * memory nodes, reset cpupid, because it is used to record the
602 	 * page access time on the slow memory node.
603 	 */
604 	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
605 		bool f_toptier = node_is_toptier(folio_nid(folio));
606 		bool t_toptier = node_is_toptier(folio_nid(newfolio));
607 
608 		if (f_toptier != t_toptier)
609 			cpupid = -1;
610 	}
611 	folio_xchg_last_cpupid(newfolio, cpupid);
612 
613 	folio_migrate_ksm(newfolio, folio);
614 	/*
615 	 * Please do not reorder this without considering how mm/ksm.c's
616 	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
617 	 */
618 	if (folio_test_swapcache(folio))
619 		folio_clear_swapcache(folio);
620 	folio_clear_private(folio);
621 
622 	/* page->private contains hugetlb specific flags */
623 	if (!folio_test_hugetlb(folio))
624 		folio->private = NULL;
625 
626 	/*
627 	 * If any waiters have accumulated on the new page then
628 	 * wake them up.
629 	 */
630 	if (folio_test_writeback(newfolio))
631 		folio_end_writeback(newfolio);
632 
633 	/*
634 	 * PG_readahead shares the same bit with PG_reclaim.  The above
635 	 * folio_end_writeback() may clear PG_readahead mistakenly, so set the
636 	 * bit after that.
637 	 */
638 	if (folio_test_readahead(folio))
639 		folio_set_readahead(newfolio);
640 
641 	folio_copy_owner(newfolio, folio);
642 
643 	mem_cgroup_migrate(folio, newfolio);
644 }
645 EXPORT_SYMBOL(folio_migrate_flags);
646 
647 void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
648 {
649 	folio_copy(newfolio, folio);
650 	folio_migrate_flags(newfolio, folio);
651 }
652 EXPORT_SYMBOL(folio_migrate_copy);
653 
654 /************************************************************
655  *                    Migration functions
656  ***********************************************************/
657 
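/*
 * Core of migrate_folio(): move @src to @dst in @mapping, tolerating
 * @extra_count additional references on @src beyond the usual expected
 * count (see folio_expected_refs()).
 */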
658 int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
659 		struct folio *src, enum migrate_mode mode, int extra_count)
660 {
661 	int rc;
662 
663 	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */
664 
665 	rc = folio_migrate_mapping(mapping, dst, src, extra_count);
666 
667 	if (rc != MIGRATEPAGE_SUCCESS)
668 		return rc;
669 
670 	if (mode != MIGRATE_SYNC_NO_COPY)
671 		folio_migrate_copy(dst, src);
672 	else
673 		folio_migrate_flags(dst, src);
674 	return MIGRATEPAGE_SUCCESS;
675 }
676 
677 /**
678  * migrate_folio() - Simple folio migration.
679  * @mapping: The address_space containing the folio.
680  * @dst: The folio to migrate the data to.
681  * @src: The folio containing the current data.
682  * @mode: How to migrate the page.
683  *
684  * Common logic to directly migrate a single LRU folio suitable for
685  * folios that do not use PagePrivate/PagePrivate2.
686  *
687  * Folios are locked upon entry and exit.
688  */
689 int migrate_folio(struct address_space *mapping, struct folio *dst,
690 		struct folio *src, enum migrate_mode mode)
691 {
692 	return migrate_folio_extra(mapping, dst, src, mode, 0);
693 }
694 EXPORT_SYMBOL(migrate_folio);
695 
696 #ifdef CONFIG_BUFFER_HEAD
697 /* Returns true if all buffers are successfully locked */
698 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
699 							enum migrate_mode mode)
700 {
701 	struct buffer_head *bh = head;
702 	struct buffer_head *failed_bh;
703 
704 	do {
705 		if (!trylock_buffer(bh)) {
706 			if (mode == MIGRATE_ASYNC)
707 				goto unlock;
708 			if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
709 				goto unlock;
710 			lock_buffer(bh);
711 		}
712 
713 		bh = bh->b_this_page;
714 	} while (bh != head);
715 
716 	return true;
717 
718 unlock:
719 	/* We failed to lock the buffer and cannot stall. */
720 	failed_bh = bh;
721 	bh = head;
722 	while (bh != failed_bh) {
723 		unlock_buffer(bh);
724 		bh = bh->b_this_page;
725 	}
726 
727 	return false;
728 }
729 
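/*
 * Common implementation of buffer_migrate_folio() and
 * buffer_migrate_folio_norefs().  When @check_refs is true, additionally
 * verify under mapping->private_lock that no buffer head has an elevated
 * b_count before moving the folio.
 */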
730 static int __buffer_migrate_folio(struct address_space *mapping,
731 		struct folio *dst, struct folio *src, enum migrate_mode mode,
732 		bool check_refs)
733 {
734 	struct buffer_head *bh, *head;
735 	int rc;
736 	int expected_count;
737 
738 	head = folio_buffers(src);
739 	if (!head)
740 		return migrate_folio(mapping, dst, src, mode);
741 
742 	/* Check that the page does not have extra refs before we do more work */
743 	expected_count = folio_expected_refs(mapping, src);
744 	if (folio_ref_count(src) != expected_count)
745 		return -EAGAIN;
746 
747 	if (!buffer_migrate_lock_buffers(head, mode))
748 		return -EAGAIN;
749 
750 	if (check_refs) {
751 		bool busy;
752 		bool invalidated = false;
753 
754 recheck_buffers:
755 		busy = false;
756 		spin_lock(&mapping->private_lock);
757 		bh = head;
758 		do {
759 			if (atomic_read(&bh->b_count)) {
760 				busy = true;
761 				break;
762 			}
763 			bh = bh->b_this_page;
764 		} while (bh != head);
765 		if (busy) {
766 			if (invalidated) {
767 				rc = -EAGAIN;
768 				goto unlock_buffers;
769 			}
770 			spin_unlock(&mapping->private_lock);
771 			invalidate_bh_lrus();
772 			invalidated = true;
773 			goto recheck_buffers;
774 		}
775 	}
776 
777 	rc = folio_migrate_mapping(mapping, dst, src, 0);
778 	if (rc != MIGRATEPAGE_SUCCESS)
779 		goto unlock_buffers;
780 
781 	folio_attach_private(dst, folio_detach_private(src));
782 
783 	bh = head;
784 	do {
785 		folio_set_bh(bh, dst, bh_offset(bh));
786 		bh = bh->b_this_page;
787 	} while (bh != head);
788 
789 	if (mode != MIGRATE_SYNC_NO_COPY)
790 		folio_migrate_copy(dst, src);
791 	else
792 		folio_migrate_flags(dst, src);
793 
794 	rc = MIGRATEPAGE_SUCCESS;
795 unlock_buffers:
796 	if (check_refs)
797 		spin_unlock(&mapping->private_lock);
798 	bh = head;
799 	do {
800 		unlock_buffer(bh);
801 		bh = bh->b_this_page;
802 	} while (bh != head);
803 
804 	return rc;
805 }
806 
807 /**
808  * buffer_migrate_folio() - Migration function for folios with buffers.
809  * @mapping: The address space containing @src.
810  * @dst: The folio to migrate to.
811  * @src: The folio to migrate from.
812  * @mode: How to migrate the folio.
813  *
814  * This function can only be used if the underlying filesystem guarantees
815  * that no other references to @src exist. For example attached buffer
816  * heads are accessed only under the folio lock.  If your filesystem cannot
817  * provide this guarantee, buffer_migrate_folio_norefs() may be more
818  * appropriate.
819  *
820  * Return: 0 on success or a negative errno on failure.
821  */
822 int buffer_migrate_folio(struct address_space *mapping,
823 		struct folio *dst, struct folio *src, enum migrate_mode mode)
824 {
825 	return __buffer_migrate_folio(mapping, dst, src, mode, false);
826 }
827 EXPORT_SYMBOL(buffer_migrate_folio);
828 
829 /**
830  * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
831  * @mapping: The address space containing @src.
832  * @dst: The folio to migrate to.
833  * @src: The folio to migrate from.
834  * @mode: How to migrate the folio.
835  *
836  * Like buffer_migrate_folio() except that this variant is more careful
837  * and checks that there are also no buffer head references. This function
838  * is the right one for mappings where buffer heads are directly looked
839  * up and referenced (such as block device mappings).
840  *
841  * Return: 0 on success or a negative errno on failure.
842  */
843 int buffer_migrate_folio_norefs(struct address_space *mapping,
844 		struct folio *dst, struct folio *src, enum migrate_mode mode)
845 {
846 	return __buffer_migrate_folio(mapping, dst, src, mode, true);
847 }
848 EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
849 #endif /* CONFIG_BUFFER_HEAD */
850 
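/**
 * filemap_migrate_folio() - Migration function for folios with private data.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like migrate_folio(), but any private data attached to @src is moved to
 * @dst with folio_attach_private()/folio_detach_private().
 *
 * Return: 0 on success or a negative errno on failure.
 */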
851 int filemap_migrate_folio(struct address_space *mapping,
852 		struct folio *dst, struct folio *src, enum migrate_mode mode)
853 {
854 	int ret;
855 
856 	ret = folio_migrate_mapping(mapping, dst, src, 0);
857 	if (ret != MIGRATEPAGE_SUCCESS)
858 		return ret;
859 
860 	if (folio_get_private(src))
861 		folio_attach_private(dst, folio_detach_private(src));
862 
863 	if (mode != MIGRATE_SYNC_NO_COPY)
864 		folio_migrate_copy(dst, src);
865 	else
866 		folio_migrate_flags(dst, src);
867 	return MIGRATEPAGE_SUCCESS;
868 }
869 EXPORT_SYMBOL_GPL(filemap_migrate_folio);
870 
871 /*
872  * Writeback a folio to clean the dirty state
873  */
874 static int writeout(struct address_space *mapping, struct folio *folio)
875 {
876 	struct writeback_control wbc = {
877 		.sync_mode = WB_SYNC_NONE,
878 		.nr_to_write = 1,
879 		.range_start = 0,
880 		.range_end = LLONG_MAX,
881 		.for_reclaim = 1
882 	};
883 	int rc;
884 
885 	if (!mapping->a_ops->writepage)
886 		/* No write method for the address space */
887 		return -EINVAL;
888 
889 	if (!folio_clear_dirty_for_io(folio))
890 		/* Someone else already triggered a write */
891 		return -EAGAIN;
892 
893 	/*
894 	 * A dirty folio may imply that the underlying filesystem has
895 	 * the folio on some queue. So the folio must be clean for
896 	 * migration. Writeout may mean we lose the lock and the
897 	 * folio state is no longer what we checked for earlier.
898 	 * At this point we know that the migration attempt cannot
899 	 * be successful.
900 	 */
901 	remove_migration_ptes(folio, folio, false);
902 
903 	rc = mapping->a_ops->writepage(&folio->page, &wbc);
904 
905 	if (rc != AOP_WRITEPAGE_ACTIVATE)
906 		/* unlocked. Relock */
907 		folio_lock(folio);
908 
909 	return (rc < 0) ? -EIO : -EAGAIN;
910 }
911 
912 /*
913  * Default handling if a filesystem does not provide a migration function.
914  */
915 static int fallback_migrate_folio(struct address_space *mapping,
916 		struct folio *dst, struct folio *src, enum migrate_mode mode)
917 {
918 	if (folio_test_dirty(src)) {
919 		/* Only writeback folios in full synchronous migration */
920 		switch (mode) {
921 		case MIGRATE_SYNC:
922 		case MIGRATE_SYNC_NO_COPY:
923 			break;
924 		default:
925 			return -EBUSY;
926 		}
927 		return writeout(mapping, src);
928 	}
929 
930 	/*
931 	 * Buffers may be managed in a filesystem specific way.
932 	 * We must have no buffers or drop them.
933 	 */
934 	if (!filemap_release_folio(src, GFP_KERNEL))
935 		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
936 
937 	return migrate_folio(mapping, dst, src, mode);
938 }
939 
940 /*
941  * Move a page to a newly allocated page.
942  * The page is locked and all ptes have been successfully removed.
943  *
944  * The new page will have replaced the old page if this function
945  * is successful.
946  *
947  * Return value:
948  *   < 0 - error code
949  *  MIGRATEPAGE_SUCCESS - success
950  */
951 static int move_to_new_folio(struct folio *dst, struct folio *src,
952 				enum migrate_mode mode)
953 {
954 	int rc = -EAGAIN;
955 	bool is_lru = !__folio_test_movable(src);
956 
957 	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
958 	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
959 
960 	if (likely(is_lru)) {
961 		struct address_space *mapping = folio_mapping(src);
962 
963 		if (!mapping)
964 			rc = migrate_folio(mapping, dst, src, mode);
965 		else if (mapping->a_ops->migrate_folio)
966 			/*
967 			 * Most folios have a mapping and most filesystems
968 			 * provide a migrate_folio callback. Anonymous folios
969 			 * are part of swap space which also has its own
970 			 * migrate_folio callback. This is the most common path
971 			 * for page migration.
972 			 */
973 			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
974 								mode);
975 		else
976 			rc = fallback_migrate_folio(mapping, dst, src, mode);
977 	} else {
978 		const struct movable_operations *mops;
979 
980 		/*
981 		 * In the case of a non-LRU page, it could have been released after
982 		 * the isolation step. In that case, we shouldn't try migration.
983 		 */
984 		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
985 		if (!folio_test_movable(src)) {
986 			rc = MIGRATEPAGE_SUCCESS;
987 			folio_clear_isolated(src);
988 			goto out;
989 		}
990 
991 		mops = folio_movable_ops(src);
992 		rc = mops->migrate_page(&dst->page, &src->page, mode);
993 		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
994 				!folio_test_isolated(src));
995 	}
996 
997 	/*
998 	 * When successful, old pagecache src->mapping must be cleared before
999 	 * src is freed; but stats require that PageAnon be left as PageAnon.
1000 	 */
1001 	if (rc == MIGRATEPAGE_SUCCESS) {
1002 		if (__folio_test_movable(src)) {
1003 			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1004 
1005 			/*
1006 			 * We clear PG_movable under page_lock so any compactor
1007 			 * cannot try to migrate this page.
1008 			 */
1009 			folio_clear_isolated(src);
1010 		}
1011 
1012 		/*
1013 		 * Anonymous and movable src->mapping will be cleared by
1014 		 * free_pages_prepare(), so don't reset it here; this keeps
1015 		 * type checks such as PageAnon() working.
1016 		 */
1017 		if (!folio_mapping_flags(src))
1018 			src->mapping = NULL;
1019 
1020 		if (likely(!folio_is_zone_device(dst)))
1021 			flush_dcache_folio(dst);
1022 	}
1023 out:
1024 	return rc;
1025 }
1026 
1027 /*
1028  * To record some information during migration, we use the otherwise
1029  * unused mapping and private fields of the newly allocated
1030  * destination folio.  This is safe because nobody is using them
1031  * except us.
1032  */
1033 union migration_ptr {
1034 	struct anon_vma *anon_vma;
1035 	struct address_space *mapping;
1036 };
1037 
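/* Flags stashed in dst->private while the source folio is unmapped. */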
1038 enum {
1039 	PAGE_WAS_MAPPED = BIT(0),
1040 	PAGE_WAS_MLOCKED = BIT(1),
1041 };
1042 
1043 static void __migrate_folio_record(struct folio *dst,
1044 				   unsigned long old_page_state,
1045 				   struct anon_vma *anon_vma)
1046 {
1047 	union migration_ptr ptr = { .anon_vma = anon_vma };
1048 	dst->mapping = ptr.mapping;
1049 	dst->private = (void *)old_page_state;
1050 }
1051 
1052 static void __migrate_folio_extract(struct folio *dst,
1053 				   int *old_page_state,
1054 				   struct anon_vma **anon_vmap)
1055 {
1056 	union migration_ptr ptr = { .mapping = dst->mapping };
1057 	*anon_vmap = ptr.anon_vma;
1058 	*old_page_state = (unsigned long)dst->private;
1059 	dst->mapping = NULL;
1060 	dst->private = NULL;
1061 }
1062 
1063 /* Restore the source folio to the original state upon failure */
1064 static void migrate_folio_undo_src(struct folio *src,
1065 				   int page_was_mapped,
1066 				   struct anon_vma *anon_vma,
1067 				   bool locked,
1068 				   struct list_head *ret)
1069 {
1070 	if (page_was_mapped)
1071 		remove_migration_ptes(src, src, false);
1072 	/* Drop an anon_vma reference if we took one */
1073 	if (anon_vma)
1074 		put_anon_vma(anon_vma);
1075 	if (locked)
1076 		folio_unlock(src);
1077 	if (ret)
1078 		list_move_tail(&src->lru, ret);
1079 }
1080 
1081 /* Restore the destination folio to the original state upon failure */
1082 static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1083 		free_folio_t put_new_folio, unsigned long private)
1084 {
1085 	if (locked)
1086 		folio_unlock(dst);
1087 	if (put_new_folio)
1088 		put_new_folio(dst, private);
1089 	else
1090 		folio_put(dst);
1091 }
1092 
1093 /* Cleanup src folio upon migration success */
1094 static void migrate_folio_done(struct folio *src,
1095 			       enum migrate_reason reason)
1096 {
1097 	/*
1098 	 * Compaction can also migrate non-LRU pages, which are
1099 	 * not accounted to NR_ISOLATED_*. They can be recognized
1100 	 * via __folio_test_movable().
1101 	 */
1102 	if (likely(!__folio_test_movable(src)))
1103 		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1104 				    folio_is_file_lru(src), -folio_nr_pages(src));
1105 
1106 	if (reason != MR_MEMORY_FAILURE)
1107 		/* For MR_MEMORY_FAILURE, the page is released in page_handle_poison(). */
1108 		folio_put(src);
1109 }
1110 
1111 /* Obtain the lock on page, remove all ptes. */
1112 static int migrate_folio_unmap(new_folio_t get_new_folio,
1113 		free_folio_t put_new_folio, unsigned long private,
1114 		struct folio *src, struct folio **dstp, enum migrate_mode mode,
1115 		enum migrate_reason reason, struct list_head *ret)
1116 {
1117 	struct folio *dst;
1118 	int rc = -EAGAIN;
1119 	int old_page_state = 0;
1120 	struct anon_vma *anon_vma = NULL;
1121 	bool is_lru = !__folio_test_movable(src);
1122 	bool locked = false;
1123 	bool dst_locked = false;
1124 
1125 	if (folio_ref_count(src) == 1) {
1126 		/* Folio was freed from under us. So we are done. */
1127 		folio_clear_active(src);
1128 		folio_clear_unevictable(src);
1129 		/* free_pages_prepare() will clear PG_isolated. */
1130 		list_del(&src->lru);
1131 		migrate_folio_done(src, reason);
1132 		return MIGRATEPAGE_SUCCESS;
1133 	}
1134 
1135 	dst = get_new_folio(src, private);
1136 	if (!dst)
1137 		return -ENOMEM;
1138 	*dstp = dst;
1139 
1140 	dst->private = NULL;
1141 
1142 	if (!folio_trylock(src)) {
1143 		if (mode == MIGRATE_ASYNC)
1144 			goto out;
1145 
1146 		/*
1147 		 * It's not safe for direct compaction to call lock_page.
1148 		 * For example, during page readahead pages are added locked
1149 		 * to the LRU. Later, when the IO completes the pages are
1150 		 * marked uptodate and unlocked. However, the queueing
1151 		 * could be merging multiple pages for one bio (e.g.
1152 		 * mpage_readahead). If an allocation happens for the
1153 		 * second or third page, the process can end up locking
1154 		 * the same page twice and deadlocking. Rather than
1155 		 * trying to be clever about what pages can be locked,
1156 		 * avoid the use of lock_page for direct compaction
1157 		 * altogether.
1158 		 */
1159 		if (current->flags & PF_MEMALLOC)
1160 			goto out;
1161 
1162 		/*
1163 		 * In "light" mode, we can wait for transient locks (e.g.
1164 		 * inserting a page into the page table), but it's not
1165 		 * worth waiting for I/O.
1166 		 */
1167 		if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1168 			goto out;
1169 
1170 		folio_lock(src);
1171 	}
1172 	locked = true;
1173 	if (folio_test_mlocked(src))
1174 		old_page_state |= PAGE_WAS_MLOCKED;
1175 
1176 	if (folio_test_writeback(src)) {
1177 		/*
1178 		 * Only in the case of a full synchronous migration is it
1179 		 * necessary to wait for PageWriteback. In the async case,
1180 		 * the retry loop is too short and in the sync-light case,
1181 		 * the overhead of stalling is too much.
1182 		 */
1183 		switch (mode) {
1184 		case MIGRATE_SYNC:
1185 		case MIGRATE_SYNC_NO_COPY:
1186 			break;
1187 		default:
1188 			rc = -EBUSY;
1189 			goto out;
1190 		}
1191 		folio_wait_writeback(src);
1192 	}
1193 
1194 	/*
1195 	 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
1196 	 * we cannot notice that the anon_vma was freed while we migrate the page.
1197 	 * This get_anon_vma() delays freeing the anon_vma pointer until the end
1198 	 * of migration. File cache pages are no problem because of page_lock():
1199 	 * file caches may use writepage() or lock_page() during migration, so
1200 	 * only the anon page needs this care here.
1201 	 *
1202 	 * Only folio_get_anon_vma() understands the subtleties of
1203 	 * getting a hold on an anon_vma from outside one of its mms.
1204 	 * But if we cannot get anon_vma, then we won't need it anyway,
1205 	 * because that implies that the anon page is no longer mapped
1206 	 * (and cannot be remapped so long as we hold the page lock).
1207 	 */
1208 	if (folio_test_anon(src) && !folio_test_ksm(src))
1209 		anon_vma = folio_get_anon_vma(src);
1210 
1211 	/*
1212 	 * Block others from accessing the new page when we get around to
1213 	 * establishing additional references. We are usually the only one
1214 	 * holding a reference to dst at this point. We used to have a BUG
1215 	 * here if folio_trylock(dst) fails, but would like to allow for
1216 	 * cases where there might be a race with the previous use of dst.
1217 	 * This is much like races on refcount of oldpage: just don't BUG().
1218 	 */
1219 	if (unlikely(!folio_trylock(dst)))
1220 		goto out;
1221 	dst_locked = true;
1222 
1223 	if (unlikely(!is_lru)) {
1224 		__migrate_folio_record(dst, old_page_state, anon_vma);
1225 		return MIGRATEPAGE_UNMAP;
1226 	}
1227 
1228 	/*
1229 	 * Corner case handling:
1230 	 * 1. When a new swap-cache page is read in, it is added to the LRU
1231 	 * and treated as swapcache but it has no rmap yet.
1232 	 * Calling try_to_unmap() against a src->mapping==NULL page will
1233 	 * trigger a BUG.  So handle it here.
1234 	 * 2. An orphaned page (see truncate_cleanup_page) might have
1235 	 * fs-private metadata. The page can be picked up due to memory
1236 	 * offlining.  Everywhere else except page reclaim, the page is
1237 	 * invisible to the vm, so the page can not be migrated.  So try to
1238 	 * free the metadata, so the page can be freed.
1239 	 */
1240 	if (!src->mapping) {
1241 		if (folio_test_private(src)) {
1242 			try_to_free_buffers(src);
1243 			goto out;
1244 		}
1245 	} else if (folio_mapped(src)) {
1246 		/* Establish migration ptes */
1247 		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1248 			       !folio_test_ksm(src) && !anon_vma, src);
1249 		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1250 		old_page_state |= PAGE_WAS_MAPPED;
1251 	}
1252 
1253 	if (!folio_mapped(src)) {
1254 		__migrate_folio_record(dst, old_page_state, anon_vma);
1255 		return MIGRATEPAGE_UNMAP;
1256 	}
1257 
1258 out:
1259 	/*
1260 	 * A folio that has not been unmapped will be restored to
1261 	 * the right list unless we want to retry.
1262 	 */
1263 	if (rc == -EAGAIN)
1264 		ret = NULL;
1265 
1266 	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1267 			       anon_vma, locked, ret);
1268 	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1269 
1270 	return rc;
1271 }
1272 
1273 /* Migrate the folio to the newly allocated folio in dst. */
1274 static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1275 			      struct folio *src, struct folio *dst,
1276 			      enum migrate_mode mode, enum migrate_reason reason,
1277 			      struct list_head *ret)
1278 {
1279 	int rc;
1280 	int old_page_state = 0;
1281 	struct anon_vma *anon_vma = NULL;
1282 	bool is_lru = !__folio_test_movable(src);
1283 	struct list_head *prev;
1284 
1285 	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1286 	prev = dst->lru.prev;
1287 	list_del(&dst->lru);
1288 
1289 	rc = move_to_new_folio(dst, src, mode);
1290 	if (rc)
1291 		goto out;
1292 
1293 	if (unlikely(!is_lru))
1294 		goto out_unlock_both;
1295 
1296 	/*
1297 	 * When successful, push dst to LRU immediately: so that if it
1298 	 * turns out to be an mlocked page, remove_migration_ptes() will
1299 	 * automatically build up the correct dst->mlock_count for it.
1300 	 *
1301 	 * We would like to do something similar for the old page, when
1302 	 * unsuccessful, and other cases when a page has been temporarily
1303 	 * isolated from the unevictable LRU: but this case is the easiest.
1304 	 */
1305 	folio_add_lru(dst);
1306 	if (old_page_state & PAGE_WAS_MLOCKED)
1307 		lru_add_drain();
1308 
1309 	if (old_page_state & PAGE_WAS_MAPPED)
1310 		remove_migration_ptes(src, dst, false);
1311 
1312 out_unlock_both:
1313 	folio_unlock(dst);
1314 	set_page_owner_migrate_reason(&dst->page, reason);
1315 	/*
1316 	 * If migration is successful, drop the refcount of dst;
1317 	 * this will not free the page because the new page owner has
1318 	 * taken an extra reference.
1319 	 */
1320 	folio_put(dst);
1321 
1322 	/*
1323 	 * A folio that has been migrated has all references removed
1324 	 * and will be freed.
1325 	 */
1326 	list_del(&src->lru);
1327 	/* Drop an anon_vma reference if we took one */
1328 	if (anon_vma)
1329 		put_anon_vma(anon_vma);
1330 	folio_unlock(src);
1331 	migrate_folio_done(src, reason);
1332 
1333 	return rc;
1334 out:
1335 	/*
1336 	 * A folio that has not been migrated will be restored to
1337 	 * the right list unless we want to retry.
1338 	 */
1339 	if (rc == -EAGAIN) {
1340 		list_add(&dst->lru, prev);
1341 		__migrate_folio_record(dst, old_page_state, anon_vma);
1342 		return rc;
1343 	}
1344 
1345 	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1346 			       anon_vma, true, ret);
1347 	migrate_folio_undo_dst(dst, true, put_new_folio, private);
1348 
1349 	return rc;
1350 }
1351 
1352 /*
1353  * Counterpart of unmap_and_move_page() for hugepage migration.
1354  *
1355  * This function doesn't wait for the completion of hugepage I/O
1356  * because there is no race between I/O and migration for hugepages.
1357  * Note that currently hugepage I/O occurs only in direct I/O
1358  * where no lock is held and PG_writeback is irrelevant,
1359  * and the writeback status of all subpages is counted in the reference
1360  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1361  * under direct I/O, the reference count of the head page is 512 and a bit more.)
1362  * This means that when we try to migrate a hugepage whose subpages are
1363  * doing direct I/O, some references remain after try_to_unmap() and
1364  * hugepage migration fails without data corruption.
1365  *
1366  * There is also no race when direct I/O is issued on a page under migration,
1367  * because then the pte is replaced with a migration swap entry and direct I/O
1368  * code will wait in the page fault for migration to complete.
1369  */
1370 static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1371 		free_folio_t put_new_folio, unsigned long private,
1372 		struct folio *src, int force, enum migrate_mode mode,
1373 		int reason, struct list_head *ret)
1374 {
1375 	struct folio *dst;
1376 	int rc = -EAGAIN;
1377 	int page_was_mapped = 0;
1378 	struct anon_vma *anon_vma = NULL;
1379 	struct address_space *mapping = NULL;
1380 
1381 	if (folio_ref_count(src) == 1) {
1382 		/* page was freed from under us. So we are done. */
1383 		folio_putback_active_hugetlb(src);
1384 		return MIGRATEPAGE_SUCCESS;
1385 	}
1386 
1387 	dst = get_new_folio(src, private);
1388 	if (!dst)
1389 		return -ENOMEM;
1390 
1391 	if (!folio_trylock(src)) {
1392 		if (!force)
1393 			goto out;
1394 		switch (mode) {
1395 		case MIGRATE_SYNC:
1396 		case MIGRATE_SYNC_NO_COPY:
1397 			break;
1398 		default:
1399 			goto out;
1400 		}
1401 		folio_lock(src);
1402 	}
1403 
1404 	/*
1405 	 * Check for pages which are in the process of being freed.  Without
1406 	 * folio_mapping() set, the hugetlbfs-specific move page routine will not
1407 	 * be called and we could leak usage counts for subpools.
1408 	 */
1409 	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1410 		rc = -EBUSY;
1411 		goto out_unlock;
1412 	}
1413 
1414 	if (folio_test_anon(src))
1415 		anon_vma = folio_get_anon_vma(src);
1416 
1417 	if (unlikely(!folio_trylock(dst)))
1418 		goto put_anon;
1419 
1420 	if (folio_mapped(src)) {
1421 		enum ttu_flags ttu = 0;
1422 
1423 		if (!folio_test_anon(src)) {
1424 			/*
1425 			 * In shared mappings, try_to_unmap could potentially
1426 			 * call huge_pmd_unshare.  Because of this, take the
1427 			 * i_mmap semaphore in write mode here and set TTU_RMAP_LOCKED
1428 			 * to let lower levels know we have taken the lock.
1429 			 */
1430 			mapping = hugetlb_page_mapping_lock_write(&src->page);
1431 			if (unlikely(!mapping))
1432 				goto unlock_put_anon;
1433 
1434 			ttu = TTU_RMAP_LOCKED;
1435 		}
1436 
1437 		try_to_migrate(src, ttu);
1438 		page_was_mapped = 1;
1439 
1440 		if (ttu & TTU_RMAP_LOCKED)
1441 			i_mmap_unlock_write(mapping);
1442 	}
1443 
1444 	if (!folio_mapped(src))
1445 		rc = move_to_new_folio(dst, src, mode);
1446 
1447 	if (page_was_mapped)
1448 		remove_migration_ptes(src,
1449 			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1450 
1451 unlock_put_anon:
1452 	folio_unlock(dst);
1453 
1454 put_anon:
1455 	if (anon_vma)
1456 		put_anon_vma(anon_vma);
1457 
1458 	if (rc == MIGRATEPAGE_SUCCESS) {
1459 		move_hugetlb_state(src, dst, reason);
1460 		put_new_folio = NULL;
1461 	}
1462 
1463 out_unlock:
1464 	folio_unlock(src);
1465 out:
1466 	if (rc == MIGRATEPAGE_SUCCESS)
1467 		folio_putback_active_hugetlb(src);
1468 	else if (rc != -EAGAIN)
1469 		list_move_tail(&src->lru, ret);
1470 
1471 	/*
1472 	 * If migration was not successful and there's a freeing callback, use
1473 	 * it.  Otherwise, put_page() will drop the reference grabbed during
1474 	 * isolation.
1475 	 */
1476 	if (put_new_folio)
1477 		put_new_folio(dst, private);
1478 	else
1479 		folio_putback_active_hugetlb(dst);
1480 
1481 	return rc;
1482 }
1483 
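/*
 * Split a large folio under its lock; on success the resulting folios
 * (including @folio itself) end up on @split_folios.
 */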
1484 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
1485 {
1486 	int rc;
1487 
1488 	folio_lock(folio);
1489 	rc = split_folio_to_list(folio, split_folios);
1490 	folio_unlock(folio);
1491 	if (!rc)
1492 		list_move_tail(&folio->lru, split_folios);
1493 
1494 	return rc;
1495 }
1496 
1497 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1498 #define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
1499 #else
1500 #define NR_MAX_BATCHED_MIGRATION	512
1501 #endif
1502 #define NR_MAX_MIGRATE_PAGES_RETRY	10
1503 #define NR_MAX_MIGRATE_ASYNC_RETRY	3
1504 #define NR_MAX_MIGRATE_SYNC_RETRY					\
1505 	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
1506 
1507 struct migrate_pages_stats {
1508 	int nr_succeeded;	/* Normal and large folios migrated successfully, in
1509 				   units of base pages */
1510 	int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
1511 				   units of base pages.  Untried folios aren't counted */
1512 	int nr_thp_succeeded;	/* THP migrated successfully */
1513 	int nr_thp_failed;	/* THP failed to be migrated */
1514 	int nr_thp_split;	/* THP split before migrating */
1515 	int nr_split;	/* Large folio (include THP) split before migrating */
1516 };
1517 
1518 /*
1519  * Returns the number of hugetlb folios that were not migrated, or an error code
1520  * after NR_MAX_MIGRATE_PAGES_RETRY attempts, or once no hugetlb folios are
1521  * movable any more because the list has become empty or no retryable hugetlb
1522  * folios exist any more. It is the caller's responsibility to call
1523  * putback_movable_pages() only if ret != 0.
1524  */
1525 static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1526 			    free_folio_t put_new_folio, unsigned long private,
1527 			    enum migrate_mode mode, int reason,
1528 			    struct migrate_pages_stats *stats,
1529 			    struct list_head *ret_folios)
1530 {
1531 	int retry = 1;
1532 	int nr_failed = 0;
1533 	int nr_retry_pages = 0;
1534 	int pass = 0;
1535 	struct folio *folio, *folio2;
1536 	int rc, nr_pages;
1537 
1538 	for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1539 		retry = 0;
1540 		nr_retry_pages = 0;
1541 
1542 		list_for_each_entry_safe(folio, folio2, from, lru) {
1543 			if (!folio_test_hugetlb(folio))
1544 				continue;
1545 
1546 			nr_pages = folio_nr_pages(folio);
1547 
1548 			cond_resched();
1549 
1550 			/*
1551 			 * Migratability of hugepages depends on the architecture and
1552 			 * the hugepage size.  This check is necessary because some callers
1553 			 * of hugepage migration like soft offline and memory
1554 			 * hotremove don't walk through page tables or check whether
1555 			 * the hugepage is pmd-based or not before kicking migration.
1556 			 */
1557 			if (!hugepage_migration_supported(folio_hstate(folio))) {
1558 				nr_failed++;
1559 				stats->nr_failed_pages += nr_pages;
1560 				list_move_tail(&folio->lru, ret_folios);
1561 				continue;
1562 			}
1563 
1564 			rc = unmap_and_move_huge_page(get_new_folio,
1565 						      put_new_folio, private,
1566 						      folio, pass > 2, mode,
1567 						      reason, ret_folios);
1568 			/*
1569 			 * The rules are:
1570 			 *	Success: hugetlb folio will be put back
1571 			 *	-EAGAIN: stay on the from list
1572 			 *	-ENOMEM: stay on the from list
1573 			 *	Other errno: put on ret_folios list
1574 			 */
1575 			switch(rc) {
1576 			case -ENOMEM:
1577 				/*
1578 				 * When memory is low, don't bother to try to migrate
1579 				 * other folios, just exit.
1580 				 */
1581 				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1582 				return -ENOMEM;
1583 			case -EAGAIN:
1584 				retry++;
1585 				nr_retry_pages += nr_pages;
1586 				break;
1587 			case MIGRATEPAGE_SUCCESS:
1588 				stats->nr_succeeded += nr_pages;
1589 				break;
1590 			default:
1591 				/*
1592 				 * Permanent failure (-EBUSY, etc.):
1593 				 * unlike -EAGAIN case, the failed folio is
1594 				 * removed from migration folio list and not
1595 				 * retried in the next outer loop.
1596 				 */
1597 				nr_failed++;
1598 				stats->nr_failed_pages += nr_pages;
1599 				break;
1600 			}
1601 		}
1602 	}
1603 	/*
1604 	 * nr_failed is the number of hugetlb folios that failed to be migrated.  After
1605 	 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1606 	 * folios as failed.
1607 	 */
1608 	nr_failed += retry;
1609 	stats->nr_failed_pages += nr_retry_pages;
1610 
1611 	return nr_failed;
1612 }
1613 
1614 /*
1615  * migrate_pages_batch() first unmaps as many folios in the from list as
1616  * possible, then moves the unmapped folios.
1617  *
1618  * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
1619  * lock or bit while we have locked more than one folio, which may cause a
1620  * deadlock (e.g., for the loop device).  So, if mode != MIGRATE_ASYNC, the
1621  * length of the from list must be <= 1.
1622  */
1623 static int migrate_pages_batch(struct list_head *from,
1624 		new_folio_t get_new_folio, free_folio_t put_new_folio,
1625 		unsigned long private, enum migrate_mode mode, int reason,
1626 		struct list_head *ret_folios, struct list_head *split_folios,
1627 		struct migrate_pages_stats *stats, int nr_pass)
1628 {
1629 	int retry = 1;
1630 	int thp_retry = 1;
1631 	int nr_failed = 0;
1632 	int nr_retry_pages = 0;
1633 	int pass = 0;
1634 	bool is_thp = false;
1635 	bool is_large = false;
1636 	struct folio *folio, *folio2, *dst = NULL, *dst2;
1637 	int rc, rc_saved = 0, nr_pages;
1638 	LIST_HEAD(unmap_folios);
1639 	LIST_HEAD(dst_folios);
1640 	bool nosplit = (reason == MR_NUMA_MISPLACED);
1641 
1642 	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1643 			!list_empty(from) && !list_is_singular(from));
1644 
1645 	for (pass = 0; pass < nr_pass && retry; pass++) {
1646 		retry = 0;
1647 		thp_retry = 0;
1648 		nr_retry_pages = 0;
1649 
1650 		list_for_each_entry_safe(folio, folio2, from, lru) {
1651 			is_large = folio_test_large(folio);
1652 			is_thp = is_large && folio_test_pmd_mappable(folio);
1653 			nr_pages = folio_nr_pages(folio);
1654 
1655 			cond_resched();
1656 
1657 			 * Large folio migration might be unsupported or
1658 			 * the allocation might fail, so we should retry
1659 			 * on the same folio with the large folio split
1660 			 * into normal folios.
1661 			 * to normal folios.
1662 			 *
1663 			 * Split folios are put in split_folios, and
1664 			 * we will migrate them after the rest of the
1665 			 * list is processed.
1666 			 */
1667 			if (!thp_migration_supported() && is_thp) {
1668 				nr_failed++;
1669 				stats->nr_thp_failed++;
1670 				if (!try_split_folio(folio, split_folios)) {
1671 					stats->nr_thp_split++;
1672 					stats->nr_split++;
1673 					continue;
1674 				}
1675 				stats->nr_failed_pages += nr_pages;
1676 				list_move_tail(&folio->lru, ret_folios);
1677 				continue;
1678 			}
1679 
1680 			rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1681 					private, folio, &dst, mode, reason,
1682 					ret_folios);
1683 			/*
1684 			 * The rules are:
1685 			 *	Success: folio will be freed
1686 			 *	Unmap: folio will be put on unmap_folios list,
1687 			 *	       dst folio put on dst_folios list
1688 			 *	-EAGAIN: stay on the from list
1689 			 *	-ENOMEM: stay on the from list
1690 			 *	Other errno: put on ret_folios list
1691 			 */
1692 			switch(rc) {
1693 			case -ENOMEM:
1694 				/*
1695 				 * When memory is low, don't bother to try to migrate
1696 				 * other folios, move unmapped folios, then exit.
1697 				 */
1698 				nr_failed++;
1699 				stats->nr_thp_failed += is_thp;
1700 				/* Large folio NUMA faulting doesn't split to retry. */
1701 				if (is_large && !nosplit) {
1702 					int ret = try_split_folio(folio, split_folios);
1703 
1704 					if (!ret) {
1705 						stats->nr_thp_split += is_thp;
1706 						stats->nr_split++;
1707 						break;
1708 					} else if (reason == MR_LONGTERM_PIN &&
1709 						   ret == -EAGAIN) {
1710 						 * Try again to split the large folio to
1711 						 * Try again to split large folio to
1712 						 * mitigate the failure of longterm pinning.
1713 						 */
1714 						retry++;
1715 						thp_retry += is_thp;
1716 						nr_retry_pages += nr_pages;
1717 						/* Undo duplicated failure counting. */
1718 						nr_failed--;
1719 						stats->nr_thp_failed -= is_thp;
1720 						break;
1721 					}
1722 				}
1723 
1724 				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1725 				/* nr_failed isn't updated here since it is not used afterwards */
1726 				stats->nr_thp_failed += thp_retry;
1727 				rc_saved = rc;
1728 				if (list_empty(&unmap_folios))
1729 					goto out;
1730 				else
1731 					goto move;
1732 			case -EAGAIN:
1733 				retry++;
1734 				thp_retry += is_thp;
1735 				nr_retry_pages += nr_pages;
1736 				break;
1737 			case MIGRATEPAGE_SUCCESS:
1738 				stats->nr_succeeded += nr_pages;
1739 				stats->nr_thp_succeeded += is_thp;
1740 				break;
1741 			case MIGRATEPAGE_UNMAP:
1742 				list_move_tail(&folio->lru, &unmap_folios);
1743 				list_add_tail(&dst->lru, &dst_folios);
1744 				break;
1745 			default:
1746 				/*
1747 				 * Permanent failure (-EBUSY, etc.):
1748 				 * unlike -EAGAIN case, the failed folio is
1749 				 * removed from migration folio list and not
1750 				 * retried in the next outer loop.
1751 				 */
1752 				nr_failed++;
1753 				stats->nr_thp_failed += is_thp;
1754 				stats->nr_failed_pages += nr_pages;
1755 				break;
1756 			}
1757 		}
1758 	}
1759 	nr_failed += retry;
1760 	stats->nr_thp_failed += thp_retry;
1761 	stats->nr_failed_pages += nr_retry_pages;
1762 move:
1763 	/* Flush TLBs for all unmapped folios */
1764 	try_to_unmap_flush();
1765 
1766 	retry = 1;
1767 	for (pass = 0; pass < nr_pass && retry; pass++) {
1768 		retry = 0;
1769 		thp_retry = 0;
1770 		nr_retry_pages = 0;
1771 
1772 		dst = list_first_entry(&dst_folios, struct folio, lru);
1773 		dst2 = list_next_entry(dst, lru);
1774 		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1775 			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1776 			nr_pages = folio_nr_pages(folio);
1777 
1778 			cond_resched();
1779 
1780 			rc = migrate_folio_move(put_new_folio, private,
1781 						folio, dst, mode,
1782 						reason, ret_folios);
1783 			/*
1784 			 * The rules are:
1785 			 *	Success: folio will be freed
1786 			 *	-EAGAIN: stay on the unmap_folios list
1787 			 *	Other errno: put on ret_folios list
1788 			 */
1789 			switch (rc) {
1790 			case -EAGAIN:
1791 				retry++;
1792 				thp_retry += is_thp;
1793 				nr_retry_pages += nr_pages;
1794 				break;
1795 			case MIGRATEPAGE_SUCCESS:
1796 				stats->nr_succeeded += nr_pages;
1797 				stats->nr_thp_succeeded += is_thp;
1798 				break;
1799 			default:
1800 				nr_failed++;
1801 				stats->nr_thp_failed += is_thp;
1802 				stats->nr_failed_pages += nr_pages;
1803 				break;
1804 			}
1805 			dst = dst2;
1806 			dst2 = list_next_entry(dst, lru);
1807 		}
1808 	}
1809 	nr_failed += retry;
1810 	stats->nr_thp_failed += thp_retry;
1811 	stats->nr_failed_pages += nr_retry_pages;
1812 
1813 	rc = rc_saved ? : nr_failed;
1814 out:
1815 	/* Cleanup remaining folios */
1816 	dst = list_first_entry(&dst_folios, struct folio, lru);
1817 	dst2 = list_next_entry(dst, lru);
1818 	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1819 		int old_page_state = 0;
1820 		struct anon_vma *anon_vma = NULL;
1821 
1822 		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1823 		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1824 				       anon_vma, true, ret_folios);
1825 		list_del(&dst->lru);
1826 		migrate_folio_undo_dst(dst, true, put_new_folio, private);
1827 		dst = dst2;
1828 		dst2 = list_next_entry(dst, lru);
1829 	}
1830 
1831 	return rc;
1832 }
1833 
1834 static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1835 		free_folio_t put_new_folio, unsigned long private,
1836 		enum migrate_mode mode, int reason,
1837 		struct list_head *ret_folios, struct list_head *split_folios,
1838 		struct migrate_pages_stats *stats)
1839 {
1840 	int rc, nr_failed = 0;
1841 	LIST_HEAD(folios);
1842 	struct migrate_pages_stats astats;
1843 
1844 	memset(&astats, 0, sizeof(astats));
1845 	/* Try to migrate the folios in batch with MIGRATE_ASYNC mode first */
1846 	rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
1847 				 reason, &folios, split_folios, &astats,
1848 				 NR_MAX_MIGRATE_ASYNC_RETRY);
1849 	stats->nr_succeeded += astats.nr_succeeded;
1850 	stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1851 	stats->nr_thp_split += astats.nr_thp_split;
1852 	stats->nr_split += astats.nr_split;
1853 	if (rc < 0) {
1854 		stats->nr_failed_pages += astats.nr_failed_pages;
1855 		stats->nr_thp_failed += astats.nr_thp_failed;
1856 		list_splice_tail(&folios, ret_folios);
1857 		return rc;
1858 	}
1859 	stats->nr_thp_failed += astats.nr_thp_split;
1860 	/*
1861 	 * Do not count rc, as pages will be retried below.
1862 	 * Count nr_split only, since it includes nr_thp_split.
1863 	 */
1864 	nr_failed += astats.nr_split;
1865 	/*
1866 	 * Fall back to migrating all failed folios one by one synchronously.
1867 	 * All failed folios except split THPs will be retried, so their
1868 	 * failure isn't counted.
1869 	 */
1870 	list_splice_tail_init(&folios, from);
1871 	while (!list_empty(from)) {
1872 		list_move(from->next, &folios);
1873 		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1874 					 private, mode, reason, ret_folios,
1875 					 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1876 		list_splice_tail_init(&folios, ret_folios);
1877 		if (rc < 0)
1878 			return rc;
1879 		nr_failed += rc;
1880 	}
1881 
1882 	return nr_failed;
1883 }
1884 
1885 /*
1886  * migrate_pages - migrate the folios specified in a list, to the free folios
1887  *		   supplied as the target for the page migration
1888  *
1889  * @from:		The list of folios to be migrated.
1890  * @get_new_folio:	The function used to allocate free folios to be used
1891  *			as the target of the folio migration.
1892  * @put_new_folio:	The function used to free target folios if migration
1893  *			fails, or NULL if no special handling is necessary.
1894  * @private:		Private data to be passed on to get_new_folio()
1895  * @mode:		The migration mode that specifies the constraints for
1896  *			folio migration, if any.
1897  * @reason:		The reason for folio migration.
1898  * @ret_succeeded:	Set to the number of folios migrated successfully if
1899  *			the caller passes a non-NULL pointer.
1900  *
1901  * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no
1902  * folios are movable any more, either because the list has become empty or
1903  * because no retryable folios remain.  It is the caller's responsibility to
1904  * call putback_movable_pages() only if ret != 0.
1905  *
1906  * Returns the number of {normal, large, hugetlb} folios that were not
1907  * migrated, or an error code.  A large folio that was split counts as one
1908  * non-migrated large folio, no matter how many of its split folios were
1909  * migrated successfully.  An illustrative caller sketch follows the function.
1910  */
1911 int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
1912 		free_folio_t put_new_folio, unsigned long private,
1913 		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1914 {
1915 	int rc, rc_gather;
1916 	int nr_pages;
1917 	struct folio *folio, *folio2;
1918 	LIST_HEAD(folios);
1919 	LIST_HEAD(ret_folios);
1920 	LIST_HEAD(split_folios);
1921 	struct migrate_pages_stats stats;
1922 
1923 	trace_mm_migrate_pages_start(mode, reason);
1924 
1925 	memset(&stats, 0, sizeof(stats));
1926 
1927 	rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
1928 				     mode, reason, &stats, &ret_folios);
1929 	if (rc_gather < 0)
1930 		goto out;
1931 
1932 again:
1933 	nr_pages = 0;
1934 	list_for_each_entry_safe(folio, folio2, from, lru) {
1935 		/* Retried hugetlb folios will be kept in the list */
1936 		if (folio_test_hugetlb(folio)) {
1937 			list_move_tail(&folio->lru, &ret_folios);
1938 			continue;
1939 		}
1940 
1941 		nr_pages += folio_nr_pages(folio);
1942 		if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1943 			break;
1944 	}
1945 	if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1946 		list_cut_before(&folios, from, &folio2->lru);
1947 	else
1948 		list_splice_init(from, &folios);
1949 	if (mode == MIGRATE_ASYNC)
1950 		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1951 				private, mode, reason, &ret_folios,
1952 				&split_folios, &stats,
1953 				NR_MAX_MIGRATE_PAGES_RETRY);
1954 	else
1955 		rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
1956 				private, mode, reason, &ret_folios,
1957 				&split_folios, &stats);
1958 	list_splice_tail_init(&folios, &ret_folios);
1959 	if (rc < 0) {
1960 		rc_gather = rc;
1961 		list_splice_tail(&split_folios, &ret_folios);
1962 		goto out;
1963 	}
1964 	if (!list_empty(&split_folios)) {
1965 		/*
1966 		 * Failure isn't counted since all split folios of a large folio
1967 		 * are counted as 1 failure already.  And we only try to migrate
1968 		 * with minimal effort, forcing MIGRATE_ASYNC mode and retrying once.
1969 		 */
1970 		migrate_pages_batch(&split_folios, get_new_folio,
1971 				put_new_folio, private, MIGRATE_ASYNC, reason,
1972 				&ret_folios, NULL, &stats, 1);
1973 		list_splice_tail_init(&split_folios, &ret_folios);
1974 	}
1975 	rc_gather += rc;
1976 	if (!list_empty(from))
1977 		goto again;
1978 out:
1979 	/*
1980 	 * Put the permanently failed folios back on the migration list;
1981 	 * the caller will put them back on the right list.
1982 	 */
1983 	list_splice(&ret_folios, from);
1984 
1985 	/*
1986 	 * Return 0 if all split folios of fail-to-migrate large folios
1987 	 * were migrated successfully.
1988 	 */
1989 	if (list_empty(from))
1990 		rc_gather = 0;
1991 
1992 	count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
1993 	count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
1994 	count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
1995 	count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
1996 	count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
1997 	trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
1998 			       stats.nr_thp_succeeded, stats.nr_thp_failed,
1999 			       stats.nr_thp_split, stats.nr_split, mode,
2000 			       reason);
2001 
2002 	if (ret_succeeded)
2003 		*ret_succeeded = stats.nr_succeeded;
2004 
2005 	return rc_gather;
2006 }
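
/*
 * Illustrative caller sketch (editor's addition, not part of the upstream
 * file): a minimal, hypothetical helper showing the migrate_pages() contract
 * documented above.  It assumes the caller has already isolated the folios
 * onto @folios; the helper name, the MR_SYSCALL reason and the gfp choices
 * are illustrative only.
 */
static int __maybe_unused migrate_folios_to_node_sketch(struct list_head *folios,
							 int target_nid)
{
	unsigned int nr_succeeded = 0;
	int ret;
	struct migration_target_control mtc = {
		.nid = target_nid,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	/* Target folios are allocated by alloc_migration_target() using &mtc. */
	ret = migrate_pages(folios, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL,
			    &nr_succeeded);
	/*
	 * A non-zero return means some folios were not migrated (or an error
	 * occurred); only then must the caller put the leftovers back.
	 * nr_succeeded is updated with the number of successes either way.
	 */
	if (ret)
		putback_movable_pages(folios);

	return ret;
}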
2007 
2008 struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2009 {
2010 	struct migration_target_control *mtc;
2011 	gfp_t gfp_mask;
2012 	unsigned int order = 0;
2013 	int nid;
2014 	int zidx;
2015 
2016 	mtc = (struct migration_target_control *)private;
2017 	gfp_mask = mtc->gfp_mask;
2018 	nid = mtc->nid;
2019 	if (nid == NUMA_NO_NODE)
2020 		nid = folio_nid(src);
2021 
2022 	if (folio_test_hugetlb(src)) {
2023 		struct hstate *h = folio_hstate(src);
2024 
2025 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2026 		return alloc_hugetlb_folio_nodemask(h, nid,
2027 						mtc->nmask, gfp_mask);
2028 	}
2029 
2030 	if (folio_test_large(src)) {
2031 		/*
2032 		 * clear __GFP_RECLAIM to make the migration callback
2033 		 * consistent with regular THP allocations.
2034 		 */
2035 		gfp_mask &= ~__GFP_RECLAIM;
2036 		gfp_mask |= GFP_TRANSHUGE;
2037 		order = folio_order(src);
2038 	}
2039 	zidx = zone_idx(folio_zone(src));
2040 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2041 		gfp_mask |= __GFP_HIGHMEM;
2042 
2043 	return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2044 }
2045 
2046 #ifdef CONFIG_NUMA
2047 
2048 static int store_status(int __user *status, int start, int value, int nr)
2049 {
2050 	while (nr-- > 0) {
2051 		if (put_user(value, status + start))
2052 			return -EFAULT;
2053 		start++;
2054 	}
2055 
2056 	return 0;
2057 }
2058 
2059 static int do_move_pages_to_node(struct list_head *pagelist, int node)
2060 {
2061 	int err;
2062 	struct migration_target_control mtc = {
2063 		.nid = node,
2064 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2065 	};
2066 
2067 	err = migrate_pages(pagelist, alloc_migration_target, NULL,
2068 		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2069 	if (err)
2070 		putback_movable_pages(pagelist);
2071 	return err;
2072 }
2073 
2074 /*
2075  * Resolves the given address to a struct page, isolates it from the LRU and
2076  * puts it on the given pagelist.
2077  * Returns:
2078  *     errno - if the page cannot be found/isolated
2079  *     0 - when it doesn't have to be migrated because it is already on the
2080  *         target node
2081  *     1 - when it has been queued
2082  */
2083 static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
2084 		int node, struct list_head *pagelist, bool migrate_all)
2085 {
2086 	struct vm_area_struct *vma;
2087 	unsigned long addr;
2088 	struct page *page;
2089 	struct folio *folio;
2090 	int err;
2091 
2092 	mmap_read_lock(mm);
2093 	addr = (unsigned long)untagged_addr_remote(mm, p);
2094 
2095 	err = -EFAULT;
2096 	vma = vma_lookup(mm, addr);
2097 	if (!vma || !vma_migratable(vma))
2098 		goto out;
2099 
2100 	/* FOLL_DUMP to ignore special (like zero) pages */
2101 	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2102 
2103 	err = PTR_ERR(page);
2104 	if (IS_ERR(page))
2105 		goto out;
2106 
2107 	err = -ENOENT;
2108 	if (!page)
2109 		goto out;
2110 
2111 	folio = page_folio(page);
2112 	if (folio_is_zone_device(folio))
2113 		goto out_putfolio;
2114 
2115 	err = 0;
2116 	if (folio_nid(folio) == node)
2117 		goto out_putfolio;
2118 
2119 	err = -EACCES;
2120 	if (page_mapcount(page) > 1 && !migrate_all)
2121 		goto out_putfolio;
2122 
2123 	err = -EBUSY;
2124 	if (folio_test_hugetlb(folio)) {
2125 		if (isolate_hugetlb(folio, pagelist))
2126 			err = 1;
2127 	} else {
2128 		if (!folio_isolate_lru(folio))
2129 			goto out_putfolio;
2130 
2131 		err = 1;
2132 		list_add_tail(&folio->lru, pagelist);
2133 		node_stat_mod_folio(folio,
2134 			NR_ISOLATED_ANON + folio_is_file_lru(folio),
2135 			folio_nr_pages(folio));
2136 	}
2137 out_putfolio:
2138 	/*
2139 	 * Either remove the duplicate refcount from folio_isolate_lru()
2140 	 * or drop the folio ref if it was not isolated.
2141 	 */
2142 	folio_put(folio);
2143 out:
2144 	mmap_read_unlock(mm);
2145 	return err;
2146 }
2147 
2148 static int move_pages_and_store_status(int node,
2149 		struct list_head *pagelist, int __user *status,
2150 		int start, int i, unsigned long nr_pages)
2151 {
2152 	int err;
2153 
2154 	if (list_empty(pagelist))
2155 		return 0;
2156 
2157 	err = do_move_pages_to_node(pagelist, node);
2158 	if (err) {
2159 		/*
2160 		 * A positive err means the number of pages that
2161 		 * failed to migrate.  Since we are going to abort
2162 		 * and return the number of non-migrated pages, we
2163 		 * need to include the rest of the nr_pages that
2164 		 * have not been attempted as well: e.g. if 2 queued
2165 		 * pages failed, return 2 + (nr_pages - i).
2166 		 */
2167 		if (err > 0)
2168 			err += nr_pages - i;
2169 		return err;
2170 	}
2171 	return store_status(status, start, node, i - start);
2172 }
2173 
2174 /*
2175  * Migrate an array of page addresses onto an array of nodes and fill
2176  * the corresponding status array.
2177  */
2178 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2179 			 unsigned long nr_pages,
2180 			 const void __user * __user *pages,
2181 			 const int __user *nodes,
2182 			 int __user *status, int flags)
2183 {
2184 	compat_uptr_t __user *compat_pages = (void __user *)pages;
2185 	int current_node = NUMA_NO_NODE;
2186 	LIST_HEAD(pagelist);
2187 	int start, i;
2188 	int err = 0, err1;
2189 
2190 	lru_cache_disable();
2191 
2192 	for (i = start = 0; i < nr_pages; i++) {
2193 		const void __user *p;
2194 		int node;
2195 
2196 		err = -EFAULT;
2197 		if (in_compat_syscall()) {
2198 			compat_uptr_t cp;
2199 
2200 			if (get_user(cp, compat_pages + i))
2201 				goto out_flush;
2202 
2203 			p = compat_ptr(cp);
2204 		} else {
2205 			if (get_user(p, pages + i))
2206 				goto out_flush;
2207 		}
2208 		if (get_user(node, nodes + i))
2209 			goto out_flush;
2210 
2211 		err = -ENODEV;
2212 		if (node < 0 || node >= MAX_NUMNODES)
2213 			goto out_flush;
2214 		if (!node_state(node, N_MEMORY))
2215 			goto out_flush;
2216 
2217 		err = -EACCES;
2218 		if (!node_isset(node, task_nodes))
2219 			goto out_flush;
2220 
2221 		if (current_node == NUMA_NO_NODE) {
2222 			current_node = node;
2223 			start = i;
2224 		} else if (node != current_node) {
2225 			err = move_pages_and_store_status(current_node,
2226 					&pagelist, status, start, i, nr_pages);
2227 			if (err)
2228 				goto out;
2229 			start = i;
2230 			current_node = node;
2231 		}
2232 
2233 		/*
2234 		 * Errors in the page lookup or isolation are not fatal; we simply
2235 		 * report them via the status array.
2236 		 */
2237 		err = add_page_for_migration(mm, p, current_node, &pagelist,
2238 					     flags & MPOL_MF_MOVE_ALL);
2239 
2240 		if (err > 0) {
2241 			/* The page is successfully queued for migration */
2242 			continue;
2243 		}
2244 
2245 		/*
2246 		 * The move_pages() man page does not list -EEXIST as a possible
2247 		 * error, so use -EFAULT instead.
2248 		 */
2249 		if (err == -EEXIST)
2250 			err = -EFAULT;
2251 
2252 		/*
2253 		 * If the page is already on the target node (!err), store the
2254 		 * node, otherwise, store the err.
2255 		 */
2256 		err = store_status(status, i, err ? : current_node, 1);
2257 		if (err)
2258 			goto out_flush;
2259 
2260 		err = move_pages_and_store_status(current_node, &pagelist,
2261 				status, start, i, nr_pages);
2262 		if (err) {
2263 			/* We have accounted for page i */
2264 			if (err > 0)
2265 				err--;
2266 			goto out;
2267 		}
2268 		current_node = NUMA_NO_NODE;
2269 	}
2270 out_flush:
2271 	/* Make sure we do not overwrite the existing error */
2272 	err1 = move_pages_and_store_status(current_node, &pagelist,
2273 				status, start, i, nr_pages);
2274 	if (err >= 0)
2275 		err = err1;
2276 out:
2277 	lru_cache_enable();
2278 	return err;
2279 }
2280 
2281 /*
2282  * Determine the nodes of an array of pages and store them in a status array.
2283  */
2284 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2285 				const void __user **pages, int *status)
2286 {
2287 	unsigned long i;
2288 
2289 	mmap_read_lock(mm);
2290 
2291 	for (i = 0; i < nr_pages; i++) {
2292 		unsigned long addr = (unsigned long)(*pages);
2293 		struct vm_area_struct *vma;
2294 		struct page *page;
2295 		int err = -EFAULT;
2296 
2297 		vma = vma_lookup(mm, addr);
2298 		if (!vma)
2299 			goto set_status;
2300 
2301 		/* FOLL_DUMP to ignore special (like zero) pages */
2302 		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2303 
2304 		err = PTR_ERR(page);
2305 		if (IS_ERR(page))
2306 			goto set_status;
2307 
2308 		err = -ENOENT;
2309 		if (!page)
2310 			goto set_status;
2311 
2312 		if (!is_zone_device_page(page))
2313 			err = page_to_nid(page);
2314 
2315 		put_page(page);
2316 set_status:
2317 		*status = err;
2318 
2319 		pages++;
2320 		status++;
2321 	}
2322 
2323 	mmap_read_unlock(mm);
2324 }
2325 
2326 static int get_compat_pages_array(const void __user *chunk_pages[],
2327 				  const void __user * __user *pages,
2328 				  unsigned long chunk_nr)
2329 {
2330 	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2331 	compat_uptr_t p;
2332 	int i;
2333 
2334 	for (i = 0; i < chunk_nr; i++) {
2335 		if (get_user(p, pages32 + i))
2336 			return -EFAULT;
2337 		chunk_pages[i] = compat_ptr(p);
2338 	}
2339 
2340 	return 0;
2341 }
2342 
2343 /*
2344  * Determine the nodes of a user array of pages and store them in
2345  * a user status array.
2346  */
2347 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2348 			 const void __user * __user *pages,
2349 			 int __user *status)
2350 {
2351 #define DO_PAGES_STAT_CHUNK_NR 16UL
2352 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2353 	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2354 
2355 	while (nr_pages) {
2356 		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2357 
2358 		if (in_compat_syscall()) {
2359 			if (get_compat_pages_array(chunk_pages, pages,
2360 						   chunk_nr))
2361 				break;
2362 		} else {
2363 			if (copy_from_user(chunk_pages, pages,
2364 				      chunk_nr * sizeof(*chunk_pages)))
2365 				break;
2366 		}
2367 
2368 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2369 
2370 		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2371 			break;
2372 
2373 		pages += chunk_nr;
2374 		status += chunk_nr;
2375 		nr_pages -= chunk_nr;
2376 	}
2377 	return nr_pages ? -EFAULT : 0;
2378 }
2379 
2380 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2381 {
2382 	struct task_struct *task;
2383 	struct mm_struct *mm;
2384 
2385 	/*
2386 	 * There is no need to check if the current process has the right to
2387 	 * modify the specified process when they are the same.
2388 	 */
2389 	if (!pid) {
2390 		mmget(current->mm);
2391 		*mem_nodes = cpuset_mems_allowed(current);
2392 		return current->mm;
2393 	}
2394 
2395 	/* Find the mm_struct */
2396 	rcu_read_lock();
2397 	task = find_task_by_vpid(pid);
2398 	if (!task) {
2399 		rcu_read_unlock();
2400 		return ERR_PTR(-ESRCH);
2401 	}
2402 	get_task_struct(task);
2403 
2404 	/*
2405 	 * Check if this process has the right to modify the specified
2406 	 * process. Use the regular "ptrace_may_access()" checks.
2407 	 */
2408 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2409 		rcu_read_unlock();
2410 		mm = ERR_PTR(-EPERM);
2411 		goto out;
2412 	}
2413 	rcu_read_unlock();
2414 
2415 	mm = ERR_PTR(security_task_movememory(task));
2416 	if (IS_ERR(mm))
2417 		goto out;
2418 	*mem_nodes = cpuset_mems_allowed(task);
2419 	mm = get_task_mm(task);
2420 out:
2421 	put_task_struct(task);
2422 	if (!mm)
2423 		mm = ERR_PTR(-EINVAL);
2424 	return mm;
2425 }
2426 
2427 /*
2428  * Move a list of pages in the address space of the process identified by
2429  * pid (or of the current process when pid is 0); see the usage sketch below.
2430  */
2431 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2432 			     const void __user * __user *pages,
2433 			     const int __user *nodes,
2434 			     int __user *status, int flags)
2435 {
2436 	struct mm_struct *mm;
2437 	int err;
2438 	nodemask_t task_nodes;
2439 
2440 	/* Check flags */
2441 	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2442 		return -EINVAL;
2443 
2444 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2445 		return -EPERM;
2446 
2447 	mm = find_mm_struct(pid, &task_nodes);
2448 	if (IS_ERR(mm))
2449 		return PTR_ERR(mm);
2450 
2451 	if (nodes)
2452 		err = do_pages_move(mm, task_nodes, nr_pages, pages,
2453 				    nodes, status, flags);
2454 	else
2455 		err = do_pages_stat(mm, nr_pages, pages, status);
2456 
2457 	mmput(mm);
2458 	return err;
2459 }
2460 
2461 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2462 		const void __user * __user *, pages,
2463 		const int __user *, nodes,
2464 		int __user *, status, int, flags)
2465 {
2466 	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2467 }
2468 
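
/*
 * Userspace usage sketch (editor's addition, illustrative only): the syscall
 * above is normally reached through the move_pages(2) wrapper declared in
 * libnuma's <numaif.h> (link with -lnuma).  The address and node numbers
 * below are hypothetical.
 *
 *	void *pages[1] = { addr };	// page-aligned address in this process
 *	int nodes[1]   = { 1 };		// desired target node
 *	int status[1];
 *
 *	// Move the page to node 1; pid 0 means the calling process.
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *	// status[0] is the node the page now resides on, or a negative errno.
 *
 *	// Passing nodes == NULL instead queries the current node of each page.
 *	rc = move_pages(0, 1, pages, NULL, status, 0);
 */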
2469 #ifdef CONFIG_NUMA_BALANCING
2470 /*
2471  * Returns true if this is a safe migration target node for misplaced NUMA
2472  * pages. Currently it only checks the watermarks, which is crude.
2473  */
2474 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2475 				   unsigned long nr_migrate_pages)
2476 {
2477 	int z;
2478 
2479 	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2480 		struct zone *zone = pgdat->node_zones + z;
2481 
2482 		if (!managed_zone(zone))
2483 			continue;
2484 
2485 		/* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2486 		if (!zone_watermark_ok(zone, 0,
2487 				       high_wmark_pages(zone) +
2488 				       nr_migrate_pages,
2489 				       ZONE_MOVABLE, 0))
2490 			continue;
2491 		return true;
2492 	}
2493 	return false;
2494 }
2495 
2496 static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2497 					   unsigned long data)
2498 {
2499 	int nid = (int) data;
2500 	int order = folio_order(src);
2501 	gfp_t gfp = __GFP_THISNODE;
2502 
2503 	if (order > 0)
2504 		gfp |= GFP_TRANSHUGE_LIGHT;
2505 	else {
2506 		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2507 			__GFP_NOWARN;
2508 		gfp &= ~__GFP_RECLAIM;
2509 	}
2510 	return __folio_alloc_node(gfp, order, nid);
2511 }
2512 
2513 static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
2514 {
2515 	int nr_pages = folio_nr_pages(folio);
2516 
2517 	/* Avoid migrating to a node that is nearly full */
2518 	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2519 		int z;
2520 
2521 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2522 			return 0;
2523 		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2524 			if (managed_zone(pgdat->node_zones + z))
2525 				break;
2526 		}
2527 		wakeup_kswapd(pgdat->node_zones + z, 0,
2528 			      folio_order(folio), ZONE_MOVABLE);
2529 		return 0;
2530 	}
2531 
2532 	if (!folio_isolate_lru(folio))
2533 		return 0;
2534 
2535 	node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2536 			    nr_pages);
2537 
2538 	/*
2539 	 * Isolating the folio has taken another reference, so the
2540 	 * caller's reference can be safely dropped without the folio
2541 	 * disappearing underneath us during migration.
2542 	 */
2543 	folio_put(folio);
2544 	return 1;
2545 }
2546 
2547 /*
2548  * Attempt to migrate a misplaced folio to the specified destination
2549  * node. The caller is expected to hold an elevated reference count on the
2550  * folio, which this function drops before returning (see the sketch below).
2551  */
2552 int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
2553 			    int node)
2554 {
2555 	pg_data_t *pgdat = NODE_DATA(node);
2556 	int isolated;
2557 	int nr_remaining;
2558 	unsigned int nr_succeeded;
2559 	LIST_HEAD(migratepages);
2560 	int nr_pages = folio_nr_pages(folio);
2561 
2562 	/*
2563 	 * Don't migrate file folios that are mapped in multiple processes
2564 	 * with execute permissions as they are probably shared libraries.
2565 	 * To check if the folio is shared, ideally we want to make sure
2566 	 * every page is mapped to the same process. Doing that is very
2567 	 * expensive, so check the estimated mapcount of the folio instead.
2568 	 */
2569 	if (folio_estimated_sharers(folio) != 1 && folio_is_file_lru(folio) &&
2570 	    (vma->vm_flags & VM_EXEC))
2571 		goto out;
2572 
2573 	/*
2574 	 * Also do not migrate dirty folios, as not all filesystems can move
2575 	 * dirty folios in MIGRATE_ASYNC mode, which would be a waste of cycles.
2576 	 */
2577 	if (folio_is_file_lru(folio) && folio_test_dirty(folio))
2578 		goto out;
2579 
2580 	isolated = numamigrate_isolate_folio(pgdat, folio);
2581 	if (!isolated)
2582 		goto out;
2583 
2584 	list_add(&folio->lru, &migratepages);
2585 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2586 				     NULL, node, MIGRATE_ASYNC,
2587 				     MR_NUMA_MISPLACED, &nr_succeeded);
2588 	if (nr_remaining) {
2589 		if (!list_empty(&migratepages)) {
2590 			list_del(&folio->lru);
2591 			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
2592 					folio_is_file_lru(folio), -nr_pages);
2593 			folio_putback_lru(folio);
2594 		}
2595 		isolated = 0;
2596 	}
2597 	if (nr_succeeded) {
2598 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2599 		if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
2600 			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2601 					    nr_succeeded);
2602 	}
2603 	BUG_ON(!list_empty(&migratepages));
2604 	return isolated;
2605 
2606 out:
2607 	folio_put(folio);
2608 	return 0;
2609 }
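
/*
 * Illustrative caller sketch (editor's addition, not part of the upstream
 * file): how a NUMA hint fault path might hand a misplaced folio to
 * migrate_misplaced_folio().  The helper name is hypothetical; the point is
 * the reference contract: the reference taken here is always consumed by
 * migrate_misplaced_folio(), whether or not the folio is migrated.
 */
static int __maybe_unused numa_hint_fault_sketch(struct folio *folio,
						 struct vm_area_struct *vma,
						 int target_nid)
{
	if (folio_nid(folio) == target_nid)
		return 0;	/* already on the desired node, nothing to do */

	/* Take the reference that migrate_misplaced_folio() will drop. */
	folio_get(folio);

	/* Non-zero means the folio was isolated and migrated to target_nid. */
	return migrate_misplaced_folio(folio, vma, target_nid);
}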
2610 #endif /* CONFIG_NUMA_BALANCING */
2611 #endif /* CONFIG_NUMA */
2612