xref: /linux/mm/migrate.c (revision 656fe3ee455e8d8dfa1c18292c508da26b29a39c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Memory Migration functionality - linux/mm/migrate.c
4  *
5  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6  *
7  * Page migration was first developed in the context of the memory hotplug
8  * project. The main authors of the migration code are:
9  *
10  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11  * Hirokazu Takahashi <taka@valinux.co.jp>
12  * Dave Hansen <haveblue@us.ibm.com>
13  * Christoph Lameter
14  */
15 
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/nsproxy.h>
24 #include <linux/ksm.h>
25 #include <linux/rmap.h>
26 #include <linux/topology.h>
27 #include <linux/cpu.h>
28 #include <linux/cpuset.h>
29 #include <linux/writeback.h>
30 #include <linux/mempolicy.h>
31 #include <linux/vmalloc.h>
32 #include <linux/security.h>
33 #include <linux/backing-dev.h>
34 #include <linux/compaction.h>
35 #include <linux/syscalls.h>
36 #include <linux/compat.h>
37 #include <linux/hugetlb.h>
38 #include <linux/hugetlb_cgroup.h>
39 #include <linux/gfp.h>
40 #include <linux/pfn_t.h>
41 #include <linux/memremap.h>
42 #include <linux/userfaultfd_k.h>
43 #include <linux/balloon_compaction.h>
44 #include <linux/page_idle.h>
45 #include <linux/page_owner.h>
46 #include <linux/sched/mm.h>
47 #include <linux/ptrace.h>
48 #include <linux/oom.h>
49 #include <linux/memory.h>
50 #include <linux/random.h>
51 #include <linux/sched/sysctl.h>
52 #include <linux/memory-tiers.h>
53 
54 #include <asm/tlbflush.h>
55 
56 #include <trace/events/migrate.h>
57 
58 #include "internal.h"
59 
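/*
 * isolate_movable_page() - Try to isolate a non-LRU movable page for migration.
 *
 * Takes a reference on the page, checks (under the page lock) that it is
 * still a movable page whose driver provides movable_operations and that it
 * is not already isolated, and then asks the driver to isolate it via
 * ->isolate_page().  Returns true and leaves the isolated flag set on
 * success, false otherwise.
 */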
60 bool isolate_movable_page(struct page *page, isolate_mode_t mode)
61 {
62 	struct folio *folio = folio_get_nontail_page(page);
63 	const struct movable_operations *mops;
64 
65 	/*
66 	 * Avoid burning cycles with pages that are still under __free_pages(),
67 	 * or that just got freed under us.
68 	 *
69 	 * In case we 'win' a race for a movable page being freed under us and
70 	 * raise its refcount, preventing __free_pages() from doing its job,
71 	 * the folio_put() at the end of this block will take care of
72 	 * releasing the page, thus avoiding a nasty leak.
73 	 */
74 	if (!folio)
75 		goto out;
76 
77 	if (unlikely(folio_test_slab(folio)))
78 		goto out_putfolio;
79 	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
80 	smp_rmb();
81 	/*
82 	 * Check the movable flag before taking the page lock: the owner uses
83 	 * non-atomic bitops on newly allocated page flags, so unconditionally
84 	 * grabbing the lock would corrupt the owner's non-atomic flag updates.
85 	 */
86 	if (unlikely(!__folio_test_movable(folio)))
87 		goto out_putfolio;
88 	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
89 	smp_rmb();
90 	if (unlikely(folio_test_slab(folio)))
91 		goto out_putfolio;
92 
93 	/*
94 	 * As movable pages are not isolated from LRU lists, concurrent
95 	 * compaction threads can race against page migration functions
96 	 * as well as against the release of a page.
97 	 *
98 	 * In order to avoid having an already isolated movable page
99 	 * being (wrongly) re-isolated while it is under migration,
100 	 * or to avoid attempting to isolate pages being released,
101 	 * let's make sure we hold the page lock
102 	 * before proceeding with the movable page isolation steps.
103 	 */
104 	if (unlikely(!folio_trylock(folio)))
105 		goto out_putfolio;
106 
107 	if (!folio_test_movable(folio) || folio_test_isolated(folio))
108 		goto out_no_isolated;
109 
110 	mops = folio_movable_ops(folio);
111 	VM_BUG_ON_FOLIO(!mops, folio);
112 
113 	if (!mops->isolate_page(&folio->page, mode))
114 		goto out_no_isolated;
115 
116 	/* Driver shouldn't use the isolated flag */
117 	WARN_ON_ONCE(folio_test_isolated(folio));
118 	folio_set_isolated(folio);
119 	folio_unlock(folio);
120 
121 	return true;
122 
123 out_no_isolated:
124 	folio_unlock(folio);
125 out_putfolio:
126 	folio_put(folio);
127 out:
128 	return false;
129 }
130 
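/*
 * Hand an isolated non-LRU movable folio back to its driver via the
 * ->putback_page() callback and clear the isolated flag.  Called with
 * the folio locked.
 */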
131 static void putback_movable_folio(struct folio *folio)
132 {
133 	const struct movable_operations *mops = folio_movable_ops(folio);
134 
135 	mops->putback_page(&folio->page);
136 	folio_clear_isolated(folio);
137 }
138 
139 /*
140  * Put previously isolated pages back onto the appropriate lists
141  * from where they were once taken off for compaction/migration.
142  *
143  * This function shall be used whenever the isolated pageset has been
144  * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
145  * and isolate_hugetlb().
146  */
147 void putback_movable_pages(struct list_head *l)
148 {
149 	struct folio *folio;
150 	struct folio *folio2;
151 
152 	list_for_each_entry_safe(folio, folio2, l, lru) {
153 		if (unlikely(folio_test_hugetlb(folio))) {
154 			folio_putback_active_hugetlb(folio);
155 			continue;
156 		}
157 		list_del(&folio->lru);
158 		/*
159 		 * We isolated a non-LRU movable folio, so here we can use
160 		 * __folio_test_movable because an LRU folio's mapping cannot
161 		 * have PAGE_MAPPING_MOVABLE.
162 		 */
163 		if (unlikely(__folio_test_movable(folio))) {
164 			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
165 			folio_lock(folio);
166 			if (folio_test_movable(folio))
167 				putback_movable_folio(folio);
168 			else
169 				folio_clear_isolated(folio);
170 			folio_unlock(folio);
171 			folio_put(folio);
172 		} else {
173 			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
174 					folio_is_file_lru(folio), -folio_nr_pages(folio));
175 			folio_putback_lru(folio);
176 		}
177 	}
178 }
179 
180 /*
181  * Restore a potential migration pte to a working pte entry
182  */
183 static bool remove_migration_pte(struct folio *folio,
184 		struct vm_area_struct *vma, unsigned long addr, void *old)
185 {
186 	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
187 
188 	while (page_vma_mapped_walk(&pvmw)) {
189 		rmap_t rmap_flags = RMAP_NONE;
190 		pte_t old_pte;
191 		pte_t pte;
192 		swp_entry_t entry;
193 		struct page *new;
194 		unsigned long idx = 0;
195 
196 		/* pgoff is invalid for ksm pages, but they are never large */
197 		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
198 			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
199 		new = folio_page(folio, idx);
200 
201 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
202 		/* PMD-mapped THP migration entry */
203 		if (!pvmw.pte) {
204 			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
205 					!folio_test_pmd_mappable(folio), folio);
206 			remove_migration_pmd(&pvmw, new);
207 			continue;
208 		}
209 #endif
210 
211 		folio_get(folio);
212 		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
213 		old_pte = ptep_get(pvmw.pte);
214 
215 		entry = pte_to_swp_entry(old_pte);
216 		if (!is_migration_entry_young(entry))
217 			pte = pte_mkold(pte);
218 		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
219 			pte = pte_mkdirty(pte);
220 		if (pte_swp_soft_dirty(old_pte))
221 			pte = pte_mksoft_dirty(pte);
222 		else
223 			pte = pte_clear_soft_dirty(pte);
224 
225 		if (is_writable_migration_entry(entry))
226 			pte = pte_mkwrite(pte, vma);
227 		else if (pte_swp_uffd_wp(old_pte))
228 			pte = pte_mkuffd_wp(pte);
229 
230 		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
231 			rmap_flags |= RMAP_EXCLUSIVE;
232 
233 		if (unlikely(is_device_private_page(new))) {
234 			if (pte_write(pte))
235 				entry = make_writable_device_private_entry(
236 							page_to_pfn(new));
237 			else
238 				entry = make_readable_device_private_entry(
239 							page_to_pfn(new));
240 			pte = swp_entry_to_pte(entry);
241 			if (pte_swp_soft_dirty(old_pte))
242 				pte = pte_swp_mksoft_dirty(pte);
243 			if (pte_swp_uffd_wp(old_pte))
244 				pte = pte_swp_mkuffd_wp(pte);
245 		}
246 
247 #ifdef CONFIG_HUGETLB_PAGE
248 		if (folio_test_hugetlb(folio)) {
249 			struct hstate *h = hstate_vma(vma);
250 			unsigned int shift = huge_page_shift(h);
251 			unsigned long psize = huge_page_size(h);
252 
253 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
254 			if (folio_test_anon(folio))
255 				hugetlb_add_anon_rmap(folio, vma, pvmw.address,
256 						      rmap_flags);
257 			else
258 				hugetlb_add_file_rmap(folio);
259 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
260 					psize);
261 		} else
262 #endif
263 		{
264 			if (folio_test_anon(folio))
265 				folio_add_anon_rmap_pte(folio, new, vma,
266 							pvmw.address, rmap_flags);
267 			else
268 				folio_add_file_rmap_pte(folio, new, vma);
269 			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
270 		}
271 		if (vma->vm_flags & VM_LOCKED)
272 			mlock_drain_local();
273 
274 		trace_remove_migration_pte(pvmw.address, pte_val(pte),
275 					   compound_order(new));
276 
277 		/* No need to invalidate - it was non-present before */
278 		update_mmu_cache(vma, pvmw.address, pvmw.pte);
279 	}
280 
281 	return true;
282 }
283 
284 /*
285  * Get rid of all migration entries and replace them by
286  * references to the indicated page.
287  */
288 void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
289 {
290 	struct rmap_walk_control rwc = {
291 		.rmap_one = remove_migration_pte,
292 		.arg = src,
293 	};
294 
295 	if (locked)
296 		rmap_walk_locked(dst, &rwc);
297 	else
298 		rmap_walk(dst, &rwc);
299 }
300 
301 /*
302  * Something used the pte of a page under migration. We need to
303  * get to the page and wait until migration is finished.
304  * When we return from this function the fault will be retried.
305  */
306 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
307 			  unsigned long address)
308 {
309 	spinlock_t *ptl;
310 	pte_t *ptep;
311 	pte_t pte;
312 	swp_entry_t entry;
313 
314 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
315 	if (!ptep)
316 		return;
317 
318 	pte = ptep_get(ptep);
319 	pte_unmap(ptep);
320 
321 	if (!is_swap_pte(pte))
322 		goto out;
323 
324 	entry = pte_to_swp_entry(pte);
325 	if (!is_migration_entry(entry))
326 		goto out;
327 
328 	migration_entry_wait_on_locked(entry, ptl);
329 	return;
330 out:
331 	spin_unlock(ptl);
332 }
333 
334 #ifdef CONFIG_HUGETLB_PAGE
335 /*
336  * The vma read lock must be held upon entry. Holding that lock prevents either
337  * the pte or the ptl from being freed.
338  *
339  * This function will release the vma lock before returning.
340  */
341 void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
342 {
343 	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
344 	pte_t pte;
345 
346 	hugetlb_vma_assert_locked(vma);
347 	spin_lock(ptl);
348 	pte = huge_ptep_get(ptep);
349 
350 	if (unlikely(!is_hugetlb_entry_migration(pte))) {
351 		spin_unlock(ptl);
352 		hugetlb_vma_unlock_read(vma);
353 	} else {
354 		/*
355 		 * If a migration entry existed, it is safe to release the vma
356 		 * lock here because the pgtable page won't be freed until the
357 		 * pgtable lock is released.  See comment right above pgtable
358 		 * lock release in migration_entry_wait_on_locked().
359 		 */
360 		hugetlb_vma_unlock_read(vma);
361 		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
362 	}
363 }
364 #endif
365 
366 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
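/*
 * PMD-level counterpart of migration_entry_wait(): take the pmd lock,
 * recheck that the PMD still holds a migration entry, and sleep until
 * the migration is complete.
 */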
367 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
368 {
369 	spinlock_t *ptl;
370 
371 	ptl = pmd_lock(mm, pmd);
372 	if (!is_pmd_migration_entry(*pmd))
373 		goto unlock;
374 	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
375 	return;
376 unlock:
377 	spin_unlock(ptl);
378 }
379 #endif
380 
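/*
 * Number of references the migration code expects to hold on the folio:
 * one taken by the isolating caller, plus one per page for the page cache
 * when a mapping exists, plus one more if private data is attached.
 */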
381 static int folio_expected_refs(struct address_space *mapping,
382 		struct folio *folio)
383 {
384 	int refs = 1;
385 	if (!mapping)
386 		return refs;
387 
388 	refs += folio_nr_pages(folio);
389 	if (folio_test_private(folio))
390 		refs++;
391 
392 	return refs;
393 }
394 
395 /*
396  * Replace the page in the mapping.
397  *
398  * The number of remaining references must be:
399  * 1 for anonymous pages without a mapping
400  * 2 for pages with a mapping
401  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
402  */
403 int folio_migrate_mapping(struct address_space *mapping,
404 		struct folio *newfolio, struct folio *folio, int extra_count)
405 {
406 	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
407 	struct zone *oldzone, *newzone;
408 	int dirty;
409 	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
410 	long nr = folio_nr_pages(folio);
411 	long entries, i;
412 
413 	if (!mapping) {
414 		/* Anonymous page without mapping */
415 		if (folio_ref_count(folio) != expected_count)
416 			return -EAGAIN;
417 
418 		/* No turning back from here */
419 		newfolio->index = folio->index;
420 		newfolio->mapping = folio->mapping;
421 		if (folio_test_swapbacked(folio))
422 			__folio_set_swapbacked(newfolio);
423 
424 		return MIGRATEPAGE_SUCCESS;
425 	}
426 
427 	oldzone = folio_zone(folio);
428 	newzone = folio_zone(newfolio);
429 
430 	xas_lock_irq(&xas);
431 	if (!folio_ref_freeze(folio, expected_count)) {
432 		xas_unlock_irq(&xas);
433 		return -EAGAIN;
434 	}
435 
436 	/*
437 	 * Now we know that no one else is looking at the folio:
438 	 * no turning back from here.
439 	 */
440 	newfolio->index = folio->index;
441 	newfolio->mapping = folio->mapping;
442 	folio_ref_add(newfolio, nr); /* add cache reference */
443 	if (folio_test_swapbacked(folio)) {
444 		__folio_set_swapbacked(newfolio);
445 		if (folio_test_swapcache(folio)) {
446 			folio_set_swapcache(newfolio);
447 			newfolio->private = folio_get_private(folio);
448 		}
449 		entries = nr;
450 	} else {
451 		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
452 		entries = 1;
453 	}
454 
455 	/* Move dirty while page refs frozen and newpage not yet exposed */
456 	dirty = folio_test_dirty(folio);
457 	if (dirty) {
458 		folio_clear_dirty(folio);
459 		folio_set_dirty(newfolio);
460 	}
461 
462 	/* Swap cache still stores N entries instead of a high-order entry */
463 	for (i = 0; i < entries; i++) {
464 		xas_store(&xas, newfolio);
465 		xas_next(&xas);
466 	}
467 
468 	/*
469 	 * Drop the cache references from the old folio by unfreezing
470 	 * it to nr fewer references.
471 	 * We know this isn't the last reference.
472 	 */
473 	folio_ref_unfreeze(folio, expected_count - nr);
474 
475 	xas_unlock(&xas);
476 	/* Leave irq disabled to prevent preemption while updating stats */
477 
478 	/*
479 	 * If moved to a different zone then also account
480 	 * the page for that zone. Other VM counters will be
481 	 * taken care of when we establish references to the
482 	 * new page and drop references to the old page.
483 	 *
484 	 * Note that anonymous pages are accounted for
485 	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
486 	 * are mapped to swap space.
487 	 */
488 	if (newzone != oldzone) {
489 		struct lruvec *old_lruvec, *new_lruvec;
490 		struct mem_cgroup *memcg;
491 
492 		memcg = folio_memcg(folio);
493 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
494 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
495 
496 		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
497 		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
498 		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
499 			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
500 			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
501 
502 			if (folio_test_pmd_mappable(folio)) {
503 				__mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
504 				__mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
505 			}
506 		}
507 #ifdef CONFIG_SWAP
508 		if (folio_test_swapcache(folio)) {
509 			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
510 			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
511 		}
512 #endif
513 		if (dirty && mapping_can_writeback(mapping)) {
514 			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
515 			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
516 			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
517 			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
518 		}
519 	}
520 	local_irq_enable();
521 
522 	return MIGRATEPAGE_SUCCESS;
523 }
524 EXPORT_SYMBOL(folio_migrate_mapping);
525 
526 /*
527  * The expected number of remaining references is the same as that
528  * of folio_migrate_mapping().
529  */
530 int migrate_huge_page_move_mapping(struct address_space *mapping,
531 				   struct folio *dst, struct folio *src)
532 {
533 	XA_STATE(xas, &mapping->i_pages, folio_index(src));
534 	int expected_count;
535 
536 	xas_lock_irq(&xas);
537 	expected_count = folio_expected_refs(mapping, src);
538 	if (!folio_ref_freeze(src, expected_count)) {
539 		xas_unlock_irq(&xas);
540 		return -EAGAIN;
541 	}
542 
543 	dst->index = src->index;
544 	dst->mapping = src->mapping;
545 
546 	folio_ref_add(dst, folio_nr_pages(dst));
547 
548 	xas_store(&xas, dst);
549 
550 	folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));
551 
552 	xas_unlock_irq(&xas);
553 
554 	return MIGRATEPAGE_SUCCESS;
555 }
556 
557 /*
558  * Copy the flags and some other ancillary information
559  */
560 void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
561 {
562 	int cpupid;
563 
564 	if (folio_test_error(folio))
565 		folio_set_error(newfolio);
566 	if (folio_test_referenced(folio))
567 		folio_set_referenced(newfolio);
568 	if (folio_test_uptodate(folio))
569 		folio_mark_uptodate(newfolio);
570 	if (folio_test_clear_active(folio)) {
571 		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
572 		folio_set_active(newfolio);
573 	} else if (folio_test_clear_unevictable(folio))
574 		folio_set_unevictable(newfolio);
575 	if (folio_test_workingset(folio))
576 		folio_set_workingset(newfolio);
577 	if (folio_test_checked(folio))
578 		folio_set_checked(newfolio);
579 	/*
580 	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
581 	 * migration entries. We can still have PG_anon_exclusive set on the
582 	 * effectively unmapped and unreferenced first sub-page of an
583 	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
584 	 */
585 	if (folio_test_mappedtodisk(folio))
586 		folio_set_mappedtodisk(newfolio);
587 
588 	/* Move dirty on pages not done by folio_migrate_mapping() */
589 	if (folio_test_dirty(folio))
590 		folio_set_dirty(newfolio);
591 
592 	if (folio_test_young(folio))
593 		folio_set_young(newfolio);
594 	if (folio_test_idle(folio))
595 		folio_set_idle(newfolio);
596 
597 	/*
598 	 * Copy NUMA information to the new page, to prevent over-eager
599 	 * future migrations of this same page.
600 	 */
601 	cpupid = folio_xchg_last_cpupid(folio, -1);
602 	/*
603 	 * For memory tiering mode, when migrating between slow and fast
604 	 * memory nodes, reset cpupid, because it is used to record
605 	 * page access time on the slow memory node.
606 	 */
607 	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
608 		bool f_toptier = node_is_toptier(folio_nid(folio));
609 		bool t_toptier = node_is_toptier(folio_nid(newfolio));
610 
611 		if (f_toptier != t_toptier)
612 			cpupid = -1;
613 	}
614 	folio_xchg_last_cpupid(newfolio, cpupid);
615 
616 	folio_migrate_ksm(newfolio, folio);
617 	/*
618 	 * Please do not reorder this without considering how mm/ksm.c's
619 	 * ksm_get_folio() depends upon folio_migrate_ksm() and PageSwapCache().
620 	 */
621 	if (folio_test_swapcache(folio))
622 		folio_clear_swapcache(folio);
623 	folio_clear_private(folio);
624 
625 	/* page->private contains hugetlb specific flags */
626 	if (!folio_test_hugetlb(folio))
627 		folio->private = NULL;
628 
629 	/*
630 	 * If any waiters have accumulated on the new page then
631 	 * wake them up.
632 	 */
633 	if (folio_test_writeback(newfolio))
634 		folio_end_writeback(newfolio);
635 
636 	/*
637 	 * PG_readahead shares the same bit with PG_reclaim.  The above
638 	 * folio_end_writeback() may clear PG_readahead mistakenly, so set the
639 	 * bit after that.
640 	 */
641 	if (folio_test_readahead(folio))
642 		folio_set_readahead(newfolio);
643 
644 	folio_copy_owner(newfolio, folio);
645 
646 	mem_cgroup_migrate(folio, newfolio);
647 }
648 EXPORT_SYMBOL(folio_migrate_flags);
649 
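/*
 * Copy both the contents and the migratable flags/metadata from @folio
 * to @newfolio.
 */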
650 void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
651 {
652 	folio_copy(newfolio, folio);
653 	folio_migrate_flags(newfolio, folio);
654 }
655 EXPORT_SYMBOL(folio_migrate_copy);
656 
657 /************************************************************
658  *                    Migration functions
659  ***********************************************************/
660 
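/*
 * Common helper: move the folio's place in the mapping, carry over any
 * attached private data, then copy the contents and flags over to the
 * destination folio.
 */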
661 static int __migrate_folio(struct address_space *mapping, struct folio *dst,
662 			   struct folio *src, void *src_private,
663 			   enum migrate_mode mode)
664 {
665 	int rc;
666 
667 	rc = folio_migrate_mapping(mapping, dst, src, 0);
668 	if (rc != MIGRATEPAGE_SUCCESS)
669 		return rc;
670 
671 	if (src_private)
672 		folio_attach_private(dst, folio_detach_private(src));
673 
674 	folio_migrate_copy(dst, src);
675 	return MIGRATEPAGE_SUCCESS;
676 }
677 
678 /**
679  * migrate_folio() - Simple folio migration.
680  * @mapping: The address_space containing the folio.
681  * @dst: The folio to migrate the data to.
682  * @src: The folio containing the current data.
683  * @mode: How to migrate the page.
684  *
685  * Common logic to directly migrate a single LRU folio suitable for
686  * folios that do not use PagePrivate/PagePrivate2.
687  *
688  * Folios are locked upon entry and exit.
689  */
690 int migrate_folio(struct address_space *mapping, struct folio *dst,
691 		  struct folio *src, enum migrate_mode mode)
692 {
693 	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */
694 	return __migrate_folio(mapping, dst, src, NULL, mode);
695 }
696 EXPORT_SYMBOL(migrate_folio);
697 
698 #ifdef CONFIG_BUFFER_HEAD
699 /* Returns true if all buffers are successfully locked */
700 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
701 							enum migrate_mode mode)
702 {
703 	struct buffer_head *bh = head;
704 	struct buffer_head *failed_bh;
705 
706 	do {
707 		if (!trylock_buffer(bh)) {
708 			if (mode == MIGRATE_ASYNC)
709 				goto unlock;
710 			if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
711 				goto unlock;
712 			lock_buffer(bh);
713 		}
714 
715 		bh = bh->b_this_page;
716 	} while (bh != head);
717 
718 	return true;
719 
720 unlock:
721 	/* We failed to lock the buffer and cannot stall. */
722 	failed_bh = bh;
723 	bh = head;
724 	while (bh != failed_bh) {
725 		unlock_buffer(bh);
726 		bh = bh->b_this_page;
727 	}
728 
729 	return false;
730 }
731 
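/*
 * Migrate a folio that has buffer heads attached.  All buffers are locked
 * first; when @check_refs is true we additionally insist that no buffer
 * head has an elevated b_count, retrying once after invalidating the
 * per-CPU bh LRUs.  On success the buffer heads are re-pointed at the
 * destination folio.
 */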
732 static int __buffer_migrate_folio(struct address_space *mapping,
733 		struct folio *dst, struct folio *src, enum migrate_mode mode,
734 		bool check_refs)
735 {
736 	struct buffer_head *bh, *head;
737 	int rc;
738 	int expected_count;
739 
740 	head = folio_buffers(src);
741 	if (!head)
742 		return migrate_folio(mapping, dst, src, mode);
743 
744 	/* Check that the folio does not have extra refs before we do more work */
745 	expected_count = folio_expected_refs(mapping, src);
746 	if (folio_ref_count(src) != expected_count)
747 		return -EAGAIN;
748 
749 	if (!buffer_migrate_lock_buffers(head, mode))
750 		return -EAGAIN;
751 
752 	if (check_refs) {
753 		bool busy;
754 		bool invalidated = false;
755 
756 recheck_buffers:
757 		busy = false;
758 		spin_lock(&mapping->i_private_lock);
759 		bh = head;
760 		do {
761 			if (atomic_read(&bh->b_count)) {
762 				busy = true;
763 				break;
764 			}
765 			bh = bh->b_this_page;
766 		} while (bh != head);
767 		if (busy) {
768 			if (invalidated) {
769 				rc = -EAGAIN;
770 				goto unlock_buffers;
771 			}
772 			spin_unlock(&mapping->i_private_lock);
773 			invalidate_bh_lrus();
774 			invalidated = true;
775 			goto recheck_buffers;
776 		}
777 	}
778 
779 	rc = filemap_migrate_folio(mapping, dst, src, mode);
780 	if (rc != MIGRATEPAGE_SUCCESS)
781 		goto unlock_buffers;
782 
783 	bh = head;
784 	do {
785 		folio_set_bh(bh, dst, bh_offset(bh));
786 		bh = bh->b_this_page;
787 	} while (bh != head);
788 
789 unlock_buffers:
790 	if (check_refs)
791 		spin_unlock(&mapping->i_private_lock);
792 	bh = head;
793 	do {
794 		unlock_buffer(bh);
795 		bh = bh->b_this_page;
796 	} while (bh != head);
797 
798 	return rc;
799 }
800 
801 /**
802  * buffer_migrate_folio() - Migration function for folios with buffers.
803  * @mapping: The address space containing @src.
804  * @dst: The folio to migrate to.
805  * @src: The folio to migrate from.
806  * @mode: How to migrate the folio.
807  *
808  * This function can only be used if the underlying filesystem guarantees
809  * that no other references to @src exist. For example attached buffer
810  * heads are accessed only under the folio lock.  If your filesystem cannot
811  * provide this guarantee, buffer_migrate_folio_norefs() may be more
812  * appropriate.
813  *
814  * Return: 0 on success or a negative errno on failure.
815  */
816 int buffer_migrate_folio(struct address_space *mapping,
817 		struct folio *dst, struct folio *src, enum migrate_mode mode)
818 {
819 	return __buffer_migrate_folio(mapping, dst, src, mode, false);
820 }
821 EXPORT_SYMBOL(buffer_migrate_folio);
822 
823 /**
824  * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
825  * @mapping: The address space containing @src.
826  * @dst: The folio to migrate to.
827  * @src: The folio to migrate from.
828  * @mode: How to migrate the folio.
829  *
830  * Like buffer_migrate_folio() except that this variant is more careful
831  * and checks that there are also no buffer head references. This function
832  * is the right one for mappings where buffer heads are directly looked
833  * up and referenced (such as block device mappings).
834  *
835  * Return: 0 on success or a negative errno on failure.
836  */
837 int buffer_migrate_folio_norefs(struct address_space *mapping,
838 		struct folio *dst, struct folio *src, enum migrate_mode mode)
839 {
840 	return __buffer_migrate_folio(mapping, dst, src, mode, true);
841 }
842 EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
843 #endif /* CONFIG_BUFFER_HEAD */
844 
845 int filemap_migrate_folio(struct address_space *mapping,
846 		struct folio *dst, struct folio *src, enum migrate_mode mode)
847 {
848 	return __migrate_folio(mapping, dst, src, folio_get_private(src), mode);
849 }
850 EXPORT_SYMBOL_GPL(filemap_migrate_folio);
851 
852 /*
853  * Writeback a folio to clean the dirty state
854  */
855 static int writeout(struct address_space *mapping, struct folio *folio)
856 {
857 	struct writeback_control wbc = {
858 		.sync_mode = WB_SYNC_NONE,
859 		.nr_to_write = 1,
860 		.range_start = 0,
861 		.range_end = LLONG_MAX,
862 		.for_reclaim = 1
863 	};
864 	int rc;
865 
866 	if (!mapping->a_ops->writepage)
867 		/* No write method for the address space */
868 		return -EINVAL;
869 
870 	if (!folio_clear_dirty_for_io(folio))
871 		/* Someone else already triggered a write */
872 		return -EAGAIN;
873 
874 	/*
875 	 * A dirty folio may imply that the underlying filesystem has
876 	 * the folio on some queue. So the folio must be clean for
877 	 * migration. Writeout may mean we lose the lock and the
878 	 * folio state is no longer what we checked for earlier.
879 	 * At this point we know that the migration attempt cannot
880 	 * be successful.
881 	 */
882 	remove_migration_ptes(folio, folio, false);
883 
884 	rc = mapping->a_ops->writepage(&folio->page, &wbc);
885 
886 	if (rc != AOP_WRITEPAGE_ACTIVATE)
887 		/* unlocked. Relock */
888 		folio_lock(folio);
889 
890 	return (rc < 0) ? -EIO : -EAGAIN;
891 }
892 
893 /*
894  * Default handling if a filesystem does not provide a migration function.
895  */
896 static int fallback_migrate_folio(struct address_space *mapping,
897 		struct folio *dst, struct folio *src, enum migrate_mode mode)
898 {
899 	if (folio_test_dirty(src)) {
900 		/* Only writeback folios in full synchronous migration */
901 		switch (mode) {
902 		case MIGRATE_SYNC:
903 			break;
904 		default:
905 			return -EBUSY;
906 		}
907 		return writeout(mapping, src);
908 	}
909 
910 	/*
911 	 * Buffers may be managed in a filesystem specific way.
912 	 * We must have no buffers or drop them.
913 	 */
914 	if (!filemap_release_folio(src, GFP_KERNEL))
915 		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
916 
917 	return migrate_folio(mapping, dst, src, mode);
918 }
919 
920 /*
921  * Move a page to a newly allocated page.
922  * The page is locked and all ptes have been successfully removed.
923  *
924  * The new page will have replaced the old page if this function
925  * is successful.
926  *
927  * Return value:
928  *   < 0 - error code
929  *  MIGRATEPAGE_SUCCESS - success
930  */
931 static int move_to_new_folio(struct folio *dst, struct folio *src,
932 				enum migrate_mode mode)
933 {
934 	int rc = -EAGAIN;
935 	bool is_lru = !__folio_test_movable(src);
936 
937 	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
938 	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
939 
940 	if (likely(is_lru)) {
941 		struct address_space *mapping = folio_mapping(src);
942 
943 		if (!mapping)
944 			rc = migrate_folio(mapping, dst, src, mode);
945 		else if (mapping_unmovable(mapping))
946 			rc = -EOPNOTSUPP;
947 		else if (mapping->a_ops->migrate_folio)
948 			/*
949 			 * Most folios have a mapping and most filesystems
950 			 * provide a migrate_folio callback. Anonymous folios
951 			 * are part of swap space which also has its own
952 			 * migrate_folio callback. This is the most common path
953 			 * for page migration.
954 			 */
955 			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
956 								mode);
957 		else
958 			rc = fallback_migrate_folio(mapping, dst, src, mode);
959 	} else {
960 		const struct movable_operations *mops;
961 
962 		/*
963 		 * A non-LRU page could have been released after the
964 		 * isolation step. In that case, we shouldn't try migration.
965 		 */
966 		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
967 		if (!folio_test_movable(src)) {
968 			rc = MIGRATEPAGE_SUCCESS;
969 			folio_clear_isolated(src);
970 			goto out;
971 		}
972 
973 		mops = folio_movable_ops(src);
974 		rc = mops->migrate_page(&dst->page, &src->page, mode);
975 		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
976 				!folio_test_isolated(src));
977 	}
978 
979 	/*
980 	 * When successful, old pagecache src->mapping must be cleared before
981 	 * src is freed; but stats require that PageAnon be left as PageAnon.
982 	 */
983 	if (rc == MIGRATEPAGE_SUCCESS) {
984 		if (__folio_test_movable(src)) {
985 			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
986 
987 			/*
988 			 * We clear the isolated flag under the page lock so that
989 			 * a racing compactor cannot try to migrate this page.
990 			 */
991 			folio_clear_isolated(src);
992 		}
993 
994 		/*
995 		 * Anonymous and movable src->mapping will be cleared by
996 		 * free_pages_prepare(), so don't reset it here; keeping it
997 		 * lets type checks such as PageAnon keep working.
998 		 */
999 		if (!folio_mapping_flags(src))
1000 			src->mapping = NULL;
1001 
1002 		if (likely(!folio_is_zone_device(dst)))
1003 			flush_dcache_folio(dst);
1004 	}
1005 out:
1006 	return rc;
1007 }
1008 
1009 /*
1010  * To record some information during migration, we use the otherwise
1011  * unused ->private field of the newly allocated destination folio.
1012  * This is safe because nobody is using it except us.
1013  */
1014 enum {
1015 	PAGE_WAS_MAPPED = BIT(0),
1016 	PAGE_WAS_MLOCKED = BIT(1),
1017 	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
1018 };
1019 
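/*
 * Pack the PAGE_WAS_* state bits and the anon_vma pointer into
 * dst->private; __migrate_folio_extract() retrieves them again and clears
 * the field.  anon_vma pointers are sufficiently aligned that the two low
 * bits are free to carry the state flags.
 */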
1020 static void __migrate_folio_record(struct folio *dst,
1021 				   int old_page_state,
1022 				   struct anon_vma *anon_vma)
1023 {
1024 	dst->private = (void *)anon_vma + old_page_state;
1025 }
1026 
1027 static void __migrate_folio_extract(struct folio *dst,
1028 				   int *old_page_state,
1029 				   struct anon_vma **anon_vmap)
1030 {
1031 	unsigned long private = (unsigned long)dst->private;
1032 
1033 	*anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
1034 	*old_page_state = private & PAGE_OLD_STATES;
1035 	dst->private = NULL;
1036 }
1037 
1038 /* Restore the source folio to the original state upon failure */
1039 static void migrate_folio_undo_src(struct folio *src,
1040 				   int page_was_mapped,
1041 				   struct anon_vma *anon_vma,
1042 				   bool locked,
1043 				   struct list_head *ret)
1044 {
1045 	if (page_was_mapped)
1046 		remove_migration_ptes(src, src, false);
1047 	/* Drop an anon_vma reference if we took one */
1048 	if (anon_vma)
1049 		put_anon_vma(anon_vma);
1050 	if (locked)
1051 		folio_unlock(src);
1052 	if (ret)
1053 		list_move_tail(&src->lru, ret);
1054 }
1055 
1056 /* Restore the destination folio to the original state upon failure */
1057 static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1058 		free_folio_t put_new_folio, unsigned long private)
1059 {
1060 	if (locked)
1061 		folio_unlock(dst);
1062 	if (put_new_folio)
1063 		put_new_folio(dst, private);
1064 	else
1065 		folio_put(dst);
1066 }
1067 
1068 /* Cleanup src folio upon migration success */
1069 static void migrate_folio_done(struct folio *src,
1070 			       enum migrate_reason reason)
1071 {
1072 	/*
1073 	 * Compaction can also migrate non-LRU pages, which are
1074 	 * not accounted to NR_ISOLATED_*. They can be recognized
1075 	 * via __folio_test_movable().
1076 	 */
1077 	if (likely(!__folio_test_movable(src)))
1078 		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1079 				    folio_is_file_lru(src), -folio_nr_pages(src));
1080 
1081 	if (reason != MR_MEMORY_FAILURE)
1082 		/* We release the page in page_handle_poison. */
1083 		folio_put(src);
1084 }
1085 
1086 /* Obtain the lock on page, remove all ptes. */
1087 static int migrate_folio_unmap(new_folio_t get_new_folio,
1088 		free_folio_t put_new_folio, unsigned long private,
1089 		struct folio *src, struct folio **dstp, enum migrate_mode mode,
1090 		enum migrate_reason reason, struct list_head *ret)
1091 {
1092 	struct folio *dst;
1093 	int rc = -EAGAIN;
1094 	int old_page_state = 0;
1095 	struct anon_vma *anon_vma = NULL;
1096 	bool is_lru = !__folio_test_movable(src);
1097 	bool locked = false;
1098 	bool dst_locked = false;
1099 
1100 	if (folio_ref_count(src) == 1) {
1101 		/* Folio was freed from under us. So we are done. */
1102 		folio_clear_active(src);
1103 		folio_clear_unevictable(src);
1104 		/* free_pages_prepare() will clear PG_isolated. */
1105 		list_del(&src->lru);
1106 		migrate_folio_done(src, reason);
1107 		return MIGRATEPAGE_SUCCESS;
1108 	}
1109 
1110 	dst = get_new_folio(src, private);
1111 	if (!dst)
1112 		return -ENOMEM;
1113 	*dstp = dst;
1114 
1115 	dst->private = NULL;
1116 
1117 	if (!folio_trylock(src)) {
1118 		if (mode == MIGRATE_ASYNC)
1119 			goto out;
1120 
1121 		/*
1122 		 * It's not safe for direct compaction to call lock_page.
1123 		 * For example, during page readahead pages are added locked
1124 		 * to the LRU. Later, when the IO completes the pages are
1125 		 * marked uptodate and unlocked. However, the queueing
1126 		 * could be merging multiple pages for one bio (e.g.
1127 		 * mpage_readahead). If an allocation happens for the
1128 		 * second or third page, the process can end up locking
1129 		 * the same page twice and deadlocking. Rather than
1130 		 * trying to be clever about what pages can be locked,
1131 		 * avoid the use of lock_page for direct compaction
1132 		 * altogether.
1133 		 */
1134 		if (current->flags & PF_MEMALLOC)
1135 			goto out;
1136 
1137 		/*
1138 		 * In "light" mode, we can wait for transient locks (eg
1139 		 * inserting a page into the page table), but it's not
1140 		 * worth waiting for I/O.
1141 		 */
1142 		if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1143 			goto out;
1144 
1145 		folio_lock(src);
1146 	}
1147 	locked = true;
1148 	if (folio_test_mlocked(src))
1149 		old_page_state |= PAGE_WAS_MLOCKED;
1150 
1151 	if (folio_test_writeback(src)) {
1152 		/*
1153 		 * Only in the case of a full synchronous migration is it
1154 		 * necessary to wait for PageWriteback. In the async case,
1155 		 * the retry loop is too short and in the sync-light case,
1156 		 * the overhead of stalling is too much.
1157 		 */
1158 		switch (mode) {
1159 		case MIGRATE_SYNC:
1160 			break;
1161 		default:
1162 			rc = -EBUSY;
1163 			goto out;
1164 		}
1165 		folio_wait_writeback(src);
1166 	}
1167 
1168 	/*
1169 	 * Once try_to_migrate() drops src->mapcount to 0, we can no longer
1170 	 * notice that the anon_vma is freed while we migrate the page.
1171 	 * This folio_get_anon_vma() delays freeing the anon_vma pointer until
1172 	 * the end of migration. File cache pages are not a problem because
1173 	 * they are protected by the page lock throughout migration, so only
1174 	 * anonymous pages need this care here.
1175 	 *
1176 	 * Only folio_get_anon_vma() understands the subtleties of
1177 	 * getting a hold on an anon_vma from outside one of its mms.
1178 	 * But if we cannot get anon_vma, then we won't need it anyway,
1179 	 * because that implies that the anon page is no longer mapped
1180 	 * (and cannot be remapped so long as we hold the page lock).
1181 	 */
1182 	if (folio_test_anon(src) && !folio_test_ksm(src))
1183 		anon_vma = folio_get_anon_vma(src);
1184 
1185 	/*
1186 	 * Block others from accessing the new page when we get around to
1187 	 * establishing additional references. We are usually the only one
1188 	 * holding a reference to dst at this point. We used to have a BUG
1189 	 * here if folio_trylock(dst) fails, but would like to allow for
1190 	 * cases where there might be a race with the previous use of dst.
1191 	 * This is much like races on refcount of oldpage: just don't BUG().
1192 	 */
1193 	if (unlikely(!folio_trylock(dst)))
1194 		goto out;
1195 	dst_locked = true;
1196 
1197 	if (unlikely(!is_lru)) {
1198 		__migrate_folio_record(dst, old_page_state, anon_vma);
1199 		return MIGRATEPAGE_UNMAP;
1200 	}
1201 
1202 	/*
1203 	 * Corner case handling:
1204 	 * 1. When a new swap-cache page is being read in, it is added to the LRU
1205 	 * and treated as swapcache, but it has no rmap yet.
1206 	 * Calling try_to_migrate() against a src->mapping==NULL page will
1207 	 * trigger a BUG.  So handle it here.
1208 	 * 2. An orphaned page (see truncate_cleanup_page) might have
1209 	 * fs-private metadata. The page can be picked up due to memory
1210 	 * offlining.  Everywhere else except page reclaim, the page is
1211 	 * invisible to the vm, so the page can not be migrated.  So try to
1212 	 * free the metadata, so the page can be freed.
1213 	 */
1214 	if (!src->mapping) {
1215 		if (folio_test_private(src)) {
1216 			try_to_free_buffers(src);
1217 			goto out;
1218 		}
1219 	} else if (folio_mapped(src)) {
1220 		/* Establish migration ptes */
1221 		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1222 			       !folio_test_ksm(src) && !anon_vma, src);
1223 		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1224 		old_page_state |= PAGE_WAS_MAPPED;
1225 	}
1226 
1227 	if (!folio_mapped(src)) {
1228 		__migrate_folio_record(dst, old_page_state, anon_vma);
1229 		return MIGRATEPAGE_UNMAP;
1230 	}
1231 
1232 out:
1233 	/*
1234 	 * A folio that has not been unmapped will be restored to the
1235 	 * right list unless we want to retry.
1236 	 */
1237 	if (rc == -EAGAIN)
1238 		ret = NULL;
1239 
1240 	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1241 			       anon_vma, locked, ret);
1242 	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1243 
1244 	return rc;
1245 }
1246 
1247 /* Migrate the folio to the newly allocated folio in dst. */
1248 static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1249 			      struct folio *src, struct folio *dst,
1250 			      enum migrate_mode mode, enum migrate_reason reason,
1251 			      struct list_head *ret)
1252 {
1253 	int rc;
1254 	int old_page_state = 0;
1255 	struct anon_vma *anon_vma = NULL;
1256 	bool is_lru = !__folio_test_movable(src);
1257 	struct list_head *prev;
1258 
1259 	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1260 	prev = dst->lru.prev;
1261 	list_del(&dst->lru);
1262 
1263 	rc = move_to_new_folio(dst, src, mode);
1264 	if (rc)
1265 		goto out;
1266 
1267 	if (unlikely(!is_lru))
1268 		goto out_unlock_both;
1269 
1270 	/*
1271 	 * When successful, push dst to LRU immediately: so that if it
1272 	 * turns out to be an mlocked page, remove_migration_ptes() will
1273 	 * automatically build up the correct dst->mlock_count for it.
1274 	 *
1275 	 * We would like to do something similar for the old page, when
1276 	 * unsuccessful, and other cases when a page has been temporarily
1277 	 * isolated from the unevictable LRU: but this case is the easiest.
1278 	 */
1279 	folio_add_lru(dst);
1280 	if (old_page_state & PAGE_WAS_MLOCKED)
1281 		lru_add_drain();
1282 
1283 	if (old_page_state & PAGE_WAS_MAPPED)
1284 		remove_migration_ptes(src, dst, false);
1285 
1286 out_unlock_both:
1287 	folio_unlock(dst);
1288 	set_page_owner_migrate_reason(&dst->page, reason);
1289 	/*
1290 	 * If migration is successful, drop our reference to dst.  This
1291 	 * will not free the page because the new page owner has already
1292 	 * taken a reference.
1293 	 */
1294 	folio_put(dst);
1295 
1296 	/*
1297 	 * A folio that has been migrated has all references removed
1298 	 * and will be freed.
1299 	 */
1300 	list_del(&src->lru);
1301 	/* Drop an anon_vma reference if we took one */
1302 	if (anon_vma)
1303 		put_anon_vma(anon_vma);
1304 	folio_unlock(src);
1305 	migrate_folio_done(src, reason);
1306 
1307 	return rc;
1308 out:
1309 	/*
1310 	 * A folio that has not been migrated will be restored to the
1311 	 * right list unless we want to retry.
1312 	 */
1313 	if (rc == -EAGAIN) {
1314 		list_add(&dst->lru, prev);
1315 		__migrate_folio_record(dst, old_page_state, anon_vma);
1316 		return rc;
1317 	}
1318 
1319 	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1320 			       anon_vma, true, ret);
1321 	migrate_folio_undo_dst(dst, true, put_new_folio, private);
1322 
1323 	return rc;
1324 }
1325 
1326 /*
1327  * Hugepage counterpart of migrate_folio_unmap() and migrate_folio_move().
1328  *
1329  * This function doesn't wait for the completion of hugepage I/O
1330  * because there is no race between I/O and migration for hugepages.
1331  * Note that currently hugepage I/O occurs only in direct I/O
1332  * where no lock is held and PG_writeback is irrelevant,
1333  * and the writeback status of all subpages is counted in the reference
1334  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1335  * under direct I/O, the reference count of the head page is 512 and a bit more.)
1336  * This means that when we try to migrate a hugepage whose subpages are
1337  * doing direct I/O, some references remain after try_to_migrate() and
1338  * hugepage migration fails without data corruption.
1339  *
1340  * There is also no race when direct I/O is issued on a page under migration,
1341  * because then the pte is replaced with a migration swap entry and the direct
1342  * I/O code will wait in the page fault for migration to complete.
1343  */
1344 static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1345 		free_folio_t put_new_folio, unsigned long private,
1346 		struct folio *src, int force, enum migrate_mode mode,
1347 		int reason, struct list_head *ret)
1348 {
1349 	struct folio *dst;
1350 	int rc = -EAGAIN;
1351 	int page_was_mapped = 0;
1352 	struct anon_vma *anon_vma = NULL;
1353 	struct address_space *mapping = NULL;
1354 
1355 	if (folio_ref_count(src) == 1) {
1356 		/* page was freed from under us. So we are done. */
1357 		folio_putback_active_hugetlb(src);
1358 		return MIGRATEPAGE_SUCCESS;
1359 	}
1360 
1361 	dst = get_new_folio(src, private);
1362 	if (!dst)
1363 		return -ENOMEM;
1364 
1365 	if (!folio_trylock(src)) {
1366 		if (!force)
1367 			goto out;
1368 		switch (mode) {
1369 		case MIGRATE_SYNC:
1370 			break;
1371 		default:
1372 			goto out;
1373 		}
1374 		folio_lock(src);
1375 	}
1376 
1377 	/*
1378 	 * Check for pages which are in the process of being freed.  Without
1379 	 * folio_mapping() set, the hugetlbfs-specific move page routine will not
1380 	 * be called and we could leak usage counts for subpools.
1381 	 */
1382 	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1383 		rc = -EBUSY;
1384 		goto out_unlock;
1385 	}
1386 
1387 	if (folio_test_anon(src))
1388 		anon_vma = folio_get_anon_vma(src);
1389 
1390 	if (unlikely(!folio_trylock(dst)))
1391 		goto put_anon;
1392 
1393 	if (folio_mapped(src)) {
1394 		enum ttu_flags ttu = 0;
1395 
1396 		if (!folio_test_anon(src)) {
1397 			/*
1398 			 * In shared mappings, try_to_unmap could potentially
1399 			 * call huge_pmd_unshare.  Because of this, take the
1400 			 * i_mmap_rwsem in write mode here and set TTU_RMAP_LOCKED
1401 			 * to let lower levels know we have taken the lock.
1402 			 */
1403 			mapping = hugetlb_folio_mapping_lock_write(src);
1404 			if (unlikely(!mapping))
1405 				goto unlock_put_anon;
1406 
1407 			ttu = TTU_RMAP_LOCKED;
1408 		}
1409 
1410 		try_to_migrate(src, ttu);
1411 		page_was_mapped = 1;
1412 
1413 		if (ttu & TTU_RMAP_LOCKED)
1414 			i_mmap_unlock_write(mapping);
1415 	}
1416 
1417 	if (!folio_mapped(src))
1418 		rc = move_to_new_folio(dst, src, mode);
1419 
1420 	if (page_was_mapped)
1421 		remove_migration_ptes(src,
1422 			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1423 
1424 unlock_put_anon:
1425 	folio_unlock(dst);
1426 
1427 put_anon:
1428 	if (anon_vma)
1429 		put_anon_vma(anon_vma);
1430 
1431 	if (rc == MIGRATEPAGE_SUCCESS) {
1432 		move_hugetlb_state(src, dst, reason);
1433 		put_new_folio = NULL;
1434 	}
1435 
1436 out_unlock:
1437 	folio_unlock(src);
1438 out:
1439 	if (rc == MIGRATEPAGE_SUCCESS)
1440 		folio_putback_active_hugetlb(src);
1441 	else if (rc != -EAGAIN)
1442 		list_move_tail(&src->lru, ret);
1443 
1444 	/*
1445 	 * If migration was not successful and there's a freeing callback, use
1446 	 * it.  Otherwise, put_page() will drop the reference grabbed during
1447 	 * isolation.
1448 	 */
1449 	if (put_new_folio)
1450 		put_new_folio(dst, private);
1451 	else
1452 		folio_putback_active_hugetlb(dst);
1453 
1454 	return rc;
1455 }
1456 
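/*
 * Lock the folio and try to split it, placing the resulting folios on
 * @split_folios; on success the folio itself is moved there too.
 * Returns 0 on success or the error from split_folio_to_list().
 */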
1457 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
1458 {
1459 	int rc;
1460 
1461 	folio_lock(folio);
1462 	rc = split_folio_to_list(folio, split_folios);
1463 	folio_unlock(folio);
1464 	if (!rc)
1465 		list_move_tail(&folio->lru, split_folios);
1466 
1467 	return rc;
1468 }
1469 
1470 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1471 #define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
1472 #else
1473 #define NR_MAX_BATCHED_MIGRATION	512
1474 #endif
1475 #define NR_MAX_MIGRATE_PAGES_RETRY	10
1476 #define NR_MAX_MIGRATE_ASYNC_RETRY	3
1477 #define NR_MAX_MIGRATE_SYNC_RETRY					\
1478 	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
1479 
1480 struct migrate_pages_stats {
1481 	int nr_succeeded;	/* Normal and large folios migrated successfully, in
1482 				   units of base pages */
1483 	int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
1484 				   units of base pages.  Untried folios aren't counted */
1485 	int nr_thp_succeeded;	/* THP migrated successfully */
1486 	int nr_thp_failed;	/* THP failed to be migrated */
1487 	int nr_thp_split;	/* THP split before migrating */
1488 	int nr_split;	/* Large folios (including THP) split before migrating */
1489 };
1490 
1491 /*
1492  * Returns the number of hugetlb folios that were not migrated, or an error
1493  * code.  The function stops after NR_MAX_MIGRATE_PAGES_RETRY attempts or once
1494  * no movable hugetlb folios remain, either because the list has become empty
1495  * or because no retryable hugetlb folios are left.  It is the caller's
1496  * responsibility to call putback_movable_pages() only if ret != 0.
1497  */
1498 static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1499 			    free_folio_t put_new_folio, unsigned long private,
1500 			    enum migrate_mode mode, int reason,
1501 			    struct migrate_pages_stats *stats,
1502 			    struct list_head *ret_folios)
1503 {
1504 	int retry = 1;
1505 	int nr_failed = 0;
1506 	int nr_retry_pages = 0;
1507 	int pass = 0;
1508 	struct folio *folio, *folio2;
1509 	int rc, nr_pages;
1510 
1511 	for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1512 		retry = 0;
1513 		nr_retry_pages = 0;
1514 
1515 		list_for_each_entry_safe(folio, folio2, from, lru) {
1516 			if (!folio_test_hugetlb(folio))
1517 				continue;
1518 
1519 			nr_pages = folio_nr_pages(folio);
1520 
1521 			cond_resched();
1522 
1523 			/*
1524 			 * Migratability of hugepages depends on the architecture and
1525 			 * the hugepage size.  This check is necessary because some callers
1526 			 * of hugepage migration like soft offline and memory
1527 			 * hotremove don't walk through page tables or check whether
1528 			 * the hugepage is pmd-based or not before kicking migration.
1529 			 */
1530 			if (!hugepage_migration_supported(folio_hstate(folio))) {
1531 				nr_failed++;
1532 				stats->nr_failed_pages += nr_pages;
1533 				list_move_tail(&folio->lru, ret_folios);
1534 				continue;
1535 			}
1536 
1537 			rc = unmap_and_move_huge_page(get_new_folio,
1538 						      put_new_folio, private,
1539 						      folio, pass > 2, mode,
1540 						      reason, ret_folios);
1541 			/*
1542 			 * The rules are:
1543 			 *	Success: hugetlb folio will be put back
1544 			 *	-EAGAIN: stay on the from list
1545 			 *	-ENOMEM: stay on the from list
1546 			 *	Other errno: put on ret_folios list
1547 			 */
1548 			switch(rc) {
1549 			case -ENOMEM:
1550 				/*
1551 				 * When memory is low, don't bother to try to migrate
1552 				 * other folios, just exit.
1553 				 */
1554 				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1555 				return -ENOMEM;
1556 			case -EAGAIN:
1557 				retry++;
1558 				nr_retry_pages += nr_pages;
1559 				break;
1560 			case MIGRATEPAGE_SUCCESS:
1561 				stats->nr_succeeded += nr_pages;
1562 				break;
1563 			default:
1564 				/*
1565 				 * Permanent failure (-EBUSY, etc.):
1566 				 * unlike -EAGAIN case, the failed folio is
1567 				 * removed from migration folio list and not
1568 				 * retried in the next outer loop.
1569 				 */
1570 				nr_failed++;
1571 				stats->nr_failed_pages += nr_pages;
1572 				break;
1573 			}
1574 		}
1575 	}
1576 	/*
1577 	 * nr_failed is the number of hugetlb folios that failed to migrate.  After
1578 	 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1579 	 * folios as failed.
1580 	 */
1581 	nr_failed += retry;
1582 	stats->nr_failed_pages += nr_retry_pages;
1583 
1584 	return nr_failed;
1585 }
1586 
1587 /*
1588  * migrate_pages_batch() first unmaps as many folios in the from list as
1589  * possible, then moves the unmapped folios.
1590  *
1591  * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
1592  * lock or bit while we have locked more than one folio, which may cause a
1593  * deadlock (e.g., for the loop device).  So, if mode != MIGRATE_ASYNC, the
1594  * length of the from list must be <= 1.
1595  */
1596 static int migrate_pages_batch(struct list_head *from,
1597 		new_folio_t get_new_folio, free_folio_t put_new_folio,
1598 		unsigned long private, enum migrate_mode mode, int reason,
1599 		struct list_head *ret_folios, struct list_head *split_folios,
1600 		struct migrate_pages_stats *stats, int nr_pass)
1601 {
1602 	int retry = 1;
1603 	int thp_retry = 1;
1604 	int nr_failed = 0;
1605 	int nr_retry_pages = 0;
1606 	int pass = 0;
1607 	bool is_thp = false;
1608 	bool is_large = false;
1609 	struct folio *folio, *folio2, *dst = NULL, *dst2;
1610 	int rc, rc_saved = 0, nr_pages;
1611 	LIST_HEAD(unmap_folios);
1612 	LIST_HEAD(dst_folios);
1613 	bool nosplit = (reason == MR_NUMA_MISPLACED);
1614 
1615 	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1616 			!list_empty(from) && !list_is_singular(from));
1617 
1618 	for (pass = 0; pass < nr_pass && retry; pass++) {
1619 		retry = 0;
1620 		thp_retry = 0;
1621 		nr_retry_pages = 0;
1622 
1623 		list_for_each_entry_safe(folio, folio2, from, lru) {
1624 			is_large = folio_test_large(folio);
1625 			is_thp = is_large && folio_test_pmd_mappable(folio);
1626 			nr_pages = folio_nr_pages(folio);
1627 
1628 			cond_resched();
1629 
1630 			/*
1631 			 * The rare folio on the deferred split list should
1632 			 * be split now. It should not count as a failure:
1633 			 * but increment nr_failed because, without doing so,
1634 			 * migrate_pages() may report success with (split but
1635 			 * unmigrated) pages still on its fromlist; whereas it
1636 			 * always reports success when its fromlist is empty.
1637 			 * stats->nr_thp_failed should be increased too,
1638 			 * otherwise stats inconsistency will happen when
1639 			 * migrate_pages_batch is called via migrate_pages()
1640 			 * with MIGRATE_SYNC and MIGRATE_ASYNC.
1641 			 *
1642 			 * Only check it without removing it from the list,
1643 			 * since the folio can be on a deferred_split_scan()
1644 			 * local list and removing it there could corrupt that
1645 			 * list.  The folio split process below can handle it
1646 			 * with the help of folio_ref_freeze().
1647 			 *
1648 			 * nr_pages > 2 is needed to avoid checking order-1
1649 			 * page cache folios. They exist, in contrast to
1650 			 * non-existent order-1 anonymous folios, and do not
1651 			 * use _deferred_list.
1652 			 */
1653 			if (nr_pages > 2 &&
1654 			   !list_empty(&folio->_deferred_list)) {
1655 				if (try_split_folio(folio, split_folios) == 0) {
1656 					nr_failed++;
1657 					stats->nr_thp_failed += is_thp;
1658 					stats->nr_thp_split += is_thp;
1659 					stats->nr_split++;
1660 					continue;
1661 				}
1662 			}
1663 
1664 			/*
1665 			 * Large folio migration might be unsupported or
1666 			 * the allocation might fail, so we should retry
1667 			 * the same folio after splitting the large folio
1668 			 * into normal folios.
1669 			 *
1670 			 * Split folios are put in split_folios, and
1671 			 * we will migrate them after the rest of the
1672 			 * list is processed.
1673 			 */
1674 			if (!thp_migration_supported() && is_thp) {
1675 				nr_failed++;
1676 				stats->nr_thp_failed++;
1677 				if (!try_split_folio(folio, split_folios)) {
1678 					stats->nr_thp_split++;
1679 					stats->nr_split++;
1680 					continue;
1681 				}
1682 				stats->nr_failed_pages += nr_pages;
1683 				list_move_tail(&folio->lru, ret_folios);
1684 				continue;
1685 			}
1686 
1687 			rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1688 					private, folio, &dst, mode, reason,
1689 					ret_folios);
1690 			/*
1691 			 * The rules are:
1692 			 *	Success: folio will be freed
1693 			 *	Unmap: folio will be put on unmap_folios list,
1694 			 *	       dst folio put on dst_folios list
1695 			 *	-EAGAIN: stay on the from list
1696 			 *	-ENOMEM: stay on the from list
1697 			 *	Other errno: put on ret_folios list
1698 			 */
1699 			switch(rc) {
1700 			case -ENOMEM:
1701 				/*
1702 				 * When memory is low, don't bother to try to migrate
1703 				 * other folios, move unmapped folios, then exit.
1704 				 */
1705 				nr_failed++;
1706 				stats->nr_thp_failed += is_thp;
1707 				/* Large folio NUMA faulting doesn't split to retry. */
1708 				/* Large folios are not split for retry during NUMA-fault migration. */
1709 					int ret = try_split_folio(folio, split_folios);
1710 
1711 					if (!ret) {
1712 						stats->nr_thp_split += is_thp;
1713 						stats->nr_split++;
1714 						break;
1715 					} else if (reason == MR_LONGTERM_PIN &&
1716 						   ret == -EAGAIN) {
1717 						/*
1718 						 * Try again to split large folio to
1719 						 * mitigate the failure of longterm pinning.
1720 						 */
1721 						retry++;
1722 						thp_retry += is_thp;
1723 						nr_retry_pages += nr_pages;
1724 						/* Undo duplicated failure counting. */
1725 						nr_failed--;
1726 						stats->nr_thp_failed -= is_thp;
1727 						break;
1728 					}
1729 				}
1730 
1731 				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1732 				/* nr_failed isn't updated since it won't be used: rc_saved is returned instead */
1733 				stats->nr_thp_failed += thp_retry;
1734 				rc_saved = rc;
1735 				if (list_empty(&unmap_folios))
1736 					goto out;
1737 				else
1738 					goto move;
1739 			case -EAGAIN:
1740 				retry++;
1741 				thp_retry += is_thp;
1742 				nr_retry_pages += nr_pages;
1743 				break;
1744 			case MIGRATEPAGE_SUCCESS:
1745 				stats->nr_succeeded += nr_pages;
1746 				stats->nr_thp_succeeded += is_thp;
1747 				break;
1748 			case MIGRATEPAGE_UNMAP:
1749 				list_move_tail(&folio->lru, &unmap_folios);
1750 				list_add_tail(&dst->lru, &dst_folios);
1751 				break;
1752 			default:
1753 				/*
1754 				 * Permanent failure (-EBUSY, etc.):
1755 				 * unlike the -EAGAIN case, the failed folio is
1756 				 * removed from the migration folio list and is not
1757 				 * retried in the next outer loop.
1758 				 */
1759 				nr_failed++;
1760 				stats->nr_thp_failed += is_thp;
1761 				stats->nr_failed_pages += nr_pages;
1762 				break;
1763 			}
1764 		}
1765 	}
1766 	nr_failed += retry;
1767 	stats->nr_thp_failed += thp_retry;
1768 	stats->nr_failed_pages += nr_retry_pages;
1769 move:
1770 	/* Flush TLBs for all unmapped folios */
1771 	try_to_unmap_flush();
1772 
1773 	retry = 1;
1774 	for (pass = 0; pass < nr_pass && retry; pass++) {
1775 		retry = 0;
1776 		thp_retry = 0;
1777 		nr_retry_pages = 0;
1778 
1779 		dst = list_first_entry(&dst_folios, struct folio, lru);
1780 		dst2 = list_next_entry(dst, lru);
1781 		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1782 			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1783 			nr_pages = folio_nr_pages(folio);
1784 
1785 			cond_resched();
1786 
1787 			rc = migrate_folio_move(put_new_folio, private,
1788 						folio, dst, mode,
1789 						reason, ret_folios);
1790 			/*
1791 			 * The rules are:
1792 			 *	Success: folio will be freed
1793 			 *	-EAGAIN: stay on the unmap_folios list
1794 			 *	Other errno: put on ret_folios list
1795 			 */
1796 			switch (rc) {
1797 			case -EAGAIN:
1798 				retry++;
1799 				thp_retry += is_thp;
1800 				nr_retry_pages += nr_pages;
1801 				break;
1802 			case MIGRATEPAGE_SUCCESS:
1803 				stats->nr_succeeded += nr_pages;
1804 				stats->nr_thp_succeeded += is_thp;
1805 				break;
1806 			default:
1807 				nr_failed++;
1808 				stats->nr_thp_failed += is_thp;
1809 				stats->nr_failed_pages += nr_pages;
1810 				break;
1811 			}
1812 			dst = dst2;
1813 			dst2 = list_next_entry(dst, lru);
1814 		}
1815 	}
1816 	nr_failed += retry;
1817 	stats->nr_thp_failed += thp_retry;
1818 	stats->nr_failed_pages += nr_retry_pages;
1819 
1820 	rc = rc_saved ? : nr_failed;
1821 out:
1822 	/* Clean up remaining folios */
1823 	dst = list_first_entry(&dst_folios, struct folio, lru);
1824 	dst2 = list_next_entry(dst, lru);
1825 	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1826 		int old_page_state = 0;
1827 		struct anon_vma *anon_vma = NULL;
1828 
1829 		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1830 		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1831 				       anon_vma, true, ret_folios);
1832 		list_del(&dst->lru);
1833 		migrate_folio_undo_dst(dst, true, put_new_folio, private);
1834 		dst = dst2;
1835 		dst2 = list_next_entry(dst, lru);
1836 	}
1837 
1838 	return rc;
1839 }
1840 
1841 static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1842 		free_folio_t put_new_folio, unsigned long private,
1843 		enum migrate_mode mode, int reason,
1844 		struct list_head *ret_folios, struct list_head *split_folios,
1845 		struct migrate_pages_stats *stats)
1846 {
1847 	int rc, nr_failed = 0;
1848 	LIST_HEAD(folios);
1849 	struct migrate_pages_stats astats;
1850 
1851 	memset(&astats, 0, sizeof(astats));
1852 	/* Try to migrate the folios in a batch with MIGRATE_ASYNC mode first */
1853 	rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
1854 				 reason, &folios, split_folios, &astats,
1855 				 NR_MAX_MIGRATE_ASYNC_RETRY);
1856 	stats->nr_succeeded += astats.nr_succeeded;
1857 	stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1858 	stats->nr_thp_split += astats.nr_thp_split;
1859 	stats->nr_split += astats.nr_split;
1860 	if (rc < 0) {
1861 		stats->nr_failed_pages += astats.nr_failed_pages;
1862 		stats->nr_thp_failed += astats.nr_thp_failed;
1863 		list_splice_tail(&folios, ret_folios);
1864 		return rc;
1865 	}
1866 	stats->nr_thp_failed += astats.nr_thp_split;
1867 	/*
1868 	 * Do not count rc, as pages will be retried below.
1869 	 * Count nr_split only, since it includes nr_thp_split.
1870 	 */
1871 	nr_failed += astats.nr_split;
1872 	/*
1873 	 * Fall back to migrating all failed folios one by one synchronously. All
1874 	 * failed folios except split THPs will be retried, so their failure
1875 	 * isn't counted.
1876 	 */
1877 	list_splice_tail_init(&folios, from);
1878 	while (!list_empty(from)) {
1879 		list_move(from->next, &folios);
1880 		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1881 					 private, mode, reason, ret_folios,
1882 					 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1883 		list_splice_tail_init(&folios, ret_folios);
1884 		if (rc < 0)
1885 			return rc;
1886 		nr_failed += rc;
1887 	}
1888 
1889 	return nr_failed;
1890 }
1891 
1892 /*
1893  * migrate_pages - migrate the folios specified in a list, to the free folios
1894  *		   supplied as the target for the page migration
1895  *
1896  * @from:		The list of folios to be migrated.
1897  * @get_new_folio:	The function used to allocate free folios to be used
1898  *			as the target of the folio migration.
1899  * @put_new_folio:	The function used to free target folios if migration
1900  *			fails, or NULL if no special handling is necessary.
1901  * @private:		Private data to be passed on to get_new_folio()
1902  * @mode:		The migration mode that specifies the constraints for
1903  *			folio migration, if any.
1904  * @reason:		The reason for folio migration.
1905  * @ret_succeeded:	Set to the number of folios migrated successfully if
1906  *			the caller passes a non-NULL pointer.
1907  *
1908  * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
1909  * are movable any more because the list has become empty or no retryable folios
1910  * exist any more. It is the caller's responsibility to call putback_movable_pages()
1911  * only if ret != 0.
1912  *
1913  * Returns the number of {normal, large, hugetlb} folios that were not migrated,
1914  * or an error code. Each large folio split is counted as one non-migrated
1915  * large folio, no matter how many of the resulting split folios are migrated
1916  * successfully.
1917  */
1918 int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
1919 		free_folio_t put_new_folio, unsigned long private,
1920 		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1921 {
1922 	int rc, rc_gather;
1923 	int nr_pages;
1924 	struct folio *folio, *folio2;
1925 	LIST_HEAD(folios);
1926 	LIST_HEAD(ret_folios);
1927 	LIST_HEAD(split_folios);
1928 	struct migrate_pages_stats stats;
1929 
1930 	trace_mm_migrate_pages_start(mode, reason);
1931 
1932 	memset(&stats, 0, sizeof(stats));
1933 
1934 	rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
1935 				     mode, reason, &stats, &ret_folios);
1936 	if (rc_gather < 0)
1937 		goto out;
1938 
1939 again:
1940 	nr_pages = 0;
1941 	list_for_each_entry_safe(folio, folio2, from, lru) {
1942 		/* Retried hugetlb folios will be kept in the list */
1943 		if (folio_test_hugetlb(folio)) {
1944 			list_move_tail(&folio->lru, &ret_folios);
1945 			continue;
1946 		}
1947 
1948 		nr_pages += folio_nr_pages(folio);
1949 		if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1950 			break;
1951 	}
1952 	if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1953 		list_cut_before(&folios, from, &folio2->lru);
1954 	else
1955 		list_splice_init(from, &folios);
1956 	if (mode == MIGRATE_ASYNC)
1957 		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1958 				private, mode, reason, &ret_folios,
1959 				&split_folios, &stats,
1960 				NR_MAX_MIGRATE_PAGES_RETRY);
1961 	else
1962 		rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
1963 				private, mode, reason, &ret_folios,
1964 				&split_folios, &stats);
1965 	list_splice_tail_init(&folios, &ret_folios);
1966 	if (rc < 0) {
1967 		rc_gather = rc;
1968 		list_splice_tail(&split_folios, &ret_folios);
1969 		goto out;
1970 	}
1971 	if (!list_empty(&split_folios)) {
1972 		/*
1973 		 * Failure isn't counted since all split folios of a large folio
1974 		 * are counted as 1 failure already.  And we only try to migrate
1975 		 * with minimal effort, forcing MIGRATE_ASYNC mode and retrying once.
1976 		 */
1977 		migrate_pages_batch(&split_folios, get_new_folio,
1978 				put_new_folio, private, MIGRATE_ASYNC, reason,
1979 				&ret_folios, NULL, &stats, 1);
1980 		list_splice_tail_init(&split_folios, &ret_folios);
1981 	}
1982 	rc_gather += rc;
1983 	if (!list_empty(from))
1984 		goto again;
1985 out:
1986 	/*
1987 	 * Put the permanently failed folios back on the migration list; they
1988 	 * will be put back on the right list by the caller.
1989 	 */
1990 	list_splice(&ret_folios, from);
1991 
1992 	/*
1993 	 * Return 0 if all split folios of the large folios that failed to migrate
1994 	 * were migrated successfully.
1995 	 */
1996 	if (list_empty(from))
1997 		rc_gather = 0;
1998 
1999 	count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
2000 	count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
2001 	count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
2002 	count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
2003 	count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
2004 	trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
2005 			       stats.nr_thp_succeeded, stats.nr_thp_failed,
2006 			       stats.nr_thp_split, stats.nr_split, mode,
2007 			       reason);
2008 
2009 	if (ret_succeeded)
2010 		*ret_succeeded = stats.nr_succeeded;
2011 
2012 	return rc_gather;
2013 }
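
/*
 * Illustrative sketch of the calling convention: a typical caller isolates
 * folios onto a private list, supplies an allocation callback, and puts any
 * non-migrated folios back afterwards, mirroring do_move_pages_to_node()
 * below.  "mtc" stands for a caller-provided struct migration_target_control
 * describing the target node and GFP mask.
 *
 *	LIST_HEAD(pagelist);
 *	unsigned int nr_succeeded;
 *	int err;
 *
 *	(isolate folios and add them to &pagelist)
 *
 *	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL,
 *			    &nr_succeeded);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */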
2014 
2015 struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2016 {
2017 	struct migration_target_control *mtc;
2018 	gfp_t gfp_mask;
2019 	unsigned int order = 0;
2020 	int nid;
2021 	int zidx;
2022 
2023 	mtc = (struct migration_target_control *)private;
2024 	gfp_mask = mtc->gfp_mask;
2025 	nid = mtc->nid;
2026 	if (nid == NUMA_NO_NODE)
2027 		nid = folio_nid(src);
2028 
2029 	if (folio_test_hugetlb(src)) {
2030 		struct hstate *h = folio_hstate(src);
2031 
2032 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2033 		return alloc_hugetlb_folio_nodemask(h, nid,
2034 						mtc->nmask, gfp_mask,
2035 						htlb_allow_alloc_fallback(mtc->reason));
2036 	}
2037 
2038 	if (folio_test_large(src)) {
2039 		/*
2040 		 * Clear __GFP_RECLAIM to make the migration callback
2041 		 * consistent with regular THP allocations.
2042 		 */
2043 		gfp_mask &= ~__GFP_RECLAIM;
2044 		gfp_mask |= GFP_TRANSHUGE;
2045 		order = folio_order(src);
2046 	}
2047 	zidx = zone_idx(folio_zone(src));
2048 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2049 		gfp_mask |= __GFP_HIGHMEM;
2050 
2051 	return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2052 }
2053 
2054 #ifdef CONFIG_NUMA
2055 
2056 static int store_status(int __user *status, int start, int value, int nr)
2057 {
2058 	while (nr-- > 0) {
2059 		if (put_user(value, status + start))
2060 			return -EFAULT;
2061 		start++;
2062 	}
2063 
2064 	return 0;
2065 }
2066 
2067 static int do_move_pages_to_node(struct list_head *pagelist, int node)
2068 {
2069 	int err;
2070 	struct migration_target_control mtc = {
2071 		.nid = node,
2072 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2073 		.reason = MR_SYSCALL,
2074 	};
2075 
2076 	err = migrate_pages(pagelist, alloc_migration_target, NULL,
2077 		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2078 	if (err)
2079 		putback_movable_pages(pagelist);
2080 	return err;
2081 }
2082 
2083 /*
2084  * Resolves the given address to a struct page, isolates it from the LRU and
2085  * puts it on the given pagelist.
2086  * Returns:
2087  *     errno - if the page cannot be found/isolated
2088  *     0 - when it doesn't have to be migrated because it is already on the
2089  *         target node
2090  *     1 - when it has been queued
2091  */
2092 static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
2093 		int node, struct list_head *pagelist, bool migrate_all)
2094 {
2095 	struct vm_area_struct *vma;
2096 	unsigned long addr;
2097 	struct page *page;
2098 	struct folio *folio;
2099 	int err;
2100 
2101 	mmap_read_lock(mm);
2102 	addr = (unsigned long)untagged_addr_remote(mm, p);
2103 
2104 	err = -EFAULT;
2105 	vma = vma_lookup(mm, addr);
2106 	if (!vma || !vma_migratable(vma))
2107 		goto out;
2108 
2109 	/* FOLL_DUMP to ignore special (like zero) pages */
2110 	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2111 
2112 	err = PTR_ERR(page);
2113 	if (IS_ERR(page))
2114 		goto out;
2115 
2116 	err = -ENOENT;
2117 	if (!page)
2118 		goto out;
2119 
2120 	folio = page_folio(page);
2121 	if (folio_is_zone_device(folio))
2122 		goto out_putfolio;
2123 
2124 	err = 0;
2125 	if (folio_nid(folio) == node)
2126 		goto out_putfolio;
2127 
2128 	err = -EACCES;
2129 	if (folio_likely_mapped_shared(folio) && !migrate_all)
2130 		goto out_putfolio;
2131 
2132 	err = -EBUSY;
2133 	if (folio_test_hugetlb(folio)) {
2134 		if (isolate_hugetlb(folio, pagelist))
2135 			err = 1;
2136 	} else {
2137 		if (!folio_isolate_lru(folio))
2138 			goto out_putfolio;
2139 
2140 		err = 1;
2141 		list_add_tail(&folio->lru, pagelist);
2142 		node_stat_mod_folio(folio,
2143 			NR_ISOLATED_ANON + folio_is_file_lru(folio),
2144 			folio_nr_pages(folio));
2145 	}
2146 out_putfolio:
2147 	/*
2148 	 * Either drop the extra reference taken by folio_isolate_lru()
2149 	 * or drop the folio ref if it was not isolated.
2150 	 */
2151 	folio_put(folio);
2152 out:
2153 	mmap_read_unlock(mm);
2154 	return err;
2155 }
2156 
2157 static int move_pages_and_store_status(int node,
2158 		struct list_head *pagelist, int __user *status,
2159 		int start, int i, unsigned long nr_pages)
2160 {
2161 	int err;
2162 
2163 	if (list_empty(pagelist))
2164 		return 0;
2165 
2166 	err = do_move_pages_to_node(pagelist, node);
2167 	if (err) {
2168 		/*
2169 		 * A positive err means the number of pages that
2170 		 * failed to migrate.  Since we are going to
2171 		 * abort and return the number of non-migrated
2172 		 * pages, we need to include the rest of the
2173 		 * nr_pages that have not been attempted as
2174 		 * well.
2175 		 */
2176 		if (err > 0)
2177 			err += nr_pages - i;
2178 		return err;
2179 	}
2180 	return store_status(status, start, node, i - start);
2181 }
2182 
2183 /*
2184  * Migrate an array of page addresses onto an array of nodes and fill
2185  * the corresponding array of status values.
2186  */
2187 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2188 			 unsigned long nr_pages,
2189 			 const void __user * __user *pages,
2190 			 const int __user *nodes,
2191 			 int __user *status, int flags)
2192 {
2193 	compat_uptr_t __user *compat_pages = (void __user *)pages;
2194 	int current_node = NUMA_NO_NODE;
2195 	LIST_HEAD(pagelist);
2196 	int start, i;
2197 	int err = 0, err1;
2198 
2199 	lru_cache_disable();
2200 
2201 	for (i = start = 0; i < nr_pages; i++) {
2202 		const void __user *p;
2203 		int node;
2204 
2205 		err = -EFAULT;
2206 		if (in_compat_syscall()) {
2207 			compat_uptr_t cp;
2208 
2209 			if (get_user(cp, compat_pages + i))
2210 				goto out_flush;
2211 
2212 			p = compat_ptr(cp);
2213 		} else {
2214 			if (get_user(p, pages + i))
2215 				goto out_flush;
2216 		}
2217 		if (get_user(node, nodes + i))
2218 			goto out_flush;
2219 
2220 		err = -ENODEV;
2221 		if (node < 0 || node >= MAX_NUMNODES)
2222 			goto out_flush;
2223 		if (!node_state(node, N_MEMORY))
2224 			goto out_flush;
2225 
2226 		err = -EACCES;
2227 		if (!node_isset(node, task_nodes))
2228 			goto out_flush;
2229 
2230 		if (current_node == NUMA_NO_NODE) {
2231 			current_node = node;
2232 			start = i;
2233 		} else if (node != current_node) {
2234 			err = move_pages_and_store_status(current_node,
2235 					&pagelist, status, start, i, nr_pages);
2236 			if (err)
2237 				goto out;
2238 			start = i;
2239 			current_node = node;
2240 		}
2241 
2242 		/*
2243 		 * Errors in the page lookup or isolation are not fatal and we simply
2244 		 * report them via status.
2245 		 */
2246 		err = add_page_for_migration(mm, p, current_node, &pagelist,
2247 					     flags & MPOL_MF_MOVE_ALL);
2248 
2249 		if (err > 0) {
2250 			/* The page is successfully queued for migration */
2251 			continue;
2252 		}
2253 
2254 		/*
2255 		 * The move_pages() man page does not document an -EEXIST error, so
2256 		 * use -EFAULT instead.
2257 		 */
2258 		if (err == -EEXIST)
2259 			err = -EFAULT;
2260 
2261 		/*
2262 		 * If the page is already on the target node (!err), store the
2263 		 * node, otherwise, store the err.
2264 		 */
2265 		err = store_status(status, i, err ? : current_node, 1);
2266 		if (err)
2267 			goto out_flush;
2268 
2269 		err = move_pages_and_store_status(current_node, &pagelist,
2270 				status, start, i, nr_pages);
2271 		if (err) {
2272 			/* We have accounted for page i */
2273 			if (err > 0)
2274 				err--;
2275 			goto out;
2276 		}
2277 		current_node = NUMA_NO_NODE;
2278 	}
2279 out_flush:
2280 	/* Make sure we do not overwrite the existing error */
2281 	err1 = move_pages_and_store_status(current_node, &pagelist,
2282 				status, start, i, nr_pages);
2283 	if (err >= 0)
2284 		err = err1;
2285 out:
2286 	lru_cache_enable();
2287 	return err;
2288 }
2289 
2290 /*
2291  * Determine the nodes of an array of pages and store them in an array of status.
2292  */
2293 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2294 				const void __user **pages, int *status)
2295 {
2296 	unsigned long i;
2297 
2298 	mmap_read_lock(mm);
2299 
2300 	for (i = 0; i < nr_pages; i++) {
2301 		unsigned long addr = (unsigned long)(*pages);
2302 		struct vm_area_struct *vma;
2303 		struct page *page;
2304 		int err = -EFAULT;
2305 
2306 		vma = vma_lookup(mm, addr);
2307 		if (!vma)
2308 			goto set_status;
2309 
2310 		/* FOLL_DUMP to ignore special (like zero) pages */
2311 		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2312 
2313 		err = PTR_ERR(page);
2314 		if (IS_ERR(page))
2315 			goto set_status;
2316 
2317 		err = -ENOENT;
2318 		if (!page)
2319 			goto set_status;
2320 
2321 		if (!is_zone_device_page(page))
2322 			err = page_to_nid(page);
2323 
2324 		put_page(page);
2325 set_status:
2326 		*status = err;
2327 
2328 		pages++;
2329 		status++;
2330 	}
2331 
2332 	mmap_read_unlock(mm);
2333 }
2334 
2335 static int get_compat_pages_array(const void __user *chunk_pages[],
2336 				  const void __user * __user *pages,
2337 				  unsigned long chunk_nr)
2338 {
2339 	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2340 	compat_uptr_t p;
2341 	int i;
2342 
2343 	for (i = 0; i < chunk_nr; i++) {
2344 		if (get_user(p, pages32 + i))
2345 			return -EFAULT;
2346 		chunk_pages[i] = compat_ptr(p);
2347 	}
2348 
2349 	return 0;
2350 }
2351 
2352 /*
2353  * Determine the nodes of a user array of pages and store them in
2354  * a user array of status.
2355  */
2356 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2357 			 const void __user * __user *pages,
2358 			 int __user *status)
2359 {
2360 #define DO_PAGES_STAT_CHUNK_NR 16UL
2361 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2362 	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2363 
2364 	while (nr_pages) {
2365 		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2366 
2367 		if (in_compat_syscall()) {
2368 			if (get_compat_pages_array(chunk_pages, pages,
2369 						   chunk_nr))
2370 				break;
2371 		} else {
2372 			if (copy_from_user(chunk_pages, pages,
2373 				      chunk_nr * sizeof(*chunk_pages)))
2374 				break;
2375 		}
2376 
2377 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2378 
2379 		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2380 			break;
2381 
2382 		pages += chunk_nr;
2383 		status += chunk_nr;
2384 		nr_pages -= chunk_nr;
2385 	}
2386 	return nr_pages ? -EFAULT : 0;
2387 }
2388 
2389 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2390 {
2391 	struct task_struct *task;
2392 	struct mm_struct *mm;
2393 
2394 	/*
2395 	 * There is no need to check if the current process has the right to modify
2396 	 * the specified process when they are the same.
2397 	 */
2398 	if (!pid) {
2399 		mmget(current->mm);
2400 		*mem_nodes = cpuset_mems_allowed(current);
2401 		return current->mm;
2402 	}
2403 
2404 	/* Find the mm_struct */
2405 	rcu_read_lock();
2406 	task = find_task_by_vpid(pid);
2407 	if (!task) {
2408 		rcu_read_unlock();
2409 		return ERR_PTR(-ESRCH);
2410 	}
2411 	get_task_struct(task);
2412 
2413 	/*
2414 	 * Check if this process has the right to modify the specified
2415 	 * process. Use the regular "ptrace_may_access()" checks.
2416 	 */
2417 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2418 		rcu_read_unlock();
2419 		mm = ERR_PTR(-EPERM);
2420 		goto out;
2421 	}
2422 	rcu_read_unlock();
2423 
2424 	mm = ERR_PTR(security_task_movememory(task));
2425 	if (IS_ERR(mm))
2426 		goto out;
2427 	*mem_nodes = cpuset_mems_allowed(task);
2428 	mm = get_task_mm(task);
2429 out:
2430 	put_task_struct(task);
2431 	if (!mm)
2432 		mm = ERR_PTR(-EINVAL);
2433 	return mm;
2434 }
2435 
2436 /*
2437  * Move a list of pages in the address space of the process identified by
2438  * pid (or of the current process when pid is 0).
2439  */
2440 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2441 			     const void __user * __user *pages,
2442 			     const int __user *nodes,
2443 			     int __user *status, int flags)
2444 {
2445 	struct mm_struct *mm;
2446 	int err;
2447 	nodemask_t task_nodes;
2448 
2449 	/* Check flags */
2450 	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2451 		return -EINVAL;
2452 
2453 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2454 		return -EPERM;
2455 
2456 	mm = find_mm_struct(pid, &task_nodes);
2457 	if (IS_ERR(mm))
2458 		return PTR_ERR(mm);
2459 
2460 	if (nodes)
2461 		err = do_pages_move(mm, task_nodes, nr_pages, pages,
2462 				    nodes, status, flags);
2463 	else
2464 		err = do_pages_stat(mm, nr_pages, pages, status);
2465 
2466 	mmput(mm);
2467 	return err;
2468 }
2469 
2470 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2471 		const void __user * __user *, pages,
2472 		const int __user *, nodes,
2473 		int __user *, status, int, flags)
2474 {
2475 	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2476 }
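
/*
 * Illustrative userspace sketch (assuming the move_pages(2) wrapper declared
 * in <numaif.h>; a raw syscall(__NR_move_pages, ...) behaves the same way):
 *
 *	void *pages[1] = { addr };	(page-aligned user address)
 *	int nodes[1] = { 1 };		(desired target node)
 *	int status[1];
 *
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * On return, status[0] holds the node the page now resides on, or a negative
 * errno.  Passing nodes == NULL only queries the current node of each page
 * via the do_pages_stat() path above, without moving anything.
 */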
2477 
2478 #ifdef CONFIG_NUMA_BALANCING
2479 /*
2480  * Returns true if this is a safe migration target node for misplaced NUMA
2481  * pages. Currently it only checks the watermarks, which is crude.
2482  */
2483 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2484 				   unsigned long nr_migrate_pages)
2485 {
2486 	int z;
2487 
2488 	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2489 		struct zone *zone = pgdat->node_zones + z;
2490 
2491 		if (!managed_zone(zone))
2492 			continue;
2493 
2494 		/* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2495 		if (!zone_watermark_ok(zone, 0,
2496 				       high_wmark_pages(zone) +
2497 				       nr_migrate_pages,
2498 				       ZONE_MOVABLE, 0))
2499 			continue;
2500 		return true;
2501 	}
2502 	return false;
2503 }
2504 
2505 static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2506 					   unsigned long data)
2507 {
2508 	int nid = (int) data;
2509 	int order = folio_order(src);
2510 	gfp_t gfp = __GFP_THISNODE;
2511 
2512 	if (order > 0)
2513 		gfp |= GFP_TRANSHUGE_LIGHT;
2514 	else {
2515 		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2516 			__GFP_NOWARN;
2517 		gfp &= ~__GFP_RECLAIM;
2518 	}
2519 	return __folio_alloc_node(gfp, order, nid);
2520 }
2521 
2522 static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
2523 {
2524 	int nr_pages = folio_nr_pages(folio);
2525 
2526 	/* Avoid migrating to a node that is nearly full */
2527 	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2528 		int z;
2529 
2530 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2531 			return 0;
2532 		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2533 			if (managed_zone(pgdat->node_zones + z))
2534 				break;
2535 		}
2536 
2537 		/*
2538 		 * If there are no managed zones, do not proceed
2539 		 * further.
2540 		 */
2541 		if (z < 0)
2542 			return 0;
2543 
2544 		wakeup_kswapd(pgdat->node_zones + z, 0,
2545 			      folio_order(folio), ZONE_MOVABLE);
2546 		return 0;
2547 	}
2548 
2549 	if (!folio_isolate_lru(folio))
2550 		return 0;
2551 
2552 	node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2553 			    nr_pages);
2554 
2555 	/*
2556 	 * Isolating the folio has taken another reference, so the
2557 	 * caller's reference can be safely dropped without the folio
2558 	 * disappearing underneath us during migration.
2559 	 */
2560 	folio_put(folio);
2561 	return 1;
2562 }
2563 
2564 /*
2565  * Attempt to migrate a misplaced folio to the specified destination
2566  * node. The caller is expected to hold an elevated reference count on
2567  * the folio, which this function drops before returning.
2568  */
2569 int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
2570 			    int node)
2571 {
2572 	pg_data_t *pgdat = NODE_DATA(node);
2573 	int isolated;
2574 	int nr_remaining;
2575 	unsigned int nr_succeeded;
2576 	LIST_HEAD(migratepages);
2577 	int nr_pages = folio_nr_pages(folio);
2578 
2579 	/*
2580 	 * Don't migrate file folios that are mapped in multiple processes
2581 	 * with execute permissions as they are probably shared libraries.
2582 	 *
2583 	 * See folio_likely_mapped_shared() for possible imprecision when we
2584 	 * cannot easily detect if a folio is shared.
2585 	 */
2586 	if (folio_likely_mapped_shared(folio) && folio_is_file_lru(folio) &&
2587 	    (vma->vm_flags & VM_EXEC))
2588 		goto out;
2589 
2590 	/*
2591 	 * Also do not migrate dirty folios: not all filesystems can move dirty
2592 	 * folios in MIGRATE_ASYNC mode, so trying is a waste of cycles.
2593 	 */
2594 	if (folio_is_file_lru(folio) && folio_test_dirty(folio))
2595 		goto out;
2596 
2597 	isolated = numamigrate_isolate_folio(pgdat, folio);
2598 	if (!isolated)
2599 		goto out;
2600 
2601 	list_add(&folio->lru, &migratepages);
2602 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2603 				     NULL, node, MIGRATE_ASYNC,
2604 				     MR_NUMA_MISPLACED, &nr_succeeded);
2605 	if (nr_remaining) {
2606 		if (!list_empty(&migratepages)) {
2607 			list_del(&folio->lru);
2608 			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
2609 					folio_is_file_lru(folio), -nr_pages);
2610 			folio_putback_lru(folio);
2611 		}
2612 		isolated = 0;
2613 	}
2614 	if (nr_succeeded) {
2615 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2616 		if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
2617 			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2618 					    nr_succeeded);
2619 	}
2620 	BUG_ON(!list_empty(&migratepages));
2621 	return isolated;
2622 
2623 out:
2624 	folio_put(folio);
2625 	return 0;
2626 }
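
/*
 * Illustrative sketch of the expected calling convention, modelled loosely on
 * the NUMA hinting fault path (not part of this file): the fault handler
 * chooses a target node and calls
 *
 *	migrated = migrate_misplaced_folio(folio, vma, target_nid);
 *	if (migrated)
 *		nid = target_nid;
 *
 * The folio reference held by the caller is consumed whether or not the
 * migration succeeds, as noted in the comment above migrate_misplaced_folio().
 */
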
2627 #endif /* CONFIG_NUMA_BALANCING */
2628 #endif /* CONFIG_NUMA */
2629