xref: /linux/mm/migrate.c (revision 4f372263ef92ed2af55a8c226750b72021ff8d0f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Memory Migration functionality - linux/mm/migrate.c
4  *
5  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6  *
7  * Page migration was first developed in the context of the memory hotplug
8  * project. The main authors of the migration code are:
9  *
10  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11  * Hirokazu Takahashi <taka@valinux.co.jp>
12  * Dave Hansen <haveblue@us.ibm.com>
13  * Christoph Lameter
14  */
15 
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/ksm.h>
24 #include <linux/rmap.h>
25 #include <linux/topology.h>
26 #include <linux/cpu.h>
27 #include <linux/cpuset.h>
28 #include <linux/writeback.h>
29 #include <linux/mempolicy.h>
30 #include <linux/vmalloc.h>
31 #include <linux/security.h>
32 #include <linux/backing-dev.h>
33 #include <linux/compaction.h>
34 #include <linux/syscalls.h>
35 #include <linux/compat.h>
36 #include <linux/hugetlb.h>
37 #include <linux/gfp.h>
38 #include <linux/pfn_t.h>
39 #include <linux/page_idle.h>
40 #include <linux/page_owner.h>
41 #include <linux/sched/mm.h>
42 #include <linux/ptrace.h>
43 #include <linux/memory.h>
44 #include <linux/sched/sysctl.h>
45 #include <linux/memory-tiers.h>
46 #include <linux/pagewalk.h>
47 
48 #include <asm/tlbflush.h>
49 
50 #include <trace/events/migrate.h>
51 
52 #include "internal.h"
53 
54 bool isolate_movable_page(struct page *page, isolate_mode_t mode)
55 {
56 	struct folio *folio = folio_get_nontail_page(page);
57 	const struct movable_operations *mops;
58 
59 	/*
60 	 * Avoid burning cycles with pages that are still under __free_pages(),
61 	 * or that just got freed under us.
62 	 *
63 	 * In case we 'win' a race for a movable page being freed under us and
64 	 * raise its refcount, preventing __free_pages() from doing its job,
65 	 * the folio_put() at the end of this block will take care of
66 	 * releasing the page, thus avoiding a nasty leak.
67 	 */
68 	if (!folio)
69 		goto out;
70 
71 	/*
72 	 * Check the movable flag before taking the page lock, because
73 	 * we use non-atomic bitops on newly allocated page flags, so
74 	 * unconditionally grabbing the lock would corrupt the owner's non-atomic updates.
75 	 */
76 	if (unlikely(!__folio_test_movable(folio)))
77 		goto out_putfolio;
78 
79 	/*
80 	 * As movable pages are not isolated from LRU lists, concurrent
81 	 * compaction threads can race against page migration functions
82 	 * as well as race against the release of a page.
83 	 *
84 	 * In order to avoid having an already isolated movable page
85 	 * being (wrongly) re-isolated while it is under migration,
86 	 * or to avoid attempting to isolate pages being released,
87 	 * let's be sure we have the page lock
88 	 * before proceeding with the movable page isolation steps.
89 	 */
90 	if (unlikely(!folio_trylock(folio)))
91 		goto out_putfolio;
92 
93 	if (!folio_test_movable(folio) || folio_test_isolated(folio))
94 		goto out_no_isolated;
95 
96 	mops = folio_movable_ops(folio);
97 	VM_BUG_ON_FOLIO(!mops, folio);
98 
99 	if (!mops->isolate_page(&folio->page, mode))
100 		goto out_no_isolated;
101 
102 	/* Driver shouldn't use the isolated flag */
103 	WARN_ON_ONCE(folio_test_isolated(folio));
104 	folio_set_isolated(folio);
105 	folio_unlock(folio);
106 
107 	return true;
108 
109 out_no_isolated:
110 	folio_unlock(folio);
111 out_putfolio:
112 	folio_put(folio);
113 out:
114 	return false;
115 }
116 
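/*
 * Undo isolate_movable_page(): hand the folio back to its driver via the
 * putback_page() callback and clear the isolated flag. Called with the
 * folio lock held, as in putback_movable_pages() below.
 */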
117 static void putback_movable_folio(struct folio *folio)
118 {
119 	const struct movable_operations *mops = folio_movable_ops(folio);
120 
121 	mops->putback_page(&folio->page);
122 	folio_clear_isolated(folio);
123 }
124 
125 /*
126  * Put previously isolated pages back onto the appropriate lists
127  * from where they were once taken off for compaction/migration.
128  *
129  * This function shall be used whenever the isolated pageset has been
130  * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
131  * and folio_isolate_hugetlb().
132  */
133 void putback_movable_pages(struct list_head *l)
134 {
135 	struct folio *folio;
136 	struct folio *folio2;
137 
138 	list_for_each_entry_safe(folio, folio2, l, lru) {
139 		if (unlikely(folio_test_hugetlb(folio))) {
140 			folio_putback_hugetlb(folio);
141 			continue;
142 		}
143 		list_del(&folio->lru);
144 		/*
145 		 * We isolated a non-LRU movable folio, so here we can use
146 		 * __folio_test_movable() because an LRU folio's mapping cannot
147 		 * have PAGE_MAPPING_MOVABLE set.
148 		 */
149 		if (unlikely(__folio_test_movable(folio))) {
150 			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
151 			folio_lock(folio);
152 			if (folio_test_movable(folio))
153 				putback_movable_folio(folio);
154 			else
155 				folio_clear_isolated(folio);
156 			folio_unlock(folio);
157 			folio_put(folio);
158 		} else {
159 			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
160 					folio_is_file_lru(folio), -folio_nr_pages(folio));
161 			folio_putback_lru(folio);
162 		}
163 	}
164 }
165 
166 /* Must be called with an elevated refcount on the non-hugetlb folio */
167 bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
168 {
169 	bool isolated, lru;
170 
171 	if (folio_test_hugetlb(folio))
172 		return folio_isolate_hugetlb(folio, list);
173 
174 	lru = !__folio_test_movable(folio);
175 	if (lru)
176 		isolated = folio_isolate_lru(folio);
177 	else
178 		isolated = isolate_movable_page(&folio->page,
179 						ISOLATE_UNEVICTABLE);
180 
181 	if (!isolated)
182 		return false;
183 
184 	list_add(&folio->lru, list);
185 	if (lru)
186 		node_stat_add_folio(folio, NR_ISOLATED_ANON +
187 				    folio_is_file_lru(folio));
188 
189 	return true;
190 }
191 
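/*
 * While removing a migration PTE, try to map a zero-filled anonymous
 * subpage to the shared zeropage rather than re-establishing a mapping of
 * the migrated page. Returns true if the zeropage was installed, in which
 * case the caller skips the normal PTE restoration for this subpage.
 */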
192 static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
193 					  struct folio *folio,
194 					  unsigned long idx)
195 {
196 	struct page *page = folio_page(folio, idx);
197 	bool contains_data;
198 	pte_t newpte;
199 	void *addr;
200 
201 	if (PageCompound(page))
202 		return false;
203 	VM_BUG_ON_PAGE(!PageAnon(page), page);
204 	VM_BUG_ON_PAGE(!PageLocked(page), page);
205 	VM_BUG_ON_PAGE(pte_present(ptep_get(pvmw->pte)), page);
206 
207 	if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
208 	    mm_forbids_zeropage(pvmw->vma->vm_mm))
209 		return false;
210 
211 	/*
212 	 * The PMD entry mapping the old THP was flushed and the PTE mapping
213 	 * this subpage is now non-present. If the subpage contains only zeroes,
214 	 * then map it to the shared zeropage.
215 	 */
216 	addr = kmap_local_page(page);
217 	contains_data = memchr_inv(addr, 0, PAGE_SIZE);
218 	kunmap_local(addr);
219 
220 	if (contains_data)
221 		return false;
222 
223 	newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
224 					pvmw->vma->vm_page_prot));
225 	set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
226 
227 	dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
228 	return true;
229 }
230 
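/* Argument bundle passed to remove_migration_pte() via rmap_walk(). */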
231 struct rmap_walk_arg {
232 	struct folio *folio;
233 	bool map_unused_to_zeropage;
234 };
235 
236 /*
237  * Restore a potential migration pte to a working pte entry
238  */
239 static bool remove_migration_pte(struct folio *folio,
240 		struct vm_area_struct *vma, unsigned long addr, void *arg)
241 {
242 	struct rmap_walk_arg *rmap_walk_arg = arg;
243 	DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
244 
245 	while (page_vma_mapped_walk(&pvmw)) {
246 		rmap_t rmap_flags = RMAP_NONE;
247 		pte_t old_pte;
248 		pte_t pte;
249 		swp_entry_t entry;
250 		struct page *new;
251 		unsigned long idx = 0;
252 
253 		/* pgoff is invalid for ksm pages, but they are never large */
254 		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
255 			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
256 		new = folio_page(folio, idx);
257 
258 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
259 		/* PMD-mapped THP migration entry */
260 		if (!pvmw.pte) {
261 			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
262 					!folio_test_pmd_mappable(folio), folio);
263 			remove_migration_pmd(&pvmw, new);
264 			continue;
265 		}
266 #endif
267 		if (rmap_walk_arg->map_unused_to_zeropage &&
268 		    try_to_map_unused_to_zeropage(&pvmw, folio, idx))
269 			continue;
270 
271 		folio_get(folio);
272 		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
273 		old_pte = ptep_get(pvmw.pte);
274 
275 		entry = pte_to_swp_entry(old_pte);
276 		if (!is_migration_entry_young(entry))
277 			pte = pte_mkold(pte);
278 		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
279 			pte = pte_mkdirty(pte);
280 		if (pte_swp_soft_dirty(old_pte))
281 			pte = pte_mksoft_dirty(pte);
282 		else
283 			pte = pte_clear_soft_dirty(pte);
284 
285 		if (is_writable_migration_entry(entry))
286 			pte = pte_mkwrite(pte, vma);
287 		else if (pte_swp_uffd_wp(old_pte))
288 			pte = pte_mkuffd_wp(pte);
289 
290 		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
291 			rmap_flags |= RMAP_EXCLUSIVE;
292 
293 		if (unlikely(is_device_private_page(new))) {
294 			if (pte_write(pte))
295 				entry = make_writable_device_private_entry(
296 							page_to_pfn(new));
297 			else
298 				entry = make_readable_device_private_entry(
299 							page_to_pfn(new));
300 			pte = swp_entry_to_pte(entry);
301 			if (pte_swp_soft_dirty(old_pte))
302 				pte = pte_swp_mksoft_dirty(pte);
303 			if (pte_swp_uffd_wp(old_pte))
304 				pte = pte_swp_mkuffd_wp(pte);
305 		}
306 
307 #ifdef CONFIG_HUGETLB_PAGE
308 		if (folio_test_hugetlb(folio)) {
309 			struct hstate *h = hstate_vma(vma);
310 			unsigned int shift = huge_page_shift(h);
311 			unsigned long psize = huge_page_size(h);
312 
313 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
314 			if (folio_test_anon(folio))
315 				hugetlb_add_anon_rmap(folio, vma, pvmw.address,
316 						      rmap_flags);
317 			else
318 				hugetlb_add_file_rmap(folio);
319 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
320 					psize);
321 		} else
322 #endif
323 		{
324 			if (folio_test_anon(folio))
325 				folio_add_anon_rmap_pte(folio, new, vma,
326 							pvmw.address, rmap_flags);
327 			else
328 				folio_add_file_rmap_pte(folio, new, vma);
329 			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
330 		}
331 		if (READ_ONCE(vma->vm_flags) & VM_LOCKED)
332 			mlock_drain_local();
333 
334 		trace_remove_migration_pte(pvmw.address, pte_val(pte),
335 					   compound_order(new));
336 
337 		/* No need to invalidate - it was non-present before */
338 		update_mmu_cache(vma, pvmw.address, pvmw.pte);
339 	}
340 
341 	return true;
342 }
343 
344 /*
345  * Get rid of all migration entries and replace them with
346  * references to the indicated folio.
347  */
348 void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
349 {
350 	struct rmap_walk_arg rmap_walk_arg = {
351 		.folio = src,
352 		.map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
353 	};
354 
355 	struct rmap_walk_control rwc = {
356 		.rmap_one = remove_migration_pte,
357 		.arg = &rmap_walk_arg,
358 	};
359 
360 	VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);
361 
362 	if (flags & RMP_LOCKED)
363 		rmap_walk_locked(dst, &rwc);
364 	else
365 		rmap_walk(dst, &rwc);
366 }
367 
368 /*
369  * Something used the pte of a page under migration. We need to
370  * get to the page and wait until migration is finished.
371  * When we return from this function, the fault will be retried.
372  */
373 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
374 			  unsigned long address)
375 {
376 	spinlock_t *ptl;
377 	pte_t *ptep;
378 	pte_t pte;
379 	swp_entry_t entry;
380 
381 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
382 	if (!ptep)
383 		return;
384 
385 	pte = ptep_get(ptep);
386 	pte_unmap(ptep);
387 
388 	if (!is_swap_pte(pte))
389 		goto out;
390 
391 	entry = pte_to_swp_entry(pte);
392 	if (!is_migration_entry(entry))
393 		goto out;
394 
395 	migration_entry_wait_on_locked(entry, ptl);
396 	return;
397 out:
398 	spin_unlock(ptl);
399 }
400 
401 #ifdef CONFIG_HUGETLB_PAGE
402 /*
403  * The vma read lock must be held upon entry. Holding that lock prevents either
404  * the pte or the ptl from being freed.
405  *
406  * This function will release the vma lock before returning.
407  */
408 void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
409 {
410 	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
411 	pte_t pte;
412 
413 	hugetlb_vma_assert_locked(vma);
414 	spin_lock(ptl);
415 	pte = huge_ptep_get(vma->vm_mm, addr, ptep);
416 
417 	if (unlikely(!is_hugetlb_entry_migration(pte))) {
418 		spin_unlock(ptl);
419 		hugetlb_vma_unlock_read(vma);
420 	} else {
421 		/*
422 		 * If a migration entry existed, it is safe to release the vma
423 		 * lock here because the pgtable page won't be freed until the
424 		 * pgtable lock is released.  See the comment right above the
425 		 * pgtable lock release in migration_entry_wait_on_locked().
426 		 */
427 		hugetlb_vma_unlock_read(vma);
428 		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
429 	}
430 }
431 #endif
432 
433 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
434 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
435 {
436 	spinlock_t *ptl;
437 
438 	ptl = pmd_lock(mm, pmd);
439 	if (!is_pmd_migration_entry(*pmd))
440 		goto unlock;
441 	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
442 	return;
443 unlock:
444 	spin_unlock(ptl);
445 }
446 #endif
447 
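/*
 * Expected refcount on @folio while it is isolated for migration: one
 * reference held by the isolating caller, plus one per page taken by the
 * mapping (page cache or swap cache), plus one more if private data is
 * attached. E.g. an order-0 pagecache folio without private data is
 * expected to have a refcount of 2 at this point.
 */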
448 static int folio_expected_refs(struct address_space *mapping,
449 		struct folio *folio)
450 {
451 	int refs = 1;
452 	if (!mapping)
453 		return refs;
454 
455 	refs += folio_nr_pages(folio);
456 	if (folio_test_private(folio))
457 		refs++;
458 
459 	return refs;
460 }
461 
462 /*
463  * Replace the folio in the mapping.
464  *
465  * The number of remaining references must be:
466  * 1 for anonymous folios without a mapping
467  * 2 for folios with a mapping
468  * 3 for folios with a mapping and the private flag set.
469  */
470 static int __folio_migrate_mapping(struct address_space *mapping,
471 		struct folio *newfolio, struct folio *folio, int expected_count)
472 {
473 	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
474 	struct zone *oldzone, *newzone;
475 	int dirty;
476 	long nr = folio_nr_pages(folio);
477 	long entries, i;
478 
479 	if (!mapping) {
480 		/* Take off deferred split queue while frozen and memcg set */
481 		if (folio_test_large(folio) &&
482 		    folio_test_large_rmappable(folio)) {
483 			if (!folio_ref_freeze(folio, expected_count))
484 				return -EAGAIN;
485 			folio_unqueue_deferred_split(folio);
486 			folio_ref_unfreeze(folio, expected_count);
487 		}
488 
489 		/* No turning back from here */
490 		newfolio->index = folio->index;
491 		newfolio->mapping = folio->mapping;
492 		if (folio_test_anon(folio) && folio_test_large(folio))
493 			mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
494 		if (folio_test_swapbacked(folio))
495 			__folio_set_swapbacked(newfolio);
496 
497 		return MIGRATEPAGE_SUCCESS;
498 	}
499 
500 	oldzone = folio_zone(folio);
501 	newzone = folio_zone(newfolio);
502 
503 	xas_lock_irq(&xas);
504 	if (!folio_ref_freeze(folio, expected_count)) {
505 		xas_unlock_irq(&xas);
506 		return -EAGAIN;
507 	}
508 
509 	/* Take off deferred split queue while frozen and memcg set */
510 	folio_unqueue_deferred_split(folio);
511 
512 	/*
513 	 * Now we know that no one else is looking at the folio:
514 	 * no turning back from here.
515 	 */
516 	newfolio->index = folio->index;
517 	newfolio->mapping = folio->mapping;
518 	if (folio_test_anon(folio) && folio_test_large(folio))
519 		mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
520 	folio_ref_add(newfolio, nr); /* add cache reference */
521 	if (folio_test_swapbacked(folio))
522 		__folio_set_swapbacked(newfolio);
523 	if (folio_test_swapcache(folio)) {
524 		folio_set_swapcache(newfolio);
525 		newfolio->private = folio_get_private(folio);
526 		entries = nr;
527 	} else {
528 		entries = 1;
529 	}
530 
531 	/* Move dirty while folio refs frozen and newfolio not yet exposed */
532 	dirty = folio_test_dirty(folio);
533 	if (dirty) {
534 		folio_clear_dirty(folio);
535 		folio_set_dirty(newfolio);
536 	}
537 
538 	/* Swap cache still stores N entries instead of a high-order entry */
539 	for (i = 0; i < entries; i++) {
540 		xas_store(&xas, newfolio);
541 		xas_next(&xas);
542 	}
543 
544 	/*
545 	 * Drop cache reference from old folio by unfreezing
546 	 * to one less reference.
547 	 * We know this isn't the last reference.
548 	 */
549 	folio_ref_unfreeze(folio, expected_count - nr);
550 
551 	xas_unlock(&xas);
552 	/* Leave irq disabled to prevent preemption while updating stats */
553 
554 	/*
555 	 * If moved to a different zone then also account
556 	 * the folio for that zone. Other VM counters will be
557 	 * taken care of when we establish references to the
558 	 * new folio and drop references to the old folio.
559 	 *
560 	 * Note that anonymous folios are accounted for
561 	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
562 	 * are mapped to swap space.
563 	 */
564 	if (newzone != oldzone) {
565 		struct lruvec *old_lruvec, *new_lruvec;
566 		struct mem_cgroup *memcg;
567 
568 		memcg = folio_memcg(folio);
569 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
570 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
571 
572 		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
573 		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
574 		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
575 			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
576 			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
577 
578 			if (folio_test_pmd_mappable(folio)) {
579 				__mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
580 				__mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
581 			}
582 		}
583 #ifdef CONFIG_SWAP
584 		if (folio_test_swapcache(folio)) {
585 			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
586 			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
587 		}
588 #endif
589 		if (dirty && mapping_can_writeback(mapping)) {
590 			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
591 			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
592 			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
593 			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
594 		}
595 	}
596 	local_irq_enable();
597 
598 	return MIGRATEPAGE_SUCCESS;
599 }
600 
601 int folio_migrate_mapping(struct address_space *mapping,
602 		struct folio *newfolio, struct folio *folio, int extra_count)
603 {
604 	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
605 
606 	if (folio_ref_count(folio) != expected_count)
607 		return -EAGAIN;
608 
609 	return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
610 }
611 EXPORT_SYMBOL(folio_migrate_mapping);
612 
613 /*
614  * The expected number of remaining references is the same as that
615  * of folio_migrate_mapping().
616  */
617 int migrate_huge_page_move_mapping(struct address_space *mapping,
618 				   struct folio *dst, struct folio *src)
619 {
620 	XA_STATE(xas, &mapping->i_pages, folio_index(src));
621 	int rc, expected_count = folio_expected_refs(mapping, src);
622 
623 	if (folio_ref_count(src) != expected_count)
624 		return -EAGAIN;
625 
626 	rc = folio_mc_copy(dst, src);
627 	if (unlikely(rc))
628 		return rc;
629 
630 	xas_lock_irq(&xas);
631 	if (!folio_ref_freeze(src, expected_count)) {
632 		xas_unlock_irq(&xas);
633 		return -EAGAIN;
634 	}
635 
636 	dst->index = src->index;
637 	dst->mapping = src->mapping;
638 
639 	folio_ref_add(dst, folio_nr_pages(dst));
640 
641 	xas_store(&xas, dst);
642 
643 	folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));
644 
645 	xas_unlock_irq(&xas);
646 
647 	return MIGRATEPAGE_SUCCESS;
648 }
649 
650 /*
651  * Copy the flags and some other ancillary information
652  */
653 void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
654 {
655 	int cpupid;
656 
657 	if (folio_test_referenced(folio))
658 		folio_set_referenced(newfolio);
659 	if (folio_test_uptodate(folio))
660 		folio_mark_uptodate(newfolio);
661 	if (folio_test_clear_active(folio)) {
662 		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
663 		folio_set_active(newfolio);
664 	} else if (folio_test_clear_unevictable(folio))
665 		folio_set_unevictable(newfolio);
666 	if (folio_test_workingset(folio))
667 		folio_set_workingset(newfolio);
668 	if (folio_test_checked(folio))
669 		folio_set_checked(newfolio);
670 	/*
671 	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
672 	 * migration entries. We can still have PG_anon_exclusive set on the
673 	 * effectively unmapped and unreferenced first sub-page of an
674 	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
675 	 */
676 	if (folio_test_mappedtodisk(folio))
677 		folio_set_mappedtodisk(newfolio);
678 
679 	/* Move the dirty flag if not done by folio_migrate_mapping() */
680 	if (folio_test_dirty(folio))
681 		folio_set_dirty(newfolio);
682 
683 	if (folio_test_young(folio))
684 		folio_set_young(newfolio);
685 	if (folio_test_idle(folio))
686 		folio_set_idle(newfolio);
687 
688 	folio_migrate_refs(newfolio, folio);
689 	/*
690 	 * Copy NUMA information to the new page, to prevent over-eager
691 	 * future migrations of this same page.
692 	 */
693 	cpupid = folio_xchg_last_cpupid(folio, -1);
694 	/*
695 	 * In memory tiering mode, when migrating between slow and fast
696 	 * memory nodes, reset the cpupid, because it is used to record
697 	 * the page access time in the slow memory node.
698 	 */
699 	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
700 		bool f_toptier = node_is_toptier(folio_nid(folio));
701 		bool t_toptier = node_is_toptier(folio_nid(newfolio));
702 
703 		if (f_toptier != t_toptier)
704 			cpupid = -1;
705 	}
706 	folio_xchg_last_cpupid(newfolio, cpupid);
707 
708 	folio_migrate_ksm(newfolio, folio);
709 	/*
710 	 * Please do not reorder this without considering how mm/ksm.c's
711 	 * ksm_get_folio() depends upon ksm_migrate_page() and the
712 	 * swapcache flag.
713 	 */
714 	if (folio_test_swapcache(folio))
715 		folio_clear_swapcache(folio);
716 	folio_clear_private(folio);
717 
718 	/* page->private contains hugetlb specific flags */
719 	if (!folio_test_hugetlb(folio))
720 		folio->private = NULL;
721 
722 	/*
723 	 * If any waiters have accumulated on the new page then
724 	 * wake them up.
725 	 */
726 	if (folio_test_writeback(newfolio))
727 		folio_end_writeback(newfolio);
728 
729 	/*
730 	 * PG_readahead shares the same bit with PG_reclaim.  The above
731 	 * folio_end_writeback() may clear PG_readahead mistakenly, so set the
732 	 * bit after that.
733 	 */
734 	if (folio_test_readahead(folio))
735 		folio_set_readahead(newfolio);
736 
737 	folio_copy_owner(newfolio, folio);
738 	pgalloc_tag_swap(newfolio, folio);
739 
740 	mem_cgroup_migrate(folio, newfolio);
741 }
742 EXPORT_SYMBOL(folio_migrate_flags);
743 
744 /************************************************************
745  *                    Migration functions
746  ***********************************************************/
747 
748 static int __migrate_folio(struct address_space *mapping, struct folio *dst,
749 			   struct folio *src, void *src_private,
750 			   enum migrate_mode mode)
751 {
752 	int rc, expected_count = folio_expected_refs(mapping, src);
753 
754 	/* Check whether src does not have extra refs before we do more work */
755 	if (folio_ref_count(src) != expected_count)
756 		return -EAGAIN;
757 
758 	rc = folio_mc_copy(dst, src);
759 	if (unlikely(rc))
760 		return rc;
761 
762 	rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
763 	if (rc != MIGRATEPAGE_SUCCESS)
764 		return rc;
765 
766 	if (src_private)
767 		folio_attach_private(dst, folio_detach_private(src));
768 
769 	folio_migrate_flags(dst, src);
770 	return MIGRATEPAGE_SUCCESS;
771 }
772 
773 /**
774  * migrate_folio() - Simple folio migration.
775  * @mapping: The address_space containing the folio.
776  * @dst: The folio to migrate the data to.
777  * @src: The folio containing the current data.
778  * @mode: How to migrate the page.
779  *
780  * Common logic to directly migrate a single LRU folio suitable for
781  * folios that do not have private data.
782  *
783  * Folios are locked upon entry and exit.
784  */
785 int migrate_folio(struct address_space *mapping, struct folio *dst,
786 		  struct folio *src, enum migrate_mode mode)
787 {
788 	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */
789 	return __migrate_folio(mapping, dst, src, NULL, mode);
790 }
791 EXPORT_SYMBOL(migrate_folio);
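/*
 * For instance, a filesystem whose folios never carry private data could
 * wire this up directly in its (hypothetical) address_space_operations:
 *
 *	const struct address_space_operations foo_aops = {
 *		.migrate_folio	= migrate_folio,
 *		...
 *	};
 */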
792 
793 #ifdef CONFIG_BUFFER_HEAD
794 /* Returns true if all buffers are successfully locked */
795 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
796 							enum migrate_mode mode)
797 {
798 	struct buffer_head *bh = head;
799 	struct buffer_head *failed_bh;
800 
801 	do {
802 		if (!trylock_buffer(bh)) {
803 			if (mode == MIGRATE_ASYNC)
804 				goto unlock;
805 			if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
806 				goto unlock;
807 			lock_buffer(bh);
808 		}
809 
810 		bh = bh->b_this_page;
811 	} while (bh != head);
812 
813 	return true;
814 
815 unlock:
816 	/* We failed to lock the buffer and cannot stall. */
817 	failed_bh = bh;
818 	bh = head;
819 	while (bh != failed_bh) {
820 		unlock_buffer(bh);
821 		bh = bh->b_this_page;
822 	}
823 
824 	return false;
825 }
826 
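/*
 * Common implementation of buffer_migrate_folio() and
 * buffer_migrate_folio_norefs(). With @check_refs set, additionally verify
 * under mapping->i_private_lock that no buffer head has an elevated
 * b_count, and hold BH_Migrate on the head buffer across the move so
 * concurrent lookups can see that the folio is being migrated.
 */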
827 static int __buffer_migrate_folio(struct address_space *mapping,
828 		struct folio *dst, struct folio *src, enum migrate_mode mode,
829 		bool check_refs)
830 {
831 	struct buffer_head *bh, *head;
832 	int rc;
833 	int expected_count;
834 
835 	head = folio_buffers(src);
836 	if (!head)
837 		return migrate_folio(mapping, dst, src, mode);
838 
839 	/* Check whether the folio does not have extra refs before we do more work */
840 	expected_count = folio_expected_refs(mapping, src);
841 	if (folio_ref_count(src) != expected_count)
842 		return -EAGAIN;
843 
844 	if (!buffer_migrate_lock_buffers(head, mode))
845 		return -EAGAIN;
846 
847 	if (check_refs) {
848 		bool busy, migrating;
849 		bool invalidated = false;
850 
851 		migrating = test_and_set_bit_lock(BH_Migrate, &head->b_state);
852 		VM_WARN_ON_ONCE(migrating);
853 recheck_buffers:
854 		busy = false;
855 		spin_lock(&mapping->i_private_lock);
856 		bh = head;
857 		do {
858 			if (atomic_read(&bh->b_count)) {
859 				busy = true;
860 				break;
861 			}
862 			bh = bh->b_this_page;
863 		} while (bh != head);
864 		spin_unlock(&mapping->i_private_lock);
865 		if (busy) {
866 			if (invalidated) {
867 				rc = -EAGAIN;
868 				goto unlock_buffers;
869 			}
870 			invalidate_bh_lrus();
871 			invalidated = true;
872 			goto recheck_buffers;
873 		}
874 	}
875 
876 	rc = filemap_migrate_folio(mapping, dst, src, mode);
877 	if (rc != MIGRATEPAGE_SUCCESS)
878 		goto unlock_buffers;
879 
880 	bh = head;
881 	do {
882 		folio_set_bh(bh, dst, bh_offset(bh));
883 		bh = bh->b_this_page;
884 	} while (bh != head);
885 
886 unlock_buffers:
887 	if (check_refs)
888 		clear_bit_unlock(BH_Migrate, &head->b_state);
889 	bh = head;
890 	do {
891 		unlock_buffer(bh);
892 		bh = bh->b_this_page;
893 	} while (bh != head);
894 
895 	return rc;
896 }
897 
898 /**
899  * buffer_migrate_folio() - Migration function for folios with buffers.
900  * @mapping: The address space containing @src.
901  * @dst: The folio to migrate to.
902  * @src: The folio to migrate from.
903  * @mode: How to migrate the folio.
904  *
905  * This function can only be used if the underlying filesystem guarantees
906  * that no other references to @src exist. For example, attached buffer
907  * heads are accessed only under the folio lock.  If your filesystem cannot
908  * provide this guarantee, buffer_migrate_folio_norefs() may be more
909  * appropriate.
910  *
911  * Return: 0 on success or a negative errno on failure.
912  */
913 int buffer_migrate_folio(struct address_space *mapping,
914 		struct folio *dst, struct folio *src, enum migrate_mode mode)
915 {
916 	return __buffer_migrate_folio(mapping, dst, src, mode, false);
917 }
918 EXPORT_SYMBOL(buffer_migrate_folio);
919 
920 /**
921  * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
922  * @mapping: The address space containing @src.
923  * @dst: The folio to migrate to.
924  * @src: The folio to migrate from.
925  * @mode: How to migrate the folio.
926  *
927  * Like buffer_migrate_folio() except that this variant is more careful
928  * and checks that there are also no buffer head references. This function
929  * is the right one for mappings where buffer heads are directly looked
930  * up and referenced (such as block device mappings).
931  *
932  * Return: 0 on success or a negative errno on failure.
933  */
934 int buffer_migrate_folio_norefs(struct address_space *mapping,
935 		struct folio *dst, struct folio *src, enum migrate_mode mode)
936 {
937 	return __buffer_migrate_folio(mapping, dst, src, mode, true);
938 }
939 EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
940 #endif /* CONFIG_BUFFER_HEAD */
941 
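/**
 * filemap_migrate_folio() - Migration function for folios with private data.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like migrate_folio(), but additionally moves the private data attached
 * to @src over to @dst, for filesystems that keep state at folio->private.
 *
 * Return: 0 on success or a negative errno on failure.
 */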
942 int filemap_migrate_folio(struct address_space *mapping,
943 		struct folio *dst, struct folio *src, enum migrate_mode mode)
944 {
945 	return __migrate_folio(mapping, dst, src, folio_get_private(src), mode);
946 }
947 EXPORT_SYMBOL_GPL(filemap_migrate_folio);
948 
949 /*
950  * Default handling if a filesystem does not provide a migration function.
951  */
952 static int fallback_migrate_folio(struct address_space *mapping,
953 		struct folio *dst, struct folio *src, enum migrate_mode mode)
954 {
955 	WARN_ONCE(mapping->a_ops->writepages,
956 			"%ps does not implement migrate_folio\n",
957 			mapping->a_ops);
958 	if (folio_test_dirty(src))
959 		return -EBUSY;
960 
961 	/*
962 	 * The filesystem may have private data at folio->private that we
963 	 * can't migrate automatically.
964 	 */
965 	if (!filemap_release_folio(src, GFP_KERNEL))
966 		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
967 
968 	return migrate_folio(mapping, dst, src, mode);
969 }
970 
971 /*
972  * Move a folio to a newly allocated folio.
973  * The folio is locked and all its PTEs have been successfully removed.
974  *
975  * The new page will have replaced the old page if this function
976  * is successful.
977  *
978  * Return value:
979  *   < 0 - error code
980  *  MIGRATEPAGE_SUCCESS - success
981  */
982 static int move_to_new_folio(struct folio *dst, struct folio *src,
983 				enum migrate_mode mode)
984 {
985 	int rc = -EAGAIN;
986 	bool is_lru = !__folio_test_movable(src);
987 
988 	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
989 	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
990 
991 	if (likely(is_lru)) {
992 		struct address_space *mapping = folio_mapping(src);
993 
994 		if (!mapping)
995 			rc = migrate_folio(mapping, dst, src, mode);
996 		else if (mapping_inaccessible(mapping))
997 			rc = -EOPNOTSUPP;
998 		else if (mapping->a_ops->migrate_folio)
999 			/*
1000 			 * Most folios have a mapping and most filesystems
1001 			 * provide a migrate_folio callback. Anonymous folios
1002 			 * are part of swap space which also has its own
1003 			 * migrate_folio callback. This is the most common path
1004 			 * for page migration.
1005 			 */
1006 			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
1007 								mode);
1008 		else
1009 			rc = fallback_migrate_folio(mapping, dst, src, mode);
1010 	} else {
1011 		const struct movable_operations *mops;
1012 
1013 		/*
1014 		 * A non-LRU page could have been released after the
1015 		 * isolation step. In that case, we shouldn't try migration.
1016 		 */
1017 		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1018 		if (!folio_test_movable(src)) {
1019 			rc = MIGRATEPAGE_SUCCESS;
1020 			folio_clear_isolated(src);
1021 			goto out;
1022 		}
1023 
1024 		mops = folio_movable_ops(src);
1025 		rc = mops->migrate_page(&dst->page, &src->page, mode);
1026 		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
1027 				!folio_test_isolated(src));
1028 	}
1029 
1030 	/*
1031 	 * When successful, old pagecache src->mapping must be cleared before
1032 	 * src is freed; but stats require that PageAnon be left as PageAnon.
1033 	 */
1034 	if (rc == MIGRATEPAGE_SUCCESS) {
1035 		if (__folio_test_movable(src)) {
1036 			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1037 
1038 			/*
1039 			 * We clear PG_movable under page_lock so any compactor
1040 			 * cannot try to migrate this page.
1041 			 */
1042 			folio_clear_isolated(src);
1043 		}
1044 
1045 		/*
1046 		 * An anonymous or movable src->mapping will be cleared by
1047 		 * free_pages_prepare(), so don't reset it here; that keeps
1048 		 * type checks such as PageAnon() working.
1049 		 */
1050 		if (!folio_mapping_flags(src))
1051 			src->mapping = NULL;
1052 
1053 		if (likely(!folio_is_zone_device(dst)))
1054 			flush_dcache_folio(dst);
1055 	}
1056 out:
1057 	return rc;
1058 }
1059 
1060 /*
1061  * To record some information during migration, we use the otherwise
1062  * unused private field of the newly allocated destination folio.
1063  * This is safe because nobody is using it except us.
1064  */
1065 enum {
1066 	PAGE_WAS_MAPPED = BIT(0),
1067 	PAGE_WAS_MLOCKED = BIT(1),
1068 	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
1069 };
1070 
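/*
 * The two PAGE_OLD_STATES bits are stuffed into the low bits of the
 * anon_vma pointer: struct anon_vma is at least word-aligned, so those
 * bits of the pointer are always zero. __migrate_folio_extract() masks
 * them off again with ~PAGE_OLD_STATES to recover the pointer.
 */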
1071 static void __migrate_folio_record(struct folio *dst,
1072 				   int old_page_state,
1073 				   struct anon_vma *anon_vma)
1074 {
1075 	dst->private = (void *)anon_vma + old_page_state;
1076 }
1077 
1078 static void __migrate_folio_extract(struct folio *dst,
1079 				   int *old_page_state,
1080 				   struct anon_vma **anon_vmap)
1081 {
1082 	unsigned long private = (unsigned long)dst->private;
1083 
1084 	*anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
1085 	*old_page_state = private & PAGE_OLD_STATES;
1086 	dst->private = NULL;
1087 }
1088 
1089 /* Restore the source folio to the original state upon failure */
1090 static void migrate_folio_undo_src(struct folio *src,
1091 				   int page_was_mapped,
1092 				   struct anon_vma *anon_vma,
1093 				   bool locked,
1094 				   struct list_head *ret)
1095 {
1096 	if (page_was_mapped)
1097 		remove_migration_ptes(src, src, 0);
1098 	/* Drop an anon_vma reference if we took one */
1099 	if (anon_vma)
1100 		put_anon_vma(anon_vma);
1101 	if (locked)
1102 		folio_unlock(src);
1103 	if (ret)
1104 		list_move_tail(&src->lru, ret);
1105 }
1106 
1107 /* Restore the destination folio to the original state upon failure */
1108 static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1109 		free_folio_t put_new_folio, unsigned long private)
1110 {
1111 	if (locked)
1112 		folio_unlock(dst);
1113 	if (put_new_folio)
1114 		put_new_folio(dst, private);
1115 	else
1116 		folio_put(dst);
1117 }
1118 
1119 /* Clean up the src folio upon migration success */
1120 static void migrate_folio_done(struct folio *src,
1121 			       enum migrate_reason reason)
1122 {
1123 	/*
1124 	 * Compaction can also migrate non-LRU pages, which are
1125 	 * not accounted in NR_ISOLATED_*. They can be recognized
1126 	 * via __folio_test_movable().
1127 	 */
1128 	if (likely(!__folio_test_movable(src)) && reason != MR_DEMOTION)
1129 		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1130 				    folio_is_file_lru(src), -folio_nr_pages(src));
1131 
1132 	if (reason != MR_MEMORY_FAILURE)
1133 		/* We release the page in page_handle_poison. */
1134 		folio_put(src);
1135 }
1136 
1137 /* Obtain the lock on the folio and remove all of its PTEs. */
1138 static int migrate_folio_unmap(new_folio_t get_new_folio,
1139 		free_folio_t put_new_folio, unsigned long private,
1140 		struct folio *src, struct folio **dstp, enum migrate_mode mode,
1141 		enum migrate_reason reason, struct list_head *ret)
1142 {
1143 	struct folio *dst;
1144 	int rc = -EAGAIN;
1145 	int old_page_state = 0;
1146 	struct anon_vma *anon_vma = NULL;
1147 	bool is_lru = data_race(!__folio_test_movable(src));
1148 	bool locked = false;
1149 	bool dst_locked = false;
1150 
1151 	if (folio_ref_count(src) == 1) {
1152 		/* Folio was freed from under us. So we are done. */
1153 		folio_clear_active(src);
1154 		folio_clear_unevictable(src);
1155 		/* free_pages_prepare() will clear PG_isolated. */
1156 		list_del(&src->lru);
1157 		migrate_folio_done(src, reason);
1158 		return MIGRATEPAGE_SUCCESS;
1159 	}
1160 
1161 	dst = get_new_folio(src, private);
1162 	if (!dst)
1163 		return -ENOMEM;
1164 	*dstp = dst;
1165 
1166 	dst->private = NULL;
1167 
1168 	if (!folio_trylock(src)) {
1169 		if (mode == MIGRATE_ASYNC)
1170 			goto out;
1171 
1172 		/*
1173 		 * It's not safe for direct compaction to call lock_page.
1174 		 * For example, during page readahead pages are added locked
1175 		 * to the LRU. Later, when the IO completes the pages are
1176 		 * marked uptodate and unlocked. However, the queueing
1177 		 * could be merging multiple pages for one bio (e.g.
1178 		 * mpage_readahead). If an allocation happens for the
1179 		 * second or third page, the process can end up locking
1180 		 * the same page twice and deadlocking. Rather than
1181 		 * trying to be clever about what pages can be locked,
1182 		 * avoid the use of lock_page for direct compaction
1183 		 * altogether.
1184 		 */
1185 		if (current->flags & PF_MEMALLOC)
1186 			goto out;
1187 
1188 		/*
1189 		 * In "light" mode, we can wait for transient locks (e.g.
1190 		 * inserting a page into the page table), but it's not
1191 		 * worth waiting for I/O.
1192 		 */
1193 		if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1194 			goto out;
1195 
1196 		folio_lock(src);
1197 	}
1198 	locked = true;
1199 	if (folio_test_mlocked(src))
1200 		old_page_state |= PAGE_WAS_MLOCKED;
1201 
1202 	if (folio_test_writeback(src)) {
1203 		/*
1204 		 * Only in the case of a full synchronous migration is it
1205 		 * necessary to wait for PageWriteback. In the async case,
1206 		 * the retry loop is too short and in the sync-light case,
1207 		 * the overhead of stalling is too much.
1208 		 */
1209 		switch (mode) {
1210 		case MIGRATE_SYNC:
1211 			break;
1212 		default:
1213 			rc = -EBUSY;
1214 			goto out;
1215 		}
1216 		folio_wait_writeback(src);
1217 	}
1218 
1219 	 * By try_to_migrate(), src->mapcount goes down to 0 here. In that case,
1220 	 * we could not notice the anon_vma being freed while we migrate the
1221 	 * page. This folio_get_anon_vma() delays freeing of the anon_vma until
1222 	 * the end of migration. File cache pages are no problem because they
1223 	 * are protected by the page lock during migration, so only anon pages
1224 	 * need this care here.
1225 	 * just care Anon page here.
1226 	 *
1227 	 * Only folio_get_anon_vma() understands the subtleties of
1228 	 * getting a hold on an anon_vma from outside one of its mms.
1229 	 * But if we cannot get anon_vma, then we won't need it anyway,
1230 	 * because that implies that the anon page is no longer mapped
1231 	 * (and cannot be remapped so long as we hold the page lock).
1232 	 */
1233 	if (folio_test_anon(src) && !folio_test_ksm(src))
1234 		anon_vma = folio_get_anon_vma(src);
1235 
1236 	/*
1237 	 * Block others from accessing the new page when we get around to
1238 	 * establishing additional references. We are usually the only one
1239 	 * holding a reference to dst at this point. We used to have a BUG
1240 	 * here if folio_trylock(dst) fails, but would like to allow for
1241 	 * cases where there might be a race with the previous use of dst.
1242 	 * This is much like races on refcount of oldpage: just don't BUG().
1243 	 */
1244 	if (unlikely(!folio_trylock(dst)))
1245 		goto out;
1246 	dst_locked = true;
1247 
1248 	if (unlikely(!is_lru)) {
1249 		__migrate_folio_record(dst, old_page_state, anon_vma);
1250 		return MIGRATEPAGE_UNMAP;
1251 	}
1252 
1253 	/*
1254 	 * Corner case handling:
1255 	 * 1. When a new swap-cache page is read in, it is added to the LRU
1256 	 * and treated as swapcache but it has no rmap yet.
1257 	 * Calling try_to_unmap() against a src->mapping==NULL page will
1258 	 * trigger a BUG.  So handle it here.
1259 	 * 2. An orphaned page (see truncate_cleanup_page) might have
1260 	 * fs-private metadata. The page can be picked up due to memory
1261 	 * offlining.  Everywhere else except page reclaim, the page is
1262 	 * invisible to the VM, so the page cannot be migrated.  So try to
1263 	 * free the metadata, so the page can be freed.
1264 	 */
1265 	if (!src->mapping) {
1266 		if (folio_test_private(src)) {
1267 			try_to_free_buffers(src);
1268 			goto out;
1269 		}
1270 	} else if (folio_mapped(src)) {
1271 		/* Establish migration ptes */
1272 		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1273 			       !folio_test_ksm(src) && !anon_vma, src);
1274 		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1275 		old_page_state |= PAGE_WAS_MAPPED;
1276 	}
1277 
1278 	if (!folio_mapped(src)) {
1279 		__migrate_folio_record(dst, old_page_state, anon_vma);
1280 		return MIGRATEPAGE_UNMAP;
1281 	}
1282 
1283 out:
1284 	/*
1285 	 * A folio that has not been unmapped will be restored to
1286 	 * the right list unless we want to retry.
1287 	 */
1288 	if (rc == -EAGAIN)
1289 		ret = NULL;
1290 
1291 	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1292 			       anon_vma, locked, ret);
1293 	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1294 
1295 	return rc;
1296 }
1297 
1298 /* Migrate the folio to the newly allocated folio in dst. */
1299 static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1300 			      struct folio *src, struct folio *dst,
1301 			      enum migrate_mode mode, enum migrate_reason reason,
1302 			      struct list_head *ret)
1303 {
1304 	int rc;
1305 	int old_page_state = 0;
1306 	struct anon_vma *anon_vma = NULL;
1307 	bool is_lru = !__folio_test_movable(src);
1308 	struct list_head *prev;
1309 
1310 	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1311 	prev = dst->lru.prev;
1312 	list_del(&dst->lru);
1313 
1314 	rc = move_to_new_folio(dst, src, mode);
1315 	if (rc)
1316 		goto out;
1317 
1318 	if (unlikely(!is_lru))
1319 		goto out_unlock_both;
1320 
1321 	/*
1322 	 * When successful, push dst to LRU immediately: so that if it
1323 	 * turns out to be an mlocked page, remove_migration_ptes() will
1324 	 * automatically build up the correct dst->mlock_count for it.
1325 	 *
1326 	 * We would like to do something similar for the old page, when
1327 	 * unsuccessful, and other cases when a page has been temporarily
1328 	 * isolated from the unevictable LRU: but this case is the easiest.
1329 	 */
1330 	folio_add_lru(dst);
1331 	if (old_page_state & PAGE_WAS_MLOCKED)
1332 		lru_add_drain();
1333 
1334 	if (old_page_state & PAGE_WAS_MAPPED)
1335 		remove_migration_ptes(src, dst, 0);
1336 
1337 out_unlock_both:
1338 	folio_unlock(dst);
1339 	set_page_owner_migrate_reason(&dst->page, reason);
1340 	/*
1341 	 * If migration is successful, decrease the refcount of dst;
1342 	 * this will not free the page because the new page owner has
1343 	 * increased the refcount.
1344 	 */
1345 	folio_put(dst);
1346 
1347 	/*
1348 	 * A folio that has been migrated has all references removed
1349 	 * and will be freed.
1350 	 */
1351 	list_del(&src->lru);
1352 	/* Drop an anon_vma reference if we took one */
1353 	if (anon_vma)
1354 		put_anon_vma(anon_vma);
1355 	folio_unlock(src);
1356 	migrate_folio_done(src, reason);
1357 
1358 	return rc;
1359 out:
1360 	/*
1361 	 * A folio that has not been migrated will be restored to
1362 	 * the right list unless we want to retry.
1363 	 */
1364 	if (rc == -EAGAIN) {
1365 		list_add(&dst->lru, prev);
1366 		__migrate_folio_record(dst, old_page_state, anon_vma);
1367 		return rc;
1368 	}
1369 
1370 	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1371 			       anon_vma, true, ret);
1372 	migrate_folio_undo_dst(dst, true, put_new_folio, private);
1373 
1374 	return rc;
1375 }
1376 
1377 /*
1378  * Counterpart of migrate_folio_unmap()/migrate_folio_move() for hugepage migration.
1379  *
1380  * This function doesn't wait for the completion of hugepage I/O
1381  * because there is no race between I/O and migration for hugepages.
1382  * Note that currently hugepage I/O occurs only in direct I/O
1383  * where no lock is held and PG_writeback is irrelevant,
1384  * and the writeback status of all subpages is counted in the reference
1385  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1386  * under direct I/O, the reference count of the head page is 512 and a bit more.)
1387  * This means that when we try to migrate a hugepage whose subpages are
1388  * doing direct I/O, some references remain after try_to_unmap() and
1389  * hugepage migration fails without data corruption.
1390  *
1391  * There is also no race when direct I/O is issued on the page under migration,
1392  * because then the pte is replaced with a migration swap entry and direct I/O code
1393  * will wait in the page fault for migration to complete.
1394  */
1395 static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1396 		free_folio_t put_new_folio, unsigned long private,
1397 		struct folio *src, int force, enum migrate_mode mode,
1398 		int reason, struct list_head *ret)
1399 {
1400 	struct folio *dst;
1401 	int rc = -EAGAIN;
1402 	int page_was_mapped = 0;
1403 	struct anon_vma *anon_vma = NULL;
1404 	struct address_space *mapping = NULL;
1405 
1406 	if (folio_ref_count(src) == 1) {
1407 		/* page was freed from under us. So we are done. */
1408 		folio_putback_hugetlb(src);
1409 		return MIGRATEPAGE_SUCCESS;
1410 	}
1411 
1412 	dst = get_new_folio(src, private);
1413 	if (!dst)
1414 		return -ENOMEM;
1415 
1416 	if (!folio_trylock(src)) {
1417 		if (!force)
1418 			goto out;
1419 		switch (mode) {
1420 		case MIGRATE_SYNC:
1421 			break;
1422 		default:
1423 			goto out;
1424 		}
1425 		folio_lock(src);
1426 	}
1427 
1428 	/*
1429 	 * Check for pages which are in the process of being freed.  Without
1430 	 * folio_mapping() set, the hugetlbfs-specific move-page routine will not
1431 	 * be called and we could leak usage counts for subpools.
1432 	 */
1433 	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1434 		rc = -EBUSY;
1435 		goto out_unlock;
1436 	}
1437 
1438 	if (folio_test_anon(src))
1439 		anon_vma = folio_get_anon_vma(src);
1440 
1441 	if (unlikely(!folio_trylock(dst)))
1442 		goto put_anon;
1443 
1444 	if (folio_mapped(src)) {
1445 		enum ttu_flags ttu = 0;
1446 
1447 		if (!folio_test_anon(src)) {
1448 			/*
1449 			 * In shared mappings, try_to_unmap could potentially
1450 			 * call huge_pmd_unshare.  Because of this, take the
1451 			 * semaphore in write mode here and set TTU_RMAP_LOCKED
1452 			 * to let lower levels know we have taken the lock.
1453 			 */
1454 			mapping = hugetlb_folio_mapping_lock_write(src);
1455 			if (unlikely(!mapping))
1456 				goto unlock_put_anon;
1457 
1458 			ttu = TTU_RMAP_LOCKED;
1459 		}
1460 
1461 		try_to_migrate(src, ttu);
1462 		page_was_mapped = 1;
1463 
1464 		if (ttu & TTU_RMAP_LOCKED)
1465 			i_mmap_unlock_write(mapping);
1466 	}
1467 
1468 	if (!folio_mapped(src))
1469 		rc = move_to_new_folio(dst, src, mode);
1470 
1471 	if (page_was_mapped)
1472 		remove_migration_ptes(src,
1473 			rc == MIGRATEPAGE_SUCCESS ? dst : src, 0);
1474 
1475 unlock_put_anon:
1476 	folio_unlock(dst);
1477 
1478 put_anon:
1479 	if (anon_vma)
1480 		put_anon_vma(anon_vma);
1481 
1482 	if (rc == MIGRATEPAGE_SUCCESS) {
1483 		move_hugetlb_state(src, dst, reason);
1484 		put_new_folio = NULL;
1485 	}
1486 
1487 out_unlock:
1488 	folio_unlock(src);
1489 out:
1490 	if (rc == MIGRATEPAGE_SUCCESS)
1491 		folio_putback_hugetlb(src);
1492 	else if (rc != -EAGAIN)
1493 		list_move_tail(&src->lru, ret);
1494 
1495 	/*
1496 	 * If migration was not successful and there's a freeing callback,
1497 	 * return the folio to that special allocator. Otherwise, simply drop
1498 	 * our additional reference.
1499 	 */
1500 	if (put_new_folio)
1501 		put_new_folio(dst, private);
1502 	else
1503 		folio_put(dst);
1504 
1505 	return rc;
1506 }
1507 
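/*
 * Split @folio onto the @split_folios list. The folio lock is required for
 * the split; in MIGRATE_ASYNC mode only a trylock is attempted so we never
 * block. On success the remaining (now small) folio is moved to
 * @split_folios as well.
 */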
1508 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios,
1509 				  enum migrate_mode mode)
1510 {
1511 	int rc;
1512 
1513 	if (mode == MIGRATE_ASYNC) {
1514 		if (!folio_trylock(folio))
1515 			return -EAGAIN;
1516 	} else {
1517 		folio_lock(folio);
1518 	}
1519 	rc = split_folio_to_list(folio, split_folios);
1520 	folio_unlock(folio);
1521 	if (!rc)
1522 		list_move_tail(&folio->lru, split_folios);
1523 
1524 	return rc;
1525 }
1526 
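/*
 * Batching and retry limits. NR_MAX_BATCHED_MIGRATION caps how many base
 * pages are unmapped in one batch. A sync migration's overall retry budget
 * of NR_MAX_MIGRATE_PAGES_RETRY passes is split between an initial batch of
 * NR_MAX_MIGRATE_ASYNC_RETRY lightweight passes and the remaining
 * NR_MAX_MIGRATE_SYNC_RETRY fully synchronous passes.
 */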
1527 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1528 #define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
1529 #else
1530 #define NR_MAX_BATCHED_MIGRATION	512
1531 #endif
1532 #define NR_MAX_MIGRATE_PAGES_RETRY	10
1533 #define NR_MAX_MIGRATE_ASYNC_RETRY	3
1534 #define NR_MAX_MIGRATE_SYNC_RETRY					\
1535 	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
1536 
1537 struct migrate_pages_stats {
1538 	int nr_succeeded;	/* Normal and large folios migrated successfully, in
1539 				   units of base pages */
1540 	int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
1541 				   units of base pages.  Untried folios aren't counted */
1542 	int nr_thp_succeeded;	/* THP migrated successfully */
1543 	int nr_thp_failed;	/* THP failed to be migrated */
1544 	int nr_thp_split;	/* THP split before migrating */
1545 	int nr_split;	/* Large folios (including THP) split before migrating */
1546 };
1547 
1548 /*
1549  * Returns the number of hugetlb folios that were not migrated, or an error code
1550  * after NR_MAX_MIGRATE_PAGES_RETRY attempts, or once no hugetlb folios are
1551  * movable any more because the list has become empty or no retryable hugetlb
1552  * folios exist any more. It is the caller's responsibility to call
1553  * putback_movable_pages() only if ret != 0.
1554  */
1555 static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1556 			    free_folio_t put_new_folio, unsigned long private,
1557 			    enum migrate_mode mode, int reason,
1558 			    struct migrate_pages_stats *stats,
1559 			    struct list_head *ret_folios)
1560 {
1561 	int retry = 1;
1562 	int nr_failed = 0;
1563 	int nr_retry_pages = 0;
1564 	int pass = 0;
1565 	struct folio *folio, *folio2;
1566 	int rc, nr_pages;
1567 
1568 	for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1569 		retry = 0;
1570 		nr_retry_pages = 0;
1571 
1572 		list_for_each_entry_safe(folio, folio2, from, lru) {
1573 			if (!folio_test_hugetlb(folio))
1574 				continue;
1575 
1576 			nr_pages = folio_nr_pages(folio);
1577 
1578 			cond_resched();
1579 
1580 			/*
1581 			 * Migratability of hugepages depends on the architecture and
1582 			 * their size.  This check is necessary because some callers
1583 			 * of hugepage migration like soft offline and memory
1584 			 * hotremove don't walk through page tables or check whether
1585 			 * the hugepage is pmd-based or not before kicking migration.
1586 			 */
1587 			if (!hugepage_migration_supported(folio_hstate(folio))) {
1588 				nr_failed++;
1589 				stats->nr_failed_pages += nr_pages;
1590 				list_move_tail(&folio->lru, ret_folios);
1591 				continue;
1592 			}
1593 
1594 			rc = unmap_and_move_huge_page(get_new_folio,
1595 						      put_new_folio, private,
1596 						      folio, pass > 2, mode,
1597 						      reason, ret_folios);
1598 			/*
1599 			 * The rules are:
1600 			 *	Success: hugetlb folio will be put back
1601 			 *	-EAGAIN: stay on the from list
1602 			 *	-ENOMEM: stay on the from list
1603 			 *	Other errno: put on ret_folios list
1604 			 */
1605 			switch(rc) {
1606 			case -ENOMEM:
1607 				/*
1608 				 * When memory is low, don't bother to try to migrate
1609 				 * other folios, just exit.
1610 				 */
1611 				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1612 				return -ENOMEM;
1613 			case -EAGAIN:
1614 				retry++;
1615 				nr_retry_pages += nr_pages;
1616 				break;
1617 			case MIGRATEPAGE_SUCCESS:
1618 				stats->nr_succeeded += nr_pages;
1619 				break;
1620 			default:
1621 				/*
1622 				 * Permanent failure (-EBUSY, etc.):
1623 				 * unlike -EAGAIN case, the failed folio is
1624 				 * removed from migration folio list and not
1625 				 * retried in the next outer loop.
1626 				 */
1627 				nr_failed++;
1628 				stats->nr_failed_pages += nr_pages;
1629 				break;
1630 			}
1631 		}
1632 	}
1633 	/*
1634 	 * nr_failed is number of hugetlb folios failed to be migrated.  After
1635 	 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1636 	 * folios as failed.
1637 	 */
1638 	nr_failed += retry;
1639 	stats->nr_failed_pages += nr_retry_pages;
1640 
1641 	return nr_failed;
1642 }
1643 
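/*
 * Move a batch of already-unmapped folios. The src and dst lists were
 * built in lockstep during the unmap phase, so they are walked in parallel
 * here: each src folio is migrated into the dst folio at the same list
 * position.
 */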
1644 static void migrate_folios_move(struct list_head *src_folios,
1645 		struct list_head *dst_folios,
1646 		free_folio_t put_new_folio, unsigned long private,
1647 		enum migrate_mode mode, int reason,
1648 		struct list_head *ret_folios,
1649 		struct migrate_pages_stats *stats,
1650 		int *retry, int *thp_retry, int *nr_failed,
1651 		int *nr_retry_pages)
1652 {
1653 	struct folio *folio, *folio2, *dst, *dst2;
1654 	bool is_thp;
1655 	int nr_pages;
1656 	int rc;
1657 
1658 	dst = list_first_entry(dst_folios, struct folio, lru);
1659 	dst2 = list_next_entry(dst, lru);
1660 	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
1661 		is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1662 		nr_pages = folio_nr_pages(folio);
1663 
1664 		cond_resched();
1665 
1666 		rc = migrate_folio_move(put_new_folio, private,
1667 				folio, dst, mode,
1668 				reason, ret_folios);
1669 		/*
1670 		 * The rules are:
1671 		 *	Success: folio will be freed
1672 		 *	-EAGAIN: stay on the unmap_folios list
1673 		 *	Other errno: put on ret_folios list
1674 		 */
1675 		switch (rc) {
1676 		case -EAGAIN:
1677 			*retry += 1;
1678 			*thp_retry += is_thp;
1679 			*nr_retry_pages += nr_pages;
1680 			break;
1681 		case MIGRATEPAGE_SUCCESS:
1682 			stats->nr_succeeded += nr_pages;
1683 			stats->nr_thp_succeeded += is_thp;
1684 			break;
1685 		default:
1686 			*nr_failed += 1;
1687 			stats->nr_thp_failed += is_thp;
1688 			stats->nr_failed_pages += nr_pages;
1689 			break;
1690 		}
1691 		dst = dst2;
1692 		dst2 = list_next_entry(dst, lru);
1693 	}
1694 }
1695 
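/*
 * Roll back a batch of unmapped-but-not-moved folio pairs: restore the
 * migration PTEs of each src folio and release its dst folio. Used when a
 * batch has to be aborted, e.g. on -ENOMEM during the unmap phase.
 */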
1696 static void migrate_folios_undo(struct list_head *src_folios,
1697 		struct list_head *dst_folios,
1698 		free_folio_t put_new_folio, unsigned long private,
1699 		struct list_head *ret_folios)
1700 {
1701 	struct folio *folio, *folio2, *dst, *dst2;
1702 
1703 	dst = list_first_entry(dst_folios, struct folio, lru);
1704 	dst2 = list_next_entry(dst, lru);
1705 	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
1706 		int old_page_state = 0;
1707 		struct anon_vma *anon_vma = NULL;
1708 
1709 		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1710 		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1711 				anon_vma, true, ret_folios);
1712 		list_del(&dst->lru);
1713 		migrate_folio_undo_dst(dst, true, put_new_folio, private);
1714 		dst = dst2;
1715 		dst2 = list_next_entry(dst, lru);
1716 	}
1717 }
1718 
1719 /*
1720  * migrate_pages_batch() first unmaps folios in the from list as many as
1721  * possible, then move the unmapped folios.
1722  *
1723  * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
1724  * lock or bit while we have locked more than one folio, which may cause
1725  * deadlock (e.g., for the loop device).  So, if mode != MIGRATE_ASYNC, the
1726  * length of the from list must be <= 1.
1727  */
1728 static int migrate_pages_batch(struct list_head *from,
1729 		new_folio_t get_new_folio, free_folio_t put_new_folio,
1730 		unsigned long private, enum migrate_mode mode, int reason,
1731 		struct list_head *ret_folios, struct list_head *split_folios,
1732 		struct migrate_pages_stats *stats, int nr_pass)
1733 {
1734 	int retry = 1;
1735 	int thp_retry = 1;
1736 	int nr_failed = 0;
1737 	int nr_retry_pages = 0;
1738 	int pass = 0;
1739 	bool is_thp = false;
1740 	bool is_large = false;
1741 	struct folio *folio, *folio2, *dst = NULL;
1742 	int rc, rc_saved = 0, nr_pages;
1743 	LIST_HEAD(unmap_folios);
1744 	LIST_HEAD(dst_folios);
1745 	bool nosplit = (reason == MR_NUMA_MISPLACED);
1746 
1747 	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1748 			!list_empty(from) && !list_is_singular(from));
1749 
1750 	for (pass = 0; pass < nr_pass && retry; pass++) {
1751 		retry = 0;
1752 		thp_retry = 0;
1753 		nr_retry_pages = 0;
1754 
1755 		list_for_each_entry_safe(folio, folio2, from, lru) {
1756 			is_large = folio_test_large(folio);
1757 			is_thp = folio_test_pmd_mappable(folio);
1758 			nr_pages = folio_nr_pages(folio);
1759 
1760 			cond_resched();
1761 
1762 			/*
1763 			 * The rare folio on the deferred split list should
1764 			 * be split now. It should not count as a failure,
1765 			 * but increment nr_failed anyway because, without
1766 			 * doing so, migrate_pages() may report success with
1767 			 * (split but unmigrated) pages still on its fromlist,
1768 			 * whereas it always reports success when its fromlist
1769 			 * is empty. stats->nr_thp_failed should be increased
1770 			 * too, otherwise the stats become inconsistent when
1771 			 * migrate_pages_batch() is called via migrate_pages()
1772 			 * with MIGRATE_SYNC and MIGRATE_ASYNC.
1773 			 *
1774 			 * Only check the deferred list without removing the
1775 			 * folio from it, since the folio can be on a
1776 			 * deferred_split_scan() local list and removing it
1777 			 * could corrupt that local list. The folio split
1778 			 * below can handle it with the help of
1779 			 * folio_ref_freeze().
1779 			 *
1780 			 * nr_pages > 2 is needed to avoid checking order-1
1781 			 * page cache folios. They exist, in contrast to
1782 			 * non-existent order-1 anonymous folios, and do not
1783 			 * use _deferred_list.
1784 			 */
1785 			if (nr_pages > 2 &&
1786 			   !list_empty(&folio->_deferred_list) &&
1787 			   folio_test_partially_mapped(folio)) {
1788 				if (!try_split_folio(folio, split_folios, mode)) {
1789 					nr_failed++;
1790 					stats->nr_thp_failed += is_thp;
1791 					stats->nr_thp_split += is_thp;
1792 					stats->nr_split++;
1793 					continue;
1794 				}
1795 			}
1796 
1797 			/*
1798 			 * Large folio migration might be unsupported or
1799 			 * the allocation might fail, so we should retry
1800 			 * on the same folio with the large folio split
1801 			 * into normal folios.
1802 			 *
1803 			 * Split folios are put in split_folios, and
1804 			 * we will migrate them after the rest of the
1805 			 * list is processed.
1806 			 */
1807 			if (!thp_migration_supported() && is_thp) {
1808 				nr_failed++;
1809 				stats->nr_thp_failed++;
1810 				if (!try_split_folio(folio, split_folios, mode)) {
1811 					stats->nr_thp_split++;
1812 					stats->nr_split++;
1813 					continue;
1814 				}
1815 				stats->nr_failed_pages += nr_pages;
1816 				list_move_tail(&folio->lru, ret_folios);
1817 				continue;
1818 			}
1819 
1820 			rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1821 					private, folio, &dst, mode, reason,
1822 					ret_folios);
1823 			/*
1824 			 * The rules are:
1825 			 *	Success: folio will be freed
1826 			 *	Unmap: folio will be put on unmap_folios list,
1827 			 *	       dst folio put on dst_folios list
1828 			 *	-EAGAIN: stay on the from list
1829 			 *	-ENOMEM: stay on the from list
1830 			 *	Other errno: put on ret_folios list
1831 			 */
1832 			switch(rc) {
1833 			case -ENOMEM:
1834 				/*
1835 				 * When memory is low, don't bother trying to migrate
1836 				 * other folios; move the already-unmapped folios, then exit.
1837 				 */
1838 				nr_failed++;
1839 				stats->nr_thp_failed += is_thp;
1840 				/* Large folio NUMA faulting doesn't split to retry. */
1841 				if (is_large && !nosplit) {
1842 					int ret = try_split_folio(folio, split_folios, mode);
1843 
1844 					if (!ret) {
1845 						stats->nr_thp_split += is_thp;
1846 						stats->nr_split++;
1847 						break;
1848 					} else if (reason == MR_LONGTERM_PIN &&
1849 						   ret == -EAGAIN) {
1850 						/*
1851 						 * Try again to split large folio to
1852 						 * mitigate the failure of longterm pinning.
1853 						 */
1854 						retry++;
1855 						thp_retry += is_thp;
1856 						nr_retry_pages += nr_pages;
1857 						/* Undo duplicated failure counting. */
1858 						nr_failed--;
1859 						stats->nr_thp_failed -= is_thp;
1860 						break;
1861 					}
1862 				}
1863 
1864 				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1865 				/* nr_failed isn't updated: it won't be used since rc_saved is returned */
1866 				stats->nr_thp_failed += thp_retry;
1867 				rc_saved = rc;
1868 				if (list_empty(&unmap_folios))
1869 					goto out;
1870 				else
1871 					goto move;
1872 			case -EAGAIN:
1873 				retry++;
1874 				thp_retry += is_thp;
1875 				nr_retry_pages += nr_pages;
1876 				break;
1877 			case MIGRATEPAGE_SUCCESS:
1878 				stats->nr_succeeded += nr_pages;
1879 				stats->nr_thp_succeeded += is_thp;
1880 				break;
1881 			case MIGRATEPAGE_UNMAP:
1882 				list_move_tail(&folio->lru, &unmap_folios);
1883 				list_add_tail(&dst->lru, &dst_folios);
1884 				break;
1885 			default:
1886 				/*
1887 				 * Permanent failure (-EBUSY, etc.):
1888 				 * unlike -EAGAIN case, the failed folio is
1889 				 * removed from migration folio list and not
1890 				 * retried in the next outer loop.
1891 				 */
1892 				nr_failed++;
1893 				stats->nr_thp_failed += is_thp;
1894 				stats->nr_failed_pages += nr_pages;
1895 				break;
1896 			}
1897 		}
1898 	}
1899 	nr_failed += retry;
1900 	stats->nr_thp_failed += thp_retry;
1901 	stats->nr_failed_pages += nr_retry_pages;
1902 move:
1903 	/* Flush TLBs for all unmapped folios */
1904 	try_to_unmap_flush();
1905 
1906 	retry = 1;
1907 	for (pass = 0; pass < nr_pass && retry; pass++) {
1908 		retry = 0;
1909 		thp_retry = 0;
1910 		nr_retry_pages = 0;
1911 
1912 		/* Move the unmapped folios */
1913 		migrate_folios_move(&unmap_folios, &dst_folios,
1914 				put_new_folio, private, mode, reason,
1915 				ret_folios, stats, &retry, &thp_retry,
1916 				&nr_failed, &nr_retry_pages);
1917 	}
1918 	nr_failed += retry;
1919 	stats->nr_thp_failed += thp_retry;
1920 	stats->nr_failed_pages += nr_retry_pages;
1921 
1922 	rc = rc_saved ? : nr_failed;
1923 out:
1924 	/* Cleanup remaining folios */
1925 	migrate_folios_undo(&unmap_folios, &dst_folios,
1926 			put_new_folio, private, ret_folios);
1927 
1928 	return rc;
1929 }
1930 
1931 static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1932 		free_folio_t put_new_folio, unsigned long private,
1933 		enum migrate_mode mode, int reason,
1934 		struct list_head *ret_folios, struct list_head *split_folios,
1935 		struct migrate_pages_stats *stats)
1936 {
1937 	int rc, nr_failed = 0;
1938 	LIST_HEAD(folios);
1939 	struct migrate_pages_stats astats;
1940 
1941 	memset(&astats, 0, sizeof(astats));
1942 	/* First, try to migrate in batch with MIGRATE_ASYNC mode */
1943 	rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
1944 				 reason, &folios, split_folios, &astats,
1945 				 NR_MAX_MIGRATE_ASYNC_RETRY);
1946 	stats->nr_succeeded += astats.nr_succeeded;
1947 	stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1948 	stats->nr_thp_split += astats.nr_thp_split;
1949 	stats->nr_split += astats.nr_split;
1950 	if (rc < 0) {
1951 		stats->nr_failed_pages += astats.nr_failed_pages;
1952 		stats->nr_thp_failed += astats.nr_thp_failed;
1953 		list_splice_tail(&folios, ret_folios);
1954 		return rc;
1955 	}
1956 	stats->nr_thp_failed += astats.nr_thp_split;
1957 	/*
1958 	 * Do not count rc, as pages will be retried below.
1959 	 * Count nr_split only, since it includes nr_thp_split.
1960 	 */
1961 	nr_failed += astats.nr_split;
1962 	/*
1963 	 * Fall back to migrating all failed folios one by one synchronously. All
1964 	 * failed folios except split THPs will be retried, so their failure
1965 	 * isn't counted.
1966 	 */
1967 	list_splice_tail_init(&folios, from);
1968 	while (!list_empty(from)) {
1969 		list_move(from->next, &folios);
1970 		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1971 					 private, mode, reason, ret_folios,
1972 					 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1973 		list_splice_tail_init(&folios, ret_folios);
1974 		if (rc < 0)
1975 			return rc;
1976 		nr_failed += rc;
1977 	}
1978 
1979 	return nr_failed;
1980 }
1981 
1982 /*
1983  * migrate_pages - migrate the folios specified in a list, to the free folios
1984  *		   supplied as the target for the page migration
1985  *
1986  * @from:		The list of folios to be migrated.
1987  * @get_new_folio:	The function used to allocate free folios to be used
1988  *			as the target of the folio migration.
1989  * @put_new_folio:	The function used to free target folios if migration
1990  *			fails, or NULL if no special handling is necessary.
1991  * @private:		Private data to be passed on to get_new_folio()
1992  * @mode:		The migration mode that specifies the constraints for
1993  *			folio migration, if any.
1994  * @reason:		The reason for folio migration.
1995  * @ret_succeeded:	Set to the number of folios migrated successfully if
1996  *			the caller passes a non-NULL pointer.
1997  *
1998  * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no
1999  * folios are movable any more, either because the list has become empty or
2000  * because no retryable folios remain. It is the caller's responsibility to
2001  * call putback_movable_pages() only if ret != 0 (an illustrative caller
2002  * sketch follows this function).
2003  *
2004  * Returns the number of {normal, large, hugetlb} folios that were not
2005  * migrated, or an error code. The number of large folio splits is counted as
2006  * non-migrated large folios, no matter how many of the split folios migrate.
2007  */
2008 int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
2009 		free_folio_t put_new_folio, unsigned long private,
2010 		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
2011 {
2012 	int rc, rc_gather;
2013 	int nr_pages;
2014 	struct folio *folio, *folio2;
2015 	LIST_HEAD(folios);
2016 	LIST_HEAD(ret_folios);
2017 	LIST_HEAD(split_folios);
2018 	struct migrate_pages_stats stats;
2019 
2020 	trace_mm_migrate_pages_start(mode, reason);
2021 
2022 	memset(&stats, 0, sizeof(stats));
2023 
2024 	rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
2025 				     mode, reason, &stats, &ret_folios);
2026 	if (rc_gather < 0)
2027 		goto out;
2028 
2029 again:
2030 	nr_pages = 0;
2031 	list_for_each_entry_safe(folio, folio2, from, lru) {
2032 		/* Retried hugetlb folios will be kept on the list */
2033 		if (folio_test_hugetlb(folio)) {
2034 			list_move_tail(&folio->lru, &ret_folios);
2035 			continue;
2036 		}
2037 
2038 		nr_pages += folio_nr_pages(folio);
2039 		if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2040 			break;
2041 	}
2042 	if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2043 		list_cut_before(&folios, from, &folio2->lru);
2044 	else
2045 		list_splice_init(from, &folios);
2046 	if (mode == MIGRATE_ASYNC)
2047 		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2048 				private, mode, reason, &ret_folios,
2049 				&split_folios, &stats,
2050 				NR_MAX_MIGRATE_PAGES_RETRY);
2051 	else
2052 		rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
2053 				private, mode, reason, &ret_folios,
2054 				&split_folios, &stats);
2055 	list_splice_tail_init(&folios, &ret_folios);
2056 	if (rc < 0) {
2057 		rc_gather = rc;
2058 		list_splice_tail(&split_folios, &ret_folios);
2059 		goto out;
2060 	}
2061 	if (!list_empty(&split_folios)) {
2062 		/*
2063 		 * Failure isn't counted since all split folios of a large folio
2064 		 * are counted as 1 failure already.  We only try to migrate them
2065 		 * with minimal effort: force MIGRATE_ASYNC mode and retry once.
2066 		 */
2067 		migrate_pages_batch(&split_folios, get_new_folio,
2068 				put_new_folio, private, MIGRATE_ASYNC, reason,
2069 				&ret_folios, NULL, &stats, 1);
2070 		list_splice_tail_init(&split_folios, &ret_folios);
2071 	}
2072 	rc_gather += rc;
2073 	if (!list_empty(from))
2074 		goto again;
2075 out:
2076 	/*
2077 	 * Put the permanently failed folios back on the migration list; they
2078 	 * will be put back on the right list by the caller.
2079 	 */
2080 	list_splice(&ret_folios, from);
2081 
2082 	/*
2083 	 * Return 0 if all split folios of the failed-to-migrate large folios
2084 	 * were migrated successfully.
2085 	 */
2086 	if (list_empty(from))
2087 		rc_gather = 0;
2088 
2089 	count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
2090 	count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
2091 	count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
2092 	count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
2093 	count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
2094 	trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
2095 			       stats.nr_thp_succeeded, stats.nr_thp_failed,
2096 			       stats.nr_thp_split, stats.nr_split, mode,
2097 			       reason);
2098 
2099 	if (ret_succeeded)
2100 		*ret_succeeded = stats.nr_succeeded;
2101 
2102 	return rc_gather;
2103 }
2104 
2105 struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2106 {
2107 	struct migration_target_control *mtc;
2108 	gfp_t gfp_mask;
2109 	unsigned int order = 0;
2110 	int nid;
2111 	int zidx;
2112 
2113 	mtc = (struct migration_target_control *)private;
2114 	gfp_mask = mtc->gfp_mask;
2115 	nid = mtc->nid;
2116 	if (nid == NUMA_NO_NODE)
2117 		nid = folio_nid(src);
2118 
2119 	if (folio_test_hugetlb(src)) {
2120 		struct hstate *h = folio_hstate(src);
2121 
2122 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2123 		return alloc_hugetlb_folio_nodemask(h, nid,
2124 						mtc->nmask, gfp_mask,
2125 						htlb_allow_alloc_fallback(mtc->reason));
2126 	}
2127 
2128 	if (folio_test_large(src)) {
2129 		/*
2130 		 * clear __GFP_RECLAIM to make the migration callback
2131 		 * consistent with regular THP allocations.
2132 		 */
2133 		gfp_mask &= ~__GFP_RECLAIM;
2134 		gfp_mask |= GFP_TRANSHUGE;
2135 		order = folio_order(src);
2136 	}
2137 	zidx = zone_idx(folio_zone(src));
2138 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2139 		gfp_mask |= __GFP_HIGHMEM;
2140 
2141 	return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2142 }
2143 
2144 #ifdef CONFIG_NUMA
2145 
2146 static int store_status(int __user *status, int start, int value, int nr)
2147 {
2148 	while (nr-- > 0) {
2149 		if (put_user(value, status + start))
2150 			return -EFAULT;
2151 		start++;
2152 	}
2153 
2154 	return 0;
2155 }
2156 
2157 static int do_move_pages_to_node(struct list_head *pagelist, int node)
2158 {
2159 	int err;
2160 	struct migration_target_control mtc = {
2161 		.nid = node,
2162 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2163 		.reason = MR_SYSCALL,
2164 	};
2165 
2166 	err = migrate_pages(pagelist, alloc_migration_target, NULL,
2167 		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2168 	if (err)
2169 		putback_movable_pages(pagelist);
2170 	return err;
2171 }
2172 
2173 static int __add_folio_for_migration(struct folio *folio, int node,
2174 		struct list_head *pagelist, bool migrate_all)
2175 {
2176 	if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2177 		return -EFAULT;
2178 
2179 	if (folio_is_zone_device(folio))
2180 		return -ENOENT;
2181 
2182 	if (folio_nid(folio) == node)
2183 		return 0;
2184 
2185 	if (folio_maybe_mapped_shared(folio) && !migrate_all)
2186 		return -EACCES;
2187 
2188 	if (folio_test_hugetlb(folio)) {
2189 		if (folio_isolate_hugetlb(folio, pagelist))
2190 			return 1;
2191 	} else if (folio_isolate_lru(folio)) {
2192 		list_add_tail(&folio->lru, pagelist);
2193 		node_stat_mod_folio(folio,
2194 			NR_ISOLATED_ANON + folio_is_file_lru(folio),
2195 			folio_nr_pages(folio));
2196 		return 1;
2197 	}
2198 	return -EBUSY;
2199 }
2200 
2201 /*
2202  * Resolves the given address to a struct folio, isolates it from the LRU and
2203  * puts it on the given pagelist.
2204  * Returns:
2205  *     errno - if the folio cannot be found/isolated
2206  *     0 - when it doesn't have to be migrated because it is already on the
2207  *         target node
2208  *     1 - when it has been queued (see the illustrative sketch after this function)
2209  */
2210 static int add_folio_for_migration(struct mm_struct *mm, const void __user *p,
2211 		int node, struct list_head *pagelist, bool migrate_all)
2212 {
2213 	struct vm_area_struct *vma;
2214 	struct folio_walk fw;
2215 	struct folio *folio;
2216 	unsigned long addr;
2217 	int err = -EFAULT;
2218 
2219 	mmap_read_lock(mm);
2220 	addr = (unsigned long)untagged_addr_remote(mm, p);
2221 
2222 	vma = vma_lookup(mm, addr);
2223 	if (vma && vma_migratable(vma)) {
2224 		folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2225 		if (folio) {
2226 			err = __add_folio_for_migration(folio, node, pagelist,
2227 							migrate_all);
2228 			folio_walk_end(&fw, vma);
2229 		} else {
2230 			err = -ENOENT;
2231 		}
2232 	}
2233 	mmap_read_unlock(mm);
2234 	return err;
2235 }
2236 
2237 static int move_pages_and_store_status(int node,
2238 		struct list_head *pagelist, int __user *status,
2239 		int start, int i, unsigned long nr_pages)
2240 {
2241 	int err;
2242 
2243 	if (list_empty(pagelist))
2244 		return 0;
2245 
2246 	err = do_move_pages_to_node(pagelist, node);
2247 	if (err) {
2248 		/*
2249 		 * A positive err means the number of pages that
2250 		 * failed to migrate.  Since we are going to
2251 		 * abort and return the number of non-migrated
2252 		 * pages, we need to include the rest of the
2253 		 * nr_pages that have not been attempted as
2254 		 * well.
2255 		 */
2256 		if (err > 0)
2257 			err += nr_pages - i;
2258 		return err;
2259 	}
2260 	return store_status(status, start, node, i - start);
2261 }
2262 
2263 /*
2264  * Migrate an array of page addresses onto an array of nodes and fill in the
2265  * corresponding array of status values (a worked example follows this function).
2266  */
2267 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2268 			 unsigned long nr_pages,
2269 			 const void __user * __user *pages,
2270 			 const int __user *nodes,
2271 			 int __user *status, int flags)
2272 {
2273 	compat_uptr_t __user *compat_pages = (void __user *)pages;
2274 	int current_node = NUMA_NO_NODE;
2275 	LIST_HEAD(pagelist);
2276 	int start, i;
2277 	int err = 0, err1;
2278 
2279 	lru_cache_disable();
2280 
2281 	for (i = start = 0; i < nr_pages; i++) {
2282 		const void __user *p;
2283 		int node;
2284 
2285 		err = -EFAULT;
2286 		if (in_compat_syscall()) {
2287 			compat_uptr_t cp;
2288 
2289 			if (get_user(cp, compat_pages + i))
2290 				goto out_flush;
2291 
2292 			p = compat_ptr(cp);
2293 		} else {
2294 			if (get_user(p, pages + i))
2295 				goto out_flush;
2296 		}
2297 		if (get_user(node, nodes + i))
2298 			goto out_flush;
2299 
2300 		err = -ENODEV;
2301 		if (node < 0 || node >= MAX_NUMNODES)
2302 			goto out_flush;
2303 		if (!node_state(node, N_MEMORY))
2304 			goto out_flush;
2305 
2306 		err = -EACCES;
2307 		if (!node_isset(node, task_nodes))
2308 			goto out_flush;
2309 
2310 		if (current_node == NUMA_NO_NODE) {
2311 			current_node = node;
2312 			start = i;
2313 		} else if (node != current_node) {
2314 			err = move_pages_and_store_status(current_node,
2315 					&pagelist, status, start, i, nr_pages);
2316 			if (err)
2317 				goto out;
2318 			start = i;
2319 			current_node = node;
2320 		}
2321 
2322 		/*
2323 		 * Errors in the page lookup or isolation are not fatal; we simply
2324 		 * report them via the status array.
2325 		 */
2326 		err = add_folio_for_migration(mm, p, current_node, &pagelist,
2327 					      flags & MPOL_MF_MOVE_ALL);
2328 
2329 		if (err > 0) {
2330 			/* The page is successfully queued for migration */
2331 			continue;
2332 		}
2333 
2334 		/*
2335 		 * The move_pages() man page does not have an -EEXIST choice, so
2336 		 * use -EFAULT instead.
2337 		 */
2338 		if (err == -EEXIST)
2339 			err = -EFAULT;
2340 
2341 		/*
2342 		 * If the page is already on the target node (!err), store the
2343 		 * node, otherwise, store the err.
2344 		 */
2345 		err = store_status(status, i, err ? : current_node, 1);
2346 		if (err)
2347 			goto out_flush;
2348 
2349 		err = move_pages_and_store_status(current_node, &pagelist,
2350 				status, start, i, nr_pages);
2351 		if (err) {
2352 			/* We have accounted for page i */
2353 			if (err > 0)
2354 				err--;
2355 			goto out;
2356 		}
2357 		current_node = NUMA_NO_NODE;
2358 	}
2359 out_flush:
2360 	/* Make sure we do not overwrite the existing error */
2361 	err1 = move_pages_and_store_status(current_node, &pagelist,
2362 				status, start, i, nr_pages);
2363 	if (err >= 0)
2364 		err = err1;
2365 out:
2366 	lru_cache_enable();
2367 	return err;
2368 }
2369 
2370 /*
2371  * Determine the nodes of an array of pages and store them in an array of status values.
2372  */
2373 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2374 				const void __user **pages, int *status)
2375 {
2376 	unsigned long i;
2377 
2378 	mmap_read_lock(mm);
2379 
2380 	for (i = 0; i < nr_pages; i++) {
2381 		unsigned long addr = (unsigned long)(*pages);
2382 		struct vm_area_struct *vma;
2383 		struct folio_walk fw;
2384 		struct folio *folio;
2385 		int err = -EFAULT;
2386 
2387 		vma = vma_lookup(mm, addr);
2388 		if (!vma)
2389 			goto set_status;
2390 
2391 		folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2392 		if (folio) {
2393 			if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2394 				err = -EFAULT;
2395 			else if (folio_is_zone_device(folio))
2396 				err = -ENOENT;
2397 			else
2398 				err = folio_nid(folio);
2399 			folio_walk_end(&fw, vma);
2400 		} else {
2401 			err = -ENOENT;
2402 		}
2403 set_status:
2404 		*status = err;
2405 
2406 		pages++;
2407 		status++;
2408 	}
2409 
2410 	mmap_read_unlock(mm);
2411 }
2412 
2413 static int get_compat_pages_array(const void __user *chunk_pages[],
2414 				  const void __user * __user *pages,
2415 				  unsigned long chunk_nr)
2416 {
2417 	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2418 	compat_uptr_t p;
2419 	int i;
2420 
2421 	for (i = 0; i < chunk_nr; i++) {
2422 		if (get_user(p, pages32 + i))
2423 			return -EFAULT;
2424 		chunk_pages[i] = compat_ptr(p);
2425 	}
2426 
2427 	return 0;
2428 }
2429 
2430 /*
2431  * Determine the nodes of a user array of pages and store them in
2432  * a user array of status values.
2433  */
2434 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2435 			 const void __user * __user *pages,
2436 			 int __user *status)
2437 {
2438 #define DO_PAGES_STAT_CHUNK_NR 16UL
2439 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2440 	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2441 
2442 	while (nr_pages) {
2443 		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2444 
2445 		if (in_compat_syscall()) {
2446 			if (get_compat_pages_array(chunk_pages, pages,
2447 						   chunk_nr))
2448 				break;
2449 		} else {
2450 			if (copy_from_user(chunk_pages, pages,
2451 				      chunk_nr * sizeof(*chunk_pages)))
2452 				break;
2453 		}
2454 
2455 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2456 
2457 		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2458 			break;
2459 
2460 		pages += chunk_nr;
2461 		status += chunk_nr;
2462 		nr_pages -= chunk_nr;
2463 	}
2464 	return nr_pages ? -EFAULT : 0;
2465 }
2466 
2467 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2468 {
2469 	struct task_struct *task;
2470 	struct mm_struct *mm;
2471 
2472 	/*
2473 	 * There is no need to check if the current process has the right to modify
2474 	 * the specified process when they are the same.
2475 	 */
2476 	if (!pid) {
2477 		mmget(current->mm);
2478 		*mem_nodes = cpuset_mems_allowed(current);
2479 		return current->mm;
2480 	}
2481 
2482 	task = find_get_task_by_vpid(pid);
2483 	if (!task)
2484 		return ERR_PTR(-ESRCH);
2486 
2487 	/*
2488 	 * Check if this process has the right to modify the specified
2489 	 * process. Use the regular "ptrace_may_access()" checks.
2490 	 */
2491 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2492 		mm = ERR_PTR(-EPERM);
2493 		goto out;
2494 	}
2495 
2496 	mm = ERR_PTR(security_task_movememory(task));
2497 	if (IS_ERR(mm))
2498 		goto out;
2499 	*mem_nodes = cpuset_mems_allowed(task);
2500 	mm = get_task_mm(task);
2501 out:
2502 	put_task_struct(task);
2503 	if (!mm)
2504 		mm = ERR_PTR(-EINVAL);
2505 	return mm;
2506 }
2507 
2508 /*
2509  * Move a list of pages in the address space of the currently executing
2510  * process.
2511  */
2512 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2513 			     const void __user * __user *pages,
2514 			     const int __user *nodes,
2515 			     int __user *status, int flags)
2516 {
2517 	struct mm_struct *mm;
2518 	int err;
2519 	nodemask_t task_nodes;
2520 
2521 	/* Check flags */
2522 	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2523 		return -EINVAL;
2524 
2525 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2526 		return -EPERM;
2527 
2528 	mm = find_mm_struct(pid, &task_nodes);
2529 	if (IS_ERR(mm))
2530 		return PTR_ERR(mm);
2531 
2532 	if (nodes)
2533 		err = do_pages_move(mm, task_nodes, nr_pages, pages,
2534 				    nodes, status, flags);
2535 	else
2536 		err = do_pages_stat(mm, nr_pages, pages, status);
2537 
2538 	mmput(mm);
2539 	return err;
2540 }
2541 
2542 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2543 		const void __user * __user *, pages,
2544 		const int __user *, nodes,
2545 		int __user *, status, int, flags)
2546 {
2547 	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2548 }
2549 
2550 #ifdef CONFIG_NUMA_BALANCING
2551 /*
2552  * Returns true if this is a safe migration target node for misplaced NUMA
2553  * pages. Currently it only checks the watermarks, which is crude.
2554  */
2555 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2556 				   unsigned long nr_migrate_pages)
2557 {
2558 	int z;
2559 
2560 	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2561 		struct zone *zone = pgdat->node_zones + z;
2562 
2563 		if (!managed_zone(zone))
2564 			continue;
2565 
2566 		/* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2567 		if (!zone_watermark_ok(zone, 0,
2568 				       high_wmark_pages(zone) +
2569 				       nr_migrate_pages,
2570 				       ZONE_MOVABLE, ALLOC_CMA))
2571 			continue;
2572 		return true;
2573 	}
2574 	return false;
2575 }
2576 
2577 static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2578 					   unsigned long data)
2579 {
2580 	int nid = (int) data;
2581 	int order = folio_order(src);
2582 	gfp_t gfp = __GFP_THISNODE;
2583 
2584 	if (order > 0)
2585 		gfp |= GFP_TRANSHUGE_LIGHT;
2586 	else {
2587 		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2588 			__GFP_NOWARN;
2589 		gfp &= ~__GFP_RECLAIM;
2590 	}
2591 	return __folio_alloc_node(gfp, order, nid);
2592 }
2593 
2594 /*
2595  * Prepare for calling migrate_misplaced_folio() by isolating the folio if
2596  * permitted. Must be called with the PTL still held.
2597  */
2598 int migrate_misplaced_folio_prepare(struct folio *folio,
2599 		struct vm_area_struct *vma, int node)
2600 {
2601 	int nr_pages = folio_nr_pages(folio);
2602 	pg_data_t *pgdat = NODE_DATA(node);
2603 
2604 	if (folio_is_file_lru(folio)) {
2605 		/*
2606 		 * Do not migrate file folios that are mapped in multiple
2607 		 * processes with execute permissions, as they are probably
2608 		 * shared libraries.
2609 		 *
2610 		 * See folio_maybe_mapped_shared() for possible imprecision
2611 		 * when we cannot easily detect if a folio is shared.
2612 		 */
2613 		if ((vma->vm_flags & VM_EXEC) && folio_maybe_mapped_shared(folio))
2614 			return -EACCES;
2615 
2616 		/*
2617 		 * Do not migrate dirty folios, as not all filesystems can move
2618 		 * dirty folios in MIGRATE_ASYNC mode, which would be a waste of
2619 		 * cycles.
2620 		 */
2621 		if (folio_test_dirty(folio))
2622 			return -EAGAIN;
2623 	}
2624 
2625 	/* Avoid migrating to a node that is nearly full */
2626 	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2627 		int z;
2628 
2629 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2630 			return -EAGAIN;
2631 		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2632 			if (managed_zone(pgdat->node_zones + z))
2633 				break;
2634 		}
2635 
2636 		/*
2637 		 * If there are no managed zones, do not proceed
2638 		 * further.
2639 		 */
2640 		if (z < 0)
2641 			return -EAGAIN;
2642 
2643 		wakeup_kswapd(pgdat->node_zones + z, 0,
2644 			      folio_order(folio), ZONE_MOVABLE);
2645 		return -EAGAIN;
2646 	}
2647 
2648 	if (!folio_isolate_lru(folio))
2649 		return -EAGAIN;
2650 
2651 	node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2652 			    nr_pages);
2653 	return 0;
2654 }
2655 
2656 /*
2657  * Attempt to migrate a misplaced folio to the specified destination
2658  * node. The caller is expected to have isolated the folio by calling
2659  * migrate_misplaced_folio_prepare(), which will result in an
2660  * elevated reference count on the folio. This function will un-isolate the
2661  * folio and drop that reference before returning (see the sketch below).
2662  */
2663 int migrate_misplaced_folio(struct folio *folio, int node)
2664 {
2665 	pg_data_t *pgdat = NODE_DATA(node);
2666 	int nr_remaining;
2667 	unsigned int nr_succeeded;
2668 	LIST_HEAD(migratepages);
2669 	struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio);
2670 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
2671 
2672 	list_add(&folio->lru, &migratepages);
2673 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2674 				     NULL, node, MIGRATE_ASYNC,
2675 				     MR_NUMA_MISPLACED, &nr_succeeded);
2676 	if (nr_remaining && !list_empty(&migratepages))
2677 		putback_movable_pages(&migratepages);
2678 	if (nr_succeeded) {
2679 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2680 		count_memcg_events(memcg, NUMA_PAGE_MIGRATE, nr_succeeded);
2681 		if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
2682 		    && !node_is_toptier(folio_nid(folio))
2683 		    && node_is_toptier(node))
2684 			mod_lruvec_state(lruvec, PGPROMOTE_SUCCESS, nr_succeeded);
2685 	}
2686 	mem_cgroup_put(memcg);
2687 	BUG_ON(!list_empty(&migratepages));
2688 	return nr_remaining ? -EAGAIN : 0;
2689 }
2690 #endif /* CONFIG_NUMA_BALANCING */
2691 #endif /* CONFIG_NUMA */
2692