xref: /linux/mm/migrate.c (revision acc53a0b4c156877773da6e9eea4113dc7e770ae)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Memory Migration functionality - linux/mm/migrate.c
4  *
5  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6  *
7  * Page migration was first developed in the context of the memory hotplug
8  * project. The main authors of the migration code are:
9  *
10  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11  * Hirokazu Takahashi <taka@valinux.co.jp>
12  * Dave Hansen <haveblue@us.ibm.com>
13  * Christoph Lameter
14  */
15 
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/ksm.h>
24 #include <linux/rmap.h>
25 #include <linux/topology.h>
26 #include <linux/cpu.h>
27 #include <linux/cpuset.h>
28 #include <linux/writeback.h>
29 #include <linux/mempolicy.h>
30 #include <linux/vmalloc.h>
31 #include <linux/security.h>
32 #include <linux/backing-dev.h>
33 #include <linux/compaction.h>
34 #include <linux/syscalls.h>
35 #include <linux/compat.h>
36 #include <linux/hugetlb.h>
37 #include <linux/gfp.h>
38 #include <linux/pfn_t.h>
39 #include <linux/page_idle.h>
40 #include <linux/page_owner.h>
41 #include <linux/sched/mm.h>
42 #include <linux/ptrace.h>
43 #include <linux/memory.h>
44 #include <linux/sched/sysctl.h>
45 #include <linux/memory-tiers.h>
46 #include <linux/pagewalk.h>
47 
48 #include <asm/tlbflush.h>
49 
50 #include <trace/events/migrate.h>
51 
52 #include "internal.h"
53 #include "swap.h"
54 
55 bool isolate_movable_page(struct page *page, isolate_mode_t mode)
56 {
57 	struct folio *folio = folio_get_nontail_page(page);
58 	const struct movable_operations *mops;
59 
60 	/*
61 	 * Avoid burning cycles with pages that are yet under __free_pages(),
62 	 * or just got freed under us.
63 	 *
64 	 * In case we 'win' a race for a movable page being freed under us and
65 	 * raise its refcount preventing __free_pages() from doing its job
66 	 * the put_page() at the end of this block will take care of
67 	 * release this page, thus avoiding a nasty leakage.
68 	 */
69 	if (!folio)
70 		goto out;
71 
72 	/*
73 	 * Check movable flag before taking the page lock because
74 	 * we use non-atomic bitops on newly allocated page flags so
75 	 * unconditionally grabbing the lock ruins page's owner side.
76 	 */
77 	if (unlikely(!__folio_test_movable(folio)))
78 		goto out_putfolio;
79 
80 	/*
81 	 * As movable pages are not isolated from LRU lists, concurrent
82 	 * compaction threads can race against page migration functions
83 	 * as well as race against the releasing a page.
84 	 *
85 	 * In order to avoid having an already isolated movable page
86 	 * being (wrongly) re-isolated while it is under migration,
87 	 * or to avoid attempting to isolate pages being released,
88 	 * lets be sure we have the page lock
89 	 * before proceeding with the movable page isolation steps.
90 	 */
91 	if (unlikely(!folio_trylock(folio)))
92 		goto out_putfolio;
93 
94 	if (!folio_test_movable(folio) || folio_test_isolated(folio))
95 		goto out_no_isolated;
96 
97 	mops = folio_movable_ops(folio);
98 	VM_BUG_ON_FOLIO(!mops, folio);
99 
100 	if (!mops->isolate_page(&folio->page, mode))
101 		goto out_no_isolated;
102 
103 	/* Driver shouldn't use the isolated flag */
104 	WARN_ON_ONCE(folio_test_isolated(folio));
105 	folio_set_isolated(folio);
106 	folio_unlock(folio);
107 
108 	return true;
109 
110 out_no_isolated:
111 	folio_unlock(folio);
112 out_putfolio:
113 	folio_put(folio);
114 out:
115 	return false;
116 }
117 
118 static void putback_movable_folio(struct folio *folio)
119 {
120 	const struct movable_operations *mops = folio_movable_ops(folio);
121 
122 	mops->putback_page(&folio->page);
123 	folio_clear_isolated(folio);
124 }
125 
126 /*
127  * Put previously isolated pages back onto the appropriate lists
128  * from where they were once taken off for compaction/migration.
129  *
130  * This function shall be used whenever the isolated pageset has been
131  * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
132  * and folio_isolate_hugetlb().
133  */
134 void putback_movable_pages(struct list_head *l)
135 {
136 	struct folio *folio;
137 	struct folio *folio2;
138 
139 	list_for_each_entry_safe(folio, folio2, l, lru) {
140 		if (unlikely(folio_test_hugetlb(folio))) {
141 			folio_putback_hugetlb(folio);
142 			continue;
143 		}
144 		list_del(&folio->lru);
145 		/*
146 		 * We isolated non-lru movable folio so here we can use
147 		 * __folio_test_movable because LRU folio's mapping cannot
148 		 * have PAGE_MAPPING_MOVABLE.
149 		 */
150 		if (unlikely(__folio_test_movable(folio))) {
151 			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
152 			folio_lock(folio);
153 			if (folio_test_movable(folio))
154 				putback_movable_folio(folio);
155 			else
156 				folio_clear_isolated(folio);
157 			folio_unlock(folio);
158 			folio_put(folio);
159 		} else {
160 			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
161 					folio_is_file_lru(folio), -folio_nr_pages(folio));
162 			folio_putback_lru(folio);
163 		}
164 	}
165 }
166 
167 /* Must be called with an elevated refcount on the non-hugetlb folio */
168 bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
169 {
170 	bool isolated, lru;
171 
172 	if (folio_test_hugetlb(folio))
173 		return folio_isolate_hugetlb(folio, list);
174 
175 	lru = !__folio_test_movable(folio);
176 	if (lru)
177 		isolated = folio_isolate_lru(folio);
178 	else
179 		isolated = isolate_movable_page(&folio->page,
180 						ISOLATE_UNEVICTABLE);
181 
182 	if (!isolated)
183 		return false;
184 
185 	list_add(&folio->lru, list);
186 	if (lru)
187 		node_stat_add_folio(folio, NR_ISOLATED_ANON +
188 				    folio_is_file_lru(folio));
189 
190 	return true;
191 }
192 
193 static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
194 					  struct folio *folio,
195 					  unsigned long idx)
196 {
197 	struct page *page = folio_page(folio, idx);
198 	bool contains_data;
199 	pte_t newpte;
200 	void *addr;
201 
202 	if (PageCompound(page))
203 		return false;
204 	VM_BUG_ON_PAGE(!PageAnon(page), page);
205 	VM_BUG_ON_PAGE(!PageLocked(page), page);
206 	VM_BUG_ON_PAGE(pte_present(ptep_get(pvmw->pte)), page);
207 
208 	if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
209 	    mm_forbids_zeropage(pvmw->vma->vm_mm))
210 		return false;
211 
212 	/*
213 	 * The pmd entry mapping the old thp was flushed and the pte mapping
214 	 * this subpage has been non present. If the subpage is only zero-filled
215 	 * then map it to the shared zeropage.
216 	 */
217 	addr = kmap_local_page(page);
218 	contains_data = memchr_inv(addr, 0, PAGE_SIZE);
219 	kunmap_local(addr);
220 
221 	if (contains_data)
222 		return false;
223 
224 	newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
225 					pvmw->vma->vm_page_prot));
226 	set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
227 
228 	dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
229 	return true;
230 }
231 
232 struct rmap_walk_arg {
233 	struct folio *folio;
234 	bool map_unused_to_zeropage;
235 };
236 
237 /*
238  * Restore a potential migration pte to a working pte entry
239  */
240 static bool remove_migration_pte(struct folio *folio,
241 		struct vm_area_struct *vma, unsigned long addr, void *arg)
242 {
243 	struct rmap_walk_arg *rmap_walk_arg = arg;
244 	DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
245 
246 	while (page_vma_mapped_walk(&pvmw)) {
247 		rmap_t rmap_flags = RMAP_NONE;
248 		pte_t old_pte;
249 		pte_t pte;
250 		swp_entry_t entry;
251 		struct page *new;
252 		unsigned long idx = 0;
253 
254 		/* pgoff is invalid for ksm pages, but they are never large */
255 		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
256 			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
257 		new = folio_page(folio, idx);
258 
259 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
260 		/* PMD-mapped THP migration entry */
261 		if (!pvmw.pte) {
262 			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
263 					!folio_test_pmd_mappable(folio), folio);
264 			remove_migration_pmd(&pvmw, new);
265 			continue;
266 		}
267 #endif
268 		if (rmap_walk_arg->map_unused_to_zeropage &&
269 		    try_to_map_unused_to_zeropage(&pvmw, folio, idx))
270 			continue;
271 
272 		folio_get(folio);
273 		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
274 		old_pte = ptep_get(pvmw.pte);
275 
276 		entry = pte_to_swp_entry(old_pte);
277 		if (!is_migration_entry_young(entry))
278 			pte = pte_mkold(pte);
279 		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
280 			pte = pte_mkdirty(pte);
281 		if (pte_swp_soft_dirty(old_pte))
282 			pte = pte_mksoft_dirty(pte);
283 		else
284 			pte = pte_clear_soft_dirty(pte);
285 
286 		if (is_writable_migration_entry(entry))
287 			pte = pte_mkwrite(pte, vma);
288 		else if (pte_swp_uffd_wp(old_pte))
289 			pte = pte_mkuffd_wp(pte);
290 
291 		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
292 			rmap_flags |= RMAP_EXCLUSIVE;
293 
294 		if (unlikely(is_device_private_page(new))) {
295 			if (pte_write(pte))
296 				entry = make_writable_device_private_entry(
297 							page_to_pfn(new));
298 			else
299 				entry = make_readable_device_private_entry(
300 							page_to_pfn(new));
301 			pte = swp_entry_to_pte(entry);
302 			if (pte_swp_soft_dirty(old_pte))
303 				pte = pte_swp_mksoft_dirty(pte);
304 			if (pte_swp_uffd_wp(old_pte))
305 				pte = pte_swp_mkuffd_wp(pte);
306 		}
307 
308 #ifdef CONFIG_HUGETLB_PAGE
309 		if (folio_test_hugetlb(folio)) {
310 			struct hstate *h = hstate_vma(vma);
311 			unsigned int shift = huge_page_shift(h);
312 			unsigned long psize = huge_page_size(h);
313 
314 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
315 			if (folio_test_anon(folio))
316 				hugetlb_add_anon_rmap(folio, vma, pvmw.address,
317 						      rmap_flags);
318 			else
319 				hugetlb_add_file_rmap(folio);
320 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
321 					psize);
322 		} else
323 #endif
324 		{
325 			if (folio_test_anon(folio))
326 				folio_add_anon_rmap_pte(folio, new, vma,
327 							pvmw.address, rmap_flags);
328 			else
329 				folio_add_file_rmap_pte(folio, new, vma);
330 			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
331 		}
332 		if (READ_ONCE(vma->vm_flags) & VM_LOCKED)
333 			mlock_drain_local();
334 
335 		trace_remove_migration_pte(pvmw.address, pte_val(pte),
336 					   compound_order(new));
337 
338 		/* No need to invalidate - it was non-present before */
339 		update_mmu_cache(vma, pvmw.address, pvmw.pte);
340 	}
341 
342 	return true;
343 }
344 
345 /*
346  * Get rid of all migration entries and replace them by
347  * references to the indicated page.
348  */
349 void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
350 {
351 	struct rmap_walk_arg rmap_walk_arg = {
352 		.folio = src,
353 		.map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
354 	};
355 
356 	struct rmap_walk_control rwc = {
357 		.rmap_one = remove_migration_pte,
358 		.arg = &rmap_walk_arg,
359 	};
360 
361 	VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);
362 
363 	if (flags & RMP_LOCKED)
364 		rmap_walk_locked(dst, &rwc);
365 	else
366 		rmap_walk(dst, &rwc);
367 }
368 
369 /*
370  * Something used the pte of a page under migration. We need to
371  * get to the page and wait until migration is finished.
372  * When we return from this function the fault will be retried.
373  */
374 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
375 			  unsigned long address)
376 {
377 	spinlock_t *ptl;
378 	pte_t *ptep;
379 	pte_t pte;
380 	swp_entry_t entry;
381 
382 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
383 	if (!ptep)
384 		return;
385 
386 	pte = ptep_get(ptep);
387 	pte_unmap(ptep);
388 
389 	if (!is_swap_pte(pte))
390 		goto out;
391 
392 	entry = pte_to_swp_entry(pte);
393 	if (!is_migration_entry(entry))
394 		goto out;
395 
396 	migration_entry_wait_on_locked(entry, ptl);
397 	return;
398 out:
399 	spin_unlock(ptl);
400 }
401 
402 #ifdef CONFIG_HUGETLB_PAGE
403 /*
404  * The vma read lock must be held upon entry. Holding that lock prevents either
405  * the pte or the ptl from being freed.
406  *
407  * This function will release the vma lock before returning.
408  */
409 void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
410 {
411 	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
412 	pte_t pte;
413 
414 	hugetlb_vma_assert_locked(vma);
415 	spin_lock(ptl);
416 	pte = huge_ptep_get(vma->vm_mm, addr, ptep);
417 
418 	if (unlikely(!is_hugetlb_entry_migration(pte))) {
419 		spin_unlock(ptl);
420 		hugetlb_vma_unlock_read(vma);
421 	} else {
422 		/*
423 		 * If migration entry existed, safe to release vma lock
424 		 * here because the pgtable page won't be freed without the
425 		 * pgtable lock released.  See comment right above pgtable
426 		 * lock release in migration_entry_wait_on_locked().
427 		 */
428 		hugetlb_vma_unlock_read(vma);
429 		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
430 	}
431 }
432 #endif
433 
434 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
435 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
436 {
437 	spinlock_t *ptl;
438 
439 	ptl = pmd_lock(mm, pmd);
440 	if (!is_pmd_migration_entry(*pmd))
441 		goto unlock;
442 	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
443 	return;
444 unlock:
445 	spin_unlock(ptl);
446 }
447 #endif
448 
449 /*
450  * Replace the folio in the mapping.
451  *
452  * The number of remaining references must be:
453  * 1 for anonymous folios without a mapping
454  * 2 for folios with a mapping
455  * 3 for folios with a mapping and the private flag set.
456  */
457 static int __folio_migrate_mapping(struct address_space *mapping,
458 		struct folio *newfolio, struct folio *folio, int expected_count)
459 {
460 	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
461 	struct zone *oldzone, *newzone;
462 	int dirty;
463 	long nr = folio_nr_pages(folio);
464 	long entries, i;
465 
466 	if (!mapping) {
467 		/* Take off deferred split queue while frozen and memcg set */
468 		if (folio_test_large(folio) &&
469 		    folio_test_large_rmappable(folio)) {
470 			if (!folio_ref_freeze(folio, expected_count))
471 				return -EAGAIN;
472 			folio_unqueue_deferred_split(folio);
473 			folio_ref_unfreeze(folio, expected_count);
474 		}
475 
476 		/* No turning back from here */
477 		newfolio->index = folio->index;
478 		newfolio->mapping = folio->mapping;
479 		if (folio_test_anon(folio) && folio_test_large(folio))
480 			mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
481 		if (folio_test_swapbacked(folio))
482 			__folio_set_swapbacked(newfolio);
483 
484 		return MIGRATEPAGE_SUCCESS;
485 	}
486 
487 	oldzone = folio_zone(folio);
488 	newzone = folio_zone(newfolio);
489 
490 	xas_lock_irq(&xas);
491 	if (!folio_ref_freeze(folio, expected_count)) {
492 		xas_unlock_irq(&xas);
493 		return -EAGAIN;
494 	}
495 
496 	/* Take off deferred split queue while frozen and memcg set */
497 	folio_unqueue_deferred_split(folio);
498 
499 	/*
500 	 * Now we know that no one else is looking at the folio:
501 	 * no turning back from here.
502 	 */
503 	newfolio->index = folio->index;
504 	newfolio->mapping = folio->mapping;
505 	if (folio_test_anon(folio) && folio_test_large(folio))
506 		mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
507 	folio_ref_add(newfolio, nr); /* add cache reference */
508 	if (folio_test_swapbacked(folio))
509 		__folio_set_swapbacked(newfolio);
510 	if (folio_test_swapcache(folio)) {
511 		folio_set_swapcache(newfolio);
512 		newfolio->private = folio_get_private(folio);
513 		entries = nr;
514 	} else {
515 		entries = 1;
516 	}
517 
518 	/* Move dirty while folio refs frozen and newfolio not yet exposed */
519 	dirty = folio_test_dirty(folio);
520 	if (dirty) {
521 		folio_clear_dirty(folio);
522 		folio_set_dirty(newfolio);
523 	}
524 
525 	/* Swap cache still stores N entries instead of a high-order entry */
526 	for (i = 0; i < entries; i++) {
527 		xas_store(&xas, newfolio);
528 		xas_next(&xas);
529 	}
530 
531 	/*
532 	 * Drop cache reference from old folio by unfreezing
533 	 * to one less reference.
534 	 * We know this isn't the last reference.
535 	 */
536 	folio_ref_unfreeze(folio, expected_count - nr);
537 
538 	xas_unlock(&xas);
539 	/* Leave irq disabled to prevent preemption while updating stats */
540 
541 	/*
542 	 * If moved to a different zone then also account
543 	 * the folio for that zone. Other VM counters will be
544 	 * taken care of when we establish references to the
545 	 * new folio and drop references to the old folio.
546 	 *
547 	 * Note that anonymous folios are accounted for
548 	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
549 	 * are mapped to swap space.
550 	 */
551 	if (newzone != oldzone) {
552 		struct lruvec *old_lruvec, *new_lruvec;
553 		struct mem_cgroup *memcg;
554 
555 		memcg = folio_memcg(folio);
556 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
557 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
558 
559 		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
560 		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
561 		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
562 			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
563 			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
564 
565 			if (folio_test_pmd_mappable(folio)) {
566 				__mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
567 				__mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
568 			}
569 		}
570 #ifdef CONFIG_SWAP
571 		if (folio_test_swapcache(folio)) {
572 			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
573 			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
574 		}
575 #endif
576 		if (dirty && mapping_can_writeback(mapping)) {
577 			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
578 			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
579 			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
580 			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
581 		}
582 	}
583 	local_irq_enable();
584 
585 	return MIGRATEPAGE_SUCCESS;
586 }
587 
588 int folio_migrate_mapping(struct address_space *mapping,
589 		struct folio *newfolio, struct folio *folio, int extra_count)
590 {
591 	int expected_count = folio_expected_ref_count(folio) + extra_count + 1;
592 
593 	if (folio_ref_count(folio) != expected_count)
594 		return -EAGAIN;
595 
596 	return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
597 }
598 EXPORT_SYMBOL(folio_migrate_mapping);
599 
600 /*
601  * The expected number of remaining references is the same as that
602  * of folio_migrate_mapping().
603  */
604 int migrate_huge_page_move_mapping(struct address_space *mapping,
605 				   struct folio *dst, struct folio *src)
606 {
607 	XA_STATE(xas, &mapping->i_pages, folio_index(src));
608 	int rc, expected_count = folio_expected_ref_count(src) + 1;
609 
610 	if (folio_ref_count(src) != expected_count)
611 		return -EAGAIN;
612 
613 	rc = folio_mc_copy(dst, src);
614 	if (unlikely(rc))
615 		return rc;
616 
617 	xas_lock_irq(&xas);
618 	if (!folio_ref_freeze(src, expected_count)) {
619 		xas_unlock_irq(&xas);
620 		return -EAGAIN;
621 	}
622 
623 	dst->index = src->index;
624 	dst->mapping = src->mapping;
625 
626 	folio_ref_add(dst, folio_nr_pages(dst));
627 
628 	xas_store(&xas, dst);
629 
630 	folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));
631 
632 	xas_unlock_irq(&xas);
633 
634 	return MIGRATEPAGE_SUCCESS;
635 }
636 
637 /*
638  * Copy the flags and some other ancillary information
639  */
640 void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
641 {
642 	int cpupid;
643 
644 	if (folio_test_referenced(folio))
645 		folio_set_referenced(newfolio);
646 	if (folio_test_uptodate(folio))
647 		folio_mark_uptodate(newfolio);
648 	if (folio_test_clear_active(folio)) {
649 		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
650 		folio_set_active(newfolio);
651 	} else if (folio_test_clear_unevictable(folio))
652 		folio_set_unevictable(newfolio);
653 	if (folio_test_workingset(folio))
654 		folio_set_workingset(newfolio);
655 	if (folio_test_checked(folio))
656 		folio_set_checked(newfolio);
657 	/*
658 	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
659 	 * migration entries. We can still have PG_anon_exclusive set on an
660 	 * effectively unmapped and unreferenced first sub-pages of an
661 	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
662 	 */
663 	if (folio_test_mappedtodisk(folio))
664 		folio_set_mappedtodisk(newfolio);
665 
666 	/* Move dirty on pages not done by folio_migrate_mapping() */
667 	if (folio_test_dirty(folio))
668 		folio_set_dirty(newfolio);
669 
670 	if (folio_test_young(folio))
671 		folio_set_young(newfolio);
672 	if (folio_test_idle(folio))
673 		folio_set_idle(newfolio);
674 
675 	folio_migrate_refs(newfolio, folio);
676 	/*
677 	 * Copy NUMA information to the new page, to prevent over-eager
678 	 * future migrations of this same page.
679 	 */
680 	cpupid = folio_xchg_last_cpupid(folio, -1);
681 	/*
682 	 * For memory tiering mode, when migrate between slow and fast
683 	 * memory node, reset cpupid, because that is used to record
684 	 * page access time in slow memory node.
685 	 */
686 	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
687 		bool f_toptier = node_is_toptier(folio_nid(folio));
688 		bool t_toptier = node_is_toptier(folio_nid(newfolio));
689 
690 		if (f_toptier != t_toptier)
691 			cpupid = -1;
692 	}
693 	folio_xchg_last_cpupid(newfolio, cpupid);
694 
695 	folio_migrate_ksm(newfolio, folio);
696 	/*
697 	 * Please do not reorder this without considering how mm/ksm.c's
698 	 * ksm_get_folio() depends upon ksm_migrate_page() and the
699 	 * swapcache flag.
700 	 */
701 	if (folio_test_swapcache(folio))
702 		folio_clear_swapcache(folio);
703 	folio_clear_private(folio);
704 
705 	/* page->private contains hugetlb specific flags */
706 	if (!folio_test_hugetlb(folio))
707 		folio->private = NULL;
708 
709 	/*
710 	 * If any waiters have accumulated on the new page then
711 	 * wake them up.
712 	 */
713 	if (folio_test_writeback(newfolio))
714 		folio_end_writeback(newfolio);
715 
716 	/*
717 	 * PG_readahead shares the same bit with PG_reclaim.  The above
718 	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
719 	 * bit after that.
720 	 */
721 	if (folio_test_readahead(folio))
722 		folio_set_readahead(newfolio);
723 
724 	folio_copy_owner(newfolio, folio);
725 	pgalloc_tag_swap(newfolio, folio);
726 
727 	mem_cgroup_migrate(folio, newfolio);
728 }
729 EXPORT_SYMBOL(folio_migrate_flags);
730 
731 /************************************************************
732  *                    Migration functions
733  ***********************************************************/
734 
735 static int __migrate_folio(struct address_space *mapping, struct folio *dst,
736 			   struct folio *src, void *src_private,
737 			   enum migrate_mode mode)
738 {
739 	int rc, expected_count = folio_expected_ref_count(src) + 1;
740 
741 	/* Check whether src does not have extra refs before we do more work */
742 	if (folio_ref_count(src) != expected_count)
743 		return -EAGAIN;
744 
745 	rc = folio_mc_copy(dst, src);
746 	if (unlikely(rc))
747 		return rc;
748 
749 	rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
750 	if (rc != MIGRATEPAGE_SUCCESS)
751 		return rc;
752 
753 	if (src_private)
754 		folio_attach_private(dst, folio_detach_private(src));
755 
756 	folio_migrate_flags(dst, src);
757 	return MIGRATEPAGE_SUCCESS;
758 }
759 
760 /**
761  * migrate_folio() - Simple folio migration.
762  * @mapping: The address_space containing the folio.
763  * @dst: The folio to migrate the data to.
764  * @src: The folio containing the current data.
765  * @mode: How to migrate the page.
766  *
767  * Common logic to directly migrate a single LRU folio suitable for
768  * folios that do not have private data.
769  *
770  * Folios are locked upon entry and exit.
771  */
772 int migrate_folio(struct address_space *mapping, struct folio *dst,
773 		  struct folio *src, enum migrate_mode mode)
774 {
775 	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */
776 	return __migrate_folio(mapping, dst, src, NULL, mode);
777 }
778 EXPORT_SYMBOL(migrate_folio);
779 
780 #ifdef CONFIG_BUFFER_HEAD
781 /* Returns true if all buffers are successfully locked */
782 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
783 							enum migrate_mode mode)
784 {
785 	struct buffer_head *bh = head;
786 	struct buffer_head *failed_bh;
787 
788 	do {
789 		if (!trylock_buffer(bh)) {
790 			if (mode == MIGRATE_ASYNC)
791 				goto unlock;
792 			if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
793 				goto unlock;
794 			lock_buffer(bh);
795 		}
796 
797 		bh = bh->b_this_page;
798 	} while (bh != head);
799 
800 	return true;
801 
802 unlock:
803 	/* We failed to lock the buffer and cannot stall. */
804 	failed_bh = bh;
805 	bh = head;
806 	while (bh != failed_bh) {
807 		unlock_buffer(bh);
808 		bh = bh->b_this_page;
809 	}
810 
811 	return false;
812 }
813 
814 static int __buffer_migrate_folio(struct address_space *mapping,
815 		struct folio *dst, struct folio *src, enum migrate_mode mode,
816 		bool check_refs)
817 {
818 	struct buffer_head *bh, *head;
819 	int rc;
820 	int expected_count;
821 
822 	head = folio_buffers(src);
823 	if (!head)
824 		return migrate_folio(mapping, dst, src, mode);
825 
826 	/* Check whether page does not have extra refs before we do more work */
827 	expected_count = folio_expected_ref_count(src) + 1;
828 	if (folio_ref_count(src) != expected_count)
829 		return -EAGAIN;
830 
831 	if (!buffer_migrate_lock_buffers(head, mode))
832 		return -EAGAIN;
833 
834 	if (check_refs) {
835 		bool busy, migrating;
836 		bool invalidated = false;
837 
838 		migrating = test_and_set_bit_lock(BH_Migrate, &head->b_state);
839 		VM_WARN_ON_ONCE(migrating);
840 recheck_buffers:
841 		busy = false;
842 		spin_lock(&mapping->i_private_lock);
843 		bh = head;
844 		do {
845 			if (atomic_read(&bh->b_count)) {
846 				busy = true;
847 				break;
848 			}
849 			bh = bh->b_this_page;
850 		} while (bh != head);
851 		spin_unlock(&mapping->i_private_lock);
852 		if (busy) {
853 			if (invalidated) {
854 				rc = -EAGAIN;
855 				goto unlock_buffers;
856 			}
857 			invalidate_bh_lrus();
858 			invalidated = true;
859 			goto recheck_buffers;
860 		}
861 	}
862 
863 	rc = filemap_migrate_folio(mapping, dst, src, mode);
864 	if (rc != MIGRATEPAGE_SUCCESS)
865 		goto unlock_buffers;
866 
867 	bh = head;
868 	do {
869 		folio_set_bh(bh, dst, bh_offset(bh));
870 		bh = bh->b_this_page;
871 	} while (bh != head);
872 
873 unlock_buffers:
874 	if (check_refs)
875 		clear_bit_unlock(BH_Migrate, &head->b_state);
876 	bh = head;
877 	do {
878 		unlock_buffer(bh);
879 		bh = bh->b_this_page;
880 	} while (bh != head);
881 
882 	return rc;
883 }
884 
885 /**
886  * buffer_migrate_folio() - Migration function for folios with buffers.
887  * @mapping: The address space containing @src.
888  * @dst: The folio to migrate to.
889  * @src: The folio to migrate from.
890  * @mode: How to migrate the folio.
891  *
892  * This function can only be used if the underlying filesystem guarantees
893  * that no other references to @src exist. For example attached buffer
894  * heads are accessed only under the folio lock.  If your filesystem cannot
895  * provide this guarantee, buffer_migrate_folio_norefs() may be more
896  * appropriate.
897  *
898  * Return: 0 on success or a negative errno on failure.
899  */
900 int buffer_migrate_folio(struct address_space *mapping,
901 		struct folio *dst, struct folio *src, enum migrate_mode mode)
902 {
903 	return __buffer_migrate_folio(mapping, dst, src, mode, false);
904 }
905 EXPORT_SYMBOL(buffer_migrate_folio);
906 
907 /**
908  * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
909  * @mapping: The address space containing @src.
910  * @dst: The folio to migrate to.
911  * @src: The folio to migrate from.
912  * @mode: How to migrate the folio.
913  *
914  * Like buffer_migrate_folio() except that this variant is more careful
915  * and checks that there are also no buffer head references. This function
916  * is the right one for mappings where buffer heads are directly looked
917  * up and referenced (such as block device mappings).
918  *
919  * Return: 0 on success or a negative errno on failure.
920  */
921 int buffer_migrate_folio_norefs(struct address_space *mapping,
922 		struct folio *dst, struct folio *src, enum migrate_mode mode)
923 {
924 	return __buffer_migrate_folio(mapping, dst, src, mode, true);
925 }
926 EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
927 #endif /* CONFIG_BUFFER_HEAD */
928 
929 int filemap_migrate_folio(struct address_space *mapping,
930 		struct folio *dst, struct folio *src, enum migrate_mode mode)
931 {
932 	return __migrate_folio(mapping, dst, src, folio_get_private(src), mode);
933 }
934 EXPORT_SYMBOL_GPL(filemap_migrate_folio);
935 
936 /*
937  * Writeback a folio to clean the dirty state
938  */
939 static int writeout(struct address_space *mapping, struct folio *folio)
940 {
941 	struct writeback_control wbc = {
942 		.sync_mode = WB_SYNC_NONE,
943 		.nr_to_write = 1,
944 		.range_start = 0,
945 		.range_end = LLONG_MAX,
946 		.for_reclaim = 1
947 	};
948 	int rc;
949 
950 	if (!mapping->a_ops->writepage)
951 		/* No write method for the address space */
952 		return -EINVAL;
953 
954 	if (!folio_clear_dirty_for_io(folio))
955 		/* Someone else already triggered a write */
956 		return -EAGAIN;
957 
958 	/*
959 	 * A dirty folio may imply that the underlying filesystem has
960 	 * the folio on some queue. So the folio must be clean for
961 	 * migration. Writeout may mean we lose the lock and the
962 	 * folio state is no longer what we checked for earlier.
963 	 * At this point we know that the migration attempt cannot
964 	 * be successful.
965 	 */
966 	remove_migration_ptes(folio, folio, 0);
967 
968 	rc = mapping->a_ops->writepage(&folio->page, &wbc);
969 
970 	if (rc != AOP_WRITEPAGE_ACTIVATE)
971 		/* unlocked. Relock */
972 		folio_lock(folio);
973 
974 	return (rc < 0) ? -EIO : -EAGAIN;
975 }
976 
977 /*
978  * Default handling if a filesystem does not provide a migration function.
979  */
980 static int fallback_migrate_folio(struct address_space *mapping,
981 		struct folio *dst, struct folio *src, enum migrate_mode mode)
982 {
983 	if (folio_test_dirty(src)) {
984 		/* Only writeback folios in full synchronous migration */
985 		switch (mode) {
986 		case MIGRATE_SYNC:
987 			break;
988 		default:
989 			return -EBUSY;
990 		}
991 		return writeout(mapping, src);
992 	}
993 
994 	/*
995 	 * Buffers may be managed in a filesystem specific way.
996 	 * We must have no buffers or drop them.
997 	 */
998 	if (!filemap_release_folio(src, GFP_KERNEL))
999 		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
1000 
1001 	return migrate_folio(mapping, dst, src, mode);
1002 }
1003 
1004 /*
1005  * Move a page to a newly allocated page
1006  * The page is locked and all ptes have been successfully removed.
1007  *
1008  * The new page will have replaced the old page if this function
1009  * is successful.
1010  *
1011  * Return value:
1012  *   < 0 - error code
1013  *  MIGRATEPAGE_SUCCESS - success
1014  */
1015 static int move_to_new_folio(struct folio *dst, struct folio *src,
1016 				enum migrate_mode mode)
1017 {
1018 	int rc = -EAGAIN;
1019 	bool is_lru = !__folio_test_movable(src);
1020 
1021 	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
1022 	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
1023 
1024 	if (likely(is_lru)) {
1025 		struct address_space *mapping = folio_mapping(src);
1026 
1027 		if (!mapping)
1028 			rc = migrate_folio(mapping, dst, src, mode);
1029 		else if (mapping_inaccessible(mapping))
1030 			rc = -EOPNOTSUPP;
1031 		else if (mapping->a_ops->migrate_folio)
1032 			/*
1033 			 * Most folios have a mapping and most filesystems
1034 			 * provide a migrate_folio callback. Anonymous folios
1035 			 * are part of swap space which also has its own
1036 			 * migrate_folio callback. This is the most common path
1037 			 * for page migration.
1038 			 */
1039 			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
1040 								mode);
1041 		else
1042 			rc = fallback_migrate_folio(mapping, dst, src, mode);
1043 	} else {
1044 		const struct movable_operations *mops;
1045 
1046 		/*
1047 		 * In case of non-lru page, it could be released after
1048 		 * isolation step. In that case, we shouldn't try migration.
1049 		 */
1050 		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1051 		if (!folio_test_movable(src)) {
1052 			rc = MIGRATEPAGE_SUCCESS;
1053 			folio_clear_isolated(src);
1054 			goto out;
1055 		}
1056 
1057 		mops = folio_movable_ops(src);
1058 		rc = mops->migrate_page(&dst->page, &src->page, mode);
1059 		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
1060 				!folio_test_isolated(src));
1061 	}
1062 
1063 	/*
1064 	 * When successful, old pagecache src->mapping must be cleared before
1065 	 * src is freed; but stats require that PageAnon be left as PageAnon.
1066 	 */
1067 	if (rc == MIGRATEPAGE_SUCCESS) {
1068 		if (__folio_test_movable(src)) {
1069 			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1070 
1071 			/*
1072 			 * We clear PG_movable under page_lock so any compactor
1073 			 * cannot try to migrate this page.
1074 			 */
1075 			folio_clear_isolated(src);
1076 		}
1077 
1078 		/*
1079 		 * Anonymous and movable src->mapping will be cleared by
1080 		 * free_pages_prepare so don't reset it here for keeping
1081 		 * the type to work PageAnon, for example.
1082 		 */
1083 		if (!folio_mapping_flags(src))
1084 			src->mapping = NULL;
1085 
1086 		if (likely(!folio_is_zone_device(dst)))
1087 			flush_dcache_folio(dst);
1088 	}
1089 out:
1090 	return rc;
1091 }
1092 
1093 /*
1094  * To record some information during migration, we use unused private
1095  * field of struct folio of the newly allocated destination folio.
1096  * This is safe because nobody is using it except us.
1097  */
1098 enum {
1099 	PAGE_WAS_MAPPED = BIT(0),
1100 	PAGE_WAS_MLOCKED = BIT(1),
1101 	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
1102 };
1103 
1104 static void __migrate_folio_record(struct folio *dst,
1105 				   int old_page_state,
1106 				   struct anon_vma *anon_vma)
1107 {
1108 	dst->private = (void *)anon_vma + old_page_state;
1109 }
1110 
1111 static void __migrate_folio_extract(struct folio *dst,
1112 				   int *old_page_state,
1113 				   struct anon_vma **anon_vmap)
1114 {
1115 	unsigned long private = (unsigned long)dst->private;
1116 
1117 	*anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
1118 	*old_page_state = private & PAGE_OLD_STATES;
1119 	dst->private = NULL;
1120 }
1121 
1122 /* Restore the source folio to the original state upon failure */
1123 static void migrate_folio_undo_src(struct folio *src,
1124 				   int page_was_mapped,
1125 				   struct anon_vma *anon_vma,
1126 				   bool locked,
1127 				   struct list_head *ret)
1128 {
1129 	if (page_was_mapped)
1130 		remove_migration_ptes(src, src, 0);
1131 	/* Drop an anon_vma reference if we took one */
1132 	if (anon_vma)
1133 		put_anon_vma(anon_vma);
1134 	if (locked)
1135 		folio_unlock(src);
1136 	if (ret)
1137 		list_move_tail(&src->lru, ret);
1138 }
1139 
1140 /* Restore the destination folio to the original state upon failure */
1141 static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1142 		free_folio_t put_new_folio, unsigned long private)
1143 {
1144 	if (locked)
1145 		folio_unlock(dst);
1146 	if (put_new_folio)
1147 		put_new_folio(dst, private);
1148 	else
1149 		folio_put(dst);
1150 }
1151 
1152 /* Cleanup src folio upon migration success */
1153 static void migrate_folio_done(struct folio *src,
1154 			       enum migrate_reason reason)
1155 {
1156 	/*
1157 	 * Compaction can migrate also non-LRU pages which are
1158 	 * not accounted to NR_ISOLATED_*. They can be recognized
1159 	 * as __folio_test_movable
1160 	 */
1161 	if (likely(!__folio_test_movable(src)) && reason != MR_DEMOTION)
1162 		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1163 				    folio_is_file_lru(src), -folio_nr_pages(src));
1164 
1165 	if (reason != MR_MEMORY_FAILURE)
1166 		/* We release the page in page_handle_poison. */
1167 		folio_put(src);
1168 }
1169 
1170 /* Obtain the lock on page, remove all ptes. */
1171 static int migrate_folio_unmap(new_folio_t get_new_folio,
1172 		free_folio_t put_new_folio, unsigned long private,
1173 		struct folio *src, struct folio **dstp, enum migrate_mode mode,
1174 		enum migrate_reason reason, struct list_head *ret)
1175 {
1176 	struct folio *dst;
1177 	int rc = -EAGAIN;
1178 	int old_page_state = 0;
1179 	struct anon_vma *anon_vma = NULL;
1180 	bool is_lru = data_race(!__folio_test_movable(src));
1181 	bool locked = false;
1182 	bool dst_locked = false;
1183 
1184 	if (folio_ref_count(src) == 1) {
1185 		/* Folio was freed from under us. So we are done. */
1186 		folio_clear_active(src);
1187 		folio_clear_unevictable(src);
1188 		/* free_pages_prepare() will clear PG_isolated. */
1189 		list_del(&src->lru);
1190 		migrate_folio_done(src, reason);
1191 		return MIGRATEPAGE_SUCCESS;
1192 	}
1193 
1194 	dst = get_new_folio(src, private);
1195 	if (!dst)
1196 		return -ENOMEM;
1197 	*dstp = dst;
1198 
1199 	dst->private = NULL;
1200 
1201 	if (!folio_trylock(src)) {
1202 		if (mode == MIGRATE_ASYNC)
1203 			goto out;
1204 
1205 		/*
1206 		 * It's not safe for direct compaction to call lock_page.
1207 		 * For example, during page readahead pages are added locked
1208 		 * to the LRU. Later, when the IO completes the pages are
1209 		 * marked uptodate and unlocked. However, the queueing
1210 		 * could be merging multiple pages for one bio (e.g.
1211 		 * mpage_readahead). If an allocation happens for the
1212 		 * second or third page, the process can end up locking
1213 		 * the same page twice and deadlocking. Rather than
1214 		 * trying to be clever about what pages can be locked,
1215 		 * avoid the use of lock_page for direct compaction
1216 		 * altogether.
1217 		 */
1218 		if (current->flags & PF_MEMALLOC)
1219 			goto out;
1220 
1221 		/*
1222 		 * In "light" mode, we can wait for transient locks (eg
1223 		 * inserting a page into the page table), but it's not
1224 		 * worth waiting for I/O.
1225 		 */
1226 		if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1227 			goto out;
1228 
1229 		folio_lock(src);
1230 	}
1231 	locked = true;
1232 	if (folio_test_mlocked(src))
1233 		old_page_state |= PAGE_WAS_MLOCKED;
1234 
1235 	if (folio_test_writeback(src)) {
1236 		/*
1237 		 * Only in the case of a full synchronous migration is it
1238 		 * necessary to wait for PageWriteback. In the async case,
1239 		 * the retry loop is too short and in the sync-light case,
1240 		 * the overhead of stalling is too much
1241 		 */
1242 		switch (mode) {
1243 		case MIGRATE_SYNC:
1244 			break;
1245 		default:
1246 			rc = -EBUSY;
1247 			goto out;
1248 		}
1249 		folio_wait_writeback(src);
1250 	}
1251 
1252 	/*
1253 	 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
1254 	 * we cannot notice that anon_vma is freed while we migrate a page.
1255 	 * This get_anon_vma() delays freeing anon_vma pointer until the end
1256 	 * of migration. File cache pages are no problem because of page_lock()
1257 	 * File Caches may use write_page() or lock_page() in migration, then,
1258 	 * just care Anon page here.
1259 	 *
1260 	 * Only folio_get_anon_vma() understands the subtleties of
1261 	 * getting a hold on an anon_vma from outside one of its mms.
1262 	 * But if we cannot get anon_vma, then we won't need it anyway,
1263 	 * because that implies that the anon page is no longer mapped
1264 	 * (and cannot be remapped so long as we hold the page lock).
1265 	 */
1266 	if (folio_test_anon(src) && !folio_test_ksm(src))
1267 		anon_vma = folio_get_anon_vma(src);
1268 
1269 	/*
1270 	 * Block others from accessing the new page when we get around to
1271 	 * establishing additional references. We are usually the only one
1272 	 * holding a reference to dst at this point. We used to have a BUG
1273 	 * here if folio_trylock(dst) fails, but would like to allow for
1274 	 * cases where there might be a race with the previous use of dst.
1275 	 * This is much like races on refcount of oldpage: just don't BUG().
1276 	 */
1277 	if (unlikely(!folio_trylock(dst)))
1278 		goto out;
1279 	dst_locked = true;
1280 
1281 	if (unlikely(!is_lru)) {
1282 		__migrate_folio_record(dst, old_page_state, anon_vma);
1283 		return MIGRATEPAGE_UNMAP;
1284 	}
1285 
1286 	/*
1287 	 * Corner case handling:
1288 	 * 1. When a new swap-cache page is read into, it is added to the LRU
1289 	 * and treated as swapcache but it has no rmap yet.
1290 	 * Calling try_to_unmap() against a src->mapping==NULL page will
1291 	 * trigger a BUG.  So handle it here.
1292 	 * 2. An orphaned page (see truncate_cleanup_page) might have
1293 	 * fs-private metadata. The page can be picked up due to memory
1294 	 * offlining.  Everywhere else except page reclaim, the page is
1295 	 * invisible to the vm, so the page can not be migrated.  So try to
1296 	 * free the metadata, so the page can be freed.
1297 	 */
1298 	if (!src->mapping) {
1299 		if (folio_test_private(src)) {
1300 			try_to_free_buffers(src);
1301 			goto out;
1302 		}
1303 	} else if (folio_mapped(src)) {
1304 		/* Establish migration ptes */
1305 		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1306 			       !folio_test_ksm(src) && !anon_vma, src);
1307 		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1308 		old_page_state |= PAGE_WAS_MAPPED;
1309 	}
1310 
1311 	if (!folio_mapped(src)) {
1312 		__migrate_folio_record(dst, old_page_state, anon_vma);
1313 		return MIGRATEPAGE_UNMAP;
1314 	}
1315 
1316 out:
1317 	/*
1318 	 * A folio that has not been unmapped will be restored to
1319 	 * right list unless we want to retry.
1320 	 */
1321 	if (rc == -EAGAIN)
1322 		ret = NULL;
1323 
1324 	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1325 			       anon_vma, locked, ret);
1326 	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1327 
1328 	return rc;
1329 }
1330 
1331 /* Migrate the folio to the newly allocated folio in dst. */
1332 static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1333 			      struct folio *src, struct folio *dst,
1334 			      enum migrate_mode mode, enum migrate_reason reason,
1335 			      struct list_head *ret)
1336 {
1337 	int rc;
1338 	int old_page_state = 0;
1339 	struct anon_vma *anon_vma = NULL;
1340 	bool is_lru = !__folio_test_movable(src);
1341 	struct list_head *prev;
1342 
1343 	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1344 	prev = dst->lru.prev;
1345 	list_del(&dst->lru);
1346 
1347 	rc = move_to_new_folio(dst, src, mode);
1348 	if (rc)
1349 		goto out;
1350 
1351 	if (unlikely(!is_lru))
1352 		goto out_unlock_both;
1353 
1354 	/*
1355 	 * When successful, push dst to LRU immediately: so that if it
1356 	 * turns out to be an mlocked page, remove_migration_ptes() will
1357 	 * automatically build up the correct dst->mlock_count for it.
1358 	 *
1359 	 * We would like to do something similar for the old page, when
1360 	 * unsuccessful, and other cases when a page has been temporarily
1361 	 * isolated from the unevictable LRU: but this case is the easiest.
1362 	 */
1363 	folio_add_lru(dst);
1364 	if (old_page_state & PAGE_WAS_MLOCKED)
1365 		lru_add_drain();
1366 
1367 	if (old_page_state & PAGE_WAS_MAPPED)
1368 		remove_migration_ptes(src, dst, 0);
1369 
1370 out_unlock_both:
1371 	folio_unlock(dst);
1372 	set_page_owner_migrate_reason(&dst->page, reason);
1373 	/*
1374 	 * If migration is successful, decrease refcount of dst,
1375 	 * which will not free the page because new page owner increased
1376 	 * refcounter.
1377 	 */
1378 	folio_put(dst);
1379 
1380 	/*
1381 	 * A folio that has been migrated has all references removed
1382 	 * and will be freed.
1383 	 */
1384 	list_del(&src->lru);
1385 	/* Drop an anon_vma reference if we took one */
1386 	if (anon_vma)
1387 		put_anon_vma(anon_vma);
1388 	folio_unlock(src);
1389 	migrate_folio_done(src, reason);
1390 
1391 	return rc;
1392 out:
1393 	/*
1394 	 * A folio that has not been migrated will be restored to
1395 	 * right list unless we want to retry.
1396 	 */
1397 	if (rc == -EAGAIN) {
1398 		list_add(&dst->lru, prev);
1399 		__migrate_folio_record(dst, old_page_state, anon_vma);
1400 		return rc;
1401 	}
1402 
1403 	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1404 			       anon_vma, true, ret);
1405 	migrate_folio_undo_dst(dst, true, put_new_folio, private);
1406 
1407 	return rc;
1408 }
1409 
1410 /*
1411  * Counterpart of unmap_and_move_page() for hugepage migration.
1412  *
1413  * This function doesn't wait the completion of hugepage I/O
1414  * because there is no race between I/O and migration for hugepage.
1415  * Note that currently hugepage I/O occurs only in direct I/O
1416  * where no lock is held and PG_writeback is irrelevant,
1417  * and writeback status of all subpages are counted in the reference
1418  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1419  * under direct I/O, the reference of the head page is 512 and a bit more.)
1420  * This means that when we try to migrate hugepage whose subpages are
1421  * doing direct I/O, some references remain after try_to_unmap() and
1422  * hugepage migration fails without data corruption.
1423  *
1424  * There is also no race when direct I/O is issued on the page under migration,
1425  * because then pte is replaced with migration swap entry and direct I/O code
1426  * will wait in the page fault for migration to complete.
1427  */
1428 static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1429 		free_folio_t put_new_folio, unsigned long private,
1430 		struct folio *src, int force, enum migrate_mode mode,
1431 		int reason, struct list_head *ret)
1432 {
1433 	struct folio *dst;
1434 	int rc = -EAGAIN;
1435 	int page_was_mapped = 0;
1436 	struct anon_vma *anon_vma = NULL;
1437 	struct address_space *mapping = NULL;
1438 
1439 	if (folio_ref_count(src) == 1) {
1440 		/* page was freed from under us. So we are done. */
1441 		folio_putback_hugetlb(src);
1442 		return MIGRATEPAGE_SUCCESS;
1443 	}
1444 
1445 	dst = get_new_folio(src, private);
1446 	if (!dst)
1447 		return -ENOMEM;
1448 
1449 	if (!folio_trylock(src)) {
1450 		if (!force)
1451 			goto out;
1452 		switch (mode) {
1453 		case MIGRATE_SYNC:
1454 			break;
1455 		default:
1456 			goto out;
1457 		}
1458 		folio_lock(src);
1459 	}
1460 
1461 	/*
1462 	 * Check for pages which are in the process of being freed.  Without
1463 	 * folio_mapping() set, hugetlbfs specific move page routine will not
1464 	 * be called and we could leak usage counts for subpools.
1465 	 */
1466 	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1467 		rc = -EBUSY;
1468 		goto out_unlock;
1469 	}
1470 
1471 	if (folio_test_anon(src))
1472 		anon_vma = folio_get_anon_vma(src);
1473 
1474 	if (unlikely(!folio_trylock(dst)))
1475 		goto put_anon;
1476 
1477 	if (folio_mapped(src)) {
1478 		enum ttu_flags ttu = 0;
1479 
1480 		if (!folio_test_anon(src)) {
1481 			/*
1482 			 * In shared mappings, try_to_unmap could potentially
1483 			 * call huge_pmd_unshare.  Because of this, take
1484 			 * semaphore in write mode here and set TTU_RMAP_LOCKED
1485 			 * to let lower levels know we have taken the lock.
1486 			 */
1487 			mapping = hugetlb_folio_mapping_lock_write(src);
1488 			if (unlikely(!mapping))
1489 				goto unlock_put_anon;
1490 
1491 			ttu = TTU_RMAP_LOCKED;
1492 		}
1493 
1494 		try_to_migrate(src, ttu);
1495 		page_was_mapped = 1;
1496 
1497 		if (ttu & TTU_RMAP_LOCKED)
1498 			i_mmap_unlock_write(mapping);
1499 	}
1500 
1501 	if (!folio_mapped(src))
1502 		rc = move_to_new_folio(dst, src, mode);
1503 
1504 	if (page_was_mapped)
1505 		remove_migration_ptes(src,
1506 			rc == MIGRATEPAGE_SUCCESS ? dst : src, 0);
1507 
1508 unlock_put_anon:
1509 	folio_unlock(dst);
1510 
1511 put_anon:
1512 	if (anon_vma)
1513 		put_anon_vma(anon_vma);
1514 
1515 	if (rc == MIGRATEPAGE_SUCCESS) {
1516 		move_hugetlb_state(src, dst, reason);
1517 		put_new_folio = NULL;
1518 	}
1519 
1520 out_unlock:
1521 	folio_unlock(src);
1522 out:
1523 	if (rc == MIGRATEPAGE_SUCCESS)
1524 		folio_putback_hugetlb(src);
1525 	else if (rc != -EAGAIN)
1526 		list_move_tail(&src->lru, ret);
1527 
1528 	/*
1529 	 * If migration was not successful and there's a freeing callback,
1530 	 * return the folio to that special allocator. Otherwise, simply drop
1531 	 * our additional reference.
1532 	 */
1533 	if (put_new_folio)
1534 		put_new_folio(dst, private);
1535 	else
1536 		folio_put(dst);
1537 
1538 	return rc;
1539 }
1540 
1541 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios,
1542 				  enum migrate_mode mode)
1543 {
1544 	int rc;
1545 
1546 	if (mode == MIGRATE_ASYNC) {
1547 		if (!folio_trylock(folio))
1548 			return -EAGAIN;
1549 	} else {
1550 		folio_lock(folio);
1551 	}
1552 	rc = split_folio_to_list(folio, split_folios);
1553 	folio_unlock(folio);
1554 	if (!rc)
1555 		list_move_tail(&folio->lru, split_folios);
1556 
1557 	return rc;
1558 }
1559 
1560 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1561 #define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
1562 #else
1563 #define NR_MAX_BATCHED_MIGRATION	512
1564 #endif
1565 #define NR_MAX_MIGRATE_PAGES_RETRY	10
1566 #define NR_MAX_MIGRATE_ASYNC_RETRY	3
1567 #define NR_MAX_MIGRATE_SYNC_RETRY					\
1568 	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
1569 
1570 struct migrate_pages_stats {
1571 	int nr_succeeded;	/* Normal and large folios migrated successfully, in
1572 				   units of base pages */
1573 	int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
1574 				   units of base pages.  Untried folios aren't counted */
1575 	int nr_thp_succeeded;	/* THP migrated successfully */
1576 	int nr_thp_failed;	/* THP failed to be migrated */
1577 	int nr_thp_split;	/* THP split before migrating */
1578 	int nr_split;	/* Large folio (include THP) split before migrating */
1579 };
1580 
1581 /*
1582  * Returns the number of hugetlb folios that were not migrated, or an error code
1583  * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
1584  * any more because the list has become empty or no retryable hugetlb folios
1585  * exist any more. It is caller's responsibility to call putback_movable_pages()
1586  * only if ret != 0.
1587  */
1588 static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1589 			    free_folio_t put_new_folio, unsigned long private,
1590 			    enum migrate_mode mode, int reason,
1591 			    struct migrate_pages_stats *stats,
1592 			    struct list_head *ret_folios)
1593 {
1594 	int retry = 1;
1595 	int nr_failed = 0;
1596 	int nr_retry_pages = 0;
1597 	int pass = 0;
1598 	struct folio *folio, *folio2;
1599 	int rc, nr_pages;
1600 
1601 	for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1602 		retry = 0;
1603 		nr_retry_pages = 0;
1604 
1605 		list_for_each_entry_safe(folio, folio2, from, lru) {
1606 			if (!folio_test_hugetlb(folio))
1607 				continue;
1608 
1609 			nr_pages = folio_nr_pages(folio);
1610 
1611 			cond_resched();
1612 
1613 			/*
1614 			 * Migratability of hugepages depends on architectures and
1615 			 * their size.  This check is necessary because some callers
1616 			 * of hugepage migration like soft offline and memory
1617 			 * hotremove don't walk through page tables or check whether
1618 			 * the hugepage is pmd-based or not before kicking migration.
1619 			 */
1620 			if (!hugepage_migration_supported(folio_hstate(folio))) {
1621 				nr_failed++;
1622 				stats->nr_failed_pages += nr_pages;
1623 				list_move_tail(&folio->lru, ret_folios);
1624 				continue;
1625 			}
1626 
1627 			rc = unmap_and_move_huge_page(get_new_folio,
1628 						      put_new_folio, private,
1629 						      folio, pass > 2, mode,
1630 						      reason, ret_folios);
1631 			/*
1632 			 * The rules are:
1633 			 *	Success: hugetlb folio will be put back
1634 			 *	-EAGAIN: stay on the from list
1635 			 *	-ENOMEM: stay on the from list
1636 			 *	Other errno: put on ret_folios list
1637 			 */
1638 			switch(rc) {
1639 			case -ENOMEM:
1640 				/*
1641 				 * When memory is low, don't bother to try to migrate
1642 				 * other folios, just exit.
1643 				 */
1644 				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1645 				return -ENOMEM;
1646 			case -EAGAIN:
1647 				retry++;
1648 				nr_retry_pages += nr_pages;
1649 				break;
1650 			case MIGRATEPAGE_SUCCESS:
1651 				stats->nr_succeeded += nr_pages;
1652 				break;
1653 			default:
1654 				/*
1655 				 * Permanent failure (-EBUSY, etc.):
1656 				 * unlike -EAGAIN case, the failed folio is
1657 				 * removed from migration folio list and not
1658 				 * retried in the next outer loop.
1659 				 */
1660 				nr_failed++;
1661 				stats->nr_failed_pages += nr_pages;
1662 				break;
1663 			}
1664 		}
1665 	}
1666 	/*
1667 	 * nr_failed is number of hugetlb folios failed to be migrated.  After
1668 	 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1669 	 * folios as failed.
1670 	 */
1671 	nr_failed += retry;
1672 	stats->nr_failed_pages += nr_retry_pages;
1673 
1674 	return nr_failed;
1675 }
1676 
1677 static void migrate_folios_move(struct list_head *src_folios,
1678 		struct list_head *dst_folios,
1679 		free_folio_t put_new_folio, unsigned long private,
1680 		enum migrate_mode mode, int reason,
1681 		struct list_head *ret_folios,
1682 		struct migrate_pages_stats *stats,
1683 		int *retry, int *thp_retry, int *nr_failed,
1684 		int *nr_retry_pages)
1685 {
1686 	struct folio *folio, *folio2, *dst, *dst2;
1687 	bool is_thp;
1688 	int nr_pages;
1689 	int rc;
1690 
1691 	dst = list_first_entry(dst_folios, struct folio, lru);
1692 	dst2 = list_next_entry(dst, lru);
1693 	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
1694 		is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1695 		nr_pages = folio_nr_pages(folio);
1696 
1697 		cond_resched();
1698 
1699 		rc = migrate_folio_move(put_new_folio, private,
1700 				folio, dst, mode,
1701 				reason, ret_folios);
1702 		/*
1703 		 * The rules are:
1704 		 *	Success: folio will be freed
1705 		 *	-EAGAIN: stay on the unmap_folios list
1706 		 *	Other errno: put on ret_folios list
1707 		 */
1708 		switch (rc) {
1709 		case -EAGAIN:
1710 			*retry += 1;
1711 			*thp_retry += is_thp;
1712 			*nr_retry_pages += nr_pages;
1713 			break;
1714 		case MIGRATEPAGE_SUCCESS:
1715 			stats->nr_succeeded += nr_pages;
1716 			stats->nr_thp_succeeded += is_thp;
1717 			break;
1718 		default:
1719 			*nr_failed += 1;
1720 			stats->nr_thp_failed += is_thp;
1721 			stats->nr_failed_pages += nr_pages;
1722 			break;
1723 		}
1724 		dst = dst2;
1725 		dst2 = list_next_entry(dst, lru);
1726 	}
1727 }
1728 
1729 static void migrate_folios_undo(struct list_head *src_folios,
1730 		struct list_head *dst_folios,
1731 		free_folio_t put_new_folio, unsigned long private,
1732 		struct list_head *ret_folios)
1733 {
1734 	struct folio *folio, *folio2, *dst, *dst2;
1735 
1736 	dst = list_first_entry(dst_folios, struct folio, lru);
1737 	dst2 = list_next_entry(dst, lru);
1738 	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
1739 		int old_page_state = 0;
1740 		struct anon_vma *anon_vma = NULL;
1741 
1742 		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1743 		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1744 				anon_vma, true, ret_folios);
1745 		list_del(&dst->lru);
1746 		migrate_folio_undo_dst(dst, true, put_new_folio, private);
1747 		dst = dst2;
1748 		dst2 = list_next_entry(dst, lru);
1749 	}
1750 }
1751 
1752 /*
1753  * migrate_pages_batch() first unmaps folios in the from list as many as
1754  * possible, then move the unmapped folios.
1755  *
1756  * We only batch migration if mode == MIGRATE_ASYNC to avoid to wait a
1757  * lock or bit when we have locked more than one folio.  Which may cause
1758  * deadlock (e.g., for loop device).  So, if mode != MIGRATE_ASYNC, the
1759  * length of the from list must be <= 1.
1760  */
1761 static int migrate_pages_batch(struct list_head *from,
1762 		new_folio_t get_new_folio, free_folio_t put_new_folio,
1763 		unsigned long private, enum migrate_mode mode, int reason,
1764 		struct list_head *ret_folios, struct list_head *split_folios,
1765 		struct migrate_pages_stats *stats, int nr_pass)
1766 {
1767 	int retry = 1;
1768 	int thp_retry = 1;
1769 	int nr_failed = 0;
1770 	int nr_retry_pages = 0;
1771 	int pass = 0;
1772 	bool is_thp = false;
1773 	bool is_large = false;
1774 	struct folio *folio, *folio2, *dst = NULL;
1775 	int rc, rc_saved = 0, nr_pages;
1776 	LIST_HEAD(unmap_folios);
1777 	LIST_HEAD(dst_folios);
1778 	bool nosplit = (reason == MR_NUMA_MISPLACED);
1779 
1780 	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1781 			!list_empty(from) && !list_is_singular(from));
1782 
1783 	for (pass = 0; pass < nr_pass && retry; pass++) {
1784 		retry = 0;
1785 		thp_retry = 0;
1786 		nr_retry_pages = 0;
1787 
1788 		list_for_each_entry_safe(folio, folio2, from, lru) {
1789 			is_large = folio_test_large(folio);
1790 			is_thp = folio_test_pmd_mappable(folio);
1791 			nr_pages = folio_nr_pages(folio);
1792 
1793 			cond_resched();
1794 
1795 			/*
1796 			 * The rare folio on the deferred split list should
1797 			 * be split now. It should not count as a failure:
1798 			 * but increment nr_failed because, without doing so,
1799 			 * migrate_pages() may report success with (split but
1800 			 * unmigrated) pages still on its fromlist; whereas it
1801 			 * always reports success when its fromlist is empty.
1802 			 * stats->nr_thp_failed should be increased too,
1803 			 * otherwise stats inconsistency will happen when
1804 			 * migrate_pages_batch is called via migrate_pages()
1805 			 * with MIGRATE_SYNC and MIGRATE_ASYNC.
1806 			 *
1807 			 * Only check it without removing it from the list.
1808 			 * Since the folio can be on deferred_split_scan()
1809 			 * local list and removing it can cause the local list
1810 			 * corruption. Folio split process below can handle it
1811 			 * with the help of folio_ref_freeze().
1812 			 *
1813 			 * nr_pages > 2 is needed to avoid checking order-1
1814 			 * page cache folios. They exist, in contrast to
1815 			 * non-existent order-1 anonymous folios, and do not
1816 			 * use _deferred_list.
1817 			 */
1818 			if (nr_pages > 2 &&
1819 			   !list_empty(&folio->_deferred_list) &&
1820 			   folio_test_partially_mapped(folio)) {
1821 				if (!try_split_folio(folio, split_folios, mode)) {
1822 					nr_failed++;
1823 					stats->nr_thp_failed += is_thp;
1824 					stats->nr_thp_split += is_thp;
1825 					stats->nr_split++;
1826 					continue;
1827 				}
1828 			}
1829 
1830 			/*
1831 			 * Large folio migration might be unsupported or
1832 			 * the allocation might be failed so we should retry
1833 			 * on the same folio with the large folio split
1834 			 * to normal folios.
1835 			 *
1836 			 * Split folios are put in split_folios, and
1837 			 * we will migrate them after the rest of the
1838 			 * list is processed.
1839 			 */
1840 			if (!thp_migration_supported() && is_thp) {
1841 				nr_failed++;
1842 				stats->nr_thp_failed++;
1843 				if (!try_split_folio(folio, split_folios, mode)) {
1844 					stats->nr_thp_split++;
1845 					stats->nr_split++;
1846 					continue;
1847 				}
1848 				stats->nr_failed_pages += nr_pages;
1849 				list_move_tail(&folio->lru, ret_folios);
1850 				continue;
1851 			}
1852 
1853 			rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1854 					private, folio, &dst, mode, reason,
1855 					ret_folios);
1856 			/*
1857 			 * The rules are:
1858 			 *	Success: folio will be freed
1859 			 *	Unmap: folio will be put on unmap_folios list,
1860 			 *	       dst folio put on dst_folios list
1861 			 *	-EAGAIN: stay on the from list
1862 			 *	-ENOMEM: stay on the from list
1863 			 *	Other errno: put on ret_folios list
1864 			 */
1865 			switch(rc) {
1866 			case -ENOMEM:
1867 				/*
1868 				 * When memory is low, don't bother to try to migrate
1869 				 * other folios, move unmapped folios, then exit.
1870 				 */
1871 				nr_failed++;
1872 				stats->nr_thp_failed += is_thp;
1873 				/* Large folio NUMA faulting doesn't split to retry. */
1874 				if (is_large && !nosplit) {
1875 					int ret = try_split_folio(folio, split_folios, mode);
1876 
1877 					if (!ret) {
1878 						stats->nr_thp_split += is_thp;
1879 						stats->nr_split++;
1880 						break;
1881 					} else if (reason == MR_LONGTERM_PIN &&
1882 						   ret == -EAGAIN) {
1883 						/*
1884 						 * Try again to split large folio to
1885 						 * mitigate the failure of longterm pinning.
1886 						 */
1887 						retry++;
1888 						thp_retry += is_thp;
1889 						nr_retry_pages += nr_pages;
1890 						/* Undo duplicated failure counting. */
1891 						nr_failed--;
1892 						stats->nr_thp_failed -= is_thp;
1893 						break;
1894 					}
1895 				}
1896 
1897 				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1898 				/* nr_failed isn't updated for not used */
1899 				stats->nr_thp_failed += thp_retry;
1900 				rc_saved = rc;
1901 				if (list_empty(&unmap_folios))
1902 					goto out;
1903 				else
1904 					goto move;
1905 			case -EAGAIN:
1906 				retry++;
1907 				thp_retry += is_thp;
1908 				nr_retry_pages += nr_pages;
1909 				break;
1910 			case MIGRATEPAGE_SUCCESS:
1911 				stats->nr_succeeded += nr_pages;
1912 				stats->nr_thp_succeeded += is_thp;
1913 				break;
1914 			case MIGRATEPAGE_UNMAP:
1915 				list_move_tail(&folio->lru, &unmap_folios);
1916 				list_add_tail(&dst->lru, &dst_folios);
1917 				break;
1918 			default:
1919 				/*
1920 				 * Permanent failure (-EBUSY, etc.):
1921 				 * unlike -EAGAIN case, the failed folio is
1922 				 * removed from migration folio list and not
1923 				 * retried in the next outer loop.
1924 				 */
1925 				nr_failed++;
1926 				stats->nr_thp_failed += is_thp;
1927 				stats->nr_failed_pages += nr_pages;
1928 				break;
1929 			}
1930 		}
1931 	}
1932 	nr_failed += retry;
1933 	stats->nr_thp_failed += thp_retry;
1934 	stats->nr_failed_pages += nr_retry_pages;
1935 move:
1936 	/* Flush TLBs for all unmapped folios */
1937 	try_to_unmap_flush();
1938 
1939 	retry = 1;
1940 	for (pass = 0; pass < nr_pass && retry; pass++) {
1941 		retry = 0;
1942 		thp_retry = 0;
1943 		nr_retry_pages = 0;
1944 
1945 		/* Move the unmapped folios */
1946 		migrate_folios_move(&unmap_folios, &dst_folios,
1947 				put_new_folio, private, mode, reason,
1948 				ret_folios, stats, &retry, &thp_retry,
1949 				&nr_failed, &nr_retry_pages);
1950 	}
1951 	nr_failed += retry;
1952 	stats->nr_thp_failed += thp_retry;
1953 	stats->nr_failed_pages += nr_retry_pages;
1954 
1955 	rc = rc_saved ? : nr_failed;
1956 out:
1957 	/* Cleanup remaining folios */
1958 	migrate_folios_undo(&unmap_folios, &dst_folios,
1959 			put_new_folio, private, ret_folios);
1960 
1961 	return rc;
1962 }
1963 
1964 static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1965 		free_folio_t put_new_folio, unsigned long private,
1966 		enum migrate_mode mode, int reason,
1967 		struct list_head *ret_folios, struct list_head *split_folios,
1968 		struct migrate_pages_stats *stats)
1969 {
1970 	int rc, nr_failed = 0;
1971 	LIST_HEAD(folios);
1972 	struct migrate_pages_stats astats;
1973 
1974 	memset(&astats, 0, sizeof(astats));
1975 	/* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
1976 	rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
1977 				 reason, &folios, split_folios, &astats,
1978 				 NR_MAX_MIGRATE_ASYNC_RETRY);
1979 	stats->nr_succeeded += astats.nr_succeeded;
1980 	stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1981 	stats->nr_thp_split += astats.nr_thp_split;
1982 	stats->nr_split += astats.nr_split;
1983 	if (rc < 0) {
1984 		stats->nr_failed_pages += astats.nr_failed_pages;
1985 		stats->nr_thp_failed += astats.nr_thp_failed;
1986 		list_splice_tail(&folios, ret_folios);
1987 		return rc;
1988 	}
1989 	stats->nr_thp_failed += astats.nr_thp_split;
1990 	/*
1991 	 * Do not count rc, as pages will be retried below.
1992 	 * Count nr_split only, since it includes nr_thp_split.
1993 	 */
1994 	nr_failed += astats.nr_split;
1995 	/*
1996 	 * Fall back to migrate all failed folios one by one synchronously. All
1997 	 * failed folios except split THPs will be retried, so their failure
1998 	 * isn't counted
1999 	 */
2000 	list_splice_tail_init(&folios, from);
2001 	while (!list_empty(from)) {
2002 		list_move(from->next, &folios);
2003 		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2004 					 private, mode, reason, ret_folios,
2005 					 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
2006 		list_splice_tail_init(&folios, ret_folios);
2007 		if (rc < 0)
2008 			return rc;
2009 		nr_failed += rc;
2010 	}
2011 
2012 	return nr_failed;
2013 }
2014 
2015 /*
2016  * migrate_pages - migrate the folios specified in a list, to the free folios
2017  *		   supplied as the target for the page migration
2018  *
2019  * @from:		The list of folios to be migrated.
2020  * @get_new_folio:	The function used to allocate free folios to be used
2021  *			as the target of the folio migration.
2022  * @put_new_folio:	The function used to free target folios if migration
2023  *			fails, or NULL if no special handling is necessary.
2024  * @private:		Private data to be passed on to get_new_folio()
2025  * @mode:		The migration mode that specifies the constraints for
2026  *			folio migration, if any.
2027  * @reason:		The reason for folio migration.
2028  * @ret_succeeded:	Set to the number of folios migrated successfully if
2029  *			the caller passes a non-NULL pointer.
2030  *
2031  * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
2032  * are movable any more because the list has become empty or no retryable folios
2033  * exist any more. It is caller's responsibility to call putback_movable_pages()
2034  * only if ret != 0.
2035  *
2036  * Returns the number of {normal folio, large folio, hugetlb} that were not
2037  * migrated, or an error code. The number of large folio splits will be
2038  * considered as the number of non-migrated large folio, no matter how many
2039  * split folios of the large folio are migrated successfully.
2040  */
2041 int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
2042 		free_folio_t put_new_folio, unsigned long private,
2043 		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
2044 {
2045 	int rc, rc_gather;
2046 	int nr_pages;
2047 	struct folio *folio, *folio2;
2048 	LIST_HEAD(folios);
2049 	LIST_HEAD(ret_folios);
2050 	LIST_HEAD(split_folios);
2051 	struct migrate_pages_stats stats;
2052 
2053 	trace_mm_migrate_pages_start(mode, reason);
2054 
2055 	memset(&stats, 0, sizeof(stats));
2056 
2057 	rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
2058 				     mode, reason, &stats, &ret_folios);
2059 	if (rc_gather < 0)
2060 		goto out;
2061 
2062 again:
2063 	nr_pages = 0;
2064 	list_for_each_entry_safe(folio, folio2, from, lru) {
2065 		/* Retried hugetlb folios will be kept in list  */
2066 		if (folio_test_hugetlb(folio)) {
2067 			list_move_tail(&folio->lru, &ret_folios);
2068 			continue;
2069 		}
2070 
2071 		nr_pages += folio_nr_pages(folio);
2072 		if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2073 			break;
2074 	}
2075 	if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2076 		list_cut_before(&folios, from, &folio2->lru);
2077 	else
2078 		list_splice_init(from, &folios);
2079 	if (mode == MIGRATE_ASYNC)
2080 		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2081 				private, mode, reason, &ret_folios,
2082 				&split_folios, &stats,
2083 				NR_MAX_MIGRATE_PAGES_RETRY);
2084 	else
2085 		rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
2086 				private, mode, reason, &ret_folios,
2087 				&split_folios, &stats);
2088 	list_splice_tail_init(&folios, &ret_folios);
2089 	if (rc < 0) {
2090 		rc_gather = rc;
2091 		list_splice_tail(&split_folios, &ret_folios);
2092 		goto out;
2093 	}
2094 	if (!list_empty(&split_folios)) {
2095 		/*
2096 		 * Failure isn't counted since all split folios of a large folio
2097 		 * is counted as 1 failure already.  And, we only try to migrate
2098 		 * with minimal effort, force MIGRATE_ASYNC mode and retry once.
2099 		 */
2100 		migrate_pages_batch(&split_folios, get_new_folio,
2101 				put_new_folio, private, MIGRATE_ASYNC, reason,
2102 				&ret_folios, NULL, &stats, 1);
2103 		list_splice_tail_init(&split_folios, &ret_folios);
2104 	}
2105 	rc_gather += rc;
2106 	if (!list_empty(from))
2107 		goto again;
2108 out:
2109 	/*
2110 	 * Put the permanent failure folio back to migration list, they
2111 	 * will be put back to the right list by the caller.
2112 	 */
2113 	list_splice(&ret_folios, from);
2114 
2115 	/*
2116 	 * Return 0 in case all split folios of fail-to-migrate large folios
2117 	 * are migrated successfully.
2118 	 */
2119 	if (list_empty(from))
2120 		rc_gather = 0;
2121 
2122 	count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
2123 	count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
2124 	count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
2125 	count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
2126 	count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
2127 	trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
2128 			       stats.nr_thp_succeeded, stats.nr_thp_failed,
2129 			       stats.nr_thp_split, stats.nr_split, mode,
2130 			       reason);
2131 
2132 	if (ret_succeeded)
2133 		*ret_succeeded = stats.nr_succeeded;
2134 
2135 	return rc_gather;
2136 }
2137 
2138 struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2139 {
2140 	struct migration_target_control *mtc;
2141 	gfp_t gfp_mask;
2142 	unsigned int order = 0;
2143 	int nid;
2144 	int zidx;
2145 
2146 	mtc = (struct migration_target_control *)private;
2147 	gfp_mask = mtc->gfp_mask;
2148 	nid = mtc->nid;
2149 	if (nid == NUMA_NO_NODE)
2150 		nid = folio_nid(src);
2151 
2152 	if (folio_test_hugetlb(src)) {
2153 		struct hstate *h = folio_hstate(src);
2154 
2155 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2156 		return alloc_hugetlb_folio_nodemask(h, nid,
2157 						mtc->nmask, gfp_mask,
2158 						htlb_allow_alloc_fallback(mtc->reason));
2159 	}
2160 
2161 	if (folio_test_large(src)) {
2162 		/*
2163 		 * clear __GFP_RECLAIM to make the migration callback
2164 		 * consistent with regular THP allocations.
2165 		 */
2166 		gfp_mask &= ~__GFP_RECLAIM;
2167 		gfp_mask |= GFP_TRANSHUGE;
2168 		order = folio_order(src);
2169 	}
2170 	zidx = zone_idx(folio_zone(src));
2171 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2172 		gfp_mask |= __GFP_HIGHMEM;
2173 
2174 	return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2175 }
2176 
2177 #ifdef CONFIG_NUMA
2178 
2179 static int store_status(int __user *status, int start, int value, int nr)
2180 {
2181 	while (nr-- > 0) {
2182 		if (put_user(value, status + start))
2183 			return -EFAULT;
2184 		start++;
2185 	}
2186 
2187 	return 0;
2188 }
2189 
2190 static int do_move_pages_to_node(struct list_head *pagelist, int node)
2191 {
2192 	int err;
2193 	struct migration_target_control mtc = {
2194 		.nid = node,
2195 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2196 		.reason = MR_SYSCALL,
2197 	};
2198 
2199 	err = migrate_pages(pagelist, alloc_migration_target, NULL,
2200 		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2201 	if (err)
2202 		putback_movable_pages(pagelist);
2203 	return err;
2204 }
2205 
2206 static int __add_folio_for_migration(struct folio *folio, int node,
2207 		struct list_head *pagelist, bool migrate_all)
2208 {
2209 	if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2210 		return -EFAULT;
2211 
2212 	if (folio_is_zone_device(folio))
2213 		return -ENOENT;
2214 
2215 	if (folio_nid(folio) == node)
2216 		return 0;
2217 
2218 	if (folio_maybe_mapped_shared(folio) && !migrate_all)
2219 		return -EACCES;
2220 
2221 	if (folio_test_hugetlb(folio)) {
2222 		if (folio_isolate_hugetlb(folio, pagelist))
2223 			return 1;
2224 	} else if (folio_isolate_lru(folio)) {
2225 		list_add_tail(&folio->lru, pagelist);
2226 		node_stat_mod_folio(folio,
2227 			NR_ISOLATED_ANON + folio_is_file_lru(folio),
2228 			folio_nr_pages(folio));
2229 		return 1;
2230 	}
2231 	return -EBUSY;
2232 }
2233 
2234 /*
2235  * Resolves the given address to a struct folio, isolates it from the LRU and
2236  * puts it to the given pagelist.
2237  * Returns:
2238  *     errno - if the folio cannot be found/isolated
2239  *     0 - when it doesn't have to be migrated because it is already on the
2240  *         target node
2241  *     1 - when it has been queued
2242  */
2243 static int add_folio_for_migration(struct mm_struct *mm, const void __user *p,
2244 		int node, struct list_head *pagelist, bool migrate_all)
2245 {
2246 	struct vm_area_struct *vma;
2247 	struct folio_walk fw;
2248 	struct folio *folio;
2249 	unsigned long addr;
2250 	int err = -EFAULT;
2251 
2252 	mmap_read_lock(mm);
2253 	addr = (unsigned long)untagged_addr_remote(mm, p);
2254 
2255 	vma = vma_lookup(mm, addr);
2256 	if (vma && vma_migratable(vma)) {
2257 		folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2258 		if (folio) {
2259 			err = __add_folio_for_migration(folio, node, pagelist,
2260 							migrate_all);
2261 			folio_walk_end(&fw, vma);
2262 		} else {
2263 			err = -ENOENT;
2264 		}
2265 	}
2266 	mmap_read_unlock(mm);
2267 	return err;
2268 }
2269 
2270 static int move_pages_and_store_status(int node,
2271 		struct list_head *pagelist, int __user *status,
2272 		int start, int i, unsigned long nr_pages)
2273 {
2274 	int err;
2275 
2276 	if (list_empty(pagelist))
2277 		return 0;
2278 
2279 	err = do_move_pages_to_node(pagelist, node);
2280 	if (err) {
2281 		/*
2282 		 * Positive err means the number of failed
2283 		 * pages to migrate.  Since we are going to
2284 		 * abort and return the number of non-migrated
2285 		 * pages, so need to include the rest of the
2286 		 * nr_pages that have not been attempted as
2287 		 * well.
2288 		 */
2289 		if (err > 0)
2290 			err += nr_pages - i;
2291 		return err;
2292 	}
2293 	return store_status(status, start, node, i - start);
2294 }
2295 
2296 /*
2297  * Migrate an array of page address onto an array of nodes and fill
2298  * the corresponding array of status.
2299  */
2300 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2301 			 unsigned long nr_pages,
2302 			 const void __user * __user *pages,
2303 			 const int __user *nodes,
2304 			 int __user *status, int flags)
2305 {
2306 	compat_uptr_t __user *compat_pages = (void __user *)pages;
2307 	int current_node = NUMA_NO_NODE;
2308 	LIST_HEAD(pagelist);
2309 	int start, i;
2310 	int err = 0, err1;
2311 
2312 	lru_cache_disable();
2313 
2314 	for (i = start = 0; i < nr_pages; i++) {
2315 		const void __user *p;
2316 		int node;
2317 
2318 		err = -EFAULT;
2319 		if (in_compat_syscall()) {
2320 			compat_uptr_t cp;
2321 
2322 			if (get_user(cp, compat_pages + i))
2323 				goto out_flush;
2324 
2325 			p = compat_ptr(cp);
2326 		} else {
2327 			if (get_user(p, pages + i))
2328 				goto out_flush;
2329 		}
2330 		if (get_user(node, nodes + i))
2331 			goto out_flush;
2332 
2333 		err = -ENODEV;
2334 		if (node < 0 || node >= MAX_NUMNODES)
2335 			goto out_flush;
2336 		if (!node_state(node, N_MEMORY))
2337 			goto out_flush;
2338 
2339 		err = -EACCES;
2340 		if (!node_isset(node, task_nodes))
2341 			goto out_flush;
2342 
2343 		if (current_node == NUMA_NO_NODE) {
2344 			current_node = node;
2345 			start = i;
2346 		} else if (node != current_node) {
2347 			err = move_pages_and_store_status(current_node,
2348 					&pagelist, status, start, i, nr_pages);
2349 			if (err)
2350 				goto out;
2351 			start = i;
2352 			current_node = node;
2353 		}
2354 
2355 		/*
2356 		 * Errors in the page lookup or isolation are not fatal and we simply
2357 		 * report them via status
2358 		 */
2359 		err = add_folio_for_migration(mm, p, current_node, &pagelist,
2360 					      flags & MPOL_MF_MOVE_ALL);
2361 
2362 		if (err > 0) {
2363 			/* The page is successfully queued for migration */
2364 			continue;
2365 		}
2366 
2367 		/*
2368 		 * The move_pages() man page does not have an -EEXIST choice, so
2369 		 * use -EFAULT instead.
2370 		 */
2371 		if (err == -EEXIST)
2372 			err = -EFAULT;
2373 
2374 		/*
2375 		 * If the page is already on the target node (!err), store the
2376 		 * node, otherwise, store the err.
2377 		 */
2378 		err = store_status(status, i, err ? : current_node, 1);
2379 		if (err)
2380 			goto out_flush;
2381 
2382 		err = move_pages_and_store_status(current_node, &pagelist,
2383 				status, start, i, nr_pages);
2384 		if (err) {
2385 			/* We have accounted for page i */
2386 			if (err > 0)
2387 				err--;
2388 			goto out;
2389 		}
2390 		current_node = NUMA_NO_NODE;
2391 	}
2392 out_flush:
2393 	/* Make sure we do not overwrite the existing error */
2394 	err1 = move_pages_and_store_status(current_node, &pagelist,
2395 				status, start, i, nr_pages);
2396 	if (err >= 0)
2397 		err = err1;
2398 out:
2399 	lru_cache_enable();
2400 	return err;
2401 }
2402 
2403 /*
2404  * Determine the nodes of an array of pages and store it in an array of status.
2405  */
2406 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2407 				const void __user **pages, int *status)
2408 {
2409 	unsigned long i;
2410 
2411 	mmap_read_lock(mm);
2412 
2413 	for (i = 0; i < nr_pages; i++) {
2414 		unsigned long addr = (unsigned long)(*pages);
2415 		struct vm_area_struct *vma;
2416 		struct folio_walk fw;
2417 		struct folio *folio;
2418 		int err = -EFAULT;
2419 
2420 		vma = vma_lookup(mm, addr);
2421 		if (!vma)
2422 			goto set_status;
2423 
2424 		folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2425 		if (folio) {
2426 			if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2427 				err = -EFAULT;
2428 			else if (folio_is_zone_device(folio))
2429 				err = -ENOENT;
2430 			else
2431 				err = folio_nid(folio);
2432 			folio_walk_end(&fw, vma);
2433 		} else {
2434 			err = -ENOENT;
2435 		}
2436 set_status:
2437 		*status = err;
2438 
2439 		pages++;
2440 		status++;
2441 	}
2442 
2443 	mmap_read_unlock(mm);
2444 }
2445 
2446 static int get_compat_pages_array(const void __user *chunk_pages[],
2447 				  const void __user * __user *pages,
2448 				  unsigned long chunk_nr)
2449 {
2450 	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2451 	compat_uptr_t p;
2452 	int i;
2453 
2454 	for (i = 0; i < chunk_nr; i++) {
2455 		if (get_user(p, pages32 + i))
2456 			return -EFAULT;
2457 		chunk_pages[i] = compat_ptr(p);
2458 	}
2459 
2460 	return 0;
2461 }
2462 
2463 /*
2464  * Determine the nodes of a user array of pages and store it in
2465  * a user array of status.
2466  */
2467 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2468 			 const void __user * __user *pages,
2469 			 int __user *status)
2470 {
2471 #define DO_PAGES_STAT_CHUNK_NR 16UL
2472 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2473 	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2474 
2475 	while (nr_pages) {
2476 		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2477 
2478 		if (in_compat_syscall()) {
2479 			if (get_compat_pages_array(chunk_pages, pages,
2480 						   chunk_nr))
2481 				break;
2482 		} else {
2483 			if (copy_from_user(chunk_pages, pages,
2484 				      chunk_nr * sizeof(*chunk_pages)))
2485 				break;
2486 		}
2487 
2488 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2489 
2490 		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2491 			break;
2492 
2493 		pages += chunk_nr;
2494 		status += chunk_nr;
2495 		nr_pages -= chunk_nr;
2496 	}
2497 	return nr_pages ? -EFAULT : 0;
2498 }
2499 
2500 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2501 {
2502 	struct task_struct *task;
2503 	struct mm_struct *mm;
2504 
2505 	/*
2506 	 * There is no need to check if current process has the right to modify
2507 	 * the specified process when they are same.
2508 	 */
2509 	if (!pid) {
2510 		mmget(current->mm);
2511 		*mem_nodes = cpuset_mems_allowed(current);
2512 		return current->mm;
2513 	}
2514 
2515 	task = find_get_task_by_vpid(pid);
2516 	if (!task) {
2517 		return ERR_PTR(-ESRCH);
2518 	}
2519 
2520 	/*
2521 	 * Check if this process has the right to modify the specified
2522 	 * process. Use the regular "ptrace_may_access()" checks.
2523 	 */
2524 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2525 		mm = ERR_PTR(-EPERM);
2526 		goto out;
2527 	}
2528 
2529 	mm = ERR_PTR(security_task_movememory(task));
2530 	if (IS_ERR(mm))
2531 		goto out;
2532 	*mem_nodes = cpuset_mems_allowed(task);
2533 	mm = get_task_mm(task);
2534 out:
2535 	put_task_struct(task);
2536 	if (!mm)
2537 		mm = ERR_PTR(-EINVAL);
2538 	return mm;
2539 }
2540 
2541 /*
2542  * Move a list of pages in the address space of the currently executing
2543  * process.
2544  */
2545 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2546 			     const void __user * __user *pages,
2547 			     const int __user *nodes,
2548 			     int __user *status, int flags)
2549 {
2550 	struct mm_struct *mm;
2551 	int err;
2552 	nodemask_t task_nodes;
2553 
2554 	/* Check flags */
2555 	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2556 		return -EINVAL;
2557 
2558 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2559 		return -EPERM;
2560 
2561 	mm = find_mm_struct(pid, &task_nodes);
2562 	if (IS_ERR(mm))
2563 		return PTR_ERR(mm);
2564 
2565 	if (nodes)
2566 		err = do_pages_move(mm, task_nodes, nr_pages, pages,
2567 				    nodes, status, flags);
2568 	else
2569 		err = do_pages_stat(mm, nr_pages, pages, status);
2570 
2571 	mmput(mm);
2572 	return err;
2573 }
2574 
2575 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2576 		const void __user * __user *, pages,
2577 		const int __user *, nodes,
2578 		int __user *, status, int, flags)
2579 {
2580 	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2581 }
2582 
2583 #ifdef CONFIG_NUMA_BALANCING
2584 /*
2585  * Returns true if this is a safe migration target node for misplaced NUMA
2586  * pages. Currently it only checks the watermarks which is crude.
2587  */
2588 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2589 				   unsigned long nr_migrate_pages)
2590 {
2591 	int z;
2592 
2593 	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2594 		struct zone *zone = pgdat->node_zones + z;
2595 
2596 		if (!managed_zone(zone))
2597 			continue;
2598 
2599 		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
2600 		if (!zone_watermark_ok(zone, 0,
2601 				       high_wmark_pages(zone) +
2602 				       nr_migrate_pages,
2603 				       ZONE_MOVABLE, ALLOC_CMA))
2604 			continue;
2605 		return true;
2606 	}
2607 	return false;
2608 }
2609 
2610 static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2611 					   unsigned long data)
2612 {
2613 	int nid = (int) data;
2614 	int order = folio_order(src);
2615 	gfp_t gfp = __GFP_THISNODE;
2616 
2617 	if (order > 0)
2618 		gfp |= GFP_TRANSHUGE_LIGHT;
2619 	else {
2620 		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2621 			__GFP_NOWARN;
2622 		gfp &= ~__GFP_RECLAIM;
2623 	}
2624 	return __folio_alloc_node(gfp, order, nid);
2625 }
2626 
2627 /*
2628  * Prepare for calling migrate_misplaced_folio() by isolating the folio if
2629  * permitted. Must be called with the PTL still held.
2630  */
2631 int migrate_misplaced_folio_prepare(struct folio *folio,
2632 		struct vm_area_struct *vma, int node)
2633 {
2634 	int nr_pages = folio_nr_pages(folio);
2635 	pg_data_t *pgdat = NODE_DATA(node);
2636 
2637 	if (folio_is_file_lru(folio)) {
2638 		/*
2639 		 * Do not migrate file folios that are mapped in multiple
2640 		 * processes with execute permissions as they are probably
2641 		 * shared libraries.
2642 		 *
2643 		 * See folio_maybe_mapped_shared() on possible imprecision
2644 		 * when we cannot easily detect if a folio is shared.
2645 		 */
2646 		if ((vma->vm_flags & VM_EXEC) && folio_maybe_mapped_shared(folio))
2647 			return -EACCES;
2648 
2649 		/*
2650 		 * Do not migrate dirty folios as not all filesystems can move
2651 		 * dirty folios in MIGRATE_ASYNC mode which is a waste of
2652 		 * cycles.
2653 		 */
2654 		if (folio_test_dirty(folio))
2655 			return -EAGAIN;
2656 	}
2657 
2658 	/* Avoid migrating to a node that is nearly full */
2659 	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2660 		int z;
2661 
2662 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2663 			return -EAGAIN;
2664 		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2665 			if (managed_zone(pgdat->node_zones + z))
2666 				break;
2667 		}
2668 
2669 		/*
2670 		 * If there are no managed zones, it should not proceed
2671 		 * further.
2672 		 */
2673 		if (z < 0)
2674 			return -EAGAIN;
2675 
2676 		wakeup_kswapd(pgdat->node_zones + z, 0,
2677 			      folio_order(folio), ZONE_MOVABLE);
2678 		return -EAGAIN;
2679 	}
2680 
2681 	if (!folio_isolate_lru(folio))
2682 		return -EAGAIN;
2683 
2684 	node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2685 			    nr_pages);
2686 	return 0;
2687 }
2688 
2689 /*
2690  * Attempt to migrate a misplaced folio to the specified destination
2691  * node. Caller is expected to have isolated the folio by calling
2692  * migrate_misplaced_folio_prepare(), which will result in an
2693  * elevated reference count on the folio. This function will un-isolate the
2694  * folio, dereferencing the folio before returning.
2695  */
2696 int migrate_misplaced_folio(struct folio *folio, int node)
2697 {
2698 	pg_data_t *pgdat = NODE_DATA(node);
2699 	int nr_remaining;
2700 	unsigned int nr_succeeded;
2701 	LIST_HEAD(migratepages);
2702 	struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio);
2703 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
2704 
2705 	list_add(&folio->lru, &migratepages);
2706 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2707 				     NULL, node, MIGRATE_ASYNC,
2708 				     MR_NUMA_MISPLACED, &nr_succeeded);
2709 	if (nr_remaining && !list_empty(&migratepages))
2710 		putback_movable_pages(&migratepages);
2711 	if (nr_succeeded) {
2712 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2713 		count_memcg_events(memcg, NUMA_PAGE_MIGRATE, nr_succeeded);
2714 		if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
2715 		    && !node_is_toptier(folio_nid(folio))
2716 		    && node_is_toptier(node))
2717 			mod_lruvec_state(lruvec, PGPROMOTE_SUCCESS, nr_succeeded);
2718 	}
2719 	mem_cgroup_put(memcg);
2720 	BUG_ON(!list_empty(&migratepages));
2721 	return nr_remaining ? -EAGAIN : 0;
2722 }
2723 #endif /* CONFIG_NUMA_BALANCING */
2724 #endif /* CONFIG_NUMA */
2725