xref: /linux/mm/migrate.c (revision 6bc0987d0b508b3768808efafa1e90041713526b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Memory Migration functionality - linux/mm/migrate.c
4  *
5  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6  *
7  * Page migration was first developed in the context of the memory hotplug
8  * project. The main authors of the migration code are:
9  *
10  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11  * Hirokazu Takahashi <taka@valinux.co.jp>
12  * Dave Hansen <haveblue@us.ibm.com>
13  * Christoph Lameter
14  */
15 
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/leafops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/ksm.h>
24 #include <linux/rmap.h>
25 #include <linux/topology.h>
26 #include <linux/cpu.h>
27 #include <linux/cpuset.h>
28 #include <linux/writeback.h>
29 #include <linux/mempolicy.h>
30 #include <linux/vmalloc.h>
31 #include <linux/security.h>
32 #include <linux/backing-dev.h>
33 #include <linux/compaction.h>
34 #include <linux/syscalls.h>
35 #include <linux/compat.h>
36 #include <linux/hugetlb.h>
37 #include <linux/gfp.h>
38 #include <linux/page_idle.h>
39 #include <linux/page_owner.h>
40 #include <linux/sched/mm.h>
41 #include <linux/ptrace.h>
42 #include <linux/memory.h>
43 #include <linux/sched/sysctl.h>
44 #include <linux/memory-tiers.h>
45 #include <linux/pagewalk.h>
46 
47 #include <asm/tlbflush.h>
48 
49 #include <trace/events/migrate.h>
50 
51 #include "internal.h"
52 #include "swap.h"
53 
54 static const struct movable_operations *offline_movable_ops;
55 static const struct movable_operations *zsmalloc_movable_ops;
56 
57 int set_movable_ops(const struct movable_operations *ops, enum pagetype type)
58 {
59 	/*
60 	 * We only allow for selected types and don't handle concurrent
61 	 * registration attempts yet.
62 	 */
63 	switch (type) {
64 	case PGTY_offline:
65 		if (offline_movable_ops && ops)
66 			return -EBUSY;
67 		offline_movable_ops = ops;
68 		break;
69 	case PGTY_zsmalloc:
70 		if (zsmalloc_movable_ops && ops)
71 			return -EBUSY;
72 		zsmalloc_movable_ops = ops;
73 		break;
74 	default:
75 		return -EINVAL;
76 	}
77 	return 0;
78 }
79 EXPORT_SYMBOL_GPL(set_movable_ops);
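/*
 * Example (illustrative sketch, not part of the build): a driver providing
 * movable pages registers its ops once during init and unregisters by
 * passing NULL on teardown. The ops table and callback names below are
 * hypothetical:
 *
 *	static const struct movable_operations my_balloon_mops = {
 *		.isolate_page	= my_balloon_isolate,
 *		.migrate_page	= my_balloon_migrate,
 *		.putback_page	= my_balloon_putback,
 *	};
 *
 *	if (set_movable_ops(&my_balloon_mops, PGTY_offline))
 *		return -EBUSY;
 *	...
 *	set_movable_ops(NULL, PGTY_offline);
 */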
80 
81 static const struct movable_operations *page_movable_ops(struct page *page)
82 {
83 	VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
84 
85 	/*
86 	 * If we enable page migration for a page of a certain type by marking
87 	 * it as movable, the page type must be sticky until the page gets freed
88 	 * back to the buddy.
89 	 */
90 	if (PageOffline(page))
91 		/* Only balloon page migration sets PageOffline pages movable. */
92 		return offline_movable_ops;
93 	if (PageZsmalloc(page))
94 		return zsmalloc_movable_ops;
95 
96 	return NULL;
97 }
98 
99 /**
100  * isolate_movable_ops_page - isolate a movable_ops page for migration
101  * @page: The page.
102  * @mode: The isolation mode.
103  *
104  * Try to isolate a movable_ops page for migration. Will fail if the page is
105  * not a movable_ops page, if the page is already isolated for migration
106  * or if the page was just released by its owner.
107  *
108  * Once isolated, the page cannot get freed until it is either putback
109  * or migrated.
110  *
111  * Returns true if isolation succeeded, otherwise false.
112  */
113 bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
114 {
115 	/*
116 	 * TODO: these pages will not be folios in the future. All
117 	 * folio dependencies will have to be removed.
118 	 */
119 	struct folio *folio = folio_get_nontail_page(page);
120 	const struct movable_operations *mops;
121 
122 	/*
123 	 * Avoid burning cycles on pages that are still under __free_pages()
124 	 * or that just got freed under us.
125 	 *
126 	 * In case we 'win' a race with a movable page being freed under us and
127 	 * raise its refcount, preventing __free_pages() from doing its job,
128 	 * the put_page() at the end of this block will take care of
129 	 * releasing this page, thus avoiding a nasty leak.
130 	 */
131 	if (!folio)
132 		goto out;
133 
134 	/*
135 	 * Check for movable_ops pages before taking the page lock because
136 	 * we use non-atomic bitops on newly allocated page flags, so
137 	 * unconditionally grabbing the lock would ruin the page owner's side.
138 	 *
139 	 * Note that once a page has movable_ops, it will stay that way
140 	 * until the page is freed.
141 	 */
142 	if (unlikely(!page_has_movable_ops(page)))
143 		goto out_putfolio;
144 
145 	/*
146 	 * As movable pages are not isolated from LRU lists, concurrent
147 	 * compaction threads can race against page migration functions
148 	 * as well as against the release of a page.
149 	 *
150 	 * In order to avoid having an already isolated movable page
151 	 * being (wrongly) re-isolated while it is under migration,
152 	 * or to avoid attempting to isolate pages being released,
153 	 * let's be sure we hold the page lock
154 	 * before proceeding with the movable page isolation steps.
155 	 */
156 	if (unlikely(!folio_trylock(folio)))
157 		goto out_putfolio;
158 
159 	VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
160 	if (PageMovableOpsIsolated(page))
161 		goto out_no_isolated;
162 
163 	mops = page_movable_ops(page);
164 	if (WARN_ON_ONCE(!mops))
165 		goto out_no_isolated;
166 
167 	if (!mops->isolate_page(page, mode))
168 		goto out_no_isolated;
169 
170 	/* Driver shouldn't use the isolated flag */
171 	VM_WARN_ON_ONCE_PAGE(PageMovableOpsIsolated(page), page);
172 	SetPageMovableOpsIsolated(page);
173 	folio_unlock(folio);
174 
175 	return true;
176 
177 out_no_isolated:
178 	folio_unlock(folio);
179 out_putfolio:
180 	folio_put(folio);
181 out:
182 	return false;
183 }
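/*
 * Example (illustrative sketch): the expected calling pattern. Once
 * isolation succeeds, the caller owns the extra reference and must
 * eventually migrate the page or put it back; the page cannot simply be
 * freed while isolated:
 *
 *	if (isolate_movable_ops_page(page, mode)) {
 *		... attempt migration; on failure or abort:
 *		putback_movable_ops_page(page);
 *	}
 */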
184 
185 /**
186  * putback_movable_ops_page - putback an isolated movable_ops page
187  * @page: The isolated page.
188  *
189  * Putback an isolated movable_ops page.
190  *
191  * After the page was putback, it might get freed instantly.
192  */
193 static void putback_movable_ops_page(struct page *page)
194 {
195 	/*
196 	 * TODO: these pages will not be folios in the future. All
197 	 * folio dependencies will have to be removed.
198 	 */
199 	struct folio *folio = page_folio(page);
200 
201 	VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
202 	VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(page), page);
203 	folio_lock(folio);
204 	page_movable_ops(page)->putback_page(page);
205 	ClearPageMovableOpsIsolated(page);
206 	folio_unlock(folio);
207 	folio_put(folio);
208 }
209 
210 /**
211  * migrate_movable_ops_page - migrate an isolated movable_ops page
212  * @dst: The destination page.
213  * @src: The source page.
214  * @mode: The migration mode.
215  *
216  * Migrate an isolated movable_ops page.
217  *
218  * If the src page was already released by its owner, the src page is
219  * un-isolated (putback) and migration succeeds; the migration core will be the
220  * owner of both pages.
221  *
222  * If the src page was not released by its owner and the migration was
223  * successful, the owner of the src page and the dst page are swapped and
224  * the src page is un-isolated.
225  *
226  * If migration fails, the ownership stays unmodified and the src page
227  * remains isolated: migration may be retried later or the page can be putback.
228  *
229  * TODO: migration core will treat both pages as folios and lock them before
230  * this call to unlock them after this call. Further, the folio refcounts on
231  * src and dst are also released by migration core. These pages will not be
232  * folios in the future, so that must be reworked.
233  *
234  * Returns 0 on success, otherwise a negative error code.
235  */
236 static int migrate_movable_ops_page(struct page *dst, struct page *src,
237 		enum migrate_mode mode)
238 {
239 	int rc;
240 
241 	VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(src), src);
242 	VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(src), src);
243 	rc = page_movable_ops(src)->migrate_page(dst, src, mode);
244 	if (!rc)
245 		ClearPageMovableOpsIsolated(src);
246 	return rc;
247 }
248 
249 /*
250  * Put previously isolated pages back onto the appropriate lists
251  * from where they were once taken off for compaction/migration.
252  *
253  * This function shall be used whenever the isolated pageset has been
254  * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
255  * and folio_isolate_hugetlb().
256  */
257 void putback_movable_pages(struct list_head *l)
258 {
259 	struct folio *folio;
260 	struct folio *folio2;
261 
262 	list_for_each_entry_safe(folio, folio2, l, lru) {
263 		if (unlikely(folio_test_hugetlb(folio))) {
264 			folio_putback_hugetlb(folio);
265 			continue;
266 		}
267 		list_del(&folio->lru);
268 		if (unlikely(page_has_movable_ops(&folio->page))) {
269 			putback_movable_ops_page(&folio->page);
270 		} else {
271 			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
272 					folio_is_file_lru(folio), -folio_nr_pages(folio));
273 			folio_putback_lru(folio);
274 		}
275 	}
276 }
277 
278 /* Must be called with an elevated refcount on the non-hugetlb folio */
279 bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
280 {
281 	if (folio_test_hugetlb(folio))
282 		return folio_isolate_hugetlb(folio, list);
283 
284 	if (page_has_movable_ops(&folio->page)) {
285 		if (!isolate_movable_ops_page(&folio->page,
286 					      ISOLATE_UNEVICTABLE))
287 			return false;
288 	} else {
289 		if (!folio_isolate_lru(folio))
290 			return false;
291 		node_stat_add_folio(folio, NR_ISOLATED_ANON +
292 				    folio_is_file_lru(folio));
293 	}
294 	list_add(&folio->lru, list);
295 	return true;
296 }
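/*
 * Example (illustrative sketch): a typical caller pins the folio, isolates
 * it onto a private list, and later hands that list to migrate_pages() or
 * undoes the isolation with putback_movable_pages():
 *
 *	LIST_HEAD(pagelist);
 *
 *	folio_get(folio);
 *	isolated = isolate_folio_to_list(folio, &pagelist);
 *	folio_put(folio);
 *	if (isolated)
 *		... migrate_pages(&pagelist, ...) or, on abort,
 *		    putback_movable_pages(&pagelist) ...
 */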
297 
298 static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
299 		struct folio *folio, pte_t old_pte, unsigned long idx)
300 {
301 	struct page *page = folio_page(folio, idx);
302 	pte_t newpte;
303 
304 	if (PageCompound(page) || PageHWPoison(page))
305 		return false;
306 
307 	VM_BUG_ON_PAGE(!PageAnon(page), page);
308 	VM_BUG_ON_PAGE(!PageLocked(page), page);
309 	VM_BUG_ON_PAGE(pte_present(old_pte), page);
310 	VM_WARN_ON_ONCE_FOLIO(folio_is_device_private(folio), folio);
311 
312 	if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
313 	    mm_forbids_zeropage(pvmw->vma->vm_mm))
314 		return false;
315 
316 	/*
317 	 * The pmd entry mapping the old thp was flushed and the pte mapping
318 	 * this subpage is now non-present. If the subpage contains only zeroes,
319 	 * map it to the shared zeropage.
320 	 */
321 	if (!pages_identical(page, ZERO_PAGE(0)))
322 		return false;
323 
324 	newpte = pte_mkspecial(pfn_pte(zero_pfn(pvmw->address),
325 					pvmw->vma->vm_page_prot));
326 
327 	if (pte_swp_soft_dirty(old_pte))
328 		newpte = pte_mksoft_dirty(newpte);
329 	if (pte_swp_uffd_wp(old_pte))
330 		newpte = pte_mkuffd_wp(newpte);
331 
332 	set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
333 
334 	dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
335 	return true;
336 }
337 
338 struct rmap_walk_arg {
339 	struct folio *folio;
340 	bool map_unused_to_zeropage;
341 };
342 
343 /*
344  * Restore a potential migration pte to a working pte entry
345  */
346 static bool remove_migration_pte(struct folio *folio,
347 		struct vm_area_struct *vma, unsigned long addr, void *arg)
348 {
349 	struct rmap_walk_arg *rmap_walk_arg = arg;
350 	DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
351 
352 	while (page_vma_mapped_walk(&pvmw)) {
353 		rmap_t rmap_flags = RMAP_NONE;
354 		pte_t old_pte;
355 		pte_t pte;
356 		softleaf_t entry;
357 		struct page *new;
358 		unsigned long idx = 0;
359 
360 		/* pgoff is invalid for ksm pages, but they are never large */
361 		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
362 			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
363 		new = folio_page(folio, idx);
364 
365 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
366 		/* PMD-mapped THP migration entry */
367 		if (!pvmw.pte) {
368 			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
369 					!folio_test_pmd_mappable(folio), folio);
370 			remove_migration_pmd(&pvmw, new);
371 			continue;
372 		}
373 #endif
374 		old_pte = ptep_get(pvmw.pte);
375 		if (rmap_walk_arg->map_unused_to_zeropage &&
376 		    try_to_map_unused_to_zeropage(&pvmw, folio, old_pte, idx))
377 			continue;
378 
379 		folio_get(folio);
380 		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
381 
382 		entry = softleaf_from_pte(old_pte);
383 		if (!softleaf_is_migration_young(entry))
384 			pte = pte_mkold(pte);
385 		if (folio_test_dirty(folio) && softleaf_is_migration_dirty(entry))
386 			pte = pte_mkdirty(pte);
387 		if (pte_swp_soft_dirty(old_pte))
388 			pte = pte_mksoft_dirty(pte);
389 		else
390 			pte = pte_clear_soft_dirty(pte);
391 
392 		if (softleaf_is_migration_write(entry))
393 			pte = pte_mkwrite(pte, vma);
394 		else if (pte_swp_uffd_wp(old_pte))
395 			pte = pte_mkuffd_wp(pte);
396 
397 		if (folio_test_anon(folio) && !softleaf_is_migration_read(entry))
398 			rmap_flags |= RMAP_EXCLUSIVE;
399 
400 		if (unlikely(is_device_private_page(new))) {
401 			if (pte_write(pte))
402 				entry = make_writable_device_private_entry(
403 							page_to_pfn(new));
404 			else
405 				entry = make_readable_device_private_entry(
406 							page_to_pfn(new));
407 			pte = softleaf_to_pte(entry);
408 			if (pte_swp_soft_dirty(old_pte))
409 				pte = pte_swp_mksoft_dirty(pte);
410 			if (pte_swp_uffd_wp(old_pte))
411 				pte = pte_swp_mkuffd_wp(pte);
412 		}
413 
414 #ifdef CONFIG_HUGETLB_PAGE
415 		if (folio_test_hugetlb(folio)) {
416 			struct hstate *h = hstate_vma(vma);
417 			unsigned int shift = huge_page_shift(h);
418 			unsigned long psize = huge_page_size(h);
419 
420 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
421 			if (folio_test_anon(folio))
422 				hugetlb_add_anon_rmap(folio, vma, pvmw.address,
423 						      rmap_flags);
424 			else
425 				hugetlb_add_file_rmap(folio);
426 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
427 					psize);
428 		} else
429 #endif
430 		{
431 			if (folio_test_anon(folio))
432 				folio_add_anon_rmap_pte(folio, new, vma,
433 							pvmw.address, rmap_flags);
434 			else
435 				folio_add_file_rmap_pte(folio, new, vma);
436 			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
437 		}
438 		if (READ_ONCE(vma->vm_flags) & VM_LOCKED)
439 			mlock_drain_local();
440 
441 		trace_remove_migration_pte(pvmw.address, pte_val(pte),
442 					   compound_order(new));
443 
444 		/* No need to invalidate - it was non-present before */
445 		update_mmu_cache(vma, pvmw.address, pvmw.pte);
446 	}
447 
448 	return true;
449 }
450 
451 /*
452  * Get rid of all migration entries and replace them with
453  * references to the indicated page.
454  */
455 void remove_migration_ptes(struct folio *src, struct folio *dst,
456 		enum ttu_flags flags)
457 {
458 	struct rmap_walk_arg rmap_walk_arg = {
459 		.folio = src,
460 		.map_unused_to_zeropage = flags & TTU_USE_SHARED_ZEROPAGE,
461 	};
462 
463 	struct rmap_walk_control rwc = {
464 		.rmap_one = remove_migration_pte,
465 		.arg = &rmap_walk_arg,
466 	};
467 
468 	VM_BUG_ON_FOLIO((flags & TTU_USE_SHARED_ZEROPAGE) && (src != dst), src);
469 
470 	if (flags & TTU_RMAP_LOCKED)
471 		rmap_walk_locked(dst, &rwc);
472 	else
473 		rmap_walk(dst, &rwc);
474 }
475 
476 /*
477  * Something used the pte of a page under migration. We need to
478  * get to the page and wait until migration is finished.
479  * When we return from this function the fault will be retried.
480  */
481 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
482 			  unsigned long address)
483 {
484 	spinlock_t *ptl;
485 	pte_t *ptep;
486 	pte_t pte;
487 	softleaf_t entry;
488 
489 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
490 	if (!ptep)
491 		return;
492 
493 	pte = ptep_get(ptep);
494 	pte_unmap(ptep);
495 
496 	if (pte_none(pte) || pte_present(pte))
497 		goto out;
498 
499 	entry = softleaf_from_pte(pte);
500 	if (!softleaf_is_migration(entry))
501 		goto out;
502 
503 	softleaf_entry_wait_on_locked(entry, ptl);
504 	return;
505 out:
506 	spin_unlock(ptl);
507 }
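/*
 * Example (illustrative sketch): a fault handler that decodes a swap pte
 * and finds a migration entry typically waits here and lets the fault be
 * retried, roughly:
 *
 *	entry = softleaf_from_pte(vmf->orig_pte);
 *	if (softleaf_is_migration(entry)) {
 *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
 *		return 0;
 *	}
 */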
508 
509 #ifdef CONFIG_HUGETLB_PAGE
510 /*
511  * The vma read lock must be held upon entry. Holding that lock prevents either
512  * the pte or the ptl from being freed.
513  *
514  * This function will release the vma lock before returning.
515  */
516 void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
517 {
518 	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
519 	softleaf_t entry;
520 	pte_t pte;
521 
522 	hugetlb_vma_assert_locked(vma);
523 	spin_lock(ptl);
524 	pte = huge_ptep_get(vma->vm_mm, addr, ptep);
525 
526 	if (huge_pte_none(pte))
527 		goto fail;
528 
529 	entry = softleaf_from_pte(pte);
530 	if (softleaf_is_migration(entry)) {
531 		/*
532 		 * If a migration entry exists, it is safe to release the vma
533 		 * lock here because the pgtable page won't be freed until the
534 		 * pgtable lock is released.  See comment right above pgtable
535 		 * lock release in softleaf_entry_wait_on_locked().
536 		 */
537 		hugetlb_vma_unlock_read(vma);
538 		softleaf_entry_wait_on_locked(entry, ptl);
539 		return;
540 	}
541 
542 fail:
543 	spin_unlock(ptl);
544 	hugetlb_vma_unlock_read(vma);
545 }
546 #endif
547 
548 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
549 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
550 {
551 	spinlock_t *ptl;
552 
553 	ptl = pmd_lock(mm, pmd);
554 	if (!pmd_is_migration_entry(*pmd))
555 		goto unlock;
556 	softleaf_entry_wait_on_locked(softleaf_from_pmd(*pmd), ptl);
557 	return;
558 unlock:
559 	spin_unlock(ptl);
560 }
561 #endif
562 
563 /*
564  * Replace the folio in the mapping.
565  *
566  * The number of remaining references must be:
567  * 1 for anonymous folios without a mapping,
568  * 2 for folios with a mapping,
569  * 3 for folios with a mapping and the private flag set.
570  */
571 static int __folio_migrate_mapping(struct address_space *mapping,
572 		struct folio *newfolio, struct folio *folio, int expected_count)
573 {
574 	XA_STATE(xas, &mapping->i_pages, folio->index);
575 	struct swap_cluster_info *ci = NULL;
576 	struct zone *oldzone, *newzone;
577 	int dirty;
578 	long nr = folio_nr_pages(folio);
579 
580 	if (!mapping) {
581 		/* Take off deferred split queue while frozen and memcg set */
582 		if (folio_test_large(folio) &&
583 		    folio_test_large_rmappable(folio)) {
584 			if (!folio_ref_freeze(folio, expected_count))
585 				return -EAGAIN;
586 			folio_unqueue_deferred_split(folio);
587 			folio_ref_unfreeze(folio, expected_count);
588 		}
589 
590 		/* No turning back from here */
591 		newfolio->index = folio->index;
592 		newfolio->mapping = folio->mapping;
593 		if (folio_test_anon(folio) && folio_test_large(folio))
594 			mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
595 		if (folio_test_swapbacked(folio))
596 			__folio_set_swapbacked(newfolio);
597 
598 		return 0;
599 	}
600 
601 	oldzone = folio_zone(folio);
602 	newzone = folio_zone(newfolio);
603 
604 	if (folio_test_swapcache(folio))
605 		ci = swap_cluster_get_and_lock_irq(folio);
606 	else
607 		xas_lock_irq(&xas);
608 
609 	if (!folio_ref_freeze(folio, expected_count)) {
610 		if (ci)
611 			swap_cluster_unlock_irq(ci);
612 		else
613 			xas_unlock_irq(&xas);
614 		return -EAGAIN;
615 	}
616 
617 	/* Take off deferred split queue while frozen and memcg set */
618 	folio_unqueue_deferred_split(folio);
619 
620 	/*
621 	 * Now we know that no one else is looking at the folio:
622 	 * no turning back from here.
623 	 */
624 	newfolio->index = folio->index;
625 	newfolio->mapping = folio->mapping;
626 	if (folio_test_anon(folio) && folio_test_large(folio))
627 		mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
628 	folio_ref_add(newfolio, nr); /* add cache reference */
629 	if (folio_test_swapbacked(folio))
630 		__folio_set_swapbacked(newfolio);
631 	if (folio_test_swapcache(folio)) {
632 		folio_set_swapcache(newfolio);
633 		newfolio->private = folio_get_private(folio);
634 	}
635 
636 	/* Move dirty while folio refs frozen and newfolio not yet exposed */
637 	dirty = folio_test_dirty(folio);
638 	if (dirty) {
639 		folio_clear_dirty(folio);
640 		folio_set_dirty(newfolio);
641 	}
642 
643 	if (folio_test_swapcache(folio))
644 		__swap_cache_replace_folio(ci, folio, newfolio);
645 	else
646 		xas_store(&xas, newfolio);
647 
648 	/*
649 	 * Drop cache reference from old folio by unfreezing
650 	 * to one less reference.
651 	 * We know this isn't the last reference.
652 	 */
653 	folio_ref_unfreeze(folio, expected_count - nr);
654 
655 	/* Leave irq disabled to prevent preemption while updating stats */
656 	if (ci)
657 		swap_cluster_unlock(ci);
658 	else
659 		xas_unlock(&xas);
660 
661 	/*
662 	 * If moved to a different zone then also account
663 	 * the folio for that zone. Other VM counters will be
664 	 * taken care of when we establish references to the
665 	 * new folio and drop references to the old folio.
666 	 *
667 	 * Note that anonymous folios are accounted for
668 	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
669 	 * are mapped to swap space.
670 	 */
671 	if (newzone != oldzone) {
672 		struct lruvec *old_lruvec, *new_lruvec;
673 		struct mem_cgroup *memcg;
674 
675 		memcg = folio_memcg(folio);
676 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
677 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
678 
679 		mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
680 		mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
681 		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
682 			mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
683 			mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
684 
685 			if (folio_test_pmd_mappable(folio)) {
686 				mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
687 				mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
688 			}
689 		}
690 #ifdef CONFIG_SWAP
691 		if (folio_test_swapcache(folio)) {
692 			mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
693 			mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
694 		}
695 #endif
696 		if (dirty && mapping_can_writeback(mapping)) {
697 			mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
698 			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
699 			mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
700 			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
701 		}
702 	}
703 	local_irq_enable();
704 
705 	return 0;
706 }
707 
708 int folio_migrate_mapping(struct address_space *mapping,
709 		struct folio *newfolio, struct folio *folio, int extra_count)
710 {
711 	int expected_count = folio_expected_ref_count(folio) + extra_count + 1;
712 
713 	if (folio_ref_count(folio) != expected_count)
714 		return -EAGAIN;
715 
716 	return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
717 }
718 EXPORT_SYMBOL(folio_migrate_mapping);
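/*
 * Worked example of the reference expectation (a sketch for current
 * kernels): for an unmapped pagecache folio of N pages with no private
 * data, folio_expected_ref_count() returns N (one reference per page from
 * the page cache), so expected_count is N + 1 including the caller's own
 * reference. Any additional transient reference, e.g. from a concurrent
 * GUP, makes the check fail with -EAGAIN and the migration is retried.
 */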
719 
720 /*
721  * The expected number of remaining references is the same as that
722  * of folio_migrate_mapping().
723  */
724 int migrate_huge_page_move_mapping(struct address_space *mapping,
725 				   struct folio *dst, struct folio *src)
726 {
727 	XA_STATE(xas, &mapping->i_pages, src->index);
728 	int rc, expected_count = folio_expected_ref_count(src) + 1;
729 
730 	if (folio_ref_count(src) != expected_count)
731 		return -EAGAIN;
732 
733 	rc = folio_mc_copy(dst, src);
734 	if (unlikely(rc))
735 		return rc;
736 
737 	xas_lock_irq(&xas);
738 	if (!folio_ref_freeze(src, expected_count)) {
739 		xas_unlock_irq(&xas);
740 		return -EAGAIN;
741 	}
742 
743 	dst->index = src->index;
744 	dst->mapping = src->mapping;
745 
746 	folio_ref_add(dst, folio_nr_pages(dst));
747 
748 	xas_store(&xas, dst);
749 
750 	folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));
751 
752 	xas_unlock_irq(&xas);
753 
754 	return 0;
755 }
756 
757 /*
758  * Copy the flags and some other ancillary information
759  */
760 void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
761 {
762 	int cpupid;
763 
764 	if (folio_test_referenced(folio))
765 		folio_set_referenced(newfolio);
766 	if (folio_test_uptodate(folio))
767 		folio_mark_uptodate(newfolio);
768 	if (folio_test_clear_active(folio)) {
769 		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
770 		folio_set_active(newfolio);
771 	} else if (folio_test_clear_unevictable(folio))
772 		folio_set_unevictable(newfolio);
773 	if (folio_test_workingset(folio))
774 		folio_set_workingset(newfolio);
775 	if (folio_test_checked(folio))
776 		folio_set_checked(newfolio);
777 	/*
778 	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
779 	 * migration entries. We can still have PG_anon_exclusive set on an
780 	 * effectively unmapped and unreferenced first sub-page of an
781 	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
782 	 */
783 	if (folio_test_mappedtodisk(folio))
784 		folio_set_mappedtodisk(newfolio);
785 
786 	/* Move dirty on pages not done by folio_migrate_mapping() */
787 	if (folio_test_dirty(folio))
788 		folio_set_dirty(newfolio);
789 
790 	if (folio_test_young(folio))
791 		folio_set_young(newfolio);
792 	if (folio_test_idle(folio))
793 		folio_set_idle(newfolio);
794 
795 	folio_migrate_refs(newfolio, folio);
796 	/*
797 	 * Copy NUMA information to the new page, to prevent over-eager
798 	 * future migrations of this same page.
799 	 */
800 	cpupid = folio_xchg_last_cpupid(folio, -1);
801 	/*
802 	 * In memory tiering mode, when migrating between a slow and a fast
803 	 * memory node, reset the cpupid, because it is used to record the
804 	 * page access time on the slow memory node.
805 	 */
806 	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
807 		bool f_toptier = node_is_toptier(folio_nid(folio));
808 		bool t_toptier = node_is_toptier(folio_nid(newfolio));
809 
810 		if (f_toptier != t_toptier)
811 			cpupid = -1;
812 	}
813 	folio_xchg_last_cpupid(newfolio, cpupid);
814 
815 	folio_migrate_ksm(newfolio, folio);
816 	/*
817 	 * Please do not reorder this without considering how mm/ksm.c's
818 	 * ksm_get_folio() depends upon ksm_migrate_page() and the
819 	 * swapcache flag.
820 	 */
821 	if (folio_test_swapcache(folio))
822 		folio_clear_swapcache(folio);
823 	folio_clear_private(folio);
824 
825 	/* page->private contains hugetlb specific flags */
826 	if (!folio_test_hugetlb(folio))
827 		folio->private = NULL;
828 
829 	/*
830 	 * If any waiters have accumulated on the new page then
831 	 * wake them up.
832 	 */
833 	if (folio_test_writeback(newfolio))
834 		folio_end_writeback(newfolio);
835 
836 	/*
837 	 * PG_readahead shares the same bit with PG_reclaim.  The above
838 	 * folio_end_writeback() may clear PG_readahead mistakenly, so set the
839 	 * bit after that.
840 	 */
841 	if (folio_test_readahead(folio))
842 		folio_set_readahead(newfolio);
843 
844 	folio_copy_owner(newfolio, folio);
845 	pgalloc_tag_swap(newfolio, folio);
846 
847 	mem_cgroup_migrate(folio, newfolio);
848 }
849 EXPORT_SYMBOL(folio_migrate_flags);
850 
851 /************************************************************
852  *                    Migration functions
853  ***********************************************************/
854 
855 static int __migrate_folio(struct address_space *mapping, struct folio *dst,
856 			   struct folio *src, void *src_private,
857 			   enum migrate_mode mode)
858 {
859 	int rc, expected_count = folio_expected_ref_count(src) + 1;
860 
861 	/* Check whether src does not have extra refs before we do more work */
862 	if (folio_ref_count(src) != expected_count)
863 		return -EAGAIN;
864 
865 	rc = folio_mc_copy(dst, src);
866 	if (unlikely(rc))
867 		return rc;
868 
869 	rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
870 	if (rc)
871 		return rc;
872 
873 	if (src_private)
874 		folio_attach_private(dst, folio_detach_private(src));
875 
876 	folio_migrate_flags(dst, src);
877 	return 0;
878 }
879 
880 /**
881  * migrate_folio() - Simple folio migration.
882  * @mapping: The address_space containing the folio.
883  * @dst: The folio to migrate the data to.
884  * @src: The folio containing the current data.
885  * @mode: How to migrate the page.
886  *
887  * Common logic to directly migrate a single LRU folio suitable for
888  * folios that do not have private data.
889  *
890  * Folios are locked upon entry and exit.
891  */
892 int migrate_folio(struct address_space *mapping, struct folio *dst,
893 		  struct folio *src, enum migrate_mode mode)
894 {
895 	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */
896 	return __migrate_folio(mapping, dst, src, NULL, mode);
897 }
898 EXPORT_SYMBOL(migrate_folio);
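/*
 * Example (usage pattern): filesystems whose folios carry no private data
 * can use this directly as their migration callback, e.g. with a
 * hypothetical aops table:
 *
 *	static const struct address_space_operations my_aops = {
 *		...
 *		.migrate_folio	= migrate_folio,
 *	};
 *
 * Many in-tree filesystems wire it up exactly like this.
 */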
899 
900 #ifdef CONFIG_BUFFER_HEAD
901 /* Returns true if all buffers are successfully locked */
902 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
903 							enum migrate_mode mode)
904 {
905 	struct buffer_head *bh = head;
906 	struct buffer_head *failed_bh;
907 
908 	do {
909 		if (!trylock_buffer(bh)) {
910 			if (mode == MIGRATE_ASYNC)
911 				goto unlock;
912 			if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
913 				goto unlock;
914 			lock_buffer(bh);
915 		}
916 
917 		bh = bh->b_this_page;
918 	} while (bh != head);
919 
920 	return true;
921 
922 unlock:
923 	/* We failed to lock the buffer and cannot stall. */
924 	failed_bh = bh;
925 	bh = head;
926 	while (bh != failed_bh) {
927 		unlock_buffer(bh);
928 		bh = bh->b_this_page;
929 	}
930 
931 	return false;
932 }
933 
934 static int __buffer_migrate_folio(struct address_space *mapping,
935 		struct folio *dst, struct folio *src, enum migrate_mode mode,
936 		bool check_refs)
937 {
938 	struct buffer_head *bh, *head;
939 	int rc;
940 	int expected_count;
941 
942 	head = folio_buffers(src);
943 	if (!head)
944 		return migrate_folio(mapping, dst, src, mode);
945 
946 	/* Check whether page does not have extra refs before we do more work */
947 	expected_count = folio_expected_ref_count(src) + 1;
948 	if (folio_ref_count(src) != expected_count)
949 		return -EAGAIN;
950 
951 	if (!buffer_migrate_lock_buffers(head, mode))
952 		return -EAGAIN;
953 
954 	if (check_refs) {
955 		bool busy, migrating;
956 		bool invalidated = false;
957 
958 		migrating = test_and_set_bit_lock(BH_Migrate, &head->b_state);
959 		VM_WARN_ON_ONCE(migrating);
960 recheck_buffers:
961 		busy = false;
962 		spin_lock(&mapping->i_private_lock);
963 		bh = head;
964 		do {
965 			if (atomic_read(&bh->b_count)) {
966 				busy = true;
967 				break;
968 			}
969 			bh = bh->b_this_page;
970 		} while (bh != head);
971 		spin_unlock(&mapping->i_private_lock);
972 		if (busy) {
973 			if (invalidated) {
974 				rc = -EAGAIN;
975 				goto unlock_buffers;
976 			}
977 			invalidate_bh_lrus();
978 			invalidated = true;
979 			goto recheck_buffers;
980 		}
981 	}
982 
983 	rc = filemap_migrate_folio(mapping, dst, src, mode);
984 	if (rc)
985 		goto unlock_buffers;
986 
987 	bh = head;
988 	do {
989 		folio_set_bh(bh, dst, bh_offset(bh));
990 		bh = bh->b_this_page;
991 	} while (bh != head);
992 
993 unlock_buffers:
994 	if (check_refs)
995 		clear_bit_unlock(BH_Migrate, &head->b_state);
996 	bh = head;
997 	do {
998 		unlock_buffer(bh);
999 		bh = bh->b_this_page;
1000 	} while (bh != head);
1001 
1002 	return rc;
1003 }
1004 
1005 /**
1006  * buffer_migrate_folio() - Migration function for folios with buffers.
1007  * @mapping: The address space containing @src.
1008  * @dst: The folio to migrate to.
1009  * @src: The folio to migrate from.
1010  * @mode: How to migrate the folio.
1011  *
1012  * This function can only be used if the underlying filesystem guarantees
1013  * that no other references to @src exist. For example attached buffer
1014  * heads are accessed only under the folio lock.  If your filesystem cannot
1015  * provide this guarantee, buffer_migrate_folio_norefs() may be more
1016  * appropriate.
1017  *
1018  * Return: 0 on success or a negative errno on failure.
1019  */
1020 int buffer_migrate_folio(struct address_space *mapping,
1021 		struct folio *dst, struct folio *src, enum migrate_mode mode)
1022 {
1023 	return __buffer_migrate_folio(mapping, dst, src, mode, false);
1024 }
1025 EXPORT_SYMBOL(buffer_migrate_folio);
1026 
1027 /**
1028  * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
1029  * @mapping: The address space containing @src.
1030  * @dst: The folio to migrate to.
1031  * @src: The folio to migrate from.
1032  * @mode: How to migrate the folio.
1033  *
1034  * Like buffer_migrate_folio() except that this variant is more careful
1035  * and checks that there are also no buffer head references. This function
1036  * is the right one for mappings where buffer heads are directly looked
1037  * up and referenced (such as block device mappings).
1038  *
1039  * Return: 0 on success or a negative errno on failure.
1040  */
1041 int buffer_migrate_folio_norefs(struct address_space *mapping,
1042 		struct folio *dst, struct folio *src, enum migrate_mode mode)
1043 {
1044 	return __buffer_migrate_folio(mapping, dst, src, mode, true);
1045 }
1046 EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
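/*
 * Example (illustrative): a mapping whose buffer heads may be looked up
 * and referenced directly, such as a block device mapping, should use the
 * norefs variant in its address_space_operations:
 *
 *	.migrate_folio	= buffer_migrate_folio_norefs,
 *
 * while a filesystem that only touches buffer heads under the folio lock
 * can use buffer_migrate_folio() instead.
 */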
1047 #endif /* CONFIG_BUFFER_HEAD */
1048 
1049 int filemap_migrate_folio(struct address_space *mapping,
1050 		struct folio *dst, struct folio *src, enum migrate_mode mode)
1051 {
1052 	return __migrate_folio(mapping, dst, src, folio_get_private(src), mode);
1053 }
1054 EXPORT_SYMBOL_GPL(filemap_migrate_folio);
1055 
1056 /*
1057  * Default handling if a filesystem does not provide a migration function.
1058  */
1059 static int fallback_migrate_folio(struct address_space *mapping,
1060 		struct folio *dst, struct folio *src, enum migrate_mode mode)
1061 {
1062 	WARN_ONCE(mapping->a_ops->writepages,
1063 			"%ps does not implement migrate_folio\n",
1064 			mapping->a_ops);
1065 	if (folio_test_dirty(src))
1066 		return -EBUSY;
1067 
1068 	/*
1069 	 * The filesystem may have private data at folio->private that we
1070 	 * can't migrate automatically.
1071 	 */
1072 	if (!filemap_release_folio(src, GFP_KERNEL))
1073 		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
1074 
1075 	return migrate_folio(mapping, dst, src, mode);
1076 }
1077 
1078 /*
1079  * Move a src folio to a newly allocated dst folio.
1080  *
1081  * The src and dst folios are locked and the src folio was unmapped from
1082  * the page tables.
1083  *
1084  * On success, the src folio was replaced by the dst folio.
1085  *
1086  * Return value:
1087  *   < 0 - error code
1088  *     0 - success
1089  */
1090 static int move_to_new_folio(struct folio *dst, struct folio *src,
1091 				enum migrate_mode mode)
1092 {
1093 	struct address_space *mapping = folio_mapping(src);
1094 	int rc = -EAGAIN;
1095 
1096 	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
1097 	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
1098 
1099 	if (!mapping)
1100 		rc = migrate_folio(mapping, dst, src, mode);
1101 	else if (mapping_inaccessible(mapping))
1102 		rc = -EOPNOTSUPP;
1103 	else if (mapping->a_ops->migrate_folio)
1104 		/*
1105 		 * Most folios have a mapping and most filesystems
1106 		 * provide a migrate_folio callback. Anonymous folios
1107 		 * are part of swap space which also has its own
1108 		 * migrate_folio callback. This is the most common path
1109 		 * for page migration.
1110 		 */
1111 		rc = mapping->a_ops->migrate_folio(mapping, dst, src,
1112 							mode);
1113 	else
1114 		rc = fallback_migrate_folio(mapping, dst, src, mode);
1115 
1116 	if (!rc) {
1117 		/*
1118 		 * For pagecache folios, src->mapping must be cleared before src
1119 		 * is freed. Anonymous folios must stay anonymous until freed.
1120 		 */
1121 		if (!folio_test_anon(src))
1122 			src->mapping = NULL;
1123 
1124 		if (likely(!folio_is_zone_device(dst)))
1125 			flush_dcache_folio(dst);
1126 	}
1127 	return rc;
1128 }
1129 
1130 /*
1131  * To record some information during migration, we use the otherwise
1132  * unused private field of the newly allocated destination folio.
1133  * This is safe because nobody is using it except us.
1134  */
1135 enum {
1136 	PAGE_WAS_MAPPED = BIT(0),
1137 	PAGE_WAS_MLOCKED = BIT(1),
1138 	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
1139 };
1140 
1141 static void __migrate_folio_record(struct folio *dst,
1142 				   int old_page_state,
1143 				   struct anon_vma *anon_vma)
1144 {
1145 	dst->private = (void *)anon_vma + old_page_state;
1146 }
1147 
1148 static void __migrate_folio_extract(struct folio *dst,
1149 				   int *old_page_state,
1150 				   struct anon_vma **anon_vmap)
1151 {
1152 	unsigned long private = (unsigned long)dst->private;
1153 
1154 	*anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
1155 	*old_page_state = private & PAGE_OLD_STATES;
1156 	dst->private = NULL;
1157 }
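/*
 * A sketch of the encoding used by the two helpers above: anon_vma
 * pointers are at least word-aligned, so their low bits are zero and can
 * carry the PAGE_OLD_STATES flag bits, e.g.:
 *
 *	dst->private = (void *)anon_vma + (PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED);
 *
 * Extraction then masks the flags back out with ~PAGE_OLD_STATES.
 */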
1158 
1159 /* Restore the source folio to the original state upon failure */
1160 static void migrate_folio_undo_src(struct folio *src,
1161 				   int page_was_mapped,
1162 				   struct anon_vma *anon_vma,
1163 				   bool locked,
1164 				   struct list_head *ret)
1165 {
1166 	if (page_was_mapped)
1167 		remove_migration_ptes(src, src, 0);
1168 	/* Drop an anon_vma reference if we took one */
1169 	if (anon_vma)
1170 		put_anon_vma(anon_vma);
1171 	if (locked)
1172 		folio_unlock(src);
1173 	if (ret)
1174 		list_move_tail(&src->lru, ret);
1175 }
1176 
1177 /* Restore the destination folio to the original state upon failure */
1178 static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1179 		free_folio_t put_new_folio, unsigned long private)
1180 {
1181 	if (locked)
1182 		folio_unlock(dst);
1183 	if (put_new_folio)
1184 		put_new_folio(dst, private);
1185 	else
1186 		folio_put(dst);
1187 }
1188 
1189 /* Cleanup src folio upon migration success */
1190 static void migrate_folio_done(struct folio *src,
1191 			       enum migrate_reason reason)
1192 {
1193 	if (likely(!page_has_movable_ops(&src->page)) && reason != MR_DEMOTION)
1194 		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1195 				    folio_is_file_lru(src), -folio_nr_pages(src));
1196 
1197 	if (reason != MR_MEMORY_FAILURE)
1198 		/* We release the page in page_handle_poison. */
1199 		folio_put(src);
1200 }
1201 
1202 /* Obtain the lock on the page and remove all ptes. */
1203 static int migrate_folio_unmap(new_folio_t get_new_folio,
1204 		free_folio_t put_new_folio, unsigned long private,
1205 		struct folio *src, struct folio **dstp, enum migrate_mode mode,
1206 		struct list_head *ret)
1207 {
1208 	struct folio *dst;
1209 	int rc = -EAGAIN;
1210 	int old_page_state = 0;
1211 	struct anon_vma *anon_vma = NULL;
1212 	bool locked = false;
1213 	bool dst_locked = false;
1214 
1215 	dst = get_new_folio(src, private);
1216 	if (!dst)
1217 		return -ENOMEM;
1218 	*dstp = dst;
1219 
1220 	dst->private = NULL;
1221 
1222 	if (!folio_trylock(src)) {
1223 		if (mode == MIGRATE_ASYNC)
1224 			goto out;
1225 
1226 		/*
1227 		 * It's not safe for direct compaction to call lock_page.
1228 		 * For example, during page readahead pages are added locked
1229 		 * to the LRU. Later, when the IO completes the pages are
1230 		 * marked uptodate and unlocked. However, the queueing
1231 		 * could be merging multiple pages for one bio (e.g.
1232 		 * mpage_readahead). If an allocation happens for the
1233 		 * second or third page, the process can end up locking
1234 		 * the same page twice and deadlocking. Rather than
1235 		 * trying to be clever about what pages can be locked,
1236 		 * avoid the use of lock_page for direct compaction
1237 		 * altogether.
1238 		 */
1239 		if (current->flags & PF_MEMALLOC)
1240 			goto out;
1241 
1242 		/*
1243 		 * In "light" mode, we can wait for transient locks (eg
1244 		 * inserting a page into the page table), but it's not
1245 		 * worth waiting for I/O.
1246 		 */
1247 		if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1248 			goto out;
1249 
1250 		folio_lock(src);
1251 	}
1252 	locked = true;
1253 	if (folio_test_mlocked(src))
1254 		old_page_state |= PAGE_WAS_MLOCKED;
1255 
1256 	if (folio_test_writeback(src)) {
1257 		/*
1258 		 * Only in the case of a full synchronous migration is it
1259 		 * necessary to wait for PageWriteback. In the async case,
1260 		 * the retry loop is too short and in the sync-light case,
1261 		 * the overhead of stalling is too much.
1262 		 */
1263 		switch (mode) {
1264 		case MIGRATE_SYNC:
1265 			break;
1266 		default:
1267 			rc = -EBUSY;
1268 			goto out;
1269 		}
1270 		folio_wait_writeback(src);
1271 	}
1272 
1273 	/*
1274 	 * Once try_to_migrate() has run, src->mapcount drops to 0 and we can
1275 	 * no longer notice if the anon_vma is freed while we migrate the page.
1276 	 * Taking a reference here delays freeing the anon_vma until the end
1277 	 * of migration. File cache pages are no problem because they are
1278 	 * protected by the page lock during migration, so only anonymous
1279 	 * pages need this care here.
1280 	 *
1281 	 * Only folio_get_anon_vma() understands the subtleties of
1282 	 * getting a hold on an anon_vma from outside one of its mms.
1283 	 * But if we cannot get anon_vma, then we won't need it anyway,
1284 	 * because that implies that the anon page is no longer mapped
1285 	 * (and cannot be remapped so long as we hold the page lock).
1286 	 */
1287 	if (folio_test_anon(src) && !folio_test_ksm(src))
1288 		anon_vma = folio_get_anon_vma(src);
1289 
1290 	/*
1291 	 * Block others from accessing the new page when we get around to
1292 	 * establishing additional references. We are usually the only one
1293 	 * holding a reference to dst at this point. We used to have a BUG
1294 	 * here if folio_trylock(dst) fails, but would like to allow for
1295 	 * cases where there might be a race with the previous use of dst.
1296 	 * This is much like races on refcount of oldpage: just don't BUG().
1297 	 */
1298 	if (unlikely(!folio_trylock(dst)))
1299 		goto out;
1300 	dst_locked = true;
1301 
1302 	if (unlikely(page_has_movable_ops(&src->page))) {
1303 		__migrate_folio_record(dst, old_page_state, anon_vma);
1304 		return 0;
1305 	}
1306 
1307 	/*
1308 	 * Corner case handling:
1309 	 * 1. When a new swap-cache page is read in, it is added to the LRU
1310 	 * and treated as swapcache but it has no rmap yet.
1311 	 * Calling try_to_unmap() against a src->mapping==NULL page will
1312 	 * trigger a BUG.  So handle it here.
1313 	 * 2. An orphaned page (see truncate_cleanup_page) might have
1314 	 * fs-private metadata. The page can be picked up due to memory
1315 	 * offlining.  Everywhere else except page reclaim, the page is
1316 	 * invisible to the VM, so the page cannot be migrated.  So try to
1317 	 * free the metadata, so the page can be freed.
1318 	 */
1319 	if (!src->mapping) {
1320 		if (folio_test_private(src)) {
1321 			try_to_free_buffers(src);
1322 			goto out;
1323 		}
1324 	} else if (folio_mapped(src)) {
1325 		/* Establish migration ptes */
1326 		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1327 			       !folio_test_ksm(src) && !anon_vma, src);
1328 		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1329 		old_page_state |= PAGE_WAS_MAPPED;
1330 	}
1331 
1332 	if (!folio_mapped(src)) {
1333 		__migrate_folio_record(dst, old_page_state, anon_vma);
1334 		return 0;
1335 	}
1336 
1337 out:
1338 	/*
1339 	 * A folio that has not been unmapped will be restored to
1340 	 * the right list unless we want to retry.
1341 	 */
1342 	if (rc == -EAGAIN)
1343 		ret = NULL;
1344 
1345 	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1346 			       anon_vma, locked, ret);
1347 	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1348 
1349 	return rc;
1350 }
1351 
1352 /* Migrate the folio to the newly allocated folio in dst. */
1353 static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1354 			      struct folio *src, struct folio *dst,
1355 			      enum migrate_mode mode, enum migrate_reason reason,
1356 			      struct list_head *ret)
1357 {
1358 	int rc;
1359 	int old_page_state = 0;
1360 	struct anon_vma *anon_vma = NULL;
1361 	bool src_deferred_split = false;
1362 	bool src_partially_mapped = false;
1363 	struct list_head *prev;
1364 
1365 	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1366 	prev = dst->lru.prev;
1367 	list_del(&dst->lru);
1368 
1369 	if (unlikely(page_has_movable_ops(&src->page))) {
1370 		rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
1371 		if (rc)
1372 			goto out;
1373 		goto out_unlock_both;
1374 	}
1375 
1376 	if (folio_order(src) > 1 &&
1377 	    !data_race(list_empty(&src->_deferred_list))) {
1378 		src_deferred_split = true;
1379 		src_partially_mapped = folio_test_partially_mapped(src);
1380 	}
1381 
1382 	rc = move_to_new_folio(dst, src, mode);
1383 	if (rc)
1384 		goto out;
1385 
1386 	/*
1387 	 * When successful, push dst to LRU immediately: so that if it
1388 	 * turns out to be an mlocked page, remove_migration_ptes() will
1389 	 * automatically build up the correct dst->mlock_count for it.
1390 	 *
1391 	 * We would like to do something similar for the old page, when
1392 	 * unsuccessful, and other cases when a page has been temporarily
1393 	 * isolated from the unevictable LRU: but this case is the easiest.
1394 	 */
1395 	folio_add_lru(dst);
1396 	if (old_page_state & PAGE_WAS_MLOCKED)
1397 		lru_add_drain();
1398 
1399 	if (old_page_state & PAGE_WAS_MAPPED)
1400 		remove_migration_ptes(src, dst, 0);
1401 
1402 	/*
1403 	 * Requeue the destination folio on the deferred split queue if
1404 	 * the source was on the queue.  The source is unqueued in
1405 	 * __folio_migrate_mapping(), so we recorded the state from
1406 	 * before move_to_new_folio().
1407 	 */
1408 	if (src_deferred_split)
1409 		deferred_split_folio(dst, src_partially_mapped);
1410 
1411 out_unlock_both:
1412 	folio_unlock(dst);
1413 	folio_set_owner_migrate_reason(dst, reason);
1414 	/*
1415 	 * If migration is successful, decrease the refcount of dst,
1416 	 * which will not free the page because the new page owner raised
1417 	 * the refcount.
1418 	 */
1419 	folio_put(dst);
1420 
1421 	/*
1422 	 * A folio that has been migrated has all references removed
1423 	 * and will be freed.
1424 	 */
1425 	list_del(&src->lru);
1426 	/* Drop an anon_vma reference if we took one */
1427 	if (anon_vma)
1428 		put_anon_vma(anon_vma);
1429 	folio_unlock(src);
1430 	migrate_folio_done(src, reason);
1431 
1432 	return rc;
1433 out:
1434 	/*
1435 	 * A folio that has not been migrated will be restored to
1436 	 * the right list unless we want to retry.
1437 	 */
1438 	if (rc == -EAGAIN) {
1439 		list_add(&dst->lru, prev);
1440 		__migrate_folio_record(dst, old_page_state, anon_vma);
1441 		return rc;
1442 	}
1443 
1444 	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1445 			       anon_vma, true, ret);
1446 	migrate_folio_undo_dst(dst, true, put_new_folio, private);
1447 
1448 	return rc;
1449 }
1450 
1451 /*
1452  * Counterpart of unmap_and_move_page() for hugepage migration.
1453  *
1454  * This function doesn't wait for the completion of hugepage I/O
1455  * because there is no race between I/O and migration for hugepages.
1456  * Note that currently hugepage I/O occurs only in direct I/O
1457  * where no lock is held and PG_writeback is irrelevant,
1458  * and the writeback status of all subpages is counted in the reference
1459  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1460  * under direct I/O, the reference count of the head page is 512 and a bit more.)
1461  * This means that when we try to migrate a hugepage whose subpages are
1462  * doing direct I/O, some references remain after try_to_unmap() and
1463  * hugepage migration fails without data corruption.
1464  *
1465  * There is also no race when direct I/O is issued on a page under migration,
1466  * because then the pte is replaced with a migration swap entry and the
1467  * direct I/O code will wait in the page fault for migration to complete.
1468  */
1469 static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1470 		free_folio_t put_new_folio, unsigned long private,
1471 		struct folio *src, int force, enum migrate_mode mode,
1472 		int reason, struct list_head *ret)
1473 {
1474 	struct folio *dst;
1475 	int rc = -EAGAIN;
1476 	int page_was_mapped = 0;
1477 	struct anon_vma *anon_vma = NULL;
1478 	struct address_space *mapping = NULL;
1479 	enum ttu_flags ttu = 0;
1480 
1481 	if (folio_ref_count(src) == 1) {
1482 		/* page was freed from under us. So we are done. */
1483 		folio_putback_hugetlb(src);
1484 		return 0;
1485 	}
1486 
1487 	dst = get_new_folio(src, private);
1488 	if (!dst)
1489 		return -ENOMEM;
1490 
1491 	if (!folio_trylock(src)) {
1492 		if (!force)
1493 			goto out;
1494 		switch (mode) {
1495 		case MIGRATE_SYNC:
1496 			break;
1497 		default:
1498 			goto out;
1499 		}
1500 		folio_lock(src);
1501 	}
1502 
1503 	/*
1504 	 * Check for pages which are in the process of being freed.  Without
1505 	 * folio_mapping() set, the hugetlbfs-specific move page routine will not
1506 	 * be called and we could leak usage counts for subpools.
1507 	 */
1508 	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1509 		rc = -EBUSY;
1510 		goto out_unlock;
1511 	}
1512 
1513 	if (folio_test_anon(src))
1514 		anon_vma = folio_get_anon_vma(src);
1515 
1516 	if (unlikely(!folio_trylock(dst)))
1517 		goto put_anon;
1518 
1519 	if (folio_mapped(src)) {
1520 		if (!folio_test_anon(src)) {
1521 			/*
1522 			 * In shared mappings, try_to_unmap could potentially
1523 			 * call huge_pmd_unshare.  Because of this, take
1524 			 * the semaphore in write mode here and set TTU_RMAP_LOCKED
1525 			 * to let lower levels know we have taken the lock.
1526 			 */
1527 			mapping = hugetlb_folio_mapping_lock_write(src);
1528 			if (unlikely(!mapping))
1529 				goto unlock_put_anon;
1530 
1531 			ttu = TTU_RMAP_LOCKED;
1532 		}
1533 
1534 		try_to_migrate(src, ttu);
1535 		page_was_mapped = 1;
1536 	}
1537 
1538 	if (!folio_mapped(src))
1539 		rc = move_to_new_folio(dst, src, mode);
1540 
1541 	if (page_was_mapped)
1542 		remove_migration_ptes(src, !rc ? dst : src, ttu);
1543 
1544 	if (ttu & TTU_RMAP_LOCKED)
1545 		i_mmap_unlock_write(mapping);
1546 
1547 unlock_put_anon:
1548 	folio_unlock(dst);
1549 
1550 put_anon:
1551 	if (anon_vma)
1552 		put_anon_vma(anon_vma);
1553 
1554 	if (!rc) {
1555 		move_hugetlb_state(src, dst, reason);
1556 		put_new_folio = NULL;
1557 	}
1558 
1559 out_unlock:
1560 	folio_unlock(src);
1561 out:
1562 	if (!rc)
1563 		folio_putback_hugetlb(src);
1564 	else if (rc != -EAGAIN)
1565 		list_move_tail(&src->lru, ret);
1566 
1567 	/*
1568 	 * If migration was not successful and there's a freeing callback,
1569 	 * return the folio to that special allocator. Otherwise, simply drop
1570 	 * our additional reference.
1571 	 */
1572 	if (put_new_folio)
1573 		put_new_folio(dst, private);
1574 	else
1575 		folio_put(dst);
1576 
1577 	return rc;
1578 }
1579 
1580 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios,
1581 				  enum migrate_mode mode)
1582 {
1583 	int rc;
1584 
1585 	if (mode == MIGRATE_ASYNC) {
1586 		if (!folio_trylock(folio))
1587 			return -EAGAIN;
1588 	} else {
1589 		folio_lock(folio);
1590 	}
1591 	rc = split_folio_to_list(folio, split_folios);
1592 	folio_unlock(folio);
1593 	if (!rc)
1594 		list_move_tail(&folio->lru, split_folios);
1595 
1596 	return rc;
1597 }
1598 
1599 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1600 #define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
1601 #else
1602 #define NR_MAX_BATCHED_MIGRATION	512
1603 #endif
1604 #define NR_MAX_MIGRATE_PAGES_RETRY	10
1605 #define NR_MAX_MIGRATE_ASYNC_RETRY	3
1606 #define NR_MAX_MIGRATE_SYNC_RETRY					\
1607 	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
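/*
 * Worked out: with the defaults above, a fully synchronous migration gets
 * NR_MAX_MIGRATE_PAGES_RETRY (10) passes in total, split into
 * NR_MAX_MIGRATE_ASYNC_RETRY (3) async passes followed by
 * NR_MAX_MIGRATE_SYNC_RETRY (10 - 3 = 7) sync passes.
 */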
1608 
1609 struct migrate_pages_stats {
1610 	int nr_succeeded;	/* Normal and large folios migrated successfully, in
1611 				   units of base pages */
1612 	int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
1613 				   units of base pages.  Untried folios aren't counted */
1614 	int nr_thp_succeeded;	/* THP migrated successfully */
1615 	int nr_thp_failed;	/* THP failed to be migrated */
1616 	int nr_thp_split;	/* THP split before migrating */
1617 	int nr_split;	/* Large folio (including THP) split before migrating */
1618 };
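/*
 * Worked example of the units: successfully migrating one PMD-sized THP of
 * 512 base pages adds 512 to nr_succeeded but only 1 to nr_thp_succeeded;
 * splitting a THP before migration bumps nr_split and nr_thp_split.
 */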
1619 
1620 /*
1621  * Returns the number of hugetlb folios that were not migrated, or an error code
1622  * after NR_MAX_MIGRATE_PAGES_RETRY attempts, or once no hugetlb folios are
1623  * movable any more because the list has become empty or no retryable hugetlb
1624  * folios remain. It is the caller's responsibility to call putback_movable_pages()
1625  * only if ret != 0.
1626  */
1627 static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1628 			    free_folio_t put_new_folio, unsigned long private,
1629 			    enum migrate_mode mode, int reason,
1630 			    struct migrate_pages_stats *stats,
1631 			    struct list_head *ret_folios)
1632 {
1633 	int retry = 1;
1634 	int nr_failed = 0;
1635 	int nr_retry_pages = 0;
1636 	int pass = 0;
1637 	struct folio *folio, *folio2;
1638 	int rc, nr_pages;
1639 
1640 	for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1641 		retry = 0;
1642 		nr_retry_pages = 0;
1643 
1644 		list_for_each_entry_safe(folio, folio2, from, lru) {
1645 			if (!folio_test_hugetlb(folio))
1646 				continue;
1647 
1648 			nr_pages = folio_nr_pages(folio);
1649 
1650 			cond_resched();
1651 
1652 			/*
1653 			 * Migratability of hugepages depends on the architecture and
1654 			 * the hugepage size.  This check is necessary because some callers
1655 			 * of hugepage migration like soft offline and memory
1656 			 * hotremove don't walk through page tables or check whether
1657 			 * the hugepage is pmd-based or not before kicking migration.
1658 			 */
1659 			if (!hugepage_migration_supported(folio_hstate(folio))) {
1660 				nr_failed++;
1661 				stats->nr_failed_pages += nr_pages;
1662 				list_move_tail(&folio->lru, ret_folios);
1663 				continue;
1664 			}
1665 
1666 			rc = unmap_and_move_huge_page(get_new_folio,
1667 						      put_new_folio, private,
1668 						      folio, pass > 2, mode,
1669 						      reason, ret_folios);
1670 			/*
1671 			 * The rules are:
1672 			 *	0: hugetlb folio will be put back
1673 			 *	-EAGAIN: stay on the from list
1674 			 *	-ENOMEM: stay on the from list
1675 			 *	Other errno: put on ret_folios list
1676 			 */
1677 			switch(rc) {
1678 			case -ENOMEM:
1679 				/*
1680 				 * When memory is low, don't bother to try to migrate
1681 				 * other folios, just exit.
1682 				 */
1683 				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1684 				return -ENOMEM;
1685 			case -EAGAIN:
1686 				retry++;
1687 				nr_retry_pages += nr_pages;
1688 				break;
1689 			case 0:
1690 				stats->nr_succeeded += nr_pages;
1691 				break;
1692 			default:
1693 				/*
1694 				 * Permanent failure (-EBUSY, etc.):
1695 				 * unlike -EAGAIN case, the failed folio is
1696 				 * removed from migration folio list and not
1697 				 * retried in the next outer loop.
1698 				 */
1699 				nr_failed++;
1700 				stats->nr_failed_pages += nr_pages;
1701 				break;
1702 			}
1703 		}
1704 	}
1705 	/*
1706 	 * nr_failed is the number of hugetlb folios that failed to be migrated.  After
1707 	 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1708 	 * folios as failed.
1709 	 */
1710 	nr_failed += retry;
1711 	stats->nr_failed_pages += nr_retry_pages;
1712 
1713 	return nr_failed;
1714 }
1715 
1716 static void migrate_folios_move(struct list_head *src_folios,
1717 		struct list_head *dst_folios,
1718 		free_folio_t put_new_folio, unsigned long private,
1719 		enum migrate_mode mode, int reason,
1720 		struct list_head *ret_folios,
1721 		struct migrate_pages_stats *stats,
1722 		int *retry, int *thp_retry, int *nr_failed,
1723 		int *nr_retry_pages)
1724 {
1725 	struct folio *folio, *folio2, *dst, *dst2;
1726 	bool is_thp;
1727 	int nr_pages;
1728 	int rc;
1729 
1730 	dst = list_first_entry(dst_folios, struct folio, lru);
1731 	dst2 = list_next_entry(dst, lru);
1732 	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
1733 		is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1734 		nr_pages = folio_nr_pages(folio);
1735 
1736 		cond_resched();
1737 
1738 		rc = migrate_folio_move(put_new_folio, private,
1739 				folio, dst, mode,
1740 				reason, ret_folios);
1741 		/*
1742 		 * The rules are:
1743 		 *	0: folio will be freed
1744 		 *	-EAGAIN: stay on the unmap_folios list
1745 		 *	Other errno: put on ret_folios list
1746 		 */
1747 		switch (rc) {
1748 		case -EAGAIN:
1749 			*retry += 1;
1750 			*thp_retry += is_thp;
1751 			*nr_retry_pages += nr_pages;
1752 			break;
1753 		case 0:
1754 			stats->nr_succeeded += nr_pages;
1755 			stats->nr_thp_succeeded += is_thp;
1756 			break;
1757 		default:
1758 			*nr_failed += 1;
1759 			stats->nr_thp_failed += is_thp;
1760 			stats->nr_failed_pages += nr_pages;
1761 			break;
1762 		}
1763 		dst = dst2;
1764 		dst2 = list_next_entry(dst, lru);
1765 	}
1766 }
1767 
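/*
 * Roll back folios that were unmapped but never moved: restore each source
 * folio's state and release the corresponding pre-allocated destination.
 */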
1768 static void migrate_folios_undo(struct list_head *src_folios,
1769 		struct list_head *dst_folios,
1770 		free_folio_t put_new_folio, unsigned long private,
1771 		struct list_head *ret_folios)
1772 {
1773 	struct folio *folio, *folio2, *dst, *dst2;
1774 
1775 	dst = list_first_entry(dst_folios, struct folio, lru);
1776 	dst2 = list_next_entry(dst, lru);
1777 	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
1778 		int old_page_state = 0;
1779 		struct anon_vma *anon_vma = NULL;
1780 
1781 		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1782 		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1783 				anon_vma, true, ret_folios);
1784 		list_del(&dst->lru);
1785 		migrate_folio_undo_dst(dst, true, put_new_folio, private);
1786 		dst = dst2;
1787 		dst2 = list_next_entry(dst, lru);
1788 	}
1789 }
1790 
1791 /*
1792  * migrate_pages_batch() first unmaps as many folios in the from list as
1793  * possible, then moves the unmapped folios.
1794  *
1795  * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
1796  * lock or bit while having locked more than one folio, which may cause a
1797  * deadlock (e.g., for the loop device).  So, if mode != MIGRATE_ASYNC, the
1798  * length of the from list must be <= 1.
1799  */
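/*
 * Illustrative sketch (mirroring migrate_pages_sync() below): a non-ASYNC
 * caller keeps the list singular by feeding folios one at a time:
 *
 *	LIST_HEAD(one);
 *
 *	list_move(from->next, &one);
 *	rc = migrate_pages_batch(&one, get_new_folio, put_new_folio, private,
 *				 MIGRATE_SYNC, reason, ret_folios,
 *				 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
 */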
1800 static int migrate_pages_batch(struct list_head *from,
1801 		new_folio_t get_new_folio, free_folio_t put_new_folio,
1802 		unsigned long private, enum migrate_mode mode, int reason,
1803 		struct list_head *ret_folios, struct list_head *split_folios,
1804 		struct migrate_pages_stats *stats, int nr_pass)
1805 {
1806 	int retry = 1;
1807 	int thp_retry = 1;
1808 	int nr_failed = 0;
1809 	int nr_retry_pages = 0;
1810 	int pass = 0;
1811 	bool is_thp = false;
1812 	bool is_large = false;
1813 	struct folio *folio, *folio2, *dst = NULL;
1814 	int rc, rc_saved = 0, nr_pages;
1815 	LIST_HEAD(unmap_folios);
1816 	LIST_HEAD(dst_folios);
1817 	bool nosplit = (reason == MR_NUMA_MISPLACED);
1818 
1819 	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1820 			!list_empty(from) && !list_is_singular(from));
1821 
1822 	for (pass = 0; pass < nr_pass && retry; pass++) {
1823 		retry = 0;
1824 		thp_retry = 0;
1825 		nr_retry_pages = 0;
1826 
1827 		list_for_each_entry_safe(folio, folio2, from, lru) {
1828 			is_large = folio_test_large(folio);
1829 			is_thp = folio_test_pmd_mappable(folio);
1830 			nr_pages = folio_nr_pages(folio);
1831 
1832 			cond_resched();
1833 
1834 			/*
1835 			 * The rare folio on the deferred split list should be
1836 			 * split now.  It should not count as a failure, but we
1837 			 * increment nr_failed because, without doing so,
1838 			 * migrate_pages() may report success with (split but
1839 			 * unmigrated) pages still on its fromlist, whereas it
1840 			 * always reports success when its fromlist is empty.
1841 			 * stats->nr_thp_failed should be increased too,
1842 			 * otherwise the stats become inconsistent when
1843 			 * migrate_pages_batch() is called via migrate_pages()
1844 			 * with MIGRATE_SYNC and MIGRATE_ASYNC.
1845 			 *
1846 			 * Only check the list membership without removing the
1847 			 * folio, since it can be on a deferred_split_scan()
1848 			 * local list and removing it there could corrupt that
1849 			 * list.  The folio split process below can handle it
1850 			 * with the help of folio_ref_freeze().
1851 			 *
1852 			 * nr_pages > 2 is needed to avoid checking order-1
1853 			 * page cache folios.  They exist, in contrast to
1854 			 * non-existent order-1 anonymous folios, and do not
1855 			 * use _deferred_list.
1856 			 */
1857 			if (nr_pages > 2 &&
1858 			   !list_empty(&folio->_deferred_list) &&
1859 			   folio_test_partially_mapped(folio)) {
1860 				if (!try_split_folio(folio, split_folios, mode)) {
1861 					nr_failed++;
1862 					stats->nr_thp_failed += is_thp;
1863 					stats->nr_thp_split += is_thp;
1864 					stats->nr_split++;
1865 					continue;
1866 				}
1867 			}
1868 
1869 			/*
1870 			 * Large folio migration might be unsupported, or
1871 			 * the allocation might fail, so we should retry
1872 			 * the same folio after splitting the large folio
1873 			 * into normal folios.
1874 			 *
1875 			 * Split folios are put on split_folios, and we
1876 			 * will migrate them after the rest of the list
1877 			 * is processed.
1878 			 */
1879 			if (!thp_migration_supported() && is_thp) {
1880 				nr_failed++;
1881 				stats->nr_thp_failed++;
1882 				if (!try_split_folio(folio, split_folios, mode)) {
1883 					stats->nr_thp_split++;
1884 					stats->nr_split++;
1885 					continue;
1886 				}
1887 				stats->nr_failed_pages += nr_pages;
1888 				list_move_tail(&folio->lru, ret_folios);
1889 				continue;
1890 			}
1891 
1892 			/*
1893 			 * If we are holding the last folio reference, the folio
1894 			 * was freed from under us, so just drop our reference.
1895 			 */
1896 			if (likely(!page_has_movable_ops(&folio->page)) &&
1897 			    folio_ref_count(folio) == 1) {
1898 				folio_clear_active(folio);
1899 				folio_clear_unevictable(folio);
1900 				list_del(&folio->lru);
1901 				migrate_folio_done(folio, reason);
1902 				stats->nr_succeeded += nr_pages;
1903 				stats->nr_thp_succeeded += is_thp;
1904 				continue;
1905 			}
1906 
1907 			rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1908 					private, folio, &dst, mode, ret_folios);
1909 			/*
1910 			 * The rules are:
1911 			 *	0: folio will be put on unmap_folios list,
1912 			 *	   dst folio put on dst_folios list
1913 			 *	-EAGAIN: stay on the from list
1914 			 *	-ENOMEM: stay on the from list
1915 			 *	Other errno: put on ret_folios list
1916 			 */
1917 			switch (rc) {
1918 			case -ENOMEM:
1919 				/*
1920 				 * When memory is low, don't bother trying to migrate
1921 				 * the remaining folios; move the unmapped ones, then exit.
1922 				 */
1923 				nr_failed++;
1924 				stats->nr_thp_failed += is_thp;
1925 				/* Large folio NUMA faulting doesn't split to retry. */
1926 				if (is_large && !nosplit) {
1927 					int ret = try_split_folio(folio, split_folios, mode);
1928 
1929 					if (!ret) {
1930 						stats->nr_thp_split += is_thp;
1931 						stats->nr_split++;
1932 						break;
1933 					} else if (reason == MR_LONGTERM_PIN &&
1934 						   ret == -EAGAIN) {
1935 						/*
1936 						 * Try again to split the large folio, to
1937 						 * mitigate the failure of longterm pinning.
1938 						 */
1939 						retry++;
1940 						thp_retry += is_thp;
1941 						nr_retry_pages += nr_pages;
1942 						/* Undo duplicated failure counting. */
1943 						nr_failed--;
1944 						stats->nr_thp_failed -= is_thp;
1945 						break;
1946 					}
1947 				}
1948 
1949 				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1950 				/* nr_failed isn't updated: rc_saved (-ENOMEM) is returned instead */
1951 				stats->nr_thp_failed += thp_retry;
1952 				rc_saved = rc;
1953 				if (list_empty(&unmap_folios))
1954 					goto out;
1955 				else
1956 					goto move;
1957 			case -EAGAIN:
1958 				retry++;
1959 				thp_retry += is_thp;
1960 				nr_retry_pages += nr_pages;
1961 				break;
1962 			case 0:
1963 				list_move_tail(&folio->lru, &unmap_folios);
1964 				list_add_tail(&dst->lru, &dst_folios);
1965 				break;
1966 			default:
1967 				/*
1968 				 * Permanent failure (-EBUSY, etc.): unlike
1969 				 * the -EAGAIN case, the failed folio is
1970 				 * removed from the migration folio list and
1971 				 * is not retried in the next outer loop.
1972 				 */
1973 				nr_failed++;
1974 				stats->nr_thp_failed += is_thp;
1975 				stats->nr_failed_pages += nr_pages;
1976 				break;
1977 			}
1978 		}
1979 	}
1980 	nr_failed += retry;
1981 	stats->nr_thp_failed += thp_retry;
1982 	stats->nr_failed_pages += nr_retry_pages;
1983 move:
1984 	/* Flush TLBs for all unmapped folios */
1985 	try_to_unmap_flush();
1986 
1987 	retry = 1;
1988 	for (pass = 0; pass < nr_pass && retry; pass++) {
1989 		retry = 0;
1990 		thp_retry = 0;
1991 		nr_retry_pages = 0;
1992 
1993 		/* Move the unmapped folios */
1994 		migrate_folios_move(&unmap_folios, &dst_folios,
1995 				put_new_folio, private, mode, reason,
1996 				ret_folios, stats, &retry, &thp_retry,
1997 				&nr_failed, &nr_retry_pages);
1998 	}
1999 	nr_failed += retry;
2000 	stats->nr_thp_failed += thp_retry;
2001 	stats->nr_failed_pages += nr_retry_pages;
2002 
2003 	rc = rc_saved ? : nr_failed;
2004 out:
2005 	/* Cleanup remaining folios */
2006 	migrate_folios_undo(&unmap_folios, &dst_folios,
2007 			put_new_folio, private, ret_folios);
2008 
2009 	return rc;
2010 }
2011 
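/*
 * Migrate folios by first trying a batched MIGRATE_ASYNC pass, then falling
 * back to migrating the failures one by one in the caller's blocking mode,
 * so that a single contended folio cannot stall the whole batch.
 */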
2012 static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
2013 		free_folio_t put_new_folio, unsigned long private,
2014 		enum migrate_mode mode, int reason,
2015 		struct list_head *ret_folios, struct list_head *split_folios,
2016 		struct migrate_pages_stats *stats)
2017 {
2018 	int rc, nr_failed = 0;
2019 	LIST_HEAD(folios);
2020 	struct migrate_pages_stats astats;
2021 
2022 	memset(&astats, 0, sizeof(astats));
2023 	/* First, try to migrate in a batch with MIGRATE_ASYNC mode */
2024 	rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
2025 				 reason, &folios, split_folios, &astats,
2026 				 NR_MAX_MIGRATE_ASYNC_RETRY);
2027 	stats->nr_succeeded += astats.nr_succeeded;
2028 	stats->nr_thp_succeeded += astats.nr_thp_succeeded;
2029 	stats->nr_thp_split += astats.nr_thp_split;
2030 	stats->nr_split += astats.nr_split;
2031 	if (rc < 0) {
2032 		stats->nr_failed_pages += astats.nr_failed_pages;
2033 		stats->nr_thp_failed += astats.nr_thp_failed;
2034 		list_splice_tail(&folios, ret_folios);
2035 		return rc;
2036 	}
2037 	stats->nr_thp_failed += astats.nr_thp_split;
2038 	/*
2039 	 * Do not count rc, as pages will be retried below.
2040 	 * Count nr_split only, since it includes nr_thp_split.
2041 	 */
2042 	nr_failed += astats.nr_split;
2043 	/*
2044 	 * Fall back to migrating the failed folios one by one synchronously.
2045 	 * All failed folios except split THPs will be retried, so their
2046 	 * failure isn't counted.
2047 	 */
2048 	list_splice_tail_init(&folios, from);
2049 	while (!list_empty(from)) {
2050 		list_move(from->next, &folios);
2051 		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2052 					 private, mode, reason, ret_folios,
2053 					 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
2054 		list_splice_tail_init(&folios, ret_folios);
2055 		if (rc < 0)
2056 			return rc;
2057 		nr_failed += rc;
2058 	}
2059 
2060 	return nr_failed;
2061 }
2062 
2063 /*
2064  * migrate_pages - migrate the folios specified in a list, to the free folios
2065  *		   supplied as the target for the page migration
2066  *
2067  * @from:		The list of folios to be migrated.
2068  * @get_new_folio:	The function used to allocate free folios to be used
2069  *			as the target of the folio migration.
2070  * @put_new_folio:	The function used to free target folios if migration
2071  *			fails, or NULL if no special handling is necessary.
2072  * @private:		Private data to be passed on to get_new_folio()
2073  * @mode:		The migration mode that specifies the constraints for
2074  *			folio migration, if any.
2075  * @reason:		The reason for folio migration.
2076  * @ret_succeeded:	Set to the number of folios migrated successfully if
2077  *			the caller passes a non-NULL pointer.
2078  *
2079  * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or once no
2080  * movable folios remain, either because the list has become empty or because
2081  * no retryable folios are left.  It is the caller's responsibility to call
2082  * putback_movable_pages() only if ret != 0.
2083  *
2084  * Returns the number of {normal folio, large folio, hugetlb} that were not
2085  * migrated, or an error code.  Each split large folio is counted as one
2086  * non-migrated large folio, no matter how many of its split folios were
2087  * migrated successfully.
2088  */
2089 int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
2090 		free_folio_t put_new_folio, unsigned long private,
2091 		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
2092 {
2093 	int rc, rc_gather;
2094 	int nr_pages;
2095 	struct folio *folio, *folio2;
2096 	LIST_HEAD(folios);
2097 	LIST_HEAD(ret_folios);
2098 	LIST_HEAD(split_folios);
2099 	struct migrate_pages_stats stats;
2100 
2101 	trace_mm_migrate_pages_start(mode, reason);
2102 
2103 	memset(&stats, 0, sizeof(stats));
2104 
2105 	rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
2106 				     mode, reason, &stats, &ret_folios);
2107 	if (rc_gather < 0)
2108 		goto out;
2109 
2110 again:
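	/*
	 * Batch at most NR_MAX_BATCHED_MIGRATION pages per round; the
	 * list_cut_before() below leaves the remainder on @from, which is
	 * picked up on the next "again" pass.
	 */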
2111 	nr_pages = 0;
2112 	list_for_each_entry_safe(folio, folio2, from, lru) {
2113 		/* Retried hugetlb folios will be kept on the list */
2114 		if (folio_test_hugetlb(folio)) {
2115 			list_move_tail(&folio->lru, &ret_folios);
2116 			continue;
2117 		}
2118 
2119 		nr_pages += folio_nr_pages(folio);
2120 		if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2121 			break;
2122 	}
2123 	if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2124 		list_cut_before(&folios, from, &folio2->lru);
2125 	else
2126 		list_splice_init(from, &folios);
2127 	if (mode == MIGRATE_ASYNC)
2128 		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2129 				private, mode, reason, &ret_folios,
2130 				&split_folios, &stats,
2131 				NR_MAX_MIGRATE_PAGES_RETRY);
2132 	else
2133 		rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
2134 				private, mode, reason, &ret_folios,
2135 				&split_folios, &stats);
2136 	list_splice_tail_init(&folios, &ret_folios);
2137 	if (rc < 0) {
2138 		rc_gather = rc;
2139 		list_splice_tail(&split_folios, &ret_folios);
2140 		goto out;
2141 	}
2142 	if (!list_empty(&split_folios)) {
2143 		/*
2144 		 * Failure isn't counted since all split folios of a large folio
2145 		 * are already counted as one failure.  We only migrate these
2146 		 * with minimal effort: force MIGRATE_ASYNC mode and retry once.
2147 		 */
2148 		migrate_pages_batch(&split_folios, get_new_folio,
2149 				put_new_folio, private, MIGRATE_ASYNC, reason,
2150 				&ret_folios, NULL, &stats, 1);
2151 		list_splice_tail_init(&split_folios, &ret_folios);
2152 	}
2153 	rc_gather += rc;
2154 	if (!list_empty(from))
2155 		goto again;
2156 out:
2157 	/*
2158 	 * Put the permanently failed folios back on the migration list; the
2159 	 * caller will put them back on the right list.
2160 	 */
2161 	list_splice(&ret_folios, from);
2162 
2163 	/*
2164 	 * Return 0 if all split folios of the fail-to-migrate large folios
2165 	 * were migrated successfully.
2166 	 */
2167 	if (list_empty(from))
2168 		rc_gather = 0;
2169 
2170 	count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
2171 	count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
2172 	count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
2173 	count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
2174 	count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
2175 	trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
2176 			       stats.nr_thp_succeeded, stats.nr_thp_failed,
2177 			       stats.nr_thp_split, stats.nr_split, mode,
2178 			       reason);
2179 
2180 	if (ret_succeeded)
2181 		*ret_succeeded = stats.nr_succeeded;
2182 
2183 	return rc_gather;
2184 }
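/*
 * Hedged usage sketch (mirrors do_move_pages_to_node() below): on a nonzero
 * return, the caller must put back whatever is left on the list:
 *
 *	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */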
2185 
2186 struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2187 {
2188 	struct migration_target_control *mtc;
2189 	gfp_t gfp_mask;
2190 	unsigned int order = 0;
2191 	int nid;
2192 	enum zone_type zidx;
2193 
2194 	mtc = (struct migration_target_control *)private;
2195 	gfp_mask = mtc->gfp_mask;
2196 	nid = mtc->nid;
2197 	if (nid == NUMA_NO_NODE)
2198 		nid = folio_nid(src);
2199 
2200 	if (folio_test_hugetlb(src)) {
2201 		struct hstate *h = folio_hstate(src);
2202 
2203 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2204 		return alloc_hugetlb_folio_nodemask(h, nid,
2205 						mtc->nmask, gfp_mask,
2206 						htlb_allow_alloc_fallback(mtc->reason));
2207 	}
2208 
2209 	if (folio_test_large(src)) {
2210 		/*
2211 		 * Clear __GFP_RECLAIM to make the migration callback
2212 		 * consistent with regular THP allocations.
2213 		 */
2214 		gfp_mask &= ~__GFP_RECLAIM;
2215 		gfp_mask |= GFP_TRANSHUGE;
2216 		order = folio_order(src);
2217 	}
2218 	zidx = folio_zonenum(src);
2219 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2220 		gfp_mask |= __GFP_HIGHMEM;
2221 
2222 	return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2223 }
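/*
 * A minimal sketch of the @private contract: callers pass a pointer to a
 * struct migration_target_control cast to unsigned long, as
 * do_move_pages_to_node() below does:
 *
 *	struct migration_target_control mtc = {
 *		.nid = node,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *		.reason = MR_SYSCALL,
 *	};
 */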
2224 
2225 #ifdef CONFIG_NUMA_MIGRATION
2226 static int store_status(int __user *status, int start, int value, int nr)
2227 {
2228 	while (nr-- > 0) {
2229 		if (put_user(value, status + start))
2230 			return -EFAULT;
2231 		start++;
2232 	}
2233 
2234 	return 0;
2235 }
2236 
2237 static int do_move_pages_to_node(struct list_head *pagelist, int node)
2238 {
2239 	int err;
2240 	struct migration_target_control mtc = {
2241 		.nid = node,
2242 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2243 		.reason = MR_SYSCALL,
2244 	};
2245 
2246 	err = migrate_pages(pagelist, alloc_migration_target, NULL,
2247 		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2248 	if (err)
2249 		putback_movable_pages(pagelist);
2250 	return err;
2251 }
2252 
2253 static int __add_folio_for_migration(struct folio *folio, int node,
2254 		struct list_head *pagelist, bool migrate_all)
2255 {
2256 	if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2257 		return -EFAULT;
2258 
2259 	if (folio_is_zone_device(folio))
2260 		return -ENOENT;
2261 
2262 	if (folio_nid(folio) == node)
2263 		return 0;
2264 
2265 	if (folio_maybe_mapped_shared(folio) && !migrate_all)
2266 		return -EACCES;
2267 
2268 	if (folio_test_hugetlb(folio)) {
2269 		if (folio_isolate_hugetlb(folio, pagelist))
2270 			return 1;
2271 	} else if (folio_isolate_lru(folio)) {
2272 		list_add_tail(&folio->lru, pagelist);
2273 		node_stat_mod_folio(folio,
2274 			NR_ISOLATED_ANON + folio_is_file_lru(folio),
2275 			folio_nr_pages(folio));
2276 		return 1;
2277 	}
2278 	return -EBUSY;
2279 }
2280 
2281 /*
2282  * Resolves the given address to a struct folio, isolates it from the LRU and
2283  * puts it on the given pagelist.
2284  * Returns:
2285  *     errno - if the folio cannot be found/isolated
2286  *     0 - when it doesn't have to be migrated because it is already on the
2287  *         target node
2288  *     1 - when it has been queued
2289  */
2290 static int add_folio_for_migration(struct mm_struct *mm, const void __user *p,
2291 		int node, struct list_head *pagelist, bool migrate_all)
2292 {
2293 	struct vm_area_struct *vma;
2294 	struct folio_walk fw;
2295 	struct folio *folio;
2296 	unsigned long addr;
2297 	int err = -EFAULT;
2298 
2299 	mmap_read_lock(mm);
2300 	addr = (unsigned long)untagged_addr_remote(mm, p);
2301 
2302 	vma = vma_lookup(mm, addr);
2303 	if (vma && vma_migratable(vma)) {
2304 		folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2305 		if (folio) {
2306 			err = __add_folio_for_migration(folio, node, pagelist,
2307 							migrate_all);
2308 			folio_walk_end(&fw, vma);
2309 		} else {
2310 			err = -ENOENT;
2311 		}
2312 	}
2313 	mmap_read_unlock(mm);
2314 	return err;
2315 }
2316 
2317 static int move_pages_and_store_status(int node,
2318 		struct list_head *pagelist, int __user *status,
2319 		int start, int i, unsigned long nr_pages)
2320 {
2321 	int err;
2322 
2323 	if (list_empty(pagelist))
2324 		return 0;
2325 
2326 	err = do_move_pages_to_node(pagelist, node);
2327 	if (err) {
2328 		/*
2329 		 * A positive err means the number of pages that
2330 		 * failed to migrate.  Since we are going to
2331 		 * abort and return the number of non-migrated
2332 		 * pages, we need to include the rest of the
2333 		 * nr_pages that have not been attempted as
2334 		 * well.
2335 		 */
2336 		if (err > 0)
2337 			err += nr_pages - i;
2338 		return err;
2339 	}
2340 	return store_status(status, start, node, i - start);
2341 }
2342 
2343 /*
2344  * Migrate an array of page addresses onto an array of nodes and fill
2345  * the corresponding status array.
2346  */
2347 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2348 			 unsigned long nr_pages,
2349 			 const void __user * __user *pages,
2350 			 const int __user *nodes,
2351 			 int __user *status, int flags)
2352 {
2353 	compat_uptr_t __user *compat_pages = (void __user *)pages;
2354 	int current_node = NUMA_NO_NODE;
2355 	LIST_HEAD(pagelist);
2356 	int start, i;
2357 	int err = 0, err1;
2358 
2359 	lru_cache_disable();
2360 
2361 	for (i = start = 0; i < nr_pages; i++) {
2362 		const void __user *p;
2363 		int node;
2364 
2365 		err = -EFAULT;
2366 		if (in_compat_syscall()) {
2367 			compat_uptr_t cp;
2368 
2369 			if (get_user(cp, compat_pages + i))
2370 				goto out_flush;
2371 
2372 			p = compat_ptr(cp);
2373 		} else {
2374 			if (get_user(p, pages + i))
2375 				goto out_flush;
2376 		}
2377 		if (get_user(node, nodes + i))
2378 			goto out_flush;
2379 
2380 		err = -ENODEV;
2381 		if (node < 0 || node >= MAX_NUMNODES)
2382 			goto out_flush;
2383 		if (!node_state(node, N_MEMORY))
2384 			goto out_flush;
2385 
2386 		err = -EACCES;
2387 		if (!node_isset(node, task_nodes))
2388 			goto out_flush;
2389 
2390 		if (current_node == NUMA_NO_NODE) {
2391 			current_node = node;
2392 			start = i;
2393 		} else if (node != current_node) {
2394 			err = move_pages_and_store_status(current_node,
2395 					&pagelist, status, start, i, nr_pages);
2396 			if (err)
2397 				goto out;
2398 			start = i;
2399 			current_node = node;
2400 		}
2401 
2402 		/*
2403 		 * Errors in the page lookup or isolation are not fatal; we
2404 		 * simply report them via the status array.
2405 		 */
2406 		err = add_folio_for_migration(mm, p, current_node, &pagelist,
2407 					      flags & MPOL_MF_MOVE_ALL);
2408 
2409 		if (err > 0) {
2410 			/* The page is successfully queued for migration */
2411 			continue;
2412 		}
2413 
2414 		/*
2415 		 * If the page is already on the target node (!err), store the
2416 		 * node, otherwise, store the err.
2417 		 */
2418 		err = store_status(status, i, err ? : current_node, 1);
2419 		if (err)
2420 			goto out_flush;
2421 
2422 		err = move_pages_and_store_status(current_node, &pagelist,
2423 				status, start, i, nr_pages);
2424 		if (err) {
2425 			/* We have accounted for page i */
2426 			if (err > 0)
2427 				err--;
2428 			goto out;
2429 		}
2430 		current_node = NUMA_NO_NODE;
2431 	}
2432 out_flush:
2433 	/* Make sure we do not overwrite the existing error */
2434 	err1 = move_pages_and_store_status(current_node, &pagelist,
2435 				status, start, i, nr_pages);
2436 	if (err >= 0)
2437 		err = err1;
2438 out:
2439 	lru_cache_enable();
2440 	return err;
2441 }
2442 
2443 /*
2444  * Determine the nodes of an array of pages and store them in a status array.
2445  */
2446 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2447 				const void __user **pages, int *status)
2448 {
2449 	unsigned long i;
2450 
2451 	mmap_read_lock(mm);
2452 
2453 	for (i = 0; i < nr_pages; i++) {
2454 		unsigned long addr = (unsigned long)(*pages);
2455 		struct vm_area_struct *vma;
2456 		struct folio_walk fw;
2457 		struct folio *folio;
2458 		int err = -EFAULT;
2459 
2460 		vma = vma_lookup(mm, addr);
2461 		if (!vma)
2462 			goto set_status;
2463 
2464 		folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2465 		if (folio) {
2466 			if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2467 				err = -EFAULT;
2468 			else if (folio_is_zone_device(folio))
2469 				err = -ENOENT;
2470 			else
2471 				err = folio_nid(folio);
2472 			folio_walk_end(&fw, vma);
2473 		} else {
2474 			err = -ENOENT;
2475 		}
2476 set_status:
2477 		*status = err;
2478 
2479 		pages++;
2480 		status++;
2481 	}
2482 
2483 	mmap_read_unlock(mm);
2484 }
2485 
2486 static int get_compat_pages_array(const void __user *chunk_pages[],
2487 				  const void __user * __user *pages,
2488 				  unsigned long chunk_offset,
2489 				  unsigned long chunk_nr)
2490 {
2491 	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2492 	compat_uptr_t p;
2493 	int i;
2494 
2495 	for (i = 0; i < chunk_nr; i++) {
2496 		if (get_user(p, pages32 + chunk_offset + i))
2497 			return -EFAULT;
2498 		chunk_pages[i] = compat_ptr(p);
2499 	}
2500 
2501 	return 0;
2502 }
2503 
2504 /*
2505  * Determine the nodes of a user array of pages and store them in
2506  * a user status array.
2507  */
2508 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2509 			 const void __user * __user *pages,
2510 			 int __user *status)
2511 {
2512 #define DO_PAGES_STAT_CHUNK_NR 16UL
2513 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2514 	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2515 	unsigned long chunk_offset = 0;
2516 
2517 	while (nr_pages) {
2518 		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2519 
2520 		if (in_compat_syscall()) {
2521 			if (get_compat_pages_array(chunk_pages, pages,
2522 						   chunk_offset, chunk_nr))
2523 				break;
2524 		} else {
2525 			if (copy_from_user(chunk_pages, pages + chunk_offset,
2526 				      chunk_nr * sizeof(*chunk_pages)))
2527 				break;
2528 		}
2529 
2530 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2531 
2532 		if (copy_to_user(status + chunk_offset, chunk_status,
2533 				 chunk_nr * sizeof(*status)))
2534 			break;
2535 
2536 		chunk_offset += chunk_nr;
2537 		nr_pages -= chunk_nr;
2538 	}
2539 	return nr_pages ? -EFAULT : 0;
2540 }
2541 
2542 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2543 {
2544 	struct task_struct *task;
2545 	struct mm_struct *mm;
2546 
2547 	/*
2548 	 * There is no need to check if the current process has the right to
2549 	 * modify the specified process when they are the same.
2550 	 */
2551 	if (!pid) {
2552 		mmget(current->mm);
2553 		*mem_nodes = cpuset_mems_allowed(current);
2554 		return current->mm;
2555 	}
2556 
2557 	task = find_get_task_by_vpid(pid);
2558 	if (!task)
2559 		return ERR_PTR(-ESRCH);
2561 
2562 	/*
2563 	 * Check if this process has the right to modify the specified
2564 	 * process. Use the regular "ptrace_may_access()" checks.
2565 	 */
2566 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2567 		mm = ERR_PTR(-EPERM);
2568 		goto out;
2569 	}
2570 
2571 	mm = ERR_PTR(security_task_movememory(task));
2572 	if (IS_ERR(mm))
2573 		goto out;
2574 	*mem_nodes = cpuset_mems_allowed(task);
2575 	mm = get_task_mm(task);
2576 out:
2577 	put_task_struct(task);
2578 	if (!mm)
2579 		mm = ERR_PTR(-EINVAL);
2580 	return mm;
2581 }
2582 
2583 /*
2584  * Move a list of pages in the address space of the currently executing
2585  * process.
2586  */
2587 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2588 			     const void __user * __user *pages,
2589 			     const int __user *nodes,
2590 			     int __user *status, int flags)
2591 {
2592 	struct mm_struct *mm;
2593 	int err;
2594 	nodemask_t task_nodes;
2595 
2596 	/* Check flags */
2597 	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2598 		return -EINVAL;
2599 
2600 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2601 		return -EPERM;
2602 
2603 	mm = find_mm_struct(pid, &task_nodes);
2604 	if (IS_ERR(mm))
2605 		return PTR_ERR(mm);
2606 
2607 	if (nodes)
2608 		err = do_pages_move(mm, task_nodes, nr_pages, pages,
2609 				    nodes, status, flags);
2610 	else
2611 		err = do_pages_stat(mm, nr_pages, pages, status);
2612 
2613 	mmput(mm);
2614 	return err;
2615 }
2616 
2617 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2618 		const void __user * __user *, pages,
2619 		const int __user *, nodes,
2620 		int __user *, status, int, flags)
2621 {
2622 	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2623 }
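/*
 * Hedged userspace sketch (via libnuma's numaif.h wrapper): passing
 * nodes == NULL turns this into a pure status query, handled by
 * do_pages_stat() above:
 *
 *	void *pages[1] = { addr };
 *	int status[1];
 *
 *	if (move_pages(0, 1, pages, NULL, status, MPOL_MF_MOVE) == 0)
 *		printf("page is on node %d\n", status[0]);
 */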
2624 #endif /* CONFIG_NUMA_MIGRATION */
2625 
2626 #ifdef CONFIG_NUMA_BALANCING
2627 /*
2628  * Returns true if this is a safe migration target node for misplaced NUMA
2629  * pages.  Currently it only checks the watermarks, which is crude.
2630  */
2631 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2632 				   unsigned long nr_migrate_pages)
2633 {
2634 	int z;
2635 
2636 	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2637 		struct zone *zone = pgdat->node_zones + z;
2638 
2639 		if (!managed_zone(zone))
2640 			continue;
2641 
2642 		/* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2643 		if (!zone_watermark_ok(zone, 0,
2644 				       high_wmark_pages(zone) +
2645 				       nr_migrate_pages,
2646 				       ZONE_MOVABLE, ALLOC_CMA))
2647 			continue;
2648 		return true;
2649 	}
2650 	return false;
2651 }
2652 
2653 static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2654 					   unsigned long data)
2655 {
2656 	int nid = (int) data;
2657 	int order = folio_order(src);
2658 	gfp_t gfp = __GFP_THISNODE;
2659 
2660 	if (order > 0)
2661 		gfp |= GFP_TRANSHUGE_LIGHT;
2662 	else {
2663 		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2664 			__GFP_NOWARN;
2665 		gfp &= ~__GFP_RECLAIM;
2666 	}
2667 	return __folio_alloc_node(gfp, order, nid);
2668 }
2669 
2670 /*
2671  * Prepare for calling migrate_misplaced_folio() by isolating the folio if
2672  * permitted. Must be called with the PTL still held.
2673  */
2674 int migrate_misplaced_folio_prepare(struct folio *folio,
2675 		struct vm_area_struct *vma, int node)
2676 {
2677 	int nr_pages = folio_nr_pages(folio);
2678 	pg_data_t *pgdat = NODE_DATA(node);
2679 
2680 	if (folio_is_file_lru(folio)) {
2681 		/*
2682 		 * Do not migrate file folios that are mapped in multiple
2683 		 * processes with execute permissions as they are probably
2684 		 * shared libraries.
2685 		 *
2686 		 * See folio_maybe_mapped_shared() on possible imprecision
2687 		 * when we cannot easily detect if a folio is shared.
2688 		 */
2689 		if ((vma->vm_flags & VM_EXEC) && folio_maybe_mapped_shared(folio))
2690 			return -EACCES;
2691 
2692 		/*
2693 		 * Do not migrate dirty folios, as not all filesystems can move
2694 		 * dirty folios in MIGRATE_ASYNC mode, which makes migrating
2695 		 * them a waste of cycles.
2696 		 */
2697 		if (folio_test_dirty(folio))
2698 			return -EAGAIN;
2699 	}
2700 
2701 	/* Avoid migrating to a node that is nearly full */
2702 	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2703 		int z;
2704 
2705 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2706 			return -EAGAIN;
2707 		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2708 			if (managed_zone(pgdat->node_zones + z))
2709 				break;
2710 		}
2711 
2712 		/*
2713 		 * If there are no managed zones, do not proceed
2714 		 * any further.
2715 		 */
2716 		if (z < 0)
2717 			return -EAGAIN;
2718 
2719 		wakeup_kswapd(pgdat->node_zones + z, 0,
2720 			      folio_order(folio), ZONE_MOVABLE);
2721 		return -EAGAIN;
2722 	}
2723 
2724 	if (!folio_isolate_lru(folio))
2725 		return -EAGAIN;
2726 
2727 	node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2728 			    nr_pages);
2729 	return 0;
2730 }
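/*
 * Hedged usage sketch (caller and variable names assumed): a NUMA hinting
 * fault handler isolates under the page table lock, then migrates after
 * dropping it:
 *
 *	if (!migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
 *		pte_unmap_unlock(ptep, ptl);
 *		migrate_misplaced_folio(folio, target_nid);
 *	}
 */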
2731 
2732 /*
2733  * Attempt to migrate a misplaced folio to the specified destination node.
2734  * The caller is expected to have isolated the folio by calling
2735  * migrate_misplaced_folio_prepare(), which will result in an elevated
2736  * reference count on the folio.  This function will un-isolate the folio
2737  * and drop that reference before returning.
2738  */
2739 int migrate_misplaced_folio(struct folio *folio, int node)
2740 {
2741 	pg_data_t *pgdat = NODE_DATA(node);
2742 	int nr_remaining;
2743 	unsigned int nr_succeeded;
2744 	LIST_HEAD(migratepages);
2745 	struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio);
2746 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
2747 
2748 	list_add(&folio->lru, &migratepages);
2749 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2750 				     NULL, node, MIGRATE_ASYNC,
2751 				     MR_NUMA_MISPLACED, &nr_succeeded);
2752 	if (nr_remaining && !list_empty(&migratepages))
2753 		putback_movable_pages(&migratepages);
2754 	if (nr_succeeded) {
2755 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2756 		count_memcg_events(memcg, NUMA_PAGE_MIGRATE, nr_succeeded);
2757 		if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
2758 		    && !node_is_toptier(folio_nid(folio))
2759 		    && node_is_toptier(node))
2760 			mod_lruvec_state(lruvec, PGPROMOTE_SUCCESS, nr_succeeded);
2761 	}
2762 	mem_cgroup_put(memcg);
2763 	BUG_ON(!list_empty(&migratepages));
2764 	return nr_remaining ? -EAGAIN : 0;
2765 }
2766 #endif /* CONFIG_NUMA_BALANCING */
2767