xref: /linux/mm/migrate.c (revision 100c85421b52e41269ada88f7d71a6b8a06c7a11)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2b20a3503SChristoph Lameter /*
314e0f9bcSHugh Dickins  * Memory Migration functionality - linux/mm/migrate.c
4b20a3503SChristoph Lameter  *
5b20a3503SChristoph Lameter  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6b20a3503SChristoph Lameter  *
7b20a3503SChristoph Lameter  * Page migration was first developed in the context of the memory hotplug
8b20a3503SChristoph Lameter  * project. The main authors of the migration code are:
9b20a3503SChristoph Lameter  *
10b20a3503SChristoph Lameter  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11b20a3503SChristoph Lameter  * Hirokazu Takahashi <taka@valinux.co.jp>
12b20a3503SChristoph Lameter  * Dave Hansen <haveblue@us.ibm.com>
13cde53535SChristoph Lameter  * Christoph Lameter
14b20a3503SChristoph Lameter  */
15b20a3503SChristoph Lameter 
16b20a3503SChristoph Lameter #include <linux/migrate.h>
17b95f1b31SPaul Gortmaker #include <linux/export.h>
18b20a3503SChristoph Lameter #include <linux/swap.h>
190697212aSChristoph Lameter #include <linux/swapops.h>
20b20a3503SChristoph Lameter #include <linux/pagemap.h>
21e23ca00bSChristoph Lameter #include <linux/buffer_head.h>
22b20a3503SChristoph Lameter #include <linux/mm_inline.h>
23b488893aSPavel Emelyanov #include <linux/nsproxy.h>
24e9995ef9SHugh Dickins #include <linux/ksm.h>
25b20a3503SChristoph Lameter #include <linux/rmap.h>
26b20a3503SChristoph Lameter #include <linux/topology.h>
27b20a3503SChristoph Lameter #include <linux/cpu.h>
28b20a3503SChristoph Lameter #include <linux/cpuset.h>
2904e62a29SChristoph Lameter #include <linux/writeback.h>
30742755a1SChristoph Lameter #include <linux/mempolicy.h>
31742755a1SChristoph Lameter #include <linux/vmalloc.h>
3286c3a764SDavid Quigley #include <linux/security.h>
3342cb14b1SHugh Dickins #include <linux/backing-dev.h>
34bda807d4SMinchan Kim #include <linux/compaction.h>
354f5ca265SAdrian Bunk #include <linux/syscalls.h>
367addf443SDominik Brodowski #include <linux/compat.h>
37290408d4SNaoya Horiguchi #include <linux/hugetlb.h>
388e6ac7faSAneesh Kumar K.V #include <linux/hugetlb_cgroup.h>
395a0e3ad6STejun Heo #include <linux/gfp.h>
40df6ad698SJérôme Glisse #include <linux/pfn_t.h>
41a5430ddaSJérôme Glisse #include <linux/memremap.h>
428315ada7SJérôme Glisse #include <linux/userfaultfd_k.h>
43bf6bddf1SRafael Aquini #include <linux/balloon_compaction.h>
4433c3fc71SVladimir Davydov #include <linux/page_idle.h>
45d435edcaSVlastimil Babka #include <linux/page_owner.h>
466e84f315SIngo Molnar #include <linux/sched/mm.h>
47197e7e52SLinus Torvalds #include <linux/ptrace.h>
4834290e2cSRalph Campbell #include <linux/oom.h>
49884a6e5dSDave Hansen #include <linux/memory.h>
50ac16ec83SBaolin Wang #include <linux/random.h>
51c574bbe9SHuang Ying #include <linux/sched/sysctl.h>
52467b171aSAneesh Kumar K.V #include <linux/memory-tiers.h>
53b20a3503SChristoph Lameter 
540d1836c3SMichal Nazarewicz #include <asm/tlbflush.h>
550d1836c3SMichal Nazarewicz 
567b2a2d4aSMel Gorman #include <trace/events/migrate.h>
577b2a2d4aSMel Gorman 
58b20a3503SChristoph Lameter #include "internal.h"
59b20a3503SChristoph Lameter 
60cd775580SBaolin Wang bool isolate_movable_page(struct page *page, isolate_mode_t mode)
61bda807d4SMinchan Kim {
6219979497SVishal Moola (Oracle) 	struct folio *folio = folio_get_nontail_page(page);
6368f2736aSMatthew Wilcox (Oracle) 	const struct movable_operations *mops;
64bda807d4SMinchan Kim 
65bda807d4SMinchan Kim 	/*
66bda807d4SMinchan Kim 	 * Avoid burning cycles with pages that are still under __free_pages(),
67bda807d4SMinchan Kim 	 * or just got freed under us.
68bda807d4SMinchan Kim 	 *
69bda807d4SMinchan Kim 	 * In case we 'win' a race for a movable page being freed under us and
70bda807d4SMinchan Kim 	 * raise its refcount preventing __free_pages() from doing its job
71bda807d4SMinchan Kim 	 * the put_page() at the end of this block will take care of
72bda807d4SMinchan Kim 	 * releasing this page, thus avoiding a nasty leakage.
73bda807d4SMinchan Kim 	 */
7419979497SVishal Moola (Oracle) 	if (!folio)
75bda807d4SMinchan Kim 		goto out;
76bda807d4SMinchan Kim 
7719979497SVishal Moola (Oracle) 	if (unlikely(folio_test_slab(folio)))
7819979497SVishal Moola (Oracle) 		goto out_putfolio;
798b881763SVlastimil Babka 	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
808b881763SVlastimil Babka 	smp_rmb();
81bda807d4SMinchan Kim 	/*
828b881763SVlastimil Babka 	 * Check the movable flag before taking the page lock because
838b881763SVlastimil Babka 	 * we use non-atomic bitops on newly allocated page flags, so
848b881763SVlastimil Babka 	 * unconditionally grabbing the lock would corrupt the owner's flag updates.
85bda807d4SMinchan Kim 	 */
8619979497SVishal Moola (Oracle) 	if (unlikely(!__folio_test_movable(folio)))
8719979497SVishal Moola (Oracle) 		goto out_putfolio;
888b881763SVlastimil Babka 	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
898b881763SVlastimil Babka 	smp_rmb();
9019979497SVishal Moola (Oracle) 	if (unlikely(folio_test_slab(folio)))
9119979497SVishal Moola (Oracle) 		goto out_putfolio;
928b881763SVlastimil Babka 
93bda807d4SMinchan Kim 	/*
94bda807d4SMinchan Kim 	 * As movable pages are not isolated from LRU lists, concurrent
95bda807d4SMinchan Kim 	 * compaction threads can race against page migration functions
96bda807d4SMinchan Kim 	 * as well as race against the release of a page.
97bda807d4SMinchan Kim 	 *
98bda807d4SMinchan Kim 	 * In order to avoid having an already isolated movable page
99bda807d4SMinchan Kim 	 * being (wrongly) re-isolated while it is under migration,
100bda807d4SMinchan Kim 	 * or to avoid attempting to isolate pages being released,
101bda807d4SMinchan Kim 	 * let's be sure we have the page lock
102bda807d4SMinchan Kim 	 * before proceeding with the movable page isolation steps.
103bda807d4SMinchan Kim 	 */
10419979497SVishal Moola (Oracle) 	if (unlikely(!folio_trylock(folio)))
10519979497SVishal Moola (Oracle) 		goto out_putfolio;
106bda807d4SMinchan Kim 
10719979497SVishal Moola (Oracle) 	if (!folio_test_movable(folio) || folio_test_isolated(folio))
108bda807d4SMinchan Kim 		goto out_no_isolated;
109bda807d4SMinchan Kim 
11019979497SVishal Moola (Oracle) 	mops = folio_movable_ops(folio);
11119979497SVishal Moola (Oracle) 	VM_BUG_ON_FOLIO(!mops, folio);
112bda807d4SMinchan Kim 
11319979497SVishal Moola (Oracle) 	if (!mops->isolate_page(&folio->page, mode))
114bda807d4SMinchan Kim 		goto out_no_isolated;
115bda807d4SMinchan Kim 
116bda807d4SMinchan Kim 	/* Driver shouldn't use PG_isolated bit of page->flags */
11719979497SVishal Moola (Oracle) 	WARN_ON_ONCE(folio_test_isolated(folio));
11819979497SVishal Moola (Oracle) 	folio_set_isolated(folio);
11919979497SVishal Moola (Oracle) 	folio_unlock(folio);
120bda807d4SMinchan Kim 
121cd775580SBaolin Wang 	return true;
122bda807d4SMinchan Kim 
123bda807d4SMinchan Kim out_no_isolated:
12419979497SVishal Moola (Oracle) 	folio_unlock(folio);
12519979497SVishal Moola (Oracle) out_putfolio:
12619979497SVishal Moola (Oracle) 	folio_put(folio);
127bda807d4SMinchan Kim out:
128cd775580SBaolin Wang 	return false;
129bda807d4SMinchan Kim }
130bda807d4SMinchan Kim 
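/*
 * Illustrative caller pattern (a sketch, not code from this file): a
 * compaction-style scanner that finds a non-LRU movable page would do
 * roughly the following, and later either migrate the list or undo the
 * isolation with putback_movable_pages():
 *
 *	if (isolate_movable_page(page, mode))
 *		list_add(&page->lru, &migratepages);
 */
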
131280d724aSVishal Moola (Oracle) static void putback_movable_folio(struct folio *folio)
132bda807d4SMinchan Kim {
133280d724aSVishal Moola (Oracle) 	const struct movable_operations *mops = folio_movable_ops(folio);
134bda807d4SMinchan Kim 
135280d724aSVishal Moola (Oracle) 	mops->putback_page(&folio->page);
136280d724aSVishal Moola (Oracle) 	folio_clear_isolated(folio);
137bda807d4SMinchan Kim }
138bda807d4SMinchan Kim 
139b20a3503SChristoph Lameter /*
1405733c7d1SRafael Aquini  * Put previously isolated pages back onto the appropriate lists
1415733c7d1SRafael Aquini  * from where they were once taken off for compaction/migration.
1425733c7d1SRafael Aquini  *
14359c82b70SJoonsoo Kim  * This function shall be used whenever the isolated pageset has been
14459c82b70SJoonsoo Kim  * built from LRU, balloon, or hugetlbfs pages. See isolate_migratepages_range()
1457ce82f4cSMiaohe Lin  * and isolate_hugetlb().
1465733c7d1SRafael Aquini  */
1475733c7d1SRafael Aquini void putback_movable_pages(struct list_head *l)
1485733c7d1SRafael Aquini {
149280d724aSVishal Moola (Oracle) 	struct folio *folio;
150280d724aSVishal Moola (Oracle) 	struct folio *folio2;
1515733c7d1SRafael Aquini 
152280d724aSVishal Moola (Oracle) 	list_for_each_entry_safe(folio, folio2, l, lru) {
153280d724aSVishal Moola (Oracle) 		if (unlikely(folio_test_hugetlb(folio))) {
154280d724aSVishal Moola (Oracle) 			folio_putback_active_hugetlb(folio);
15531caf665SNaoya Horiguchi 			continue;
15631caf665SNaoya Horiguchi 		}
157280d724aSVishal Moola (Oracle) 		list_del(&folio->lru);
158bda807d4SMinchan Kim 		/*
159280d724aSVishal Moola (Oracle) 		 * We isolated a non-LRU movable folio, so here we can use
1607e2a5e5aSKefeng Wang 		 * __folio_test_movable because an LRU folio's mapping cannot
1617e2a5e5aSKefeng Wang 		 * have PAGE_MAPPING_MOVABLE.
162bda807d4SMinchan Kim 		 */
163280d724aSVishal Moola (Oracle) 		if (unlikely(__folio_test_movable(folio))) {
164280d724aSVishal Moola (Oracle) 			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
165280d724aSVishal Moola (Oracle) 			folio_lock(folio);
166280d724aSVishal Moola (Oracle) 			if (folio_test_movable(folio))
167280d724aSVishal Moola (Oracle) 				putback_movable_folio(folio);
168bf6bddf1SRafael Aquini 			else
169280d724aSVishal Moola (Oracle) 				folio_clear_isolated(folio);
170280d724aSVishal Moola (Oracle) 			folio_unlock(folio);
171280d724aSVishal Moola (Oracle) 			folio_put(folio);
172bda807d4SMinchan Kim 		} else {
173280d724aSVishal Moola (Oracle) 			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
174280d724aSVishal Moola (Oracle) 					folio_is_file_lru(folio), -folio_nr_pages(folio));
175280d724aSVishal Moola (Oracle) 			folio_putback_lru(folio);
176b20a3503SChristoph Lameter 		}
177b20a3503SChristoph Lameter 	}
178bda807d4SMinchan Kim }
179b20a3503SChristoph Lameter 
1800697212aSChristoph Lameter /*
1810697212aSChristoph Lameter  * Restore a potential migration pte to a working pte entry
1820697212aSChristoph Lameter  */
1832f031c6fSMatthew Wilcox (Oracle) static bool remove_migration_pte(struct folio *folio,
1842f031c6fSMatthew Wilcox (Oracle) 		struct vm_area_struct *vma, unsigned long addr, void *old)
1850697212aSChristoph Lameter {
1864eecb8b9SMatthew Wilcox (Oracle) 	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
1870697212aSChristoph Lameter 
1883fe87967SKirill A. Shutemov 	while (page_vma_mapped_walk(&pvmw)) {
1896c287605SDavid Hildenbrand 		rmap_t rmap_flags = RMAP_NONE;
190c33c7948SRyan Roberts 		pte_t old_pte;
1910697212aSChristoph Lameter 		pte_t pte;
1920697212aSChristoph Lameter 		swp_entry_t entry;
1934eecb8b9SMatthew Wilcox (Oracle) 		struct page *new;
1944eecb8b9SMatthew Wilcox (Oracle) 		unsigned long idx = 0;
1950697212aSChristoph Lameter 
1964eecb8b9SMatthew Wilcox (Oracle) 		/* pgoff is invalid for ksm pages, but they are never large */
1974eecb8b9SMatthew Wilcox (Oracle) 		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
1984eecb8b9SMatthew Wilcox (Oracle) 			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
1994eecb8b9SMatthew Wilcox (Oracle) 		new = folio_page(folio, idx);
2000697212aSChristoph Lameter 
201616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
202616b8371SZi Yan 		/* PMD-mapped THP migration entry */
203616b8371SZi Yan 		if (!pvmw.pte) {
2044eecb8b9SMatthew Wilcox (Oracle) 			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
2054eecb8b9SMatthew Wilcox (Oracle) 					!folio_test_pmd_mappable(folio), folio);
206616b8371SZi Yan 			remove_migration_pmd(&pvmw, new);
207616b8371SZi Yan 			continue;
208616b8371SZi Yan 		}
209616b8371SZi Yan #endif
210616b8371SZi Yan 
2114eecb8b9SMatthew Wilcox (Oracle) 		folio_get(folio);
2122e346877SPeter Xu 		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
213c33c7948SRyan Roberts 		old_pte = ptep_get(pvmw.pte);
214d3cb8bf6SMel Gorman 
215c33c7948SRyan Roberts 		entry = pte_to_swp_entry(old_pte);
2162e346877SPeter Xu 		if (!is_migration_entry_young(entry))
2172e346877SPeter Xu 			pte = pte_mkold(pte);
2182e346877SPeter Xu 		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
2192e346877SPeter Xu 			pte = pte_mkdirty(pte);
220*055267feSPaul Gofman 		if (pte_swp_soft_dirty(old_pte))
221*055267feSPaul Gofman 			pte = pte_mksoft_dirty(pte);
222*055267feSPaul Gofman 		else
223*055267feSPaul Gofman 			pte = pte_clear_soft_dirty(pte);
224*055267feSPaul Gofman 
2254dd845b5SAlistair Popple 		if (is_writable_migration_entry(entry))
226161e393cSRick Edgecombe 			pte = pte_mkwrite(pte, vma);
227c33c7948SRyan Roberts 		else if (pte_swp_uffd_wp(old_pte))
228f45ec5ffSPeter Xu 			pte = pte_mkuffd_wp(pte);
229d3cb8bf6SMel Gorman 
2306c287605SDavid Hildenbrand 		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
2316c287605SDavid Hildenbrand 			rmap_flags |= RMAP_EXCLUSIVE;
2326c287605SDavid Hildenbrand 
2336128763fSRalph Campbell 		if (unlikely(is_device_private_page(new))) {
2344dd845b5SAlistair Popple 			if (pte_write(pte))
2354dd845b5SAlistair Popple 				entry = make_writable_device_private_entry(
2364dd845b5SAlistair Popple 							page_to_pfn(new));
2374dd845b5SAlistair Popple 			else
2384dd845b5SAlistair Popple 				entry = make_readable_device_private_entry(
2394dd845b5SAlistair Popple 							page_to_pfn(new));
240a5430ddaSJérôme Glisse 			pte = swp_entry_to_pte(entry);
241c33c7948SRyan Roberts 			if (pte_swp_soft_dirty(old_pte))
2423d321bf8SRalph Campbell 				pte = pte_swp_mksoft_dirty(pte);
243c33c7948SRyan Roberts 			if (pte_swp_uffd_wp(old_pte))
244ebdf8321SAlistair Popple 				pte = pte_swp_mkuffd_wp(pte);
245df6ad698SJérôme Glisse 		}
246a5430ddaSJérôme Glisse 
2473ef8fd7fSAndi Kleen #ifdef CONFIG_HUGETLB_PAGE
2484eecb8b9SMatthew Wilcox (Oracle) 		if (folio_test_hugetlb(folio)) {
249935d4f0cSRyan Roberts 			struct hstate *h = hstate_vma(vma);
250935d4f0cSRyan Roberts 			unsigned int shift = huge_page_shift(h);
251935d4f0cSRyan Roberts 			unsigned long psize = huge_page_size(h);
25279c1c594SChristophe Leroy 
25379c1c594SChristophe Leroy 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
2544eecb8b9SMatthew Wilcox (Oracle) 			if (folio_test_anon(folio))
2559d5fafd5SDavid Hildenbrand 				hugetlb_add_anon_rmap(folio, vma, pvmw.address,
2566c287605SDavid Hildenbrand 						      rmap_flags);
257290408d4SNaoya Horiguchi 			else
25844887f39SDavid Hildenbrand 				hugetlb_add_file_rmap(folio);
259935d4f0cSRyan Roberts 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
260935d4f0cSRyan Roberts 					psize);
261383321abSAneesh Kumar K.V 		} else
262383321abSAneesh Kumar K.V #endif
263383321abSAneesh Kumar K.V 		{
2644eecb8b9SMatthew Wilcox (Oracle) 			if (folio_test_anon(folio))
265a15dc478SDavid Hildenbrand 				folio_add_anon_rmap_pte(folio, new, vma,
266a15dc478SDavid Hildenbrand 							pvmw.address, rmap_flags);
26704e62a29SChristoph Lameter 			else
268c4dffb0bSDavid Hildenbrand 				folio_add_file_rmap_pte(folio, new, vma);
2691eba86c0SPasha Tatashin 			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
270383321abSAneesh Kumar K.V 		}
271b7435507SHugh Dickins 		if (vma->vm_flags & VM_LOCKED)
27296f97c43SLorenzo Stoakes 			mlock_drain_local();
273e125fe40SKirill A. Shutemov 
2744cc79b33SAnshuman Khandual 		trace_remove_migration_pte(pvmw.address, pte_val(pte),
2754cc79b33SAnshuman Khandual 					   compound_order(new));
2764cc79b33SAnshuman Khandual 
27704e62a29SChristoph Lameter 		/* No need to invalidate - it was non-present before */
2783fe87967SKirill A. Shutemov 		update_mmu_cache(vma, pvmw.address, pvmw.pte);
2793fe87967SKirill A. Shutemov 	}
2803fe87967SKirill A. Shutemov 
281e4b82222SMinchan Kim 	return true;
2820697212aSChristoph Lameter }
2830697212aSChristoph Lameter 
2840697212aSChristoph Lameter /*
28504e62a29SChristoph Lameter  * Get rid of all migration entries and replace them by
28604e62a29SChristoph Lameter  * references to the indicated page.
28704e62a29SChristoph Lameter  */
2884eecb8b9SMatthew Wilcox (Oracle) void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
28904e62a29SChristoph Lameter {
290051ac83aSJoonsoo Kim 	struct rmap_walk_control rwc = {
291051ac83aSJoonsoo Kim 		.rmap_one = remove_migration_pte,
2924eecb8b9SMatthew Wilcox (Oracle) 		.arg = src,
293051ac83aSJoonsoo Kim 	};
294051ac83aSJoonsoo Kim 
295e388466dSKirill A. Shutemov 	if (locked)
2962f031c6fSMatthew Wilcox (Oracle) 		rmap_walk_locked(dst, &rwc);
297e388466dSKirill A. Shutemov 	else
2982f031c6fSMatthew Wilcox (Oracle) 		rmap_walk(dst, &rwc);
29904e62a29SChristoph Lameter }
30004e62a29SChristoph Lameter 
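/*
 * Sketch of how the migration core uses this (paraphrased from the
 * move and undo paths elsewhere in this file): once the data has been
 * copied, migration entries are rewritten to point at the destination
 * folio; on failure they are restored to the source instead:
 *
 *	remove_migration_ptes(src, dst, false);		(success)
 *	remove_migration_ptes(src, src, false);		(failure/undo)
 */
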
30104e62a29SChristoph Lameter /*
3020697212aSChristoph Lameter  * Something used the pte of a page under migration. We need to
3030697212aSChristoph Lameter  * get to the page and wait until migration is finished.
3040697212aSChristoph Lameter  * When we return from this function the fault will be retried.
3050697212aSChristoph Lameter  */
3060cb8fd4dSHugh Dickins void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
3070cb8fd4dSHugh Dickins 			  unsigned long address)
3080697212aSChristoph Lameter {
3090cb8fd4dSHugh Dickins 	spinlock_t *ptl;
3100cb8fd4dSHugh Dickins 	pte_t *ptep;
31130dad309SNaoya Horiguchi 	pte_t pte;
3120697212aSChristoph Lameter 	swp_entry_t entry;
3130697212aSChristoph Lameter 
3140cb8fd4dSHugh Dickins 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
31504dee9e8SHugh Dickins 	if (!ptep)
31604dee9e8SHugh Dickins 		return;
31704dee9e8SHugh Dickins 
318c33c7948SRyan Roberts 	pte = ptep_get(ptep);
3190cb8fd4dSHugh Dickins 	pte_unmap(ptep);
3200cb8fd4dSHugh Dickins 
3210697212aSChristoph Lameter 	if (!is_swap_pte(pte))
3220697212aSChristoph Lameter 		goto out;
3230697212aSChristoph Lameter 
3240697212aSChristoph Lameter 	entry = pte_to_swp_entry(pte);
3250697212aSChristoph Lameter 	if (!is_migration_entry(entry))
3260697212aSChristoph Lameter 		goto out;
3270697212aSChristoph Lameter 
3280cb8fd4dSHugh Dickins 	migration_entry_wait_on_locked(entry, ptl);
3290697212aSChristoph Lameter 	return;
3300697212aSChristoph Lameter out:
3310cb8fd4dSHugh Dickins 	spin_unlock(ptl);
33230dad309SNaoya Horiguchi }
33330dad309SNaoya Horiguchi 
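/*
 * Sketch of the usual caller (mm/memory.c's fault path, paraphrased):
 * do_swap_page() sees a migration entry in the faulting pte and simply
 * sleeps until the migration finishes, then retries the fault:
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (is_migration_entry(entry))
 *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
 */
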
334ad1ac596SMiaohe Lin #ifdef CONFIG_HUGETLB_PAGE
335fcd48540SPeter Xu /*
336fcd48540SPeter Xu  * The vma read lock must be held upon entry. Holding that lock prevents either
337fcd48540SPeter Xu  * the pte or the ptl from being freed.
338fcd48540SPeter Xu  *
339fcd48540SPeter Xu  * This function will release the vma lock before returning.
340fcd48540SPeter Xu  */
3410cb8fd4dSHugh Dickins void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
34230dad309SNaoya Horiguchi {
3430cb8fd4dSHugh Dickins 	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
344ad1ac596SMiaohe Lin 	pte_t pte;
345ad1ac596SMiaohe Lin 
346fcd48540SPeter Xu 	hugetlb_vma_assert_locked(vma);
347ad1ac596SMiaohe Lin 	spin_lock(ptl);
348ad1ac596SMiaohe Lin 	pte = huge_ptep_get(ptep);
349ad1ac596SMiaohe Lin 
350fcd48540SPeter Xu 	if (unlikely(!is_hugetlb_entry_migration(pte))) {
351ad1ac596SMiaohe Lin 		spin_unlock(ptl);
352fcd48540SPeter Xu 		hugetlb_vma_unlock_read(vma);
353fcd48540SPeter Xu 	} else {
354fcd48540SPeter Xu 		/*
355fcd48540SPeter Xu 		 * If a migration entry exists, it is safe to release the vma
356fcd48540SPeter Xu 		 * lock here because the pgtable page won't be freed while the
357fcd48540SPeter Xu 		 * pgtable lock is held.  See the comment right above the pgtable
358fcd48540SPeter Xu 		 * lock release in migration_entry_wait_on_locked().
359fcd48540SPeter Xu 		 */
360fcd48540SPeter Xu 		hugetlb_vma_unlock_read(vma);
3610cb8fd4dSHugh Dickins 		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
36230dad309SNaoya Horiguchi 	}
363fcd48540SPeter Xu }
364ad1ac596SMiaohe Lin #endif
365ad1ac596SMiaohe Lin 
366616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
367616b8371SZi Yan void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
368616b8371SZi Yan {
369616b8371SZi Yan 	spinlock_t *ptl;
370616b8371SZi Yan 
371616b8371SZi Yan 	ptl = pmd_lock(mm, pmd);
372616b8371SZi Yan 	if (!is_pmd_migration_entry(*pmd))
373616b8371SZi Yan 		goto unlock;
3740cb8fd4dSHugh Dickins 	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
375616b8371SZi Yan 	return;
376616b8371SZi Yan unlock:
377616b8371SZi Yan 	spin_unlock(ptl);
378616b8371SZi Yan }
379616b8371SZi Yan #endif
380616b8371SZi Yan 
381108ca835SMatthew Wilcox (Oracle) static int folio_expected_refs(struct address_space *mapping,
382108ca835SMatthew Wilcox (Oracle) 		struct folio *folio)
3830b3901b3SJan Kara {
384108ca835SMatthew Wilcox (Oracle) 	int refs = 1;
385108ca835SMatthew Wilcox (Oracle) 	if (!mapping)
386108ca835SMatthew Wilcox (Oracle) 		return refs;
3870b3901b3SJan Kara 
388108ca835SMatthew Wilcox (Oracle) 	refs += folio_nr_pages(folio);
389108ca835SMatthew Wilcox (Oracle) 	if (folio_test_private(folio))
390108ca835SMatthew Wilcox (Oracle) 		refs++;
391108ca835SMatthew Wilcox (Oracle) 
392108ca835SMatthew Wilcox (Oracle) 	return refs;
3930b3901b3SJan Kara }
3940b3901b3SJan Kara 
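/*
 * Worked example (a sketch): for an order-0 pagecache folio with buffer
 * heads attached, folio_expected_refs() returns 1 (the reference held by
 * the migration code) + 1 (the page cache reference) + 1 (the private
 * reference for the buffers) = 3, matching the table in the comment below.
 */
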
395b20a3503SChristoph Lameter /*
396c3fcf8a5SChristoph Lameter  * Replace the page in the mapping.
3975b5c7120SChristoph Lameter  *
3985b5c7120SChristoph Lameter  * The number of remaining references must be:
3995b5c7120SChristoph Lameter  * 1 for anonymous pages without a mapping
4005b5c7120SChristoph Lameter  * 2 for pages with a mapping
401266cf658SDavid Howells  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
402b20a3503SChristoph Lameter  */
4033417013eSMatthew Wilcox (Oracle) int folio_migrate_mapping(struct address_space *mapping,
4043417013eSMatthew Wilcox (Oracle) 		struct folio *newfolio, struct folio *folio, int extra_count)
405b20a3503SChristoph Lameter {
4063417013eSMatthew Wilcox (Oracle) 	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
40742cb14b1SHugh Dickins 	struct zone *oldzone, *newzone;
40842cb14b1SHugh Dickins 	int dirty;
409108ca835SMatthew Wilcox (Oracle) 	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
4103417013eSMatthew Wilcox (Oracle) 	long nr = folio_nr_pages(folio);
411fc346d0aSCharan Teja Kalla 	long entries, i;
4128763cb45SJérôme Glisse 
4136c5240aeSChristoph Lameter 	if (!mapping) {
4140e8c7d0fSChristoph Lameter 		/* Anonymous page without mapping */
4153417013eSMatthew Wilcox (Oracle) 		if (folio_ref_count(folio) != expected_count)
4166c5240aeSChristoph Lameter 			return -EAGAIN;
417cf4b769aSHugh Dickins 
418cf4b769aSHugh Dickins 		/* No turning back from here */
4193417013eSMatthew Wilcox (Oracle) 		newfolio->index = folio->index;
4203417013eSMatthew Wilcox (Oracle) 		newfolio->mapping = folio->mapping;
4213417013eSMatthew Wilcox (Oracle) 		if (folio_test_swapbacked(folio))
4223417013eSMatthew Wilcox (Oracle) 			__folio_set_swapbacked(newfolio);
423cf4b769aSHugh Dickins 
42478bd5209SRafael Aquini 		return MIGRATEPAGE_SUCCESS;
4256c5240aeSChristoph Lameter 	}
4266c5240aeSChristoph Lameter 
4273417013eSMatthew Wilcox (Oracle) 	oldzone = folio_zone(folio);
4283417013eSMatthew Wilcox (Oracle) 	newzone = folio_zone(newfolio);
42942cb14b1SHugh Dickins 
43089eb946aSMatthew Wilcox 	xas_lock_irq(&xas);
4313417013eSMatthew Wilcox (Oracle) 	if (!folio_ref_freeze(folio, expected_count)) {
43289eb946aSMatthew Wilcox 		xas_unlock_irq(&xas);
433e286781dSNick Piggin 		return -EAGAIN;
434e286781dSNick Piggin 	}
435e286781dSNick Piggin 
436b20a3503SChristoph Lameter 	/*
4373417013eSMatthew Wilcox (Oracle) 	 * Now we know that no one else is looking at the folio:
438cf4b769aSHugh Dickins 	 * no turning back from here.
439b20a3503SChristoph Lameter 	 */
4403417013eSMatthew Wilcox (Oracle) 	newfolio->index = folio->index;
4413417013eSMatthew Wilcox (Oracle) 	newfolio->mapping = folio->mapping;
4423417013eSMatthew Wilcox (Oracle) 	folio_ref_add(newfolio, nr); /* add cache reference */
4433417013eSMatthew Wilcox (Oracle) 	if (folio_test_swapbacked(folio)) {
4443417013eSMatthew Wilcox (Oracle) 		__folio_set_swapbacked(newfolio);
4453417013eSMatthew Wilcox (Oracle) 		if (folio_test_swapcache(folio)) {
4463417013eSMatthew Wilcox (Oracle) 			folio_set_swapcache(newfolio);
4473417013eSMatthew Wilcox (Oracle) 			newfolio->private = folio_get_private(folio);
448b20a3503SChristoph Lameter 		}
449fc346d0aSCharan Teja Kalla 		entries = nr;
4506326fec1SNicholas Piggin 	} else {
4513417013eSMatthew Wilcox (Oracle) 		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
452fc346d0aSCharan Teja Kalla 		entries = 1;
4536326fec1SNicholas Piggin 	}
454b20a3503SChristoph Lameter 
45542cb14b1SHugh Dickins 	/* Move dirty while page refs frozen and newpage not yet exposed */
4563417013eSMatthew Wilcox (Oracle) 	dirty = folio_test_dirty(folio);
45742cb14b1SHugh Dickins 	if (dirty) {
4583417013eSMatthew Wilcox (Oracle) 		folio_clear_dirty(folio);
4593417013eSMatthew Wilcox (Oracle) 		folio_set_dirty(newfolio);
46042cb14b1SHugh Dickins 	}
46142cb14b1SHugh Dickins 
462fc346d0aSCharan Teja Kalla 	/* Swap cache still stores N entries instead of a high-order entry */
463fc346d0aSCharan Teja Kalla 	for (i = 0; i < entries; i++) {
4643417013eSMatthew Wilcox (Oracle) 		xas_store(&xas, newfolio);
465fc346d0aSCharan Teja Kalla 		xas_next(&xas);
466fc346d0aSCharan Teja Kalla 	}
4677cf9c2c7SNick Piggin 
4687cf9c2c7SNick Piggin 	/*
469937a94c9SJacobo Giralt 	 * Drop cache reference from old page by unfreezing
470937a94c9SJacobo Giralt 	 * to one less reference.
4717cf9c2c7SNick Piggin 	 * We know this isn't the last reference.
4727cf9c2c7SNick Piggin 	 */
4733417013eSMatthew Wilcox (Oracle) 	folio_ref_unfreeze(folio, expected_count - nr);
4747cf9c2c7SNick Piggin 
47589eb946aSMatthew Wilcox 	xas_unlock(&xas);
47642cb14b1SHugh Dickins 	/* Leave irq disabled to prevent preemption while updating stats */
47742cb14b1SHugh Dickins 
4780e8c7d0fSChristoph Lameter 	/*
4790e8c7d0fSChristoph Lameter 	 * If moved to a different zone then also account
4800e8c7d0fSChristoph Lameter 	 * the page for that zone. Other VM counters will be
4810e8c7d0fSChristoph Lameter 	 * taken care of when we establish references to the
4820e8c7d0fSChristoph Lameter 	 * new page and drop references to the old page.
4830e8c7d0fSChristoph Lameter 	 *
4840e8c7d0fSChristoph Lameter 	 * Note that anonymous pages are accounted for
4854b9d0fabSMel Gorman 	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
4860e8c7d0fSChristoph Lameter 	 * are mapped to swap space.
4870e8c7d0fSChristoph Lameter 	 */
48842cb14b1SHugh Dickins 	if (newzone != oldzone) {
4890d1c2072SJohannes Weiner 		struct lruvec *old_lruvec, *new_lruvec;
4900d1c2072SJohannes Weiner 		struct mem_cgroup *memcg;
4910d1c2072SJohannes Weiner 
4923417013eSMatthew Wilcox (Oracle) 		memcg = folio_memcg(folio);
4930d1c2072SJohannes Weiner 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
4940d1c2072SJohannes Weiner 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
4950d1c2072SJohannes Weiner 
4965c447d27SShakeel Butt 		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
4975c447d27SShakeel Butt 		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
4983417013eSMatthew Wilcox (Oracle) 		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
4995c447d27SShakeel Butt 			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
5005c447d27SShakeel Butt 			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
5010b52c420SJan Glauber 
5020b52c420SJan Glauber 			if (folio_test_pmd_mappable(folio)) {
5030b52c420SJan Glauber 				__mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
5040b52c420SJan Glauber 				__mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
5050b52c420SJan Glauber 			}
5064b02108aSKOSAKI Motohiro 		}
507b6038942SShakeel Butt #ifdef CONFIG_SWAP
5083417013eSMatthew Wilcox (Oracle) 		if (folio_test_swapcache(folio)) {
509b6038942SShakeel Butt 			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
510b6038942SShakeel Butt 			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
511b6038942SShakeel Butt 		}
512b6038942SShakeel Butt #endif
513f56753acSChristoph Hellwig 		if (dirty && mapping_can_writeback(mapping)) {
5145c447d27SShakeel Butt 			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
5155c447d27SShakeel Butt 			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
5165c447d27SShakeel Butt 			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
5175c447d27SShakeel Butt 			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
51842cb14b1SHugh Dickins 		}
51942cb14b1SHugh Dickins 	}
52042cb14b1SHugh Dickins 	local_irq_enable();
521b20a3503SChristoph Lameter 
52278bd5209SRafael Aquini 	return MIGRATEPAGE_SUCCESS;
523b20a3503SChristoph Lameter }
5243417013eSMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_migrate_mapping);
525b20a3503SChristoph Lameter 
526b20a3503SChristoph Lameter /*
527290408d4SNaoya Horiguchi  * The expected number of remaining references is the same as that
5283417013eSMatthew Wilcox (Oracle)  * of folio_migrate_mapping().
529290408d4SNaoya Horiguchi  */
530290408d4SNaoya Horiguchi int migrate_huge_page_move_mapping(struct address_space *mapping,
531b890ec2aSMatthew Wilcox (Oracle) 				   struct folio *dst, struct folio *src)
532290408d4SNaoya Horiguchi {
533b890ec2aSMatthew Wilcox (Oracle) 	XA_STATE(xas, &mapping->i_pages, folio_index(src));
534290408d4SNaoya Horiguchi 	int expected_count;
535290408d4SNaoya Horiguchi 
53689eb946aSMatthew Wilcox 	xas_lock_irq(&xas);
537a08c7193SSidhartha Kumar 	expected_count = folio_expected_refs(mapping, src);
538b890ec2aSMatthew Wilcox (Oracle) 	if (!folio_ref_freeze(src, expected_count)) {
53989eb946aSMatthew Wilcox 		xas_unlock_irq(&xas);
540290408d4SNaoya Horiguchi 		return -EAGAIN;
541290408d4SNaoya Horiguchi 	}
542290408d4SNaoya Horiguchi 
543b890ec2aSMatthew Wilcox (Oracle) 	dst->index = src->index;
544b890ec2aSMatthew Wilcox (Oracle) 	dst->mapping = src->mapping;
5456a93ca8fSJohannes Weiner 
546a08c7193SSidhartha Kumar 	folio_ref_add(dst, folio_nr_pages(dst));
547290408d4SNaoya Horiguchi 
548b890ec2aSMatthew Wilcox (Oracle) 	xas_store(&xas, dst);
549290408d4SNaoya Horiguchi 
550a08c7193SSidhartha Kumar 	folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));
551290408d4SNaoya Horiguchi 
55289eb946aSMatthew Wilcox 	xas_unlock_irq(&xas);
5536a93ca8fSJohannes Weiner 
55478bd5209SRafael Aquini 	return MIGRATEPAGE_SUCCESS;
555290408d4SNaoya Horiguchi }
556290408d4SNaoya Horiguchi 
557290408d4SNaoya Horiguchi /*
55819138349SMatthew Wilcox (Oracle)  * Copy the flags and some other ancillary information
559b20a3503SChristoph Lameter  */
56019138349SMatthew Wilcox (Oracle) void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
561b20a3503SChristoph Lameter {
5627851a45cSRik van Riel 	int cpupid;
5637851a45cSRik van Riel 
56419138349SMatthew Wilcox (Oracle) 	if (folio_test_error(folio))
56519138349SMatthew Wilcox (Oracle) 		folio_set_error(newfolio);
56619138349SMatthew Wilcox (Oracle) 	if (folio_test_referenced(folio))
56719138349SMatthew Wilcox (Oracle) 		folio_set_referenced(newfolio);
56819138349SMatthew Wilcox (Oracle) 	if (folio_test_uptodate(folio))
56919138349SMatthew Wilcox (Oracle) 		folio_mark_uptodate(newfolio);
57019138349SMatthew Wilcox (Oracle) 	if (folio_test_clear_active(folio)) {
57119138349SMatthew Wilcox (Oracle) 		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
57219138349SMatthew Wilcox (Oracle) 		folio_set_active(newfolio);
57319138349SMatthew Wilcox (Oracle) 	} else if (folio_test_clear_unevictable(folio))
57419138349SMatthew Wilcox (Oracle) 		folio_set_unevictable(newfolio);
57519138349SMatthew Wilcox (Oracle) 	if (folio_test_workingset(folio))
57619138349SMatthew Wilcox (Oracle) 		folio_set_workingset(newfolio);
57719138349SMatthew Wilcox (Oracle) 	if (folio_test_checked(folio))
57819138349SMatthew Wilcox (Oracle) 		folio_set_checked(newfolio);
5796c287605SDavid Hildenbrand 	/*
5806c287605SDavid Hildenbrand 	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
5816c287605SDavid Hildenbrand 	 * migration entries. We can still have PG_anon_exclusive set on an
5826c287605SDavid Hildenbrand 	 * effectively unmapped and unreferenced first sub-page of an
5836c287605SDavid Hildenbrand 	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
5846c287605SDavid Hildenbrand 	 */
58519138349SMatthew Wilcox (Oracle) 	if (folio_test_mappedtodisk(folio))
58619138349SMatthew Wilcox (Oracle) 		folio_set_mappedtodisk(newfolio);
587b20a3503SChristoph Lameter 
5883417013eSMatthew Wilcox (Oracle) 	/* Move dirty on pages not done by folio_migrate_mapping() */
58919138349SMatthew Wilcox (Oracle) 	if (folio_test_dirty(folio))
59019138349SMatthew Wilcox (Oracle) 		folio_set_dirty(newfolio);
591b20a3503SChristoph Lameter 
59219138349SMatthew Wilcox (Oracle) 	if (folio_test_young(folio))
59319138349SMatthew Wilcox (Oracle) 		folio_set_young(newfolio);
59419138349SMatthew Wilcox (Oracle) 	if (folio_test_idle(folio))
59519138349SMatthew Wilcox (Oracle) 		folio_set_idle(newfolio);
59633c3fc71SVladimir Davydov 
5977851a45cSRik van Riel 	/*
5987851a45cSRik van Riel 	 * Copy NUMA information to the new page, to prevent over-eager
5997851a45cSRik van Riel 	 * future migrations of this same page.
6007851a45cSRik van Riel 	 */
6014e694fe4SKefeng Wang 	cpupid = folio_xchg_last_cpupid(folio, -1);
60233024536SHuang Ying 	/*
60333024536SHuang Ying 	 * In memory tiering mode, when migrating between slow and fast
60433024536SHuang Ying 	 * memory nodes, reset the cpupid, because it is used to record
60533024536SHuang Ying 	 * the page access time on the slow memory node.
60633024536SHuang Ying 	 */
60733024536SHuang Ying 	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
6084e694fe4SKefeng Wang 		bool f_toptier = node_is_toptier(folio_nid(folio));
6094e694fe4SKefeng Wang 		bool t_toptier = node_is_toptier(folio_nid(newfolio));
61033024536SHuang Ying 
61133024536SHuang Ying 		if (f_toptier != t_toptier)
61233024536SHuang Ying 			cpupid = -1;
61333024536SHuang Ying 	}
6144e694fe4SKefeng Wang 	folio_xchg_last_cpupid(newfolio, cpupid);
6157851a45cSRik van Riel 
61619138349SMatthew Wilcox (Oracle) 	folio_migrate_ksm(newfolio, folio);
617c8d6553bSHugh Dickins 	/*
618c8d6553bSHugh Dickins 	 * Please do not reorder this without considering how mm/ksm.c's
619c8d6553bSHugh Dickins 	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
620c8d6553bSHugh Dickins 	 */
62119138349SMatthew Wilcox (Oracle) 	if (folio_test_swapcache(folio))
62219138349SMatthew Wilcox (Oracle) 		folio_clear_swapcache(folio);
62319138349SMatthew Wilcox (Oracle) 	folio_clear_private(folio);
624ad2fa371SMuchun Song 
625ad2fa371SMuchun Song 	/* page->private contains hugetlb specific flags */
62619138349SMatthew Wilcox (Oracle) 	if (!folio_test_hugetlb(folio))
62719138349SMatthew Wilcox (Oracle) 		folio->private = NULL;
628b20a3503SChristoph Lameter 
629b20a3503SChristoph Lameter 	/*
630b20a3503SChristoph Lameter 	 * If any waiters have accumulated on the new page then
631b20a3503SChristoph Lameter 	 * wake them up.
632b20a3503SChristoph Lameter 	 */
63319138349SMatthew Wilcox (Oracle) 	if (folio_test_writeback(newfolio))
63419138349SMatthew Wilcox (Oracle) 		folio_end_writeback(newfolio);
635d435edcaSVlastimil Babka 
6366aeff241SYang Shi 	/*
6376aeff241SYang Shi 	 * PG_readahead shares the same bit with PG_reclaim.  The above
6386aeff241SYang Shi 	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
6396aeff241SYang Shi 	 * bit after that.
6406aeff241SYang Shi 	 */
64119138349SMatthew Wilcox (Oracle) 	if (folio_test_readahead(folio))
64219138349SMatthew Wilcox (Oracle) 		folio_set_readahead(newfolio);
6436aeff241SYang Shi 
64419138349SMatthew Wilcox (Oracle) 	folio_copy_owner(newfolio, folio);
64574485cf2SJohannes Weiner 
646d21bba2bSMatthew Wilcox (Oracle) 	mem_cgroup_migrate(folio, newfolio);
647b20a3503SChristoph Lameter }
64819138349SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_migrate_flags);
6492916ecc0SJérôme Glisse 
650715cbfd6SMatthew Wilcox (Oracle) void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
6512916ecc0SJérôme Glisse {
652715cbfd6SMatthew Wilcox (Oracle) 	folio_copy(newfolio, folio);
653715cbfd6SMatthew Wilcox (Oracle) 	folio_migrate_flags(newfolio, folio);
6542916ecc0SJérôme Glisse }
655715cbfd6SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_migrate_copy);
656b20a3503SChristoph Lameter 
6571d8b85ccSChristoph Lameter /************************************************************
6581d8b85ccSChristoph Lameter  *                    Migration functions
6591d8b85ccSChristoph Lameter  ***********************************************************/
6601d8b85ccSChristoph Lameter 
66116ce101dSAlistair Popple int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
66216ce101dSAlistair Popple 		struct folio *src, enum migrate_mode mode, int extra_count)
66316ce101dSAlistair Popple {
66416ce101dSAlistair Popple 	int rc;
66516ce101dSAlistair Popple 
66616ce101dSAlistair Popple 	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */
66716ce101dSAlistair Popple 
66816ce101dSAlistair Popple 	rc = folio_migrate_mapping(mapping, dst, src, extra_count);
66916ce101dSAlistair Popple 
67016ce101dSAlistair Popple 	if (rc != MIGRATEPAGE_SUCCESS)
67116ce101dSAlistair Popple 		return rc;
67216ce101dSAlistair Popple 
67316ce101dSAlistair Popple 	if (mode != MIGRATE_SYNC_NO_COPY)
67416ce101dSAlistair Popple 		folio_migrate_copy(dst, src);
67516ce101dSAlistair Popple 	else
67616ce101dSAlistair Popple 		folio_migrate_flags(dst, src);
67716ce101dSAlistair Popple 	return MIGRATEPAGE_SUCCESS;
67816ce101dSAlistair Popple }
67916ce101dSAlistair Popple 
68054184650SMatthew Wilcox (Oracle) /**
68154184650SMatthew Wilcox (Oracle)  * migrate_folio() - Simple folio migration.
68254184650SMatthew Wilcox (Oracle)  * @mapping: The address_space containing the folio.
68354184650SMatthew Wilcox (Oracle)  * @dst: The folio to migrate the data to.
68454184650SMatthew Wilcox (Oracle)  * @src: The folio containing the current data.
68554184650SMatthew Wilcox (Oracle)  * @mode: How to migrate the page.
686b20a3503SChristoph Lameter  *
68754184650SMatthew Wilcox (Oracle)  * Common logic to directly migrate a single LRU folio suitable for
68854184650SMatthew Wilcox (Oracle)  * folios that do not use PagePrivate/PagePrivate2.
68954184650SMatthew Wilcox (Oracle)  *
69054184650SMatthew Wilcox (Oracle)  * Folios are locked upon entry and exit.
691b20a3503SChristoph Lameter  */
69254184650SMatthew Wilcox (Oracle) int migrate_folio(struct address_space *mapping, struct folio *dst,
69354184650SMatthew Wilcox (Oracle) 		struct folio *src, enum migrate_mode mode)
694b20a3503SChristoph Lameter {
69516ce101dSAlistair Popple 	return migrate_folio_extra(mapping, dst, src, mode, 0);
696b20a3503SChristoph Lameter }
69754184650SMatthew Wilcox (Oracle) EXPORT_SYMBOL(migrate_folio);
698b20a3503SChristoph Lameter 
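/*
 * Illustrative wiring (a sketch, not code from this file): a filesystem
 * whose folios carry no private data can simply point its address_space
 * operations at this helper; "foo_aops" is a made-up name:
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.migrate_folio	= migrate_folio,
 *	};
 */
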
699925c86a1SChristoph Hellwig #ifdef CONFIG_BUFFER_HEAD
70084ade7c1SJan Kara /* Returns true if all buffers are successfully locked */
70184ade7c1SJan Kara static bool buffer_migrate_lock_buffers(struct buffer_head *head,
70284ade7c1SJan Kara 							enum migrate_mode mode)
70384ade7c1SJan Kara {
70484ade7c1SJan Kara 	struct buffer_head *bh = head;
7054bb6dc79SDouglas Anderson 	struct buffer_head *failed_bh;
70684ade7c1SJan Kara 
70784ade7c1SJan Kara 	do {
7084bb6dc79SDouglas Anderson 		if (!trylock_buffer(bh)) {
7094bb6dc79SDouglas Anderson 			if (mode == MIGRATE_ASYNC)
7104bb6dc79SDouglas Anderson 				goto unlock;
7114bb6dc79SDouglas Anderson 			if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
7124bb6dc79SDouglas Anderson 				goto unlock;
71384ade7c1SJan Kara 			lock_buffer(bh);
7144bb6dc79SDouglas Anderson 		}
71584ade7c1SJan Kara 
7164bb6dc79SDouglas Anderson 		bh = bh->b_this_page;
71784ade7c1SJan Kara 	} while (bh != head);
71884ade7c1SJan Kara 
71984ade7c1SJan Kara 	return true;
72084ade7c1SJan Kara 
7214bb6dc79SDouglas Anderson unlock:
7224bb6dc79SDouglas Anderson 	/* We failed to lock the buffer and cannot stall. */
7234bb6dc79SDouglas Anderson 	failed_bh = bh;
72484ade7c1SJan Kara 	bh = head;
72584ade7c1SJan Kara 	while (bh != failed_bh) {
72684ade7c1SJan Kara 		unlock_buffer(bh);
72784ade7c1SJan Kara 		bh = bh->b_this_page;
72884ade7c1SJan Kara 	}
72984ade7c1SJan Kara 
7304bb6dc79SDouglas Anderson 	return false;
73184ade7c1SJan Kara }
73284ade7c1SJan Kara 
73367235182SMatthew Wilcox (Oracle) static int __buffer_migrate_folio(struct address_space *mapping,
73467235182SMatthew Wilcox (Oracle) 		struct folio *dst, struct folio *src, enum migrate_mode mode,
73589cb0888SJan Kara 		bool check_refs)
7361d8b85ccSChristoph Lameter {
7371d8b85ccSChristoph Lameter 	struct buffer_head *bh, *head;
7381d8b85ccSChristoph Lameter 	int rc;
739cc4f11e6SJan Kara 	int expected_count;
7401d8b85ccSChristoph Lameter 
74167235182SMatthew Wilcox (Oracle) 	head = folio_buffers(src);
74267235182SMatthew Wilcox (Oracle) 	if (!head)
74354184650SMatthew Wilcox (Oracle) 		return migrate_folio(mapping, dst, src, mode);
7441d8b85ccSChristoph Lameter 
745cc4f11e6SJan Kara 	/* Check that the page does not have extra refs before we do more work */
746108ca835SMatthew Wilcox (Oracle) 	expected_count = folio_expected_refs(mapping, src);
74767235182SMatthew Wilcox (Oracle) 	if (folio_ref_count(src) != expected_count)
748cc4f11e6SJan Kara 		return -EAGAIN;
749cc4f11e6SJan Kara 
750cc4f11e6SJan Kara 	if (!buffer_migrate_lock_buffers(head, mode))
751cc4f11e6SJan Kara 		return -EAGAIN;
7521d8b85ccSChristoph Lameter 
75389cb0888SJan Kara 	if (check_refs) {
75489cb0888SJan Kara 		bool busy;
75589cb0888SJan Kara 		bool invalidated = false;
75689cb0888SJan Kara 
75789cb0888SJan Kara recheck_buffers:
75889cb0888SJan Kara 		busy = false;
759600f111eSMatthew Wilcox (Oracle) 		spin_lock(&mapping->i_private_lock);
76089cb0888SJan Kara 		bh = head;
76189cb0888SJan Kara 		do {
76289cb0888SJan Kara 			if (atomic_read(&bh->b_count)) {
76389cb0888SJan Kara 				busy = true;
76489cb0888SJan Kara 				break;
76589cb0888SJan Kara 			}
76689cb0888SJan Kara 			bh = bh->b_this_page;
76789cb0888SJan Kara 		} while (bh != head);
76889cb0888SJan Kara 		if (busy) {
76989cb0888SJan Kara 			if (invalidated) {
77089cb0888SJan Kara 				rc = -EAGAIN;
77189cb0888SJan Kara 				goto unlock_buffers;
77289cb0888SJan Kara 			}
773600f111eSMatthew Wilcox (Oracle) 			spin_unlock(&mapping->i_private_lock);
77489cb0888SJan Kara 			invalidate_bh_lrus();
77589cb0888SJan Kara 			invalidated = true;
77689cb0888SJan Kara 			goto recheck_buffers;
77789cb0888SJan Kara 		}
77889cb0888SJan Kara 	}
77989cb0888SJan Kara 
78067235182SMatthew Wilcox (Oracle) 	rc = folio_migrate_mapping(mapping, dst, src, 0);
78178bd5209SRafael Aquini 	if (rc != MIGRATEPAGE_SUCCESS)
782cc4f11e6SJan Kara 		goto unlock_buffers;
7831d8b85ccSChristoph Lameter 
78467235182SMatthew Wilcox (Oracle) 	folio_attach_private(dst, folio_detach_private(src));
7851d8b85ccSChristoph Lameter 
7861d8b85ccSChristoph Lameter 	bh = head;
7871d8b85ccSChristoph Lameter 	do {
788d5db4f9dSMatthew Wilcox (Oracle) 		folio_set_bh(bh, dst, bh_offset(bh));
7891d8b85ccSChristoph Lameter 		bh = bh->b_this_page;
7901d8b85ccSChristoph Lameter 	} while (bh != head);
7911d8b85ccSChristoph Lameter 
7922916ecc0SJérôme Glisse 	if (mode != MIGRATE_SYNC_NO_COPY)
79367235182SMatthew Wilcox (Oracle) 		folio_migrate_copy(dst, src);
7942916ecc0SJérôme Glisse 	else
79567235182SMatthew Wilcox (Oracle) 		folio_migrate_flags(dst, src);
7961d8b85ccSChristoph Lameter 
797cc4f11e6SJan Kara 	rc = MIGRATEPAGE_SUCCESS;
798cc4f11e6SJan Kara unlock_buffers:
799ebdf4de5SJan Kara 	if (check_refs)
800600f111eSMatthew Wilcox (Oracle) 		spin_unlock(&mapping->i_private_lock);
8011d8b85ccSChristoph Lameter 	bh = head;
8021d8b85ccSChristoph Lameter 	do {
8031d8b85ccSChristoph Lameter 		unlock_buffer(bh);
8041d8b85ccSChristoph Lameter 		bh = bh->b_this_page;
8051d8b85ccSChristoph Lameter 	} while (bh != head);
8061d8b85ccSChristoph Lameter 
807cc4f11e6SJan Kara 	return rc;
8081d8b85ccSChristoph Lameter }
80989cb0888SJan Kara 
81067235182SMatthew Wilcox (Oracle) /**
81167235182SMatthew Wilcox (Oracle)  * buffer_migrate_folio() - Migration function for folios with buffers.
81267235182SMatthew Wilcox (Oracle)  * @mapping: The address space containing @src.
81367235182SMatthew Wilcox (Oracle)  * @dst: The folio to migrate to.
81467235182SMatthew Wilcox (Oracle)  * @src: The folio to migrate from.
81567235182SMatthew Wilcox (Oracle)  * @mode: How to migrate the folio.
81667235182SMatthew Wilcox (Oracle)  *
81767235182SMatthew Wilcox (Oracle)  * This function can only be used if the underlying filesystem guarantees
81867235182SMatthew Wilcox (Oracle)  * that no other references to @src exist. For example attached buffer
81967235182SMatthew Wilcox (Oracle)  * heads are accessed only under the folio lock.  If your filesystem cannot
82067235182SMatthew Wilcox (Oracle)  * provide this guarantee, buffer_migrate_folio_norefs() may be more
82167235182SMatthew Wilcox (Oracle)  * appropriate.
82267235182SMatthew Wilcox (Oracle)  *
82367235182SMatthew Wilcox (Oracle)  * Return: 0 on success or a negative errno on failure.
82489cb0888SJan Kara  */
82567235182SMatthew Wilcox (Oracle) int buffer_migrate_folio(struct address_space *mapping,
82667235182SMatthew Wilcox (Oracle) 		struct folio *dst, struct folio *src, enum migrate_mode mode)
82789cb0888SJan Kara {
82867235182SMatthew Wilcox (Oracle) 	return __buffer_migrate_folio(mapping, dst, src, mode, false);
82989cb0888SJan Kara }
83067235182SMatthew Wilcox (Oracle) EXPORT_SYMBOL(buffer_migrate_folio);
83189cb0888SJan Kara 
83267235182SMatthew Wilcox (Oracle) /**
83367235182SMatthew Wilcox (Oracle)  * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
83467235182SMatthew Wilcox (Oracle)  * @mapping: The address space containing @src.
83567235182SMatthew Wilcox (Oracle)  * @dst: The folio to migrate to.
83667235182SMatthew Wilcox (Oracle)  * @src: The folio to migrate from.
83767235182SMatthew Wilcox (Oracle)  * @mode: How to migrate the folio.
83867235182SMatthew Wilcox (Oracle)  *
83967235182SMatthew Wilcox (Oracle)  * Like buffer_migrate_folio() except that this variant is more careful
84067235182SMatthew Wilcox (Oracle)  * and checks that there are also no buffer head references. This function
84167235182SMatthew Wilcox (Oracle)  * is the right one for mappings where buffer heads are directly looked
84267235182SMatthew Wilcox (Oracle)  * up and referenced (such as block device mappings).
84367235182SMatthew Wilcox (Oracle)  *
84467235182SMatthew Wilcox (Oracle)  * Return: 0 on success or a negative errno on failure.
84589cb0888SJan Kara  */
84667235182SMatthew Wilcox (Oracle) int buffer_migrate_folio_norefs(struct address_space *mapping,
84767235182SMatthew Wilcox (Oracle) 		struct folio *dst, struct folio *src, enum migrate_mode mode)
84889cb0888SJan Kara {
84967235182SMatthew Wilcox (Oracle) 	return __buffer_migrate_folio(mapping, dst, src, mode, true);
85089cb0888SJan Kara }
851e26355e2SJan Kara EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
852925c86a1SChristoph Hellwig #endif /* CONFIG_BUFFER_HEAD */
8531d8b85ccSChristoph Lameter 
8542ec810d5SMatthew Wilcox (Oracle) int filemap_migrate_folio(struct address_space *mapping,
8552ec810d5SMatthew Wilcox (Oracle) 		struct folio *dst, struct folio *src, enum migrate_mode mode)
8562ec810d5SMatthew Wilcox (Oracle) {
8572ec810d5SMatthew Wilcox (Oracle) 	int ret;
8582ec810d5SMatthew Wilcox (Oracle) 
8592ec810d5SMatthew Wilcox (Oracle) 	ret = folio_migrate_mapping(mapping, dst, src, 0);
8602ec810d5SMatthew Wilcox (Oracle) 	if (ret != MIGRATEPAGE_SUCCESS)
8612ec810d5SMatthew Wilcox (Oracle) 		return ret;
8622ec810d5SMatthew Wilcox (Oracle) 
8632ec810d5SMatthew Wilcox (Oracle) 	if (folio_get_private(src))
8642ec810d5SMatthew Wilcox (Oracle) 		folio_attach_private(dst, folio_detach_private(src));
8652ec810d5SMatthew Wilcox (Oracle) 
8662ec810d5SMatthew Wilcox (Oracle) 	if (mode != MIGRATE_SYNC_NO_COPY)
8672ec810d5SMatthew Wilcox (Oracle) 		folio_migrate_copy(dst, src);
8682ec810d5SMatthew Wilcox (Oracle) 	else
8692ec810d5SMatthew Wilcox (Oracle) 		folio_migrate_flags(dst, src);
8702ec810d5SMatthew Wilcox (Oracle) 	return MIGRATEPAGE_SUCCESS;
8712ec810d5SMatthew Wilcox (Oracle) }
8722ec810d5SMatthew Wilcox (Oracle) EXPORT_SYMBOL_GPL(filemap_migrate_folio);
8732ec810d5SMatthew Wilcox (Oracle) 
87404e62a29SChristoph Lameter /*
8752be7fa10SMatthew Wilcox (Oracle)  * Writeback a folio to clean the dirty state
87604e62a29SChristoph Lameter  */
8772be7fa10SMatthew Wilcox (Oracle) static int writeout(struct address_space *mapping, struct folio *folio)
87804e62a29SChristoph Lameter {
87904e62a29SChristoph Lameter 	struct writeback_control wbc = {
88004e62a29SChristoph Lameter 		.sync_mode = WB_SYNC_NONE,
88104e62a29SChristoph Lameter 		.nr_to_write = 1,
88204e62a29SChristoph Lameter 		.range_start = 0,
88304e62a29SChristoph Lameter 		.range_end = LLONG_MAX,
88404e62a29SChristoph Lameter 		.for_reclaim = 1
88504e62a29SChristoph Lameter 	};
88604e62a29SChristoph Lameter 	int rc;
88704e62a29SChristoph Lameter 
88804e62a29SChristoph Lameter 	if (!mapping->a_ops->writepage)
88904e62a29SChristoph Lameter 		/* No write method for the address space */
89004e62a29SChristoph Lameter 		return -EINVAL;
89104e62a29SChristoph Lameter 
8922be7fa10SMatthew Wilcox (Oracle) 	if (!folio_clear_dirty_for_io(folio))
89304e62a29SChristoph Lameter 		/* Someone else already triggered a write */
89404e62a29SChristoph Lameter 		return -EAGAIN;
89504e62a29SChristoph Lameter 
89604e62a29SChristoph Lameter 	/*
8972be7fa10SMatthew Wilcox (Oracle) 	 * A dirty folio may imply that the underlying filesystem has
8982be7fa10SMatthew Wilcox (Oracle) 	 * the folio on some queue. So the folio must be clean for
8992be7fa10SMatthew Wilcox (Oracle) 	 * migration. Writeout may mean we lose the lock and the
9002be7fa10SMatthew Wilcox (Oracle) 	 * folio state is no longer what we checked for earlier.
90104e62a29SChristoph Lameter 	 * At this point we know that the migration attempt cannot
90204e62a29SChristoph Lameter 	 * be successful.
90304e62a29SChristoph Lameter 	 */
9044eecb8b9SMatthew Wilcox (Oracle) 	remove_migration_ptes(folio, folio, false);
90504e62a29SChristoph Lameter 
9062be7fa10SMatthew Wilcox (Oracle) 	rc = mapping->a_ops->writepage(&folio->page, &wbc);
90704e62a29SChristoph Lameter 
90804e62a29SChristoph Lameter 	if (rc != AOP_WRITEPAGE_ACTIVATE)
90904e62a29SChristoph Lameter 		/* unlocked. Relock */
9102be7fa10SMatthew Wilcox (Oracle) 		folio_lock(folio);
91104e62a29SChristoph Lameter 
912bda8550dSHugh Dickins 	return (rc < 0) ? -EIO : -EAGAIN;
91304e62a29SChristoph Lameter }
91404e62a29SChristoph Lameter 
91504e62a29SChristoph Lameter /*
91604e62a29SChristoph Lameter  * Default handling if a filesystem does not provide a migration function.
91704e62a29SChristoph Lameter  */
9188faa8ef5SMatthew Wilcox (Oracle) static int fallback_migrate_folio(struct address_space *mapping,
9198faa8ef5SMatthew Wilcox (Oracle) 		struct folio *dst, struct folio *src, enum migrate_mode mode)
9208351a6e4SChristoph Lameter {
9218faa8ef5SMatthew Wilcox (Oracle) 	if (folio_test_dirty(src)) {
9228faa8ef5SMatthew Wilcox (Oracle) 		/* Only writeback folios in full synchronous migration */
9232916ecc0SJérôme Glisse 		switch (mode) {
9242916ecc0SJérôme Glisse 		case MIGRATE_SYNC:
9252916ecc0SJérôme Glisse 		case MIGRATE_SYNC_NO_COPY:
9262916ecc0SJérôme Glisse 			break;
9272916ecc0SJérôme Glisse 		default:
928b969c4abSMel Gorman 			return -EBUSY;
9292916ecc0SJérôme Glisse 		}
9302be7fa10SMatthew Wilcox (Oracle) 		return writeout(mapping, src);
931b969c4abSMel Gorman 	}
9328351a6e4SChristoph Lameter 
9338351a6e4SChristoph Lameter 	/*
9348351a6e4SChristoph Lameter 	 * Buffers may be managed in a filesystem specific way.
9358351a6e4SChristoph Lameter 	 * We must have no buffers or drop them.
9368351a6e4SChristoph Lameter 	 */
9370201ebf2SDavid Howells 	if (!filemap_release_folio(src, GFP_KERNEL))
938806031bbSMel Gorman 		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
9398351a6e4SChristoph Lameter 
94054184650SMatthew Wilcox (Oracle) 	return migrate_folio(mapping, dst, src, mode);
9418351a6e4SChristoph Lameter }
9428351a6e4SChristoph Lameter 
9431d8b85ccSChristoph Lameter /*
944e24f0b8fSChristoph Lameter  * Move a page to a newly allocated page
945e24f0b8fSChristoph Lameter  * The page is locked and all ptes have been successfully removed.
946b20a3503SChristoph Lameter  *
947e24f0b8fSChristoph Lameter  * The new page will have replaced the old page if this function
948e24f0b8fSChristoph Lameter  * is successful.
949894bc310SLee Schermerhorn  *
950894bc310SLee Schermerhorn  * Return value:
951894bc310SLee Schermerhorn  *   < 0 - error code
95278bd5209SRafael Aquini  *  MIGRATEPAGE_SUCCESS - success
953b20a3503SChristoph Lameter  */
954e7e3ffebSMatthew Wilcox (Oracle) static int move_to_new_folio(struct folio *dst, struct folio *src,
9555c3f9a67SHugh Dickins 				enum migrate_mode mode)
956b20a3503SChristoph Lameter {
957bda807d4SMinchan Kim 	int rc = -EAGAIN;
9587e2a5e5aSKefeng Wang 	bool is_lru = !__folio_test_movable(src);
959b20a3503SChristoph Lameter 
960e7e3ffebSMatthew Wilcox (Oracle) 	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
961e7e3ffebSMatthew Wilcox (Oracle) 	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
962b20a3503SChristoph Lameter 
963bda807d4SMinchan Kim 	if (likely(is_lru)) {
96468f2736aSMatthew Wilcox (Oracle) 		struct address_space *mapping = folio_mapping(src);
96568f2736aSMatthew Wilcox (Oracle) 
966b20a3503SChristoph Lameter 		if (!mapping)
96754184650SMatthew Wilcox (Oracle) 			rc = migrate_folio(mapping, dst, src, mode);
9680003e2a4SSean Christopherson 		else if (mapping_unmovable(mapping))
9690003e2a4SSean Christopherson 			rc = -EOPNOTSUPP;
9705490da4fSMatthew Wilcox (Oracle) 		else if (mapping->a_ops->migrate_folio)
971b20a3503SChristoph Lameter 			/*
9725490da4fSMatthew Wilcox (Oracle) 			 * Most folios have a mapping and most filesystems
9735490da4fSMatthew Wilcox (Oracle) 			 * provide a migrate_folio callback. Anonymous folios
974bda807d4SMinchan Kim 			 * are part of swap space which also has its own
9755490da4fSMatthew Wilcox (Oracle) 			 * migrate_folio callback. This is the most common path
976bda807d4SMinchan Kim 			 * for page migration.
977b20a3503SChristoph Lameter 			 */
9785490da4fSMatthew Wilcox (Oracle) 			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
9795490da4fSMatthew Wilcox (Oracle) 								mode);
9808351a6e4SChristoph Lameter 		else
9818faa8ef5SMatthew Wilcox (Oracle) 			rc = fallback_migrate_folio(mapping, dst, src, mode);
982bda807d4SMinchan Kim 	} else {
98368f2736aSMatthew Wilcox (Oracle) 		const struct movable_operations *mops;
98468f2736aSMatthew Wilcox (Oracle) 
985bda807d4SMinchan Kim 		/*
986bda807d4SMinchan Kim 		 * A non-LRU page could have been released after the
987bda807d4SMinchan Kim 		 * isolation step. In that case, we shouldn't try migration.
988bda807d4SMinchan Kim 		 */
989e7e3ffebSMatthew Wilcox (Oracle) 		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
990e7e3ffebSMatthew Wilcox (Oracle) 		if (!folio_test_movable(src)) {
991bda807d4SMinchan Kim 			rc = MIGRATEPAGE_SUCCESS;
992e7e3ffebSMatthew Wilcox (Oracle) 			folio_clear_isolated(src);
993bda807d4SMinchan Kim 			goto out;
994bda807d4SMinchan Kim 		}
995bda807d4SMinchan Kim 
996da707a6dSVishal Moola (Oracle) 		mops = folio_movable_ops(src);
99768f2736aSMatthew Wilcox (Oracle) 		rc = mops->migrate_page(&dst->page, &src->page, mode);
998bda807d4SMinchan Kim 		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
999e7e3ffebSMatthew Wilcox (Oracle) 				!folio_test_isolated(src));
1000bda807d4SMinchan Kim 	}
1001b20a3503SChristoph Lameter 
10025c3f9a67SHugh Dickins 	/*
1003e7e3ffebSMatthew Wilcox (Oracle) 	 * When successful, old pagecache src->mapping must be cleared before
1004e7e3ffebSMatthew Wilcox (Oracle) 	 * src is freed; but stats require that PageAnon be left as PageAnon.
10055c3f9a67SHugh Dickins 	 */
10065c3f9a67SHugh Dickins 	if (rc == MIGRATEPAGE_SUCCESS) {
10077e2a5e5aSKefeng Wang 		if (__folio_test_movable(src)) {
1008e7e3ffebSMatthew Wilcox (Oracle) 			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1009bda807d4SMinchan Kim 
1010bda807d4SMinchan Kim 			/*
1011bda807d4SMinchan Kim 			 * We clear PG_movable under the page lock so that a
1012bda807d4SMinchan Kim 			 * compactor cannot try to migrate this page.
1013bda807d4SMinchan Kim 			 */
1014e7e3ffebSMatthew Wilcox (Oracle) 			folio_clear_isolated(src);
1015bda807d4SMinchan Kim 		}
1016bda807d4SMinchan Kim 
1017bda807d4SMinchan Kim 		/*
1018e7e3ffebSMatthew Wilcox (Oracle) 		 * An anonymous or movable src->mapping will be cleared by
1019bda807d4SMinchan Kim 		 * free_pages_prepare(), so don't reset it here; that keeps
1020bda807d4SMinchan Kim 		 * type checks such as PageAnon() working.
1021bda807d4SMinchan Kim 		 */
1022e7e3ffebSMatthew Wilcox (Oracle) 		if (!folio_mapping_flags(src))
1023e7e3ffebSMatthew Wilcox (Oracle) 			src->mapping = NULL;
1024d2b2c6ddSLars Persson 
1025e7e3ffebSMatthew Wilcox (Oracle) 		if (likely(!folio_is_zone_device(dst)))
1026e7e3ffebSMatthew Wilcox (Oracle) 			flush_dcache_folio(dst);
10273fe2011fSMel Gorman 	}
1028bda807d4SMinchan Kim out:
1029e24f0b8fSChristoph Lameter 	return rc;
1030e24f0b8fSChristoph Lameter }
1031e24f0b8fSChristoph Lameter 
103264c8902eSHuang Ying /*
1033d1adb25dSBaolin Wang  * To record some information during migration, we use the otherwise
1034d1adb25dSBaolin Wang  * unused private field of the newly allocated destination folio.
1035d1adb25dSBaolin Wang  * This is safe because nobody else is using it.
103664c8902eSHuang Ying  */
1037eebb3dabSBaolin Wang enum {
1038eebb3dabSBaolin Wang 	PAGE_WAS_MAPPED = BIT(0),
1039eebb3dabSBaolin Wang 	PAGE_WAS_MLOCKED = BIT(1),
1040d1adb25dSBaolin Wang 	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
1041eebb3dabSBaolin Wang };
1042eebb3dabSBaolin Wang 
104364c8902eSHuang Ying static void __migrate_folio_record(struct folio *dst,
1044d1adb25dSBaolin Wang 				   int old_page_state,
104564c8902eSHuang Ying 				   struct anon_vma *anon_vma)
1046e24f0b8fSChristoph Lameter {
1047d1adb25dSBaolin Wang 	dst->private = (void *)anon_vma + old_page_state;
104864c8902eSHuang Ying }
104964c8902eSHuang Ying 
105064c8902eSHuang Ying static void __migrate_folio_extract(struct folio *dst,
1051eebb3dabSBaolin Wang 				   int *old_page_state,
105264c8902eSHuang Ying 				   struct anon_vma **anon_vmap)
105364c8902eSHuang Ying {
1054d1adb25dSBaolin Wang 	unsigned long private = (unsigned long)dst->private;
1055d1adb25dSBaolin Wang 
1056d1adb25dSBaolin Wang 	*anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
1057d1adb25dSBaolin Wang 	*old_page_state = private & PAGE_OLD_STATES;
105864c8902eSHuang Ying 	dst->private = NULL;
105964c8902eSHuang Ying }
106064c8902eSHuang Ying 
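/*
 * Illustrative sketch only (not part of the kernel sources): how the
 * record/extract pair above packs state into dst->private.  It assumes
 * anon_vma pointers are at least 4-byte aligned, so the two low bits are
 * free for PAGE_WAS_MAPPED/PAGE_WAS_MLOCKED:
 *
 *	__migrate_folio_record(dst, PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED, anon_vma);
 *	// dst->private now holds (void *)anon_vma with the two low bits set
 *
 *	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
 *	// old_page_state == (PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED);
 *	// anon_vma is recovered by masking off PAGE_OLD_STATES
 */
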
10615dfab109SHuang Ying /* Restore the source folio to the original state upon failure */
10625dfab109SHuang Ying static void migrate_folio_undo_src(struct folio *src,
10635dfab109SHuang Ying 				   int page_was_mapped,
10645dfab109SHuang Ying 				   struct anon_vma *anon_vma,
1065ebe75e47SHuang Ying 				   bool locked,
10665dfab109SHuang Ying 				   struct list_head *ret)
10675dfab109SHuang Ying {
10685dfab109SHuang Ying 	if (page_was_mapped)
10695dfab109SHuang Ying 		remove_migration_ptes(src, src, false);
10705dfab109SHuang Ying 	/* Drop an anon_vma reference if we took one */
10715dfab109SHuang Ying 	if (anon_vma)
10725dfab109SHuang Ying 		put_anon_vma(anon_vma);
1073ebe75e47SHuang Ying 	if (locked)
10745dfab109SHuang Ying 		folio_unlock(src);
1075ebe75e47SHuang Ying 	if (ret)
10765dfab109SHuang Ying 		list_move_tail(&src->lru, ret);
10775dfab109SHuang Ying }
10785dfab109SHuang Ying 
10795dfab109SHuang Ying /* Restore the destination folio to the original state upon failure */
10804e096ae1SMatthew Wilcox (Oracle) static void migrate_folio_undo_dst(struct folio *dst, bool locked,
10814e096ae1SMatthew Wilcox (Oracle) 		free_folio_t put_new_folio, unsigned long private)
10825dfab109SHuang Ying {
1083ebe75e47SHuang Ying 	if (locked)
10845dfab109SHuang Ying 		folio_unlock(dst);
10854e096ae1SMatthew Wilcox (Oracle) 	if (put_new_folio)
10864e096ae1SMatthew Wilcox (Oracle) 		put_new_folio(dst, private);
10875dfab109SHuang Ying 	else
10885dfab109SHuang Ying 		folio_put(dst);
10895dfab109SHuang Ying }
10905dfab109SHuang Ying 
109164c8902eSHuang Ying /* Cleanup src folio upon migration success */
109264c8902eSHuang Ying static void migrate_folio_done(struct folio *src,
109364c8902eSHuang Ying 			       enum migrate_reason reason)
109464c8902eSHuang Ying {
109564c8902eSHuang Ying 	/*
109664c8902eSHuang Ying 	 * Compaction can also migrate non-LRU pages, which are
109764c8902eSHuang Ying 	 * not accounted in NR_ISOLATED_*. They can be recognized
10987e2a5e5aSKefeng Wang 	 * by __folio_test_movable().
109964c8902eSHuang Ying 	 */
110064c8902eSHuang Ying 	if (likely(!__folio_test_movable(src)))
110164c8902eSHuang Ying 		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
110264c8902eSHuang Ying 				    folio_is_file_lru(src), -folio_nr_pages(src));
110364c8902eSHuang Ying 
110464c8902eSHuang Ying 	if (reason != MR_MEMORY_FAILURE)
110564c8902eSHuang Ying 		/* We release the page in page_handle_poison. */
110664c8902eSHuang Ying 		folio_put(src);
110764c8902eSHuang Ying }
110864c8902eSHuang Ying 
1109ebe75e47SHuang Ying /* Obtain the lock on the folio and remove all ptes. */
11104e096ae1SMatthew Wilcox (Oracle) static int migrate_folio_unmap(new_folio_t get_new_folio,
11114e096ae1SMatthew Wilcox (Oracle) 		free_folio_t put_new_folio, unsigned long private,
11124e096ae1SMatthew Wilcox (Oracle) 		struct folio *src, struct folio **dstp, enum migrate_mode mode,
11132ef7dbb2SHuang Ying 		enum migrate_reason reason, struct list_head *ret)
1114e24f0b8fSChristoph Lameter {
1115ebe75e47SHuang Ying 	struct folio *dst;
11160dabec93SMinchan Kim 	int rc = -EAGAIN;
1117eebb3dabSBaolin Wang 	int old_page_state = 0;
11183f6c8272SMel Gorman 	struct anon_vma *anon_vma = NULL;
11197e2a5e5aSKefeng Wang 	bool is_lru = !__folio_test_movable(src);
1120ebe75e47SHuang Ying 	bool locked = false;
1121ebe75e47SHuang Ying 	bool dst_locked = false;
1122ebe75e47SHuang Ying 
1123ebe75e47SHuang Ying 	if (folio_ref_count(src) == 1) {
1124ebe75e47SHuang Ying 		/* Folio was freed from under us. So we are done. */
1125ebe75e47SHuang Ying 		folio_clear_active(src);
1126ebe75e47SHuang Ying 		folio_clear_unevictable(src);
1127ebe75e47SHuang Ying 		/* free_pages_prepare() will clear PG_isolated. */
1128ebe75e47SHuang Ying 		list_del(&src->lru);
1129ebe75e47SHuang Ying 		migrate_folio_done(src, reason);
1130ebe75e47SHuang Ying 		return MIGRATEPAGE_SUCCESS;
1131ebe75e47SHuang Ying 	}
1132ebe75e47SHuang Ying 
11334e096ae1SMatthew Wilcox (Oracle) 	dst = get_new_folio(src, private);
11344e096ae1SMatthew Wilcox (Oracle) 	if (!dst)
1135ebe75e47SHuang Ying 		return -ENOMEM;
1136ebe75e47SHuang Ying 	*dstp = dst;
1137ebe75e47SHuang Ying 
1138ebe75e47SHuang Ying 	dst->private = NULL;
113995a402c3SChristoph Lameter 
1140682a71a1SMatthew Wilcox (Oracle) 	if (!folio_trylock(src)) {
11412ef7dbb2SHuang Ying 		if (mode == MIGRATE_ASYNC)
11420dabec93SMinchan Kim 			goto out;
11433e7d3449SMel Gorman 
11443e7d3449SMel Gorman 		/*
11453e7d3449SMel Gorman 		 * It's not safe for direct compaction to call lock_page.
11463e7d3449SMel Gorman 		 * For example, during page readahead pages are added locked
11473e7d3449SMel Gorman 		 * to the LRU. Later, when the IO completes the pages are
11483e7d3449SMel Gorman 		 * marked uptodate and unlocked. However, the queueing
11493e7d3449SMel Gorman 		 * could be merging multiple pages for one bio (e.g.
1150d4388340SMatthew Wilcox (Oracle) 		 * mpage_readahead). If an allocation happens for the
11513e7d3449SMel Gorman 		 * second or third page, the process can end up locking
11523e7d3449SMel Gorman 		 * the same page twice and deadlocking. Rather than
11533e7d3449SMel Gorman 		 * trying to be clever about what pages can be locked,
11543e7d3449SMel Gorman 		 * avoid the use of lock_page for direct compaction
11553e7d3449SMel Gorman 		 * altogether.
11563e7d3449SMel Gorman 		 */
11573e7d3449SMel Gorman 		if (current->flags & PF_MEMALLOC)
11580dabec93SMinchan Kim 			goto out;
11593e7d3449SMel Gorman 
11604bb6dc79SDouglas Anderson 		/*
11614bb6dc79SDouglas Anderson 		 * In "light" mode, we can wait for transient locks (eg
11624bb6dc79SDouglas Anderson 		 * inserting a page into the page table), but it's not
11634bb6dc79SDouglas Anderson 		 * worth waiting for I/O.
11644bb6dc79SDouglas Anderson 		 */
11654bb6dc79SDouglas Anderson 		if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
11664bb6dc79SDouglas Anderson 			goto out;
11674bb6dc79SDouglas Anderson 
1168682a71a1SMatthew Wilcox (Oracle) 		folio_lock(src);
1169e24f0b8fSChristoph Lameter 	}
1170ebe75e47SHuang Ying 	locked = true;
1171eebb3dabSBaolin Wang 	if (folio_test_mlocked(src))
1172eebb3dabSBaolin Wang 		old_page_state |= PAGE_WAS_MLOCKED;
1173e24f0b8fSChristoph Lameter 
1174682a71a1SMatthew Wilcox (Oracle) 	if (folio_test_writeback(src)) {
117511bc82d6SAndrea Arcangeli 		/*
1176fed5b64aSJianguo Wu 		 * Only in the case of a full synchronous migration is it
1177a6bc32b8SMel Gorman 		 * necessary to wait for PageWriteback. In the async case,
1178a6bc32b8SMel Gorman 		 * the retry loop is too short and in the sync-light case,
1179a6bc32b8SMel Gorman 		 * the overhead of stalling is too much.
118011bc82d6SAndrea Arcangeli 		 */
11812916ecc0SJérôme Glisse 		switch (mode) {
11822916ecc0SJérôme Glisse 		case MIGRATE_SYNC:
11832916ecc0SJérôme Glisse 		case MIGRATE_SYNC_NO_COPY:
11842916ecc0SJérôme Glisse 			break;
11852916ecc0SJérôme Glisse 		default:
118611bc82d6SAndrea Arcangeli 			rc = -EBUSY;
1187ebe75e47SHuang Ying 			goto out;
118811bc82d6SAndrea Arcangeli 		}
1189682a71a1SMatthew Wilcox (Oracle) 		folio_wait_writeback(src);
1190e24f0b8fSChristoph Lameter 	}
119103f15c86SHugh Dickins 
1192e24f0b8fSChristoph Lameter 	/*
1193682a71a1SMatthew Wilcox (Oracle) 	 * try_to_migrate() will drop src->mapcount to 0, and then we cannot
1194682a71a1SMatthew Wilcox (Oracle) 	 * notice if the anon_vma is freed while we migrate the page. This
11951ce82b69SHugh Dickins 	 * folio_get_anon_vma() delays freeing the anon_vma until the end of
1196dc386d4dSKAMEZAWA Hiroyuki 	 * migration. File cache pages are no problem because they are
1197989f89c5SKAMEZAWA Hiroyuki 	 * protected by the page lock (file caches use write_page() or
1198989f89c5SKAMEZAWA Hiroyuki 	 * lock_page() during migration), so only anon pages need this care.
11993fe2011fSMel Gorman 	 *
120029eea9b5SMatthew Wilcox (Oracle) 	 * Only folio_get_anon_vma() understands the subtleties of
120103f15c86SHugh Dickins 	 * getting a hold on an anon_vma from outside one of its mms.
120203f15c86SHugh Dickins 	 * But if we cannot get anon_vma, then we won't need it anyway,
120303f15c86SHugh Dickins 	 * because that implies that the anon page is no longer mapped
120403f15c86SHugh Dickins 	 * (and cannot be remapped so long as we hold the page lock).
12053fe2011fSMel Gorman 	 */
1206682a71a1SMatthew Wilcox (Oracle) 	if (folio_test_anon(src) && !folio_test_ksm(src))
120729eea9b5SMatthew Wilcox (Oracle) 		anon_vma = folio_get_anon_vma(src);
120862e1c553SShaohua Li 
12097db7671fSHugh Dickins 	/*
12107db7671fSHugh Dickins 	 * Block others from accessing the new page when we get around to
12117db7671fSHugh Dickins 	 * establishing additional references. We are usually the only one
1212682a71a1SMatthew Wilcox (Oracle) 	 * holding a reference to dst at this point. We used to have a BUG
1213682a71a1SMatthew Wilcox (Oracle) 	 * here if folio_trylock(dst) fails, but would like to allow for
1214682a71a1SMatthew Wilcox (Oracle) 	 * cases where there might be a race with the previous use of dst.
12157db7671fSHugh Dickins 	 * This is much like races on refcount of oldpage: just don't BUG().
12167db7671fSHugh Dickins 	 */
1217682a71a1SMatthew Wilcox (Oracle) 	if (unlikely(!folio_trylock(dst)))
1218ebe75e47SHuang Ying 		goto out;
1219ebe75e47SHuang Ying 	dst_locked = true;
12207db7671fSHugh Dickins 
1221bda807d4SMinchan Kim 	if (unlikely(!is_lru)) {
1222eebb3dabSBaolin Wang 		__migrate_folio_record(dst, old_page_state, anon_vma);
122364c8902eSHuang Ying 		return MIGRATEPAGE_UNMAP;
1224bda807d4SMinchan Kim 	}
1225bda807d4SMinchan Kim 
1226dc386d4dSKAMEZAWA Hiroyuki 	/*
122762e1c553SShaohua Li 	 * Corner case handling:
122862e1c553SShaohua Li 	 * 1. When a new swap-cache page is read in, it is added to the LRU
122962e1c553SShaohua Li 	 * and treated as swapcache, but it has no rmap yet.
1230682a71a1SMatthew Wilcox (Oracle) 	 * Calling try_to_unmap() against a src->mapping==NULL page will
123162e1c553SShaohua Li 	 * trigger a BUG.  So handle it here.
1232d12b8951SYang Shi 	 * 2. An orphaned page (see truncate_cleanup_page) might have
123362e1c553SShaohua Li 	 * fs-private metadata. The page can be picked up due to memory
123462e1c553SShaohua Li 	 * offlining.  Everywhere except page reclaim, the page is
123562e1c553SShaohua Li 	 * invisible to the vm, so it cannot be migrated.  Try to free
123662e1c553SShaohua Li 	 * the metadata instead, so the page can be freed.
1237dc386d4dSKAMEZAWA Hiroyuki 	 */
1238682a71a1SMatthew Wilcox (Oracle) 	if (!src->mapping) {
1239682a71a1SMatthew Wilcox (Oracle) 		if (folio_test_private(src)) {
1240682a71a1SMatthew Wilcox (Oracle) 			try_to_free_buffers(src);
1241ebe75e47SHuang Ying 			goto out;
124262e1c553SShaohua Li 		}
1243682a71a1SMatthew Wilcox (Oracle) 	} else if (folio_mapped(src)) {
12447db7671fSHugh Dickins 		/* Establish migration ptes */
1245682a71a1SMatthew Wilcox (Oracle) 		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1246682a71a1SMatthew Wilcox (Oracle) 			       !folio_test_ksm(src) && !anon_vma, src);
1247fb3592c4SHuang Ying 		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1248eebb3dabSBaolin Wang 		old_page_state |= PAGE_WAS_MAPPED;
12492ebba6b7SHugh Dickins 	}
1250dc386d4dSKAMEZAWA Hiroyuki 
125164c8902eSHuang Ying 	if (!folio_mapped(src)) {
1252eebb3dabSBaolin Wang 		__migrate_folio_record(dst, old_page_state, anon_vma);
125364c8902eSHuang Ying 		return MIGRATEPAGE_UNMAP;
125464c8902eSHuang Ying 	}
125564c8902eSHuang Ying 
125664c8902eSHuang Ying out:
125780562ba0SHuang Ying 	/*
125880562ba0SHuang Ying 	 * A folio that has not been unmapped will be restored to
125980562ba0SHuang Ying 	 * the right list unless we want to retry.
126080562ba0SHuang Ying 	 */
1261fb3592c4SHuang Ying 	if (rc == -EAGAIN)
1262ebe75e47SHuang Ying 		ret = NULL;
126380562ba0SHuang Ying 
1264eebb3dabSBaolin Wang 	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1265eebb3dabSBaolin Wang 			       anon_vma, locked, ret);
12664e096ae1SMatthew Wilcox (Oracle) 	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
126780562ba0SHuang Ying 
126880562ba0SHuang Ying 	return rc;
126980562ba0SHuang Ying }
127080562ba0SHuang Ying 
1271ebe75e47SHuang Ying /* Migrate the folio to the newly allocated folio in dst. */
12724e096ae1SMatthew Wilcox (Oracle) static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1273ebe75e47SHuang Ying 			      struct folio *src, struct folio *dst,
1274ebe75e47SHuang Ying 			      enum migrate_mode mode, enum migrate_reason reason,
1275ebe75e47SHuang Ying 			      struct list_head *ret)
127664c8902eSHuang Ying {
127764c8902eSHuang Ying 	int rc;
1278eebb3dabSBaolin Wang 	int old_page_state = 0;
127964c8902eSHuang Ying 	struct anon_vma *anon_vma = NULL;
12807e2a5e5aSKefeng Wang 	bool is_lru = !__folio_test_movable(src);
12815dfab109SHuang Ying 	struct list_head *prev;
128264c8902eSHuang Ying 
1283eebb3dabSBaolin Wang 	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
12845dfab109SHuang Ying 	prev = dst->lru.prev;
12855dfab109SHuang Ying 	list_del(&dst->lru);
128664c8902eSHuang Ying 
1287682a71a1SMatthew Wilcox (Oracle) 	rc = move_to_new_folio(dst, src, mode);
1288ebe75e47SHuang Ying 	if (rc)
1289ebe75e47SHuang Ying 		goto out;
12905dfab109SHuang Ying 
129164c8902eSHuang Ying 	if (unlikely(!is_lru))
129264c8902eSHuang Ying 		goto out_unlock_both;
1293e24f0b8fSChristoph Lameter 
1294c3096e67SHugh Dickins 	/*
1295682a71a1SMatthew Wilcox (Oracle) 	 * When successful, push dst to LRU immediately: so that if it
1296c3096e67SHugh Dickins 	 * turns out to be an mlocked page, remove_migration_ptes() will
1297682a71a1SMatthew Wilcox (Oracle) 	 * automatically build up the correct dst->mlock_count for it.
1298c3096e67SHugh Dickins 	 *
1299c3096e67SHugh Dickins 	 * We would like to do something similar for the old page, when
1300c3096e67SHugh Dickins 	 * unsuccessful, and other cases when a page has been temporarily
1301c3096e67SHugh Dickins 	 * isolated from the unevictable LRU: but this case is the easiest.
1302c3096e67SHugh Dickins 	 */
1303682a71a1SMatthew Wilcox (Oracle) 	folio_add_lru(dst);
1304eebb3dabSBaolin Wang 	if (old_page_state & PAGE_WAS_MLOCKED)
1305c3096e67SHugh Dickins 		lru_add_drain();
1306c3096e67SHugh Dickins 
1307eebb3dabSBaolin Wang 	if (old_page_state & PAGE_WAS_MAPPED)
1308ebe75e47SHuang Ying 		remove_migration_ptes(src, dst, false);
13093f6c8272SMel Gorman 
13107db7671fSHugh Dickins out_unlock_both:
1311682a71a1SMatthew Wilcox (Oracle) 	folio_unlock(dst);
1312ebe75e47SHuang Ying 	set_page_owner_migrate_reason(&dst->page, reason);
1313c6c919ebSMinchan Kim 	/*
1314682a71a1SMatthew Wilcox (Oracle) 	 * If migration is successful, decrease refcount of dst,
1315c6c919ebSMinchan Kim 	 * which will not free the page because new page owner increased
1316c3096e67SHugh Dickins 	 * refcounter.
1317c6c919ebSMinchan Kim 	 */
1318682a71a1SMatthew Wilcox (Oracle) 	folio_put(dst);
1319c6c919ebSMinchan Kim 
1320ebe75e47SHuang Ying 	/*
1321ebe75e47SHuang Ying 	 * A folio that has been migrated has all references removed
1322ebe75e47SHuang Ying 	 * and will be freed.
1323ebe75e47SHuang Ying 	 */
1324ebe75e47SHuang Ying 	list_del(&src->lru);
1325ebe75e47SHuang Ying 	/* Drop an anon_vma reference if we took one */
1326ebe75e47SHuang Ying 	if (anon_vma)
1327ebe75e47SHuang Ying 		put_anon_vma(anon_vma);
1328ebe75e47SHuang Ying 	folio_unlock(src);
1329ebe75e47SHuang Ying 	migrate_folio_done(src, reason);
1330ebe75e47SHuang Ying 
1331ebe75e47SHuang Ying 	return rc;
1332ebe75e47SHuang Ying out:
1333ebe75e47SHuang Ying 	/*
1334ebe75e47SHuang Ying 	 * A folio that has not been migrated will be restored to
1335ebe75e47SHuang Ying 	 * the right list unless we want to retry.
1336ebe75e47SHuang Ying 	 */
1337ebe75e47SHuang Ying 	if (rc == -EAGAIN) {
1338ebe75e47SHuang Ying 		list_add(&dst->lru, prev);
1339eebb3dabSBaolin Wang 		__migrate_folio_record(dst, old_page_state, anon_vma);
13400dabec93SMinchan Kim 		return rc;
13410dabec93SMinchan Kim 	}
134295a402c3SChristoph Lameter 
1343eebb3dabSBaolin Wang 	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1344eebb3dabSBaolin Wang 			       anon_vma, true, ret);
13454e096ae1SMatthew Wilcox (Oracle) 	migrate_folio_undo_dst(dst, true, put_new_folio, private);
134668711a74SDavid Rientjes 
1347e24f0b8fSChristoph Lameter 	return rc;
1348e24f0b8fSChristoph Lameter }
1349b20a3503SChristoph Lameter 
1350e24f0b8fSChristoph Lameter /*
1351290408d4SNaoya Horiguchi  * Counterpart of migrate_folio_unmap()/migrate_folio_move() for hugepage migration.
1352290408d4SNaoya Horiguchi  *
1353290408d4SNaoya Horiguchi  * This function doesn't wait for the completion of hugepage I/O
1354290408d4SNaoya Horiguchi  * because there is no race between I/O and migration for hugepages.
1355290408d4SNaoya Horiguchi  * Note that currently hugepage I/O occurs only in direct I/O
1356290408d4SNaoya Horiguchi  * where no lock is held and PG_writeback is irrelevant,
1357290408d4SNaoya Horiguchi  * and the writeback status of all subpages is counted in the reference
1358290408d4SNaoya Horiguchi  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1359290408d4SNaoya Horiguchi  * under direct I/O, the reference count of the head page is 512 and a bit more.)
1360290408d4SNaoya Horiguchi  * This means that when we try to migrate a hugepage whose subpages are
1361290408d4SNaoya Horiguchi  * doing direct I/O, some references remain after try_to_unmap() and
1362290408d4SNaoya Horiguchi  * hugepage migration fails without data corruption.
1363290408d4SNaoya Horiguchi  *
1364290408d4SNaoya Horiguchi  * There is also no race when direct I/O is issued on the page under migration,
1365290408d4SNaoya Horiguchi  * because then pte is replaced with migration swap entry and direct I/O code
1366290408d4SNaoya Horiguchi  * will wait in the page fault for migration to complete.
1367290408d4SNaoya Horiguchi  */
13684e096ae1SMatthew Wilcox (Oracle) static int unmap_and_move_huge_page(new_folio_t get_new_folio,
13694e096ae1SMatthew Wilcox (Oracle) 		free_folio_t put_new_folio, unsigned long private,
13704e096ae1SMatthew Wilcox (Oracle) 		struct folio *src, int force, enum migrate_mode mode,
13714e096ae1SMatthew Wilcox (Oracle) 		int reason, struct list_head *ret)
1372290408d4SNaoya Horiguchi {
13734e096ae1SMatthew Wilcox (Oracle) 	struct folio *dst;
13742def7424SHugh Dickins 	int rc = -EAGAIN;
13752ebba6b7SHugh Dickins 	int page_was_mapped = 0;
1376290408d4SNaoya Horiguchi 	struct anon_vma *anon_vma = NULL;
1377c0d0381aSMike Kravetz 	struct address_space *mapping = NULL;
1378290408d4SNaoya Horiguchi 
1379c33db292SMatthew Wilcox (Oracle) 	if (folio_ref_count(src) == 1) {
138071a64f61SMuchun Song 		/* page was freed from under us. So we are done. */
1381ea8e72f4SSidhartha Kumar 		folio_putback_active_hugetlb(src);
138271a64f61SMuchun Song 		return MIGRATEPAGE_SUCCESS;
138371a64f61SMuchun Song 	}
138471a64f61SMuchun Song 
13854e096ae1SMatthew Wilcox (Oracle) 	dst = get_new_folio(src, private);
13864e096ae1SMatthew Wilcox (Oracle) 	if (!dst)
1387290408d4SNaoya Horiguchi 		return -ENOMEM;
1388290408d4SNaoya Horiguchi 
1389c33db292SMatthew Wilcox (Oracle) 	if (!folio_trylock(src)) {
13902916ecc0SJérôme Glisse 		if (!force)
1391290408d4SNaoya Horiguchi 			goto out;
13922916ecc0SJérôme Glisse 		switch (mode) {
13932916ecc0SJérôme Glisse 		case MIGRATE_SYNC:
13942916ecc0SJérôme Glisse 		case MIGRATE_SYNC_NO_COPY:
13952916ecc0SJérôme Glisse 			break;
13962916ecc0SJérôme Glisse 		default:
13972916ecc0SJérôme Glisse 			goto out;
13982916ecc0SJérôme Glisse 		}
1399c33db292SMatthew Wilcox (Oracle) 		folio_lock(src);
1400290408d4SNaoya Horiguchi 	}
1401290408d4SNaoya Horiguchi 
1402cb6acd01SMike Kravetz 	/*
1403cb6acd01SMike Kravetz 	 * Check for pages which are in the process of being freed.  Without
1404c33db292SMatthew Wilcox (Oracle) 	 * folio_mapping() set, hugetlbfs specific move page routine will not
1405cb6acd01SMike Kravetz 	 * be called and we could leak usage counts for subpools.
1406cb6acd01SMike Kravetz 	 */
1407345c62d1SSidhartha Kumar 	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1408cb6acd01SMike Kravetz 		rc = -EBUSY;
1409cb6acd01SMike Kravetz 		goto out_unlock;
1410cb6acd01SMike Kravetz 	}
1411cb6acd01SMike Kravetz 
1412c33db292SMatthew Wilcox (Oracle) 	if (folio_test_anon(src))
141329eea9b5SMatthew Wilcox (Oracle) 		anon_vma = folio_get_anon_vma(src);
1414290408d4SNaoya Horiguchi 
1415c33db292SMatthew Wilcox (Oracle) 	if (unlikely(!folio_trylock(dst)))
14167db7671fSHugh Dickins 		goto put_anon;
14177db7671fSHugh Dickins 
1418c33db292SMatthew Wilcox (Oracle) 	if (folio_mapped(src)) {
1419a98a2f0cSAlistair Popple 		enum ttu_flags ttu = 0;
1420336bf30eSMike Kravetz 
1421c33db292SMatthew Wilcox (Oracle) 		if (!folio_test_anon(src)) {
1422c0d0381aSMike Kravetz 			/*
1423336bf30eSMike Kravetz 			 * In shared mappings, try_to_unmap could potentially
1424336bf30eSMike Kravetz 			 * call huge_pmd_unshare.  Because of this, take
1425336bf30eSMike Kravetz 			 * semaphore in write mode here and set TTU_RMAP_LOCKED
1426336bf30eSMike Kravetz 			 * to let lower levels know we have taken the lock.
1427c0d0381aSMike Kravetz 			 */
14284e096ae1SMatthew Wilcox (Oracle) 			mapping = hugetlb_page_mapping_lock_write(&src->page);
1429c0d0381aSMike Kravetz 			if (unlikely(!mapping))
1430c0d0381aSMike Kravetz 				goto unlock_put_anon;
1431c0d0381aSMike Kravetz 
14325202978bSMiaohe Lin 			ttu = TTU_RMAP_LOCKED;
1433336bf30eSMike Kravetz 		}
1434336bf30eSMike Kravetz 
14354b8554c5SMatthew Wilcox (Oracle) 		try_to_migrate(src, ttu);
14362ebba6b7SHugh Dickins 		page_was_mapped = 1;
1437336bf30eSMike Kravetz 
14385202978bSMiaohe Lin 		if (ttu & TTU_RMAP_LOCKED)
1439336bf30eSMike Kravetz 			i_mmap_unlock_write(mapping);
14402ebba6b7SHugh Dickins 	}
1441290408d4SNaoya Horiguchi 
1442c33db292SMatthew Wilcox (Oracle) 	if (!folio_mapped(src))
1443e7e3ffebSMatthew Wilcox (Oracle) 		rc = move_to_new_folio(dst, src, mode);
1444290408d4SNaoya Horiguchi 
1445336bf30eSMike Kravetz 	if (page_was_mapped)
14464eecb8b9SMatthew Wilcox (Oracle) 		remove_migration_ptes(src,
14474eecb8b9SMatthew Wilcox (Oracle) 			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1448290408d4SNaoya Horiguchi 
1449c0d0381aSMike Kravetz unlock_put_anon:
1450c33db292SMatthew Wilcox (Oracle) 	folio_unlock(dst);
14517db7671fSHugh Dickins 
14527db7671fSHugh Dickins put_anon:
1453fd4a4663SHugh Dickins 	if (anon_vma)
14549e60109fSPeter Zijlstra 		put_anon_vma(anon_vma);
14558e6ac7faSAneesh Kumar K.V 
14562def7424SHugh Dickins 	if (rc == MIGRATEPAGE_SUCCESS) {
1457345c62d1SSidhartha Kumar 		move_hugetlb_state(src, dst, reason);
14584e096ae1SMatthew Wilcox (Oracle) 		put_new_folio = NULL;
14592def7424SHugh Dickins 	}
14608e6ac7faSAneesh Kumar K.V 
1461cb6acd01SMike Kravetz out_unlock:
1462c33db292SMatthew Wilcox (Oracle) 	folio_unlock(src);
146309761333SHillf Danton out:
1464dd4ae78aSYang Shi 	if (rc == MIGRATEPAGE_SUCCESS)
1465ea8e72f4SSidhartha Kumar 		folio_putback_active_hugetlb(src);
1466a04840c6SMiaohe Lin 	else if (rc != -EAGAIN)
1467c33db292SMatthew Wilcox (Oracle) 		list_move_tail(&src->lru, ret);
146868711a74SDavid Rientjes 
146968711a74SDavid Rientjes 	/*
147068711a74SDavid Rientjes 	 * If migration was not successful and there's a freeing callback, use
147168711a74SDavid Rientjes 	 * it.  Otherwise, put_page() will drop the reference grabbed during
147268711a74SDavid Rientjes 	 * isolation.
147368711a74SDavid Rientjes 	 */
14744e096ae1SMatthew Wilcox (Oracle) 	if (put_new_folio)
14754e096ae1SMatthew Wilcox (Oracle) 		put_new_folio(dst, private);
147668711a74SDavid Rientjes 	else
1477ea8e72f4SSidhartha Kumar 		folio_putback_active_hugetlb(dst);
147868711a74SDavid Rientjes 
1479290408d4SNaoya Horiguchi 	return rc;
1480290408d4SNaoya Horiguchi }
1481290408d4SNaoya Horiguchi 
1482eaec4e63SHuang Ying static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
1483d532e2e5SYang Shi {
14849c62ff00SHuang Ying 	int rc;
1485d532e2e5SYang Shi 
1486eaec4e63SHuang Ying 	folio_lock(folio);
1487eaec4e63SHuang Ying 	rc = split_folio_to_list(folio, split_folios);
1488eaec4e63SHuang Ying 	folio_unlock(folio);
1489e6fa8a79SHuang Ying 	if (!rc)
1490eaec4e63SHuang Ying 		list_move_tail(&folio->lru, split_folios);
1491d532e2e5SYang Shi 
1492d532e2e5SYang Shi 	return rc;
1493d532e2e5SYang Shi }
1494d532e2e5SYang Shi 
149542012e04SHuang Ying #ifdef CONFIG_TRANSPARENT_HUGEPAGE
149642012e04SHuang Ying #define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
149742012e04SHuang Ying #else
149842012e04SHuang Ying #define NR_MAX_BATCHED_MIGRATION	512
149942012e04SHuang Ying #endif
1500e5bfff8bSHuang Ying #define NR_MAX_MIGRATE_PAGES_RETRY	10
15012ef7dbb2SHuang Ying #define NR_MAX_MIGRATE_ASYNC_RETRY	3
15022ef7dbb2SHuang Ying #define NR_MAX_MIGRATE_SYNC_RETRY					\
15032ef7dbb2SHuang Ying 	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
1504e5bfff8bSHuang Ying 
15055b855937SHuang Ying struct migrate_pages_stats {
15065b855937SHuang Ying 	int nr_succeeded;	/* Normal and large folios migrated successfully, in
15075b855937SHuang Ying 				   units of base pages */
15085b855937SHuang Ying 	int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
15095b855937SHuang Ying 				   units of base pages.  Untried folios aren't counted */
15105b855937SHuang Ying 	int nr_thp_succeeded;	/* THP migrated successfully */
15115b855937SHuang Ying 	int nr_thp_failed;	/* THP failed to be migrated */
15125b855937SHuang Ying 	int nr_thp_split;	/* THP split before migrating */
1513a259945eSZi Yan 	int nr_split;	/* Large folio (include THP) split before migrating */
15145b855937SHuang Ying };
15155b855937SHuang Ying 
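/*
 * Worked example of the accounting above (illustration only, not taken
 * from a specific workload): with HPAGE_PMD_NR == 512, a batch in which
 * one PMD-sized THP migrates successfully and one order-0 folio fails
 * ends up with nr_succeeded == 512 (base pages), nr_thp_succeeded == 1
 * (whole THPs), nr_failed_pages == 1 and nr_thp_failed == 0.
 */
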
1516290408d4SNaoya Horiguchi /*
1517e5bfff8bSHuang Ying  * Returns the number of hugetlb folios that were not migrated, or an error code
1518e5bfff8bSHuang Ying  * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
1519e5bfff8bSHuang Ying  * any more because the list has become empty or no retryable hugetlb folios
1520e5bfff8bSHuang Ying  * remain. It is the caller's responsibility to call putback_movable_pages()
1521e5bfff8bSHuang Ying  * only if ret != 0.
1522e5bfff8bSHuang Ying  */
15234e096ae1SMatthew Wilcox (Oracle) static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
15244e096ae1SMatthew Wilcox (Oracle) 			    free_folio_t put_new_folio, unsigned long private,
1525e5bfff8bSHuang Ying 			    enum migrate_mode mode, int reason,
1526e5bfff8bSHuang Ying 			    struct migrate_pages_stats *stats,
1527e5bfff8bSHuang Ying 			    struct list_head *ret_folios)
1528e5bfff8bSHuang Ying {
1529e5bfff8bSHuang Ying 	int retry = 1;
1530e5bfff8bSHuang Ying 	int nr_failed = 0;
1531e5bfff8bSHuang Ying 	int nr_retry_pages = 0;
1532e5bfff8bSHuang Ying 	int pass = 0;
1533e5bfff8bSHuang Ying 	struct folio *folio, *folio2;
1534e5bfff8bSHuang Ying 	int rc, nr_pages;
1535e5bfff8bSHuang Ying 
1536e5bfff8bSHuang Ying 	for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1537e5bfff8bSHuang Ying 		retry = 0;
1538e5bfff8bSHuang Ying 		nr_retry_pages = 0;
1539e5bfff8bSHuang Ying 
1540e5bfff8bSHuang Ying 		list_for_each_entry_safe(folio, folio2, from, lru) {
1541e5bfff8bSHuang Ying 			if (!folio_test_hugetlb(folio))
1542e5bfff8bSHuang Ying 				continue;
1543e5bfff8bSHuang Ying 
1544e5bfff8bSHuang Ying 			nr_pages = folio_nr_pages(folio);
1545e5bfff8bSHuang Ying 
1546e5bfff8bSHuang Ying 			cond_resched();
1547e5bfff8bSHuang Ying 
15486f7d760eSHuang Ying 			/*
15496f7d760eSHuang Ying 			 * Migratability of hugepages depends on architectures and
15506f7d760eSHuang Ying 			 * their size.  This check is necessary because some callers
15516f7d760eSHuang Ying 			 * of hugepage migration like soft offline and memory
15526f7d760eSHuang Ying 			 * hotremove don't walk through page tables or check whether
15536f7d760eSHuang Ying 			 * the hugepage is pmd-based or not before kicking migration.
15546f7d760eSHuang Ying 			 */
15556f7d760eSHuang Ying 			if (!hugepage_migration_supported(folio_hstate(folio))) {
15566f7d760eSHuang Ying 				nr_failed++;
15576f7d760eSHuang Ying 				stats->nr_failed_pages += nr_pages;
15586f7d760eSHuang Ying 				list_move_tail(&folio->lru, ret_folios);
15596f7d760eSHuang Ying 				continue;
15606f7d760eSHuang Ying 			}
15616f7d760eSHuang Ying 
15624e096ae1SMatthew Wilcox (Oracle) 			rc = unmap_and_move_huge_page(get_new_folio,
15634e096ae1SMatthew Wilcox (Oracle) 						      put_new_folio, private,
15644e096ae1SMatthew Wilcox (Oracle) 						      folio, pass > 2, mode,
1565e5bfff8bSHuang Ying 						      reason, ret_folios);
1566e5bfff8bSHuang Ying 			/*
1567e5bfff8bSHuang Ying 			 * The rules are:
1568e5bfff8bSHuang Ying 			 *	Success: hugetlb folio will be put back
1569e5bfff8bSHuang Ying 			 *	-EAGAIN: stay on the from list
1570e5bfff8bSHuang Ying 			 *	-ENOMEM: stay on the from list
1571e5bfff8bSHuang Ying 			 *	Other errno: put on ret_folios list
1572e5bfff8bSHuang Ying 			 */
1573e5bfff8bSHuang Ying 			switch(rc) {
1574e5bfff8bSHuang Ying 			case -ENOMEM:
1575e5bfff8bSHuang Ying 				/*
1576e5bfff8bSHuang Ying 				 * When memory is low, don't bother to try to migrate
1577e5bfff8bSHuang Ying 				 * other folios, just exit.
1578e5bfff8bSHuang Ying 				 */
1579e5bfff8bSHuang Ying 				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1580e5bfff8bSHuang Ying 				return -ENOMEM;
1581e5bfff8bSHuang Ying 			case -EAGAIN:
1582e5bfff8bSHuang Ying 				retry++;
1583e5bfff8bSHuang Ying 				nr_retry_pages += nr_pages;
1584e5bfff8bSHuang Ying 				break;
1585e5bfff8bSHuang Ying 			case MIGRATEPAGE_SUCCESS:
1586e5bfff8bSHuang Ying 				stats->nr_succeeded += nr_pages;
1587e5bfff8bSHuang Ying 				break;
1588e5bfff8bSHuang Ying 			default:
1589e5bfff8bSHuang Ying 				/*
1590e5bfff8bSHuang Ying 				 * Permanent failure (-EBUSY, etc.):
1591e5bfff8bSHuang Ying 				 * unlike -EAGAIN case, the failed folio is
1592e5bfff8bSHuang Ying 				 * removed from migration folio list and not
1593e5bfff8bSHuang Ying 				 * retried in the next outer loop.
1594e5bfff8bSHuang Ying 				 */
1595e5bfff8bSHuang Ying 				nr_failed++;
1596e5bfff8bSHuang Ying 				stats->nr_failed_pages += nr_pages;
1597e5bfff8bSHuang Ying 				break;
1598e5bfff8bSHuang Ying 			}
1599e5bfff8bSHuang Ying 		}
1600e5bfff8bSHuang Ying 	}
1601e5bfff8bSHuang Ying 	/*
1602e5bfff8bSHuang Ying 	 * nr_failed is number of hugetlb folios failed to be migrated.  After
1603e5bfff8bSHuang Ying 	 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1604e5bfff8bSHuang Ying 	 * folios as failed.
1605e5bfff8bSHuang Ying 	 */
1606e5bfff8bSHuang Ying 	nr_failed += retry;
1607e5bfff8bSHuang Ying 	stats->nr_failed_pages += nr_retry_pages;
1608e5bfff8bSHuang Ying 
1609e5bfff8bSHuang Ying 	return nr_failed;
1610e5bfff8bSHuang Ying }
1611e5bfff8bSHuang Ying 
16125dfab109SHuang Ying /*
16135dfab109SHuang Ying  * migrate_pages_batch() first unmaps as many folios in the from list as
16145dfab109SHuang Ying  * possible, then moves the unmapped folios.
1615fb3592c4SHuang Ying  *
1616fb3592c4SHuang Ying  * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
1617fb3592c4SHuang Ying  * lock or bit while we have locked more than one folio, which may cause
1618fb3592c4SHuang Ying  * deadlock (e.g., for the loop device).  So, if mode != MIGRATE_ASYNC, the
1619fb3592c4SHuang Ying  * length of the from list must be <= 1.
16205dfab109SHuang Ying  */
16214e096ae1SMatthew Wilcox (Oracle) static int migrate_pages_batch(struct list_head *from,
16224e096ae1SMatthew Wilcox (Oracle) 		new_folio_t get_new_folio, free_folio_t put_new_folio,
16234e096ae1SMatthew Wilcox (Oracle) 		unsigned long private, enum migrate_mode mode, int reason,
16244e096ae1SMatthew Wilcox (Oracle) 		struct list_head *ret_folios, struct list_head *split_folios,
16254e096ae1SMatthew Wilcox (Oracle) 		struct migrate_pages_stats *stats, int nr_pass)
162642012e04SHuang Ying {
1627a21d2133SHuang Ying 	int retry = 1;
162842012e04SHuang Ying 	int thp_retry = 1;
162942012e04SHuang Ying 	int nr_failed = 0;
163042012e04SHuang Ying 	int nr_retry_pages = 0;
163142012e04SHuang Ying 	int pass = 0;
163242012e04SHuang Ying 	bool is_thp = false;
1633a259945eSZi Yan 	bool is_large = false;
16345dfab109SHuang Ying 	struct folio *folio, *folio2, *dst = NULL, *dst2;
1635a21d2133SHuang Ying 	int rc, rc_saved = 0, nr_pages;
16365dfab109SHuang Ying 	LIST_HEAD(unmap_folios);
16375dfab109SHuang Ying 	LIST_HEAD(dst_folios);
163842012e04SHuang Ying 	bool nosplit = (reason == MR_NUMA_MISPLACED);
163942012e04SHuang Ying 
1640fb3592c4SHuang Ying 	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1641fb3592c4SHuang Ying 			!list_empty(from) && !list_is_singular(from));
1642a21d2133SHuang Ying 
1643124abcedSHuang Ying 	for (pass = 0; pass < nr_pass && retry; pass++) {
164442012e04SHuang Ying 		retry = 0;
164542012e04SHuang Ying 		thp_retry = 0;
164642012e04SHuang Ying 		nr_retry_pages = 0;
164742012e04SHuang Ying 
164842012e04SHuang Ying 		list_for_each_entry_safe(folio, folio2, from, lru) {
1649a259945eSZi Yan 			is_large = folio_test_large(folio);
1650a259945eSZi Yan 			is_thp = is_large && folio_test_pmd_mappable(folio);
165142012e04SHuang Ying 			nr_pages = folio_nr_pages(folio);
165242012e04SHuang Ying 
165342012e04SHuang Ying 			cond_resched();
165442012e04SHuang Ying 
16556f7d760eSHuang Ying 			/*
16566f7d760eSHuang Ying 			 * Large folio migration might be unsupported or
16576f7d760eSHuang Ying 			 * the allocation might fail, so we should retry
16586f7d760eSHuang Ying 			 * on the same folio after splitting the large
16596f7d760eSHuang Ying 			 * folio into normal folios.
16606f7d760eSHuang Ying 			 *
16616f7d760eSHuang Ying 			 * Split folios are put in split_folios, and
16626f7d760eSHuang Ying 			 * we will migrate them after the rest of the
16636f7d760eSHuang Ying 			 * list is processed.
16646f7d760eSHuang Ying 			 */
16656f7d760eSHuang Ying 			if (!thp_migration_supported() && is_thp) {
1666124abcedSHuang Ying 				nr_failed++;
16676f7d760eSHuang Ying 				stats->nr_thp_failed++;
1668a21d2133SHuang Ying 				if (!try_split_folio(folio, split_folios)) {
16696f7d760eSHuang Ying 					stats->nr_thp_split++;
1670a259945eSZi Yan 					stats->nr_split++;
16716f7d760eSHuang Ying 					continue;
16726f7d760eSHuang Ying 				}
16736f7d760eSHuang Ying 				stats->nr_failed_pages += nr_pages;
16746f7d760eSHuang Ying 				list_move_tail(&folio->lru, ret_folios);
16756f7d760eSHuang Ying 				continue;
16766f7d760eSHuang Ying 			}
16776f7d760eSHuang Ying 
16784e096ae1SMatthew Wilcox (Oracle) 			rc = migrate_folio_unmap(get_new_folio, put_new_folio,
16794e096ae1SMatthew Wilcox (Oracle) 					private, folio, &dst, mode, reason,
16804e096ae1SMatthew Wilcox (Oracle) 					ret_folios);
168142012e04SHuang Ying 			/*
168242012e04SHuang Ying 			 * The rules are:
168342012e04SHuang Ying 			 *	Success: folio will be freed
16845dfab109SHuang Ying 			 *	Unmap: folio will be put on unmap_folios list,
16855dfab109SHuang Ying 			 *	       dst folio put on dst_folios list
168642012e04SHuang Ying 			 *	-EAGAIN: stay on the from list
168742012e04SHuang Ying 			 *	-ENOMEM: stay on the from list
168842012e04SHuang Ying 			 *	Other errno: put on ret_folios list
168942012e04SHuang Ying 			 */
169042012e04SHuang Ying 			switch(rc) {
169142012e04SHuang Ying 			case -ENOMEM:
169242012e04SHuang Ying 				/*
169342012e04SHuang Ying 				 * When memory is low, don't bother to try to migrate
16945dfab109SHuang Ying 				 * other folios, move unmapped folios, then exit.
169542012e04SHuang Ying 				 */
1696124abcedSHuang Ying 				nr_failed++;
169742012e04SHuang Ying 				stats->nr_thp_failed += is_thp;
169842012e04SHuang Ying 				/* Large folio NUMA faulting doesn't split to retry. */
1699a259945eSZi Yan 				if (is_large && !nosplit) {
1700a21d2133SHuang Ying 					int ret = try_split_folio(folio, split_folios);
170142012e04SHuang Ying 
170242012e04SHuang Ying 					if (!ret) {
170342012e04SHuang Ying 						stats->nr_thp_split += is_thp;
170449cac03aSZi Yan 						stats->nr_split++;
170542012e04SHuang Ying 						break;
170642012e04SHuang Ying 					} else if (reason == MR_LONGTERM_PIN &&
170742012e04SHuang Ying 						   ret == -EAGAIN) {
170842012e04SHuang Ying 						/*
170942012e04SHuang Ying 						 * Try again to split large folio to
171042012e04SHuang Ying 						 * mitigate the failure of longterm pinning.
171142012e04SHuang Ying 						 */
1712124abcedSHuang Ying 						retry++;
171342012e04SHuang Ying 						thp_retry += is_thp;
171442012e04SHuang Ying 						nr_retry_pages += nr_pages;
1715851ae642SHuang Ying 						/* Undo duplicated failure counting. */
1716124abcedSHuang Ying 						nr_failed--;
1717851ae642SHuang Ying 						stats->nr_thp_failed -= is_thp;
171842012e04SHuang Ying 						break;
171942012e04SHuang Ying 					}
172042012e04SHuang Ying 				}
172142012e04SHuang Ying 
172242012e04SHuang Ying 				stats->nr_failed_pages += nr_pages + nr_retry_pages;
172342012e04SHuang Ying 				/* nr_failed isn't updated since it is not used on this path */
172442012e04SHuang Ying 				stats->nr_thp_failed += thp_retry;
17255dfab109SHuang Ying 				rc_saved = rc;
17265dfab109SHuang Ying 				if (list_empty(&unmap_folios))
172742012e04SHuang Ying 					goto out;
17285dfab109SHuang Ying 				else
17295dfab109SHuang Ying 					goto move;
173042012e04SHuang Ying 			case -EAGAIN:
173142012e04SHuang Ying 				retry++;
1732124abcedSHuang Ying 				thp_retry += is_thp;
173342012e04SHuang Ying 				nr_retry_pages += nr_pages;
173442012e04SHuang Ying 				break;
173542012e04SHuang Ying 			case MIGRATEPAGE_SUCCESS:
173642012e04SHuang Ying 				stats->nr_succeeded += nr_pages;
173742012e04SHuang Ying 				stats->nr_thp_succeeded += is_thp;
173842012e04SHuang Ying 				break;
17395dfab109SHuang Ying 			case MIGRATEPAGE_UNMAP:
17405dfab109SHuang Ying 				list_move_tail(&folio->lru, &unmap_folios);
17415dfab109SHuang Ying 				list_add_tail(&dst->lru, &dst_folios);
17425dfab109SHuang Ying 				break;
174342012e04SHuang Ying 			default:
174442012e04SHuang Ying 				/*
174542012e04SHuang Ying 				 * Permanent failure (-EBUSY, etc.):
174642012e04SHuang Ying 				 * unlike -EAGAIN case, the failed folio is
174742012e04SHuang Ying 				 * removed from migration folio list and not
174842012e04SHuang Ying 				 * retried in the next outer loop.
174942012e04SHuang Ying 				 */
175042012e04SHuang Ying 				nr_failed++;
1751124abcedSHuang Ying 				stats->nr_thp_failed += is_thp;
175242012e04SHuang Ying 				stats->nr_failed_pages += nr_pages;
175342012e04SHuang Ying 				break;
175442012e04SHuang Ying 			}
175542012e04SHuang Ying 		}
175642012e04SHuang Ying 	}
175742012e04SHuang Ying 	nr_failed += retry;
175842012e04SHuang Ying 	stats->nr_thp_failed += thp_retry;
175942012e04SHuang Ying 	stats->nr_failed_pages += nr_retry_pages;
17605dfab109SHuang Ying move:
17617e12beb8SHuang Ying 	/* Flush TLBs for all unmapped folios */
17627e12beb8SHuang Ying 	try_to_unmap_flush();
17637e12beb8SHuang Ying 
17645dfab109SHuang Ying 	retry = 1;
1765124abcedSHuang Ying 	for (pass = 0; pass < nr_pass && retry; pass++) {
17665dfab109SHuang Ying 		retry = 0;
17675dfab109SHuang Ying 		thp_retry = 0;
17685dfab109SHuang Ying 		nr_retry_pages = 0;
17695dfab109SHuang Ying 
17705dfab109SHuang Ying 		dst = list_first_entry(&dst_folios, struct folio, lru);
17715dfab109SHuang Ying 		dst2 = list_next_entry(dst, lru);
17725dfab109SHuang Ying 		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1773124abcedSHuang Ying 			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
17745dfab109SHuang Ying 			nr_pages = folio_nr_pages(folio);
17755dfab109SHuang Ying 
17765dfab109SHuang Ying 			cond_resched();
17775dfab109SHuang Ying 
17784e096ae1SMatthew Wilcox (Oracle) 			rc = migrate_folio_move(put_new_folio, private,
17795dfab109SHuang Ying 						folio, dst, mode,
17805dfab109SHuang Ying 						reason, ret_folios);
17815dfab109SHuang Ying 			/*
17825dfab109SHuang Ying 			 * The rules are:
17835dfab109SHuang Ying 			 *	Success: folio will be freed
17845dfab109SHuang Ying 			 *	-EAGAIN: stay on the unmap_folios list
17855dfab109SHuang Ying 			 *	Other errno: put on ret_folios list
17865dfab109SHuang Ying 			 */
17875dfab109SHuang Ying 			switch(rc) {
17885dfab109SHuang Ying 			case -EAGAIN:
17895dfab109SHuang Ying 				retry++;
1790124abcedSHuang Ying 				thp_retry += is_thp;
17915dfab109SHuang Ying 				nr_retry_pages += nr_pages;
17925dfab109SHuang Ying 				break;
17935dfab109SHuang Ying 			case MIGRATEPAGE_SUCCESS:
17945dfab109SHuang Ying 				stats->nr_succeeded += nr_pages;
17955dfab109SHuang Ying 				stats->nr_thp_succeeded += is_thp;
17965dfab109SHuang Ying 				break;
17975dfab109SHuang Ying 			default:
17985dfab109SHuang Ying 				nr_failed++;
1799124abcedSHuang Ying 				stats->nr_thp_failed += is_thp;
18005dfab109SHuang Ying 				stats->nr_failed_pages += nr_pages;
18015dfab109SHuang Ying 				break;
18025dfab109SHuang Ying 			}
18035dfab109SHuang Ying 			dst = dst2;
18045dfab109SHuang Ying 			dst2 = list_next_entry(dst, lru);
18055dfab109SHuang Ying 		}
18065dfab109SHuang Ying 	}
18075dfab109SHuang Ying 	nr_failed += retry;
18085dfab109SHuang Ying 	stats->nr_thp_failed += thp_retry;
18095dfab109SHuang Ying 	stats->nr_failed_pages += nr_retry_pages;
18105dfab109SHuang Ying 
1811124abcedSHuang Ying 	rc = rc_saved ? : nr_failed;
18125dfab109SHuang Ying out:
18135dfab109SHuang Ying 	/* Cleanup remaining folios */
18145dfab109SHuang Ying 	dst = list_first_entry(&dst_folios, struct folio, lru);
18155dfab109SHuang Ying 	dst2 = list_next_entry(dst, lru);
18165dfab109SHuang Ying 	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1817eebb3dabSBaolin Wang 		int old_page_state = 0;
18185dfab109SHuang Ying 		struct anon_vma *anon_vma = NULL;
18195dfab109SHuang Ying 
1820eebb3dabSBaolin Wang 		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1821eebb3dabSBaolin Wang 		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1822eebb3dabSBaolin Wang 				       anon_vma, true, ret_folios);
18235dfab109SHuang Ying 		list_del(&dst->lru);
18244e096ae1SMatthew Wilcox (Oracle) 		migrate_folio_undo_dst(dst, true, put_new_folio, private);
18255dfab109SHuang Ying 		dst = dst2;
18265dfab109SHuang Ying 		dst2 = list_next_entry(dst, lru);
18275dfab109SHuang Ying 	}
18285dfab109SHuang Ying 
182942012e04SHuang Ying 	return rc;
183042012e04SHuang Ying }
183142012e04SHuang Ying 
18324e096ae1SMatthew Wilcox (Oracle) static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
18334e096ae1SMatthew Wilcox (Oracle) 		free_folio_t put_new_folio, unsigned long private,
18344e096ae1SMatthew Wilcox (Oracle) 		enum migrate_mode mode, int reason,
18354e096ae1SMatthew Wilcox (Oracle) 		struct list_head *ret_folios, struct list_head *split_folios,
18364e096ae1SMatthew Wilcox (Oracle) 		struct migrate_pages_stats *stats)
18372ef7dbb2SHuang Ying {
18382ef7dbb2SHuang Ying 	int rc, nr_failed = 0;
18392ef7dbb2SHuang Ying 	LIST_HEAD(folios);
18402ef7dbb2SHuang Ying 	struct migrate_pages_stats astats;
18412ef7dbb2SHuang Ying 
18422ef7dbb2SHuang Ying 	memset(&astats, 0, sizeof(astats));
18432ef7dbb2SHuang Ying 	/* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
18444e096ae1SMatthew Wilcox (Oracle) 	rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
18452ef7dbb2SHuang Ying 				 reason, &folios, split_folios, &astats,
18462ef7dbb2SHuang Ying 				 NR_MAX_MIGRATE_ASYNC_RETRY);
18472ef7dbb2SHuang Ying 	stats->nr_succeeded += astats.nr_succeeded;
18482ef7dbb2SHuang Ying 	stats->nr_thp_succeeded += astats.nr_thp_succeeded;
18492ef7dbb2SHuang Ying 	stats->nr_thp_split += astats.nr_thp_split;
1850a259945eSZi Yan 	stats->nr_split += astats.nr_split;
18512ef7dbb2SHuang Ying 	if (rc < 0) {
18522ef7dbb2SHuang Ying 		stats->nr_failed_pages += astats.nr_failed_pages;
18532ef7dbb2SHuang Ying 		stats->nr_thp_failed += astats.nr_thp_failed;
18542ef7dbb2SHuang Ying 		list_splice_tail(&folios, ret_folios);
18552ef7dbb2SHuang Ying 		return rc;
18562ef7dbb2SHuang Ying 	}
18572ef7dbb2SHuang Ying 	stats->nr_thp_failed += astats.nr_thp_split;
1858a259945eSZi Yan 	/*
1859a259945eSZi Yan 	 * Do not count rc, as pages will be retried below.
1860a259945eSZi Yan 	 * Count nr_split only, since it includes nr_thp_split.
1861a259945eSZi Yan 	 */
1862a259945eSZi Yan 	nr_failed += astats.nr_split;
18632ef7dbb2SHuang Ying 	/*
18642ef7dbb2SHuang Ying 	 * Fall back to migrating all failed folios one by one synchronously. All
18652ef7dbb2SHuang Ying 	 * failed folios except split THPs will be retried, so their failure
18662ef7dbb2SHuang Ying 	 * isn't counted.
18672ef7dbb2SHuang Ying 	 */
18682ef7dbb2SHuang Ying 	list_splice_tail_init(&folios, from);
18692ef7dbb2SHuang Ying 	while (!list_empty(from)) {
18702ef7dbb2SHuang Ying 		list_move(from->next, &folios);
18714e096ae1SMatthew Wilcox (Oracle) 		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
18722ef7dbb2SHuang Ying 					 private, mode, reason, ret_folios,
18732ef7dbb2SHuang Ying 					 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
18742ef7dbb2SHuang Ying 		list_splice_tail_init(&folios, ret_folios);
18752ef7dbb2SHuang Ying 		if (rc < 0)
18762ef7dbb2SHuang Ying 			return rc;
18772ef7dbb2SHuang Ying 		nr_failed += rc;
18782ef7dbb2SHuang Ying 	}
18792ef7dbb2SHuang Ying 
18802ef7dbb2SHuang Ying 	return nr_failed;
18812ef7dbb2SHuang Ying }
18822ef7dbb2SHuang Ying 
1883e24f0b8fSChristoph Lameter /*
1884eaec4e63SHuang Ying  * migrate_pages - migrate the folios specified in a list, to the free folios
1885c73e5c9cSSrivatsa S. Bhat  *		   supplied as the target for the page migration
1886e24f0b8fSChristoph Lameter  *
1887eaec4e63SHuang Ying  * @from:		The list of folios to be migrated.
18884e096ae1SMatthew Wilcox (Oracle)  * @get_new_folio:	The function used to allocate free folios to be used
1889eaec4e63SHuang Ying  *			as the target of the folio migration.
18904e096ae1SMatthew Wilcox (Oracle)  * @put_new_folio:	The function used to free target folios if migration
189168711a74SDavid Rientjes  *			fails, or NULL if no special handling is necessary.
18924e096ae1SMatthew Wilcox (Oracle)  * @private:		Private data to be passed on to get_new_folio()
1893c73e5c9cSSrivatsa S. Bhat  * @mode:		The migration mode that specifies the constraints for
1894eaec4e63SHuang Ying  *			folio migration, if any.
1895eaec4e63SHuang Ying  * @reason:		The reason for folio migration.
1896eaec4e63SHuang Ying  * @ret_succeeded:	Set to the number of folios migrated successfully if
18975ac95884SYang Shi  *			the caller passes a non-NULL pointer.
1898e24f0b8fSChristoph Lameter  *
1899e5bfff8bSHuang Ying  * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
1900e5bfff8bSHuang Ying  * are movable any more because the list has become empty or no retryable folios
1901e5bfff8bSHuang Ying  * remain. It is the caller's responsibility to call putback_movable_pages()
1902e5bfff8bSHuang Ying  * only if ret != 0.
1903e24f0b8fSChristoph Lameter  *
1904eaec4e63SHuang Ying  * Returns the number of {normal, large, hugetlb} folios that were not
1905eaec4e63SHuang Ying  * migrated, or an error code. A large folio that was split is counted as
1906eaec4e63SHuang Ying  * one non-migrated large folio, no matter how many of its split folios
1907eaec4e63SHuang Ying  * are migrated successfully.
1908e24f0b8fSChristoph Lameter  */
19094e096ae1SMatthew Wilcox (Oracle) int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
19104e096ae1SMatthew Wilcox (Oracle) 		free_folio_t put_new_folio, unsigned long private,
19115ac95884SYang Shi 		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1912e24f0b8fSChristoph Lameter {
191342012e04SHuang Ying 	int rc, rc_gather;
19142ef7dbb2SHuang Ying 	int nr_pages;
1915eaec4e63SHuang Ying 	struct folio *folio, *folio2;
191642012e04SHuang Ying 	LIST_HEAD(folios);
1917eaec4e63SHuang Ying 	LIST_HEAD(ret_folios);
1918a21d2133SHuang Ying 	LIST_HEAD(split_folios);
19195b855937SHuang Ying 	struct migrate_pages_stats stats;
19202d1db3b1SChristoph Lameter 
19217bc1aec5SLiam Mark 	trace_mm_migrate_pages_start(mode, reason);
19227bc1aec5SLiam Mark 
19235b855937SHuang Ying 	memset(&stats, 0, sizeof(stats));
1924e24f0b8fSChristoph Lameter 
19254e096ae1SMatthew Wilcox (Oracle) 	rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
192642012e04SHuang Ying 				     mode, reason, &stats, &ret_folios);
192742012e04SHuang Ying 	if (rc_gather < 0)
192895a402c3SChristoph Lameter 		goto out;
1929fb3592c4SHuang Ying 
193042012e04SHuang Ying again:
193142012e04SHuang Ying 	nr_pages = 0;
1932b20a3503SChristoph Lameter 	list_for_each_entry_safe(folio, folio2, from, lru) {
1933e5bfff8bSHuang Ying 		/* Retried hugetlb folios will be kept in the list */
1934e5bfff8bSHuang Ying 		if (folio_test_hugetlb(folio)) {
1935e5bfff8bSHuang Ying 			list_move_tail(&folio->lru, &ret_folios);
1936e5bfff8bSHuang Ying 			continue;
1937eaec4e63SHuang Ying 		}
1938f430893bSMiaohe Lin 
193942012e04SHuang Ying 		nr_pages += folio_nr_pages(folio);
19402ef7dbb2SHuang Ying 		if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1941e24f0b8fSChristoph Lameter 			break;
1942b20a3503SChristoph Lameter 	}
19432ef7dbb2SHuang Ying 	if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1944fb3592c4SHuang Ying 		list_cut_before(&folios, from, &folio2->lru);
194542012e04SHuang Ying 	else
194642012e04SHuang Ying 		list_splice_init(from, &folios);
19472ef7dbb2SHuang Ying 	if (mode == MIGRATE_ASYNC)
19484e096ae1SMatthew Wilcox (Oracle) 		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
19494e096ae1SMatthew Wilcox (Oracle) 				private, mode, reason, &ret_folios,
19504e096ae1SMatthew Wilcox (Oracle) 				&split_folios, &stats,
1951a21d2133SHuang Ying 				NR_MAX_MIGRATE_PAGES_RETRY);
19522ef7dbb2SHuang Ying 	else
19534e096ae1SMatthew Wilcox (Oracle) 		rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
19544e096ae1SMatthew Wilcox (Oracle) 				private, mode, reason, &ret_folios,
19554e096ae1SMatthew Wilcox (Oracle) 				&split_folios, &stats);
195642012e04SHuang Ying 	list_splice_tail_init(&folios, &ret_folios);
195742012e04SHuang Ying 	if (rc < 0) {
195842012e04SHuang Ying 		rc_gather = rc;
1959a21d2133SHuang Ying 		list_splice_tail(&split_folios, &ret_folios);
1960b20a3503SChristoph Lameter 		goto out;
1961b20a3503SChristoph Lameter 	}
1962a21d2133SHuang Ying 	if (!list_empty(&split_folios)) {
1963a21d2133SHuang Ying 		/*
1964a21d2133SHuang Ying 		 * Failure isn't counted since all split folios of a large folio
1965a21d2133SHuang Ying 		 * are already counted as 1 failure.  And we only try to migrate
1966a21d2133SHuang Ying 		 * with minimal effort, forcing MIGRATE_ASYNC mode and retrying once.
1967a21d2133SHuang Ying 		 */
19684e096ae1SMatthew Wilcox (Oracle) 		migrate_pages_batch(&split_folios, get_new_folio,
19694e096ae1SMatthew Wilcox (Oracle) 				put_new_folio, private, MIGRATE_ASYNC, reason,
19704e096ae1SMatthew Wilcox (Oracle) 				&ret_folios, NULL, &stats, 1);
1971a21d2133SHuang Ying 		list_splice_tail_init(&split_folios, &ret_folios);
1972a21d2133SHuang Ying 	}
197342012e04SHuang Ying 	rc_gather += rc;
197442012e04SHuang Ying 	if (!list_empty(from))
197542012e04SHuang Ying 		goto again;
197695a402c3SChristoph Lameter out:
1977dd4ae78aSYang Shi 	/*
1978eaec4e63SHuang Ying 	 * Put the permanent-failure folios back on the migration list; they
1979dd4ae78aSYang Shi 	 * will be put back to the right list by the caller.
1980dd4ae78aSYang Shi 	 */
1981eaec4e63SHuang Ying 	list_splice(&ret_folios, from);
1982dd4ae78aSYang Shi 
198303e5f82eSBaolin Wang 	/*
1984eaec4e63SHuang Ying 	 * Return 0 in case all split folios of fail-to-migrate large folios
1985eaec4e63SHuang Ying 	 * are migrated successfully.
198603e5f82eSBaolin Wang 	 */
198703e5f82eSBaolin Wang 	if (list_empty(from))
198842012e04SHuang Ying 		rc_gather = 0;
198903e5f82eSBaolin Wang 
19905b855937SHuang Ying 	count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
19915b855937SHuang Ying 	count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
19925b855937SHuang Ying 	count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
19935b855937SHuang Ying 	count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
19945b855937SHuang Ying 	count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
19955b855937SHuang Ying 	trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
19965b855937SHuang Ying 			       stats.nr_thp_succeeded, stats.nr_thp_failed,
199749cac03aSZi Yan 			       stats.nr_thp_split, stats.nr_split, mode,
199849cac03aSZi Yan 			       reason);
19997b2a2d4aSMel Gorman 
20005ac95884SYang Shi 	if (ret_succeeded)
20015b855937SHuang Ying 		*ret_succeeded = stats.nr_succeeded;
20025ac95884SYang Shi 
200342012e04SHuang Ying 	return rc_gather;
2004b20a3503SChristoph Lameter }
2005b20a3503SChristoph Lameter 
20064e096ae1SMatthew Wilcox (Oracle) struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2007b4b38223SJoonsoo Kim {
200819fc7bedSJoonsoo Kim 	struct migration_target_control *mtc;
200919fc7bedSJoonsoo Kim 	gfp_t gfp_mask;
2010b4b38223SJoonsoo Kim 	unsigned int order = 0;
201119fc7bedSJoonsoo Kim 	int nid;
201219fc7bedSJoonsoo Kim 	int zidx;
201319fc7bedSJoonsoo Kim 
201419fc7bedSJoonsoo Kim 	mtc = (struct migration_target_control *)private;
201519fc7bedSJoonsoo Kim 	gfp_mask = mtc->gfp_mask;
201619fc7bedSJoonsoo Kim 	nid = mtc->nid;
201719fc7bedSJoonsoo Kim 	if (nid == NUMA_NO_NODE)
20184e096ae1SMatthew Wilcox (Oracle) 		nid = folio_nid(src);
2019b4b38223SJoonsoo Kim 
20204e096ae1SMatthew Wilcox (Oracle) 	if (folio_test_hugetlb(src)) {
20214e096ae1SMatthew Wilcox (Oracle) 		struct hstate *h = folio_hstate(src);
2022d92bbc27SJoonsoo Kim 
202319fc7bedSJoonsoo Kim 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
20244e096ae1SMatthew Wilcox (Oracle) 		return alloc_hugetlb_folio_nodemask(h, nid,
2025e37d3e83SSidhartha Kumar 						mtc->nmask, gfp_mask);
2026d92bbc27SJoonsoo Kim 	}
2027b4b38223SJoonsoo Kim 
20284e096ae1SMatthew Wilcox (Oracle) 	if (folio_test_large(src)) {
20299933a0c8SJoonsoo Kim 		/*
20309933a0c8SJoonsoo Kim 		 * clear __GFP_RECLAIM to make the migration callback
20319933a0c8SJoonsoo Kim 		 * consistent with regular THP allocations.
20329933a0c8SJoonsoo Kim 		 */
20339933a0c8SJoonsoo Kim 		gfp_mask &= ~__GFP_RECLAIM;
2034b4b38223SJoonsoo Kim 		gfp_mask |= GFP_TRANSHUGE;
20354e096ae1SMatthew Wilcox (Oracle) 		order = folio_order(src);
2036b4b38223SJoonsoo Kim 	}
20374e096ae1SMatthew Wilcox (Oracle) 	zidx = zone_idx(folio_zone(src));
203819fc7bedSJoonsoo Kim 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2039b4b38223SJoonsoo Kim 		gfp_mask |= __GFP_HIGHMEM;
2040b4b38223SJoonsoo Kim 
20414e096ae1SMatthew Wilcox (Oracle) 	return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2042b4b38223SJoonsoo Kim }
2043b4b38223SJoonsoo Kim 
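/*
 * Illustrative sketch (hypothetical, not part of migrate.c): a typical caller
 * packs its constraints into a struct migration_target_control and hands
 * alloc_migration_target() to migrate_pages() as the new-folio callback.
 * example_migrate_list(), "isolated" and "allowed" below are made-up names;
 * compare with do_move_pages_to_node() further down, which does the same
 * thing with __GFP_THISNODE instead of a nodemask.
 */
static int example_migrate_list(struct list_head *isolated, int target_nid,
				nodemask_t *allowed)
{
	struct migration_target_control mtc = {
		.nid = target_nid,
		.gfp_mask = GFP_HIGHUSER_MOVABLE,
		.nmask = allowed,
	};

	/* Returns the number of pages that were not migrated, or a negative error. */
	return migrate_pages(isolated, alloc_migration_target, NULL,
			     (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
}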
2044742755a1SChristoph Lameter #ifdef CONFIG_NUMA
2045742755a1SChristoph Lameter 
2046a49bd4d7SMichal Hocko static int store_status(int __user *status, int start, int value, int nr)
2047742755a1SChristoph Lameter {
2048a49bd4d7SMichal Hocko 	while (nr-- > 0) {
2049a49bd4d7SMichal Hocko 		if (put_user(value, status + start))
2050a49bd4d7SMichal Hocko 			return -EFAULT;
2051a49bd4d7SMichal Hocko 		start++;
2052a49bd4d7SMichal Hocko 	}
2053742755a1SChristoph Lameter 
2054a49bd4d7SMichal Hocko 	return 0;
2055a49bd4d7SMichal Hocko }
2056742755a1SChristoph Lameter 
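/*
 * Illustrative example (not from the source): store_status(status, 3, -EFAULT, 2)
 * writes -EFAULT to the user entries status[3] and status[4], i.e. to nr
 * consecutive slots starting at index start.
 */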
2057ec47e250SGregory Price static int do_move_pages_to_node(struct list_head *pagelist, int node)
2058a49bd4d7SMichal Hocko {
2059a49bd4d7SMichal Hocko 	int err;
2060a0976311SJoonsoo Kim 	struct migration_target_control mtc = {
2061a0976311SJoonsoo Kim 		.nid = node,
2062a0976311SJoonsoo Kim 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2063a0976311SJoonsoo Kim 	};
2064742755a1SChristoph Lameter 
2065a0976311SJoonsoo Kim 	err = migrate_pages(pagelist, alloc_migration_target, NULL,
20665ac95884SYang Shi 		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2067a49bd4d7SMichal Hocko 	if (err)
2068a49bd4d7SMichal Hocko 		putback_movable_pages(pagelist);
2069a49bd4d7SMichal Hocko 	return err;
2070742755a1SChristoph Lameter }
2071742755a1SChristoph Lameter 
2072742755a1SChristoph Lameter /*
2073a49bd4d7SMichal Hocko  * Resolves the given address to a struct page, isolates it from the LRU and
2074a49bd4d7SMichal Hocko  * adds it to the given pagelist.
2075e0153fc2SYang Shi  * Returns:
2076e0153fc2SYang Shi  *     errno - if the page cannot be found/isolated
2077e0153fc2SYang Shi  *     0 - when it doesn't have to be migrated because it is already on the
2078e0153fc2SYang Shi  *         target node
2079e0153fc2SYang Shi  *     1 - when it has been queued
2080742755a1SChristoph Lameter  */
2081428e106aSKirill A. Shutemov static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
2082a49bd4d7SMichal Hocko 		int node, struct list_head *pagelist, bool migrate_all)
2083742755a1SChristoph Lameter {
2084742755a1SChristoph Lameter 	struct vm_area_struct *vma;
2085428e106aSKirill A. Shutemov 	unsigned long addr;
2086742755a1SChristoph Lameter 	struct page *page;
2087d64cfccbSKefeng Wang 	struct folio *folio;
2088a49bd4d7SMichal Hocko 	int err;
2089742755a1SChristoph Lameter 
2090d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
2091428e106aSKirill A. Shutemov 	addr = (unsigned long)untagged_addr_remote(mm, p);
2092428e106aSKirill A. Shutemov 
2093742755a1SChristoph Lameter 	err = -EFAULT;
2094cb1c37b1SMiaohe Lin 	vma = vma_lookup(mm, addr);
2095cb1c37b1SMiaohe Lin 	if (!vma || !vma_migratable(vma))
2096a49bd4d7SMichal Hocko 		goto out;
2097742755a1SChristoph Lameter 
2098d899844eSKirill A. Shutemov 	/* FOLL_DUMP to ignore special (like zero) pages */
209987d2762eSMiaohe Lin 	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
210089f5b7daSLinus Torvalds 
210189f5b7daSLinus Torvalds 	err = PTR_ERR(page);
210289f5b7daSLinus Torvalds 	if (IS_ERR(page))
2103a49bd4d7SMichal Hocko 		goto out;
210489f5b7daSLinus Torvalds 
2105742755a1SChristoph Lameter 	err = -ENOENT;
2106f7091ed6SHaiyue Wang 	if (!page)
2107a49bd4d7SMichal Hocko 		goto out;
2108742755a1SChristoph Lameter 
2109d64cfccbSKefeng Wang 	folio = page_folio(page);
2110d64cfccbSKefeng Wang 	if (folio_is_zone_device(folio))
2111d64cfccbSKefeng Wang 		goto out_putfolio;
2112f7091ed6SHaiyue Wang 
2113a49bd4d7SMichal Hocko 	err = 0;
2114d64cfccbSKefeng Wang 	if (folio_nid(folio) == node)
2115d64cfccbSKefeng Wang 		goto out_putfolio;
2116742755a1SChristoph Lameter 
2117742755a1SChristoph Lameter 	err = -EACCES;
2118a49bd4d7SMichal Hocko 	if (page_mapcount(page) > 1 && !migrate_all)
2119d64cfccbSKefeng Wang 		goto out_putfolio;
2120742755a1SChristoph Lameter 
2121f7f9c00dSBaolin Wang 	err = -EBUSY;
2122fa1df3f6SKefeng Wang 	if (folio_test_hugetlb(folio)) {
2123fa1df3f6SKefeng Wang 		if (isolate_hugetlb(folio, pagelist))
2124fa1df3f6SKefeng Wang 			err = 1;
2125fa1df3f6SKefeng Wang 	} else {
2126fa1df3f6SKefeng Wang 		if (!folio_isolate_lru(folio))
2127d64cfccbSKefeng Wang 			goto out_putfolio;
2128a49bd4d7SMichal Hocko 
2129e0153fc2SYang Shi 		err = 1;
2130d64cfccbSKefeng Wang 		list_add_tail(&folio->lru, pagelist);
2131d64cfccbSKefeng Wang 		node_stat_mod_folio(folio,
2132d64cfccbSKefeng Wang 			NR_ISOLATED_ANON + folio_is_file_lru(folio),
2133d64cfccbSKefeng Wang 			folio_nr_pages(folio));
21346d9c285aSKOSAKI Motohiro 	}
2135d64cfccbSKefeng Wang out_putfolio:
2136742755a1SChristoph Lameter 	/*
2137d64cfccbSKefeng Wang 	 * Either remove the duplicate refcount from folio_isolate_lru()
2138d64cfccbSKefeng Wang 	 * or drop the folio ref if it was not isolated.
2139742755a1SChristoph Lameter 	 */
2140d64cfccbSKefeng Wang 	folio_put(folio);
2141a49bd4d7SMichal Hocko out:
2142d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
2143742755a1SChristoph Lameter 	return err;
2144742755a1SChristoph Lameter }
2145742755a1SChristoph Lameter 
2146ec47e250SGregory Price static int move_pages_and_store_status(int node,
21477ca8783aSWei Yang 		struct list_head *pagelist, int __user *status,
21487ca8783aSWei Yang 		int start, int i, unsigned long nr_pages)
21497ca8783aSWei Yang {
21507ca8783aSWei Yang 	int err;
21517ca8783aSWei Yang 
21525d7ae891SWei Yang 	if (list_empty(pagelist))
21535d7ae891SWei Yang 		return 0;
21545d7ae891SWei Yang 
2155ec47e250SGregory Price 	err = do_move_pages_to_node(pagelist, node);
21567ca8783aSWei Yang 	if (err) {
21577ca8783aSWei Yang 		/*
21587ca8783aSWei Yang 		 * A positive err is the number of pages that
21597ca8783aSWei Yang 		 * failed to migrate.  Since we are going to
21607ca8783aSWei Yang 		 * abort and return the number of non-migrated
2161ab9dd4f8SLong Li 		 * pages, we need to include the rest of the
21627ca8783aSWei Yang 		 * nr_pages that have not been attempted as
21637ca8783aSWei Yang 		 * well.
21647ca8783aSWei Yang 		 */
21657ca8783aSWei Yang 		if (err > 0)
2166a7504ed1SHuang Ying 			err += nr_pages - i;
21677ca8783aSWei Yang 		return err;
21687ca8783aSWei Yang 	}
21697ca8783aSWei Yang 	return store_status(status, start, node, i - start);
21707ca8783aSWei Yang }
21717ca8783aSWei Yang 
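/*
 * Illustrative arithmetic (hypothetical numbers): with nr_pages == 8, i == 5
 * and do_move_pages_to_node() reporting 3 migration failures, the helper
 * above returns 3 + (8 - 5) = 6: the failed pages plus the three pages that
 * were never attempted.
 */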
2172742755a1SChristoph Lameter /*
21735e9a0f02SBrice Goglin  * Migrate an array of page addresses onto an array of nodes and fill
21745e9a0f02SBrice Goglin  * the corresponding status array.
21755e9a0f02SBrice Goglin  */
21763268c63eSChristoph Lameter static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
21775e9a0f02SBrice Goglin 			 unsigned long nr_pages,
21785e9a0f02SBrice Goglin 			 const void __user * __user *pages,
21795e9a0f02SBrice Goglin 			 const int __user *nodes,
21805e9a0f02SBrice Goglin 			 int __user *status, int flags)
21815e9a0f02SBrice Goglin {
2182229e2253SGregory Price 	compat_uptr_t __user *compat_pages = (void __user *)pages;
2183a49bd4d7SMichal Hocko 	int current_node = NUMA_NO_NODE;
2184a49bd4d7SMichal Hocko 	LIST_HEAD(pagelist);
2185a49bd4d7SMichal Hocko 	int start, i;
2186a49bd4d7SMichal Hocko 	int err = 0, err1;
218735282a2dSBrice Goglin 
2188361a2a22SMinchan Kim 	lru_cache_disable();
218935282a2dSBrice Goglin 
2190a49bd4d7SMichal Hocko 	for (i = start = 0; i < nr_pages; i++) {
21915e9a0f02SBrice Goglin 		const void __user *p;
21925e9a0f02SBrice Goglin 		int node;
21935e9a0f02SBrice Goglin 
21943140a227SBrice Goglin 		err = -EFAULT;
2195229e2253SGregory Price 		if (in_compat_syscall()) {
2196229e2253SGregory Price 			compat_uptr_t cp;
2197229e2253SGregory Price 
2198229e2253SGregory Price 			if (get_user(cp, compat_pages + i))
2199229e2253SGregory Price 				goto out_flush;
2200229e2253SGregory Price 
2201229e2253SGregory Price 			p = compat_ptr(cp);
2202229e2253SGregory Price 		} else {
2203a49bd4d7SMichal Hocko 			if (get_user(p, pages + i))
2204a49bd4d7SMichal Hocko 				goto out_flush;
2205229e2253SGregory Price 		}
2206a49bd4d7SMichal Hocko 		if (get_user(node, nodes + i))
2207a49bd4d7SMichal Hocko 			goto out_flush;
22085e9a0f02SBrice Goglin 
22095e9a0f02SBrice Goglin 		err = -ENODEV;
22106f5a55f1SLinus Torvalds 		if (node < 0 || node >= MAX_NUMNODES)
2211a49bd4d7SMichal Hocko 			goto out_flush;
2212389162c2SLai Jiangshan 		if (!node_state(node, N_MEMORY))
2213a49bd4d7SMichal Hocko 			goto out_flush;
22145e9a0f02SBrice Goglin 
22155e9a0f02SBrice Goglin 		err = -EACCES;
22165e9a0f02SBrice Goglin 		if (!node_isset(node, task_nodes))
2217a49bd4d7SMichal Hocko 			goto out_flush;
22185e9a0f02SBrice Goglin 
2219a49bd4d7SMichal Hocko 		if (current_node == NUMA_NO_NODE) {
2220a49bd4d7SMichal Hocko 			current_node = node;
2221a49bd4d7SMichal Hocko 			start = i;
2222a49bd4d7SMichal Hocko 		} else if (node != current_node) {
2223ec47e250SGregory Price 			err = move_pages_and_store_status(current_node,
22247ca8783aSWei Yang 					&pagelist, status, start, i, nr_pages);
2225a49bd4d7SMichal Hocko 			if (err)
2226a49bd4d7SMichal Hocko 				goto out;
2227a49bd4d7SMichal Hocko 			start = i;
2228a49bd4d7SMichal Hocko 			current_node = node;
22295e9a0f02SBrice Goglin 		}
22305e9a0f02SBrice Goglin 
2231a49bd4d7SMichal Hocko 		/*
2232a49bd4d7SMichal Hocko 		 * Errors in the page lookup or isolation are not fatal and we simply
2233a49bd4d7SMichal Hocko 		 * report them via status.
2234a49bd4d7SMichal Hocko 		 */
2235428e106aSKirill A. Shutemov 		err = add_page_for_migration(mm, p, current_node, &pagelist,
2236428e106aSKirill A. Shutemov 					     flags & MPOL_MF_MOVE_ALL);
2237e0153fc2SYang Shi 
2238d08221a0SWei Yang 		if (err > 0) {
2239e0153fc2SYang Shi 			/* The page is successfully queued for migration */
2240e0153fc2SYang Shi 			continue;
2241e0153fc2SYang Shi 		}
22423140a227SBrice Goglin 
2243d08221a0SWei Yang 		/*
224465462462SJohn Hubbard 		 * The move_pages() man page does not have an -EEXIST choice, so
224565462462SJohn Hubbard 		 * use -EFAULT instead.
224665462462SJohn Hubbard 		 */
224765462462SJohn Hubbard 		if (err == -EEXIST)
224865462462SJohn Hubbard 			err = -EFAULT;
224965462462SJohn Hubbard 
225065462462SJohn Hubbard 		/*
2251d08221a0SWei Yang 		 * If the page is already on the target node (!err), store the
2252d08221a0SWei Yang 		 * node; otherwise, store the err.
2253d08221a0SWei Yang 		 */
2254d08221a0SWei Yang 		err = store_status(status, i, err ? : current_node, 1);
2255a49bd4d7SMichal Hocko 		if (err)
2256a49bd4d7SMichal Hocko 			goto out_flush;
22573140a227SBrice Goglin 
2258ec47e250SGregory Price 		err = move_pages_and_store_status(current_node, &pagelist,
22597ca8783aSWei Yang 				status, start, i, nr_pages);
2260a7504ed1SHuang Ying 		if (err) {
2261a7504ed1SHuang Ying 			/* We have accounted for page i */
2262a7504ed1SHuang Ying 			if (err > 0)
2263a7504ed1SHuang Ying 				err--;
2264a49bd4d7SMichal Hocko 			goto out;
2265a7504ed1SHuang Ying 		}
2266a49bd4d7SMichal Hocko 		current_node = NUMA_NO_NODE;
22673140a227SBrice Goglin 	}
2268a49bd4d7SMichal Hocko out_flush:
2269a49bd4d7SMichal Hocko 	/* Make sure we do not overwrite the existing error */
2270ec47e250SGregory Price 	err1 = move_pages_and_store_status(current_node, &pagelist,
22717ca8783aSWei Yang 				status, start, i, nr_pages);
2272dfe9aa23SWei Yang 	if (err >= 0)
2273a49bd4d7SMichal Hocko 		err = err1;
22745e9a0f02SBrice Goglin out:
2275361a2a22SMinchan Kim 	lru_cache_enable();
22765e9a0f02SBrice Goglin 	return err;
22775e9a0f02SBrice Goglin }
22785e9a0f02SBrice Goglin 
22795e9a0f02SBrice Goglin /*
22802f007e74SBrice Goglin  * Determine the nodes of an array of pages and store them in a status array.
2281742755a1SChristoph Lameter  */
228280bba129SBrice Goglin static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
228380bba129SBrice Goglin 				const void __user **pages, int *status)
2284742755a1SChristoph Lameter {
22852f007e74SBrice Goglin 	unsigned long i;
2286742755a1SChristoph Lameter 
2287d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
22882f007e74SBrice Goglin 
22892f007e74SBrice Goglin 	for (i = 0; i < nr_pages; i++) {
229080bba129SBrice Goglin 		unsigned long addr = (unsigned long)(*pages);
22912f007e74SBrice Goglin 		struct vm_area_struct *vma;
22922f007e74SBrice Goglin 		struct page *page;
2293c095adbcSKOSAKI Motohiro 		int err = -EFAULT;
22942f007e74SBrice Goglin 
2295059b8b48SLiam Howlett 		vma = vma_lookup(mm, addr);
2296059b8b48SLiam Howlett 		if (!vma)
2297742755a1SChristoph Lameter 			goto set_status;
2298742755a1SChristoph Lameter 
2299d899844eSKirill A. Shutemov 		/* FOLL_DUMP to ignore special (like zero) pages */
230016fd6b31SBaolin Wang 		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
230189f5b7daSLinus Torvalds 
230289f5b7daSLinus Torvalds 		err = PTR_ERR(page);
230389f5b7daSLinus Torvalds 		if (IS_ERR(page))
230489f5b7daSLinus Torvalds 			goto set_status;
230589f5b7daSLinus Torvalds 
2306f7091ed6SHaiyue Wang 		err = -ENOENT;
2307f7091ed6SHaiyue Wang 		if (!page)
2308f7091ed6SHaiyue Wang 			goto set_status;
2309f7091ed6SHaiyue Wang 
2310f7091ed6SHaiyue Wang 		if (!is_zone_device_page(page))
23114cd61484SMiaohe Lin 			err = page_to_nid(page);
2312f7091ed6SHaiyue Wang 
23134cd61484SMiaohe Lin 		put_page(page);
2314742755a1SChristoph Lameter set_status:
231580bba129SBrice Goglin 		*status = err;
231680bba129SBrice Goglin 
231780bba129SBrice Goglin 		pages++;
231880bba129SBrice Goglin 		status++;
231980bba129SBrice Goglin 	}
232080bba129SBrice Goglin 
2321d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
232280bba129SBrice Goglin }
232380bba129SBrice Goglin 
23245b1b561bSArnd Bergmann static int get_compat_pages_array(const void __user *chunk_pages[],
23255b1b561bSArnd Bergmann 				  const void __user * __user *pages,
23265b1b561bSArnd Bergmann 				  unsigned long chunk_nr)
23275b1b561bSArnd Bergmann {
23285b1b561bSArnd Bergmann 	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
23295b1b561bSArnd Bergmann 	compat_uptr_t p;
23305b1b561bSArnd Bergmann 	int i;
23315b1b561bSArnd Bergmann 
23325b1b561bSArnd Bergmann 	for (i = 0; i < chunk_nr; i++) {
23335b1b561bSArnd Bergmann 		if (get_user(p, pages32 + i))
23345b1b561bSArnd Bergmann 			return -EFAULT;
23355b1b561bSArnd Bergmann 		chunk_pages[i] = compat_ptr(p);
23365b1b561bSArnd Bergmann 	}
23375b1b561bSArnd Bergmann 
23385b1b561bSArnd Bergmann 	return 0;
23395b1b561bSArnd Bergmann }
23405b1b561bSArnd Bergmann 
234180bba129SBrice Goglin /*
234280bba129SBrice Goglin  * Determine the nodes of a user array of pages and store them in
234380bba129SBrice Goglin  * a user status array.
234480bba129SBrice Goglin  */
234580bba129SBrice Goglin static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
234680bba129SBrice Goglin 			 const void __user * __user *pages,
234780bba129SBrice Goglin 			 int __user *status)
234880bba129SBrice Goglin {
23493eefb826SMiaohe Lin #define DO_PAGES_STAT_CHUNK_NR 16UL
235080bba129SBrice Goglin 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
235180bba129SBrice Goglin 	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
235280bba129SBrice Goglin 
235387b8d1adSH. Peter Anvin 	while (nr_pages) {
23543eefb826SMiaohe Lin 		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
235587b8d1adSH. Peter Anvin 
23565b1b561bSArnd Bergmann 		if (in_compat_syscall()) {
23575b1b561bSArnd Bergmann 			if (get_compat_pages_array(chunk_pages, pages,
23585b1b561bSArnd Bergmann 						   chunk_nr))
235987b8d1adSH. Peter Anvin 				break;
23605b1b561bSArnd Bergmann 		} else {
23615b1b561bSArnd Bergmann 			if (copy_from_user(chunk_pages, pages,
23625b1b561bSArnd Bergmann 				      chunk_nr * sizeof(*chunk_pages)))
23635b1b561bSArnd Bergmann 				break;
23645b1b561bSArnd Bergmann 		}
236580bba129SBrice Goglin 
236680bba129SBrice Goglin 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
236780bba129SBrice Goglin 
236887b8d1adSH. Peter Anvin 		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
236987b8d1adSH. Peter Anvin 			break;
2370742755a1SChristoph Lameter 
237187b8d1adSH. Peter Anvin 		pages += chunk_nr;
237287b8d1adSH. Peter Anvin 		status += chunk_nr;
237387b8d1adSH. Peter Anvin 		nr_pages -= chunk_nr;
237487b8d1adSH. Peter Anvin 	}
237587b8d1adSH. Peter Anvin 	return nr_pages ? -EFAULT : 0;
2376742755a1SChristoph Lameter }
2377742755a1SChristoph Lameter 
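/*
 * Illustrative example (hypothetical numbers): for nr_pages == 40 the loop
 * above runs in chunks of 16, 16 and 8 entries (DO_PAGES_STAT_CHUNK_NR == 16),
 * copying the page pointers in and the resolved node ids (or errors) back out
 * one chunk at a time.
 */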
23784dc200ceSMiaohe Lin static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
23794dc200ceSMiaohe Lin {
23804dc200ceSMiaohe Lin 	struct task_struct *task;
23814dc200ceSMiaohe Lin 	struct mm_struct *mm;
23824dc200ceSMiaohe Lin 
23834dc200ceSMiaohe Lin 	/*
23844dc200ceSMiaohe Lin 	 * There is no need to check if the current process has the right to
23854dc200ceSMiaohe Lin 	 * modify the specified process when they are the same.
23864dc200ceSMiaohe Lin 	 */
23874dc200ceSMiaohe Lin 	if (!pid) {
23884dc200ceSMiaohe Lin 		mmget(current->mm);
23894dc200ceSMiaohe Lin 		*mem_nodes = cpuset_mems_allowed(current);
23904dc200ceSMiaohe Lin 		return current->mm;
23914dc200ceSMiaohe Lin 	}
23924dc200ceSMiaohe Lin 
23934dc200ceSMiaohe Lin 	/* Find the mm_struct */
23944dc200ceSMiaohe Lin 	rcu_read_lock();
23954dc200ceSMiaohe Lin 	task = find_task_by_vpid(pid);
23964dc200ceSMiaohe Lin 	if (!task) {
23974dc200ceSMiaohe Lin 		rcu_read_unlock();
23984dc200ceSMiaohe Lin 		return ERR_PTR(-ESRCH);
23994dc200ceSMiaohe Lin 	}
24004dc200ceSMiaohe Lin 	get_task_struct(task);
24014dc200ceSMiaohe Lin 
24024dc200ceSMiaohe Lin 	/*
24034dc200ceSMiaohe Lin 	 * Check if this process has the right to modify the specified
24044dc200ceSMiaohe Lin 	 * process. Use the regular "ptrace_may_access()" checks.
24054dc200ceSMiaohe Lin 	 */
24064dc200ceSMiaohe Lin 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
24074dc200ceSMiaohe Lin 		rcu_read_unlock();
24084dc200ceSMiaohe Lin 		mm = ERR_PTR(-EPERM);
24094dc200ceSMiaohe Lin 		goto out;
24104dc200ceSMiaohe Lin 	}
24114dc200ceSMiaohe Lin 	rcu_read_unlock();
24124dc200ceSMiaohe Lin 
24134dc200ceSMiaohe Lin 	mm = ERR_PTR(security_task_movememory(task));
24144dc200ceSMiaohe Lin 	if (IS_ERR(mm))
24154dc200ceSMiaohe Lin 		goto out;
24164dc200ceSMiaohe Lin 	*mem_nodes = cpuset_mems_allowed(task);
24174dc200ceSMiaohe Lin 	mm = get_task_mm(task);
24184dc200ceSMiaohe Lin out:
24194dc200ceSMiaohe Lin 	put_task_struct(task);
24204dc200ceSMiaohe Lin 	if (!mm)
24214dc200ceSMiaohe Lin 		mm = ERR_PTR(-EINVAL);
24224dc200ceSMiaohe Lin 	return mm;
24234dc200ceSMiaohe Lin }
24244dc200ceSMiaohe Lin 
2425742755a1SChristoph Lameter /*
2426742755a1SChristoph Lameter  * Move a list of pages in the address space of the currently executing
2427742755a1SChristoph Lameter  * process.
2428742755a1SChristoph Lameter  */
24297addf443SDominik Brodowski static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
24307addf443SDominik Brodowski 			     const void __user * __user *pages,
24317addf443SDominik Brodowski 			     const int __user *nodes,
24327addf443SDominik Brodowski 			     int __user *status, int flags)
2433742755a1SChristoph Lameter {
2434742755a1SChristoph Lameter 	struct mm_struct *mm;
24355e9a0f02SBrice Goglin 	int err;
24363268c63eSChristoph Lameter 	nodemask_t task_nodes;
2437742755a1SChristoph Lameter 
2438742755a1SChristoph Lameter 	/* Check flags */
2439742755a1SChristoph Lameter 	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2440742755a1SChristoph Lameter 		return -EINVAL;
2441742755a1SChristoph Lameter 
2442742755a1SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2443742755a1SChristoph Lameter 		return -EPERM;
2444742755a1SChristoph Lameter 
24454dc200ceSMiaohe Lin 	mm = find_mm_struct(pid, &task_nodes);
24464dc200ceSMiaohe Lin 	if (IS_ERR(mm))
24474dc200ceSMiaohe Lin 		return PTR_ERR(mm);
24486e8b09eaSSasha Levin 
24493268c63eSChristoph Lameter 	if (nodes)
24503268c63eSChristoph Lameter 		err = do_pages_move(mm, task_nodes, nr_pages, pages,
24513268c63eSChristoph Lameter 				    nodes, status, flags);
24523268c63eSChristoph Lameter 	else
24535e9a0f02SBrice Goglin 		err = do_pages_stat(mm, nr_pages, pages, status);
24543268c63eSChristoph Lameter 
24553268c63eSChristoph Lameter 	mmput(mm);
24563268c63eSChristoph Lameter 	return err;
2457742755a1SChristoph Lameter }
2458742755a1SChristoph Lameter 
24597addf443SDominik Brodowski SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
24607addf443SDominik Brodowski 		const void __user * __user *, pages,
24617addf443SDominik Brodowski 		const int __user *, nodes,
24627addf443SDominik Brodowski 		int __user *, status, int, flags)
24637addf443SDominik Brodowski {
24647addf443SDominik Brodowski 	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
24657addf443SDominik Brodowski }
24667addf443SDominik Brodowski 
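/*
 * Illustrative sketch of the userspace side (hypothetical, not part of this
 * file): the syscall is usually reached through the wrapper declared in
 * <numaif.h> (libnuma, link with -lnuma).  A non-NULL "nodes" array requests
 * migration (the do_pages_move() path above); passing nodes == NULL only
 * queries the node each page currently resides on (the do_pages_stat() path).
 * example_move_to_node() is a made-up name.
 */
#include <numaif.h>		/* move_pages(), MPOL_MF_MOVE */
#include <stdio.h>

static int example_move_to_node(void *addr, int target_node)
{
	void *pages[1] = { addr };
	int nodes[1] = { target_node };
	int status[1];

	/* pid 0 means the calling process (see find_mm_struct() above). */
	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0) {
		perror("move_pages");
		return -1;
	}

	/* status[0] is the node the page now resides on, or a negative errno. */
	printf("page %p: status %d\n", addr, status[0]);
	return 0;
}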
24677039e1dbSPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
24687039e1dbSPeter Zijlstra /*
24697039e1dbSPeter Zijlstra  * Returns true if this is a safe migration target node for misplaced NUMA
2470bc53008eSWei Yang  * pages. Currently it only checks the watermarks, which is crude.
24717039e1dbSPeter Zijlstra  */
24727039e1dbSPeter Zijlstra static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
24733abef4e6SMel Gorman 				   unsigned long nr_migrate_pages)
24747039e1dbSPeter Zijlstra {
24757039e1dbSPeter Zijlstra 	int z;
2476599d0c95SMel Gorman 
24777039e1dbSPeter Zijlstra 	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
24787039e1dbSPeter Zijlstra 		struct zone *zone = pgdat->node_zones + z;
24797039e1dbSPeter Zijlstra 
2480bc53008eSWei Yang 		if (!managed_zone(zone))
24817039e1dbSPeter Zijlstra 			continue;
24827039e1dbSPeter Zijlstra 
24837039e1dbSPeter Zijlstra 		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
24847039e1dbSPeter Zijlstra 		if (!zone_watermark_ok(zone, 0,
24857039e1dbSPeter Zijlstra 				       high_wmark_pages(zone) +
24867039e1dbSPeter Zijlstra 				       nr_migrate_pages,
2487bfe9d006SHuang Ying 				       ZONE_MOVABLE, 0))
24887039e1dbSPeter Zijlstra 			continue;
24897039e1dbSPeter Zijlstra 		return true;
24907039e1dbSPeter Zijlstra 	}
24917039e1dbSPeter Zijlstra 	return false;
24927039e1dbSPeter Zijlstra }
24937039e1dbSPeter Zijlstra 
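/*
 * Illustrative arithmetic (hypothetical numbers): with high_wmark_pages(zone)
 * == 1024 and nr_migrate_pages == 512, a zone passes the check above only
 * when a watermark test at roughly 1024 + 512 free pages succeeds (the exact
 * calculation in zone_watermark_ok() also accounts for lowmem reserves), so
 * placing the 512 pages cannot drop the zone below its high watermark and
 * needlessly wake kswapd.
 */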
24944e096ae1SMatthew Wilcox (Oracle) static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2495666feb21SMichal Hocko 					   unsigned long data)
24967039e1dbSPeter Zijlstra {
24977039e1dbSPeter Zijlstra 	int nid = (int) data;
24984e096ae1SMatthew Wilcox (Oracle) 	int order = folio_order(src);
2499c185e494SMatthew Wilcox (Oracle) 	gfp_t gfp = __GFP_THISNODE;
25007039e1dbSPeter Zijlstra 
2501c185e494SMatthew Wilcox (Oracle) 	if (order > 0)
2502c185e494SMatthew Wilcox (Oracle) 		gfp |= GFP_TRANSHUGE_LIGHT;
2503c185e494SMatthew Wilcox (Oracle) 	else {
2504c185e494SMatthew Wilcox (Oracle) 		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2505c185e494SMatthew Wilcox (Oracle) 			__GFP_NOWARN;
2506c185e494SMatthew Wilcox (Oracle) 		gfp &= ~__GFP_RECLAIM;
25077039e1dbSPeter Zijlstra 	}
25084e096ae1SMatthew Wilcox (Oracle) 	return __folio_alloc_node(gfp, order, nid);
2509c5b5a3ddSYang Shi }
2510c5b5a3ddSYang Shi 
25112ac9e99fSKefeng Wang static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
2512b32967ffSMel Gorman {
25132ac9e99fSKefeng Wang 	int nr_pages = folio_nr_pages(folio);
2514b32967ffSMel Gorman 
2515b32967ffSMel Gorman 	/* Avoid migrating to a node that is nearly full */
2516c574bbe9SHuang Ying 	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2517c574bbe9SHuang Ying 		int z;
2518c574bbe9SHuang Ying 
2519c574bbe9SHuang Ying 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2520340ef390SHugh Dickins 			return 0;
2521c574bbe9SHuang Ying 		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2522bc53008eSWei Yang 			if (managed_zone(pgdat->node_zones + z))
2523c574bbe9SHuang Ying 				break;
2524c574bbe9SHuang Ying 		}
25252774f256SByungchul Park 
25262774f256SByungchul Park 		/*
25272774f256SByungchul Park 		 * If there are no managed zones, it should not proceed
25282774f256SByungchul Park 		 * further.
25292774f256SByungchul Park 		 */
25302774f256SByungchul Park 		if (z < 0)
25312774f256SByungchul Park 			return 0;
25322774f256SByungchul Park 
25332ac9e99fSKefeng Wang 		wakeup_kswapd(pgdat->node_zones + z, 0,
25342ac9e99fSKefeng Wang 			      folio_order(folio), ZONE_MOVABLE);
2535c574bbe9SHuang Ying 		return 0;
2536c574bbe9SHuang Ying 	}
2537b32967ffSMel Gorman 
25382ac9e99fSKefeng Wang 	if (!folio_isolate_lru(folio))
2539340ef390SHugh Dickins 		return 0;
2540340ef390SHugh Dickins 
25412ac9e99fSKefeng Wang 	node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
25422b9b624fSBaolin Wang 			    nr_pages);
2543b32967ffSMel Gorman 
2544b32967ffSMel Gorman 	/*
25452ac9e99fSKefeng Wang 	 * Isolating the folio has taken another reference, so the
25462ac9e99fSKefeng Wang 	 * caller's reference can be safely dropped without the folio
2547340ef390SHugh Dickins 	 * disappearing underneath us during migration.
2548b32967ffSMel Gorman 	 */
25492ac9e99fSKefeng Wang 	folio_put(folio);
2550340ef390SHugh Dickins 	return 1;
2551b32967ffSMel Gorman }
2552b32967ffSMel Gorman 
2553a8f60772SMel Gorman /*
255473eab3caSKefeng Wang  * Attempt to migrate a misplaced folio to the specified destination
25557039e1dbSPeter Zijlstra  * node. The caller is expected to hold an elevated reference count on
255673eab3caSKefeng Wang  * the folio, which this function drops before returning.
25577039e1dbSPeter Zijlstra  */
255873eab3caSKefeng Wang int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
25591bc115d8SMel Gorman 			    int node)
25607039e1dbSPeter Zijlstra {
2561a8f60772SMel Gorman 	pg_data_t *pgdat = NODE_DATA(node);
2562340ef390SHugh Dickins 	int isolated;
2563b32967ffSMel Gorman 	int nr_remaining;
2564e39bb6beSHuang Ying 	unsigned int nr_succeeded;
25657039e1dbSPeter Zijlstra 	LIST_HEAD(migratepages);
256673eab3caSKefeng Wang 	int nr_pages = folio_nr_pages(folio);
2567c5b5a3ddSYang Shi 
2568c5b5a3ddSYang Shi 	/*
256973eab3caSKefeng Wang 	 * Don't migrate file folios that are mapped in multiple processes
25701bc115d8SMel Gorman 	 * with execute permissions as they are probably shared libraries.
257173eab3caSKefeng Wang 	 * To check if the folio is shared, ideally we want to make sure
257273eab3caSKefeng Wang 	 * every page is mapped to the same process. Doing that is very
257373eab3caSKefeng Wang 	 * expensive, so check the estimated mapcount of the folio instead.
25747039e1dbSPeter Zijlstra 	 */
257573eab3caSKefeng Wang 	if (folio_estimated_sharers(folio) != 1 && folio_is_file_lru(folio) &&
25767ee820eeSMiaohe Lin 	    (vma->vm_flags & VM_EXEC))
25777039e1dbSPeter Zijlstra 		goto out;
25787039e1dbSPeter Zijlstra 
2579a8f60772SMel Gorman 	/*
258073eab3caSKefeng Wang 	 * Also do not migrate dirty folios, as not all filesystems can move
258173eab3caSKefeng Wang 	 * dirty folios in MIGRATE_ASYNC mode, which would be a waste of cycles.
258209a913a7SMel Gorman 	 */
258373eab3caSKefeng Wang 	if (folio_is_file_lru(folio) && folio_test_dirty(folio))
258409a913a7SMel Gorman 		goto out;
258509a913a7SMel Gorman 
258673eab3caSKefeng Wang 	isolated = numamigrate_isolate_folio(pgdat, folio);
2587b32967ffSMel Gorman 	if (!isolated)
25887039e1dbSPeter Zijlstra 		goto out;
25897039e1dbSPeter Zijlstra 
259073eab3caSKefeng Wang 	list_add(&folio->lru, &migratepages);
25914e096ae1SMatthew Wilcox (Oracle) 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2592c185e494SMatthew Wilcox (Oracle) 				     NULL, node, MIGRATE_ASYNC,
2593c185e494SMatthew Wilcox (Oracle) 				     MR_NUMA_MISPLACED, &nr_succeeded);
25947039e1dbSPeter Zijlstra 	if (nr_remaining) {
259559c82b70SJoonsoo Kim 		if (!list_empty(&migratepages)) {
259673eab3caSKefeng Wang 			list_del(&folio->lru);
259773eab3caSKefeng Wang 			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
259873eab3caSKefeng Wang 					folio_is_file_lru(folio), -nr_pages);
259973eab3caSKefeng Wang 			folio_putback_lru(folio);
260059c82b70SJoonsoo Kim 		}
26017039e1dbSPeter Zijlstra 		isolated = 0;
2602e39bb6beSHuang Ying 	}
2603e39bb6beSHuang Ying 	if (nr_succeeded) {
2604e39bb6beSHuang Ying 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
260573eab3caSKefeng Wang 		if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
2606e39bb6beSHuang Ying 			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2607e39bb6beSHuang Ying 					    nr_succeeded);
2608e39bb6beSHuang Ying 	}
26097039e1dbSPeter Zijlstra 	BUG_ON(!list_empty(&migratepages));
26107039e1dbSPeter Zijlstra 	return isolated;
2611340ef390SHugh Dickins 
2612340ef390SHugh Dickins out:
261373eab3caSKefeng Wang 	folio_put(folio);
2614340ef390SHugh Dickins 	return 0;
26157039e1dbSPeter Zijlstra }
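/*
 * Illustrative sketch (hypothetical, not the actual fault-handler code): a
 * NUMA hinting fault handler is the expected caller of
 * migrate_misplaced_folio().  example_numa_hint_fault() below is a made-up
 * helper that only shows the contract documented above: the caller passes in
 * an elevated folio reference, which is consumed on every path, and the
 * return value is 1 if the folio was migrated to the target node.
 */
static bool example_numa_hint_fault(struct folio *folio,
				    struct vm_area_struct *vma, int target_nid)
{
	/* Reference taken by the (hypothetical) fault path... */
	folio_get(folio);

	/* ...is always dropped by migrate_misplaced_folio(). */
	return migrate_misplaced_folio(folio, vma, target_nid) == 1;
}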
2616220018d3SMel Gorman #endif /* CONFIG_NUMA_BALANCING */
26177d6e2d96SOscar Salvador #endif /* CONFIG_NUMA */
2618