1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0 2b20a3503SChristoph Lameter /* 314e0f9bcSHugh Dickins * Memory Migration functionality - linux/mm/migrate.c 4b20a3503SChristoph Lameter * 5b20a3503SChristoph Lameter * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter 6b20a3503SChristoph Lameter * 7b20a3503SChristoph Lameter * Page migration was first developed in the context of the memory hotplug 8b20a3503SChristoph Lameter * project. The main authors of the migration code are: 9b20a3503SChristoph Lameter * 10b20a3503SChristoph Lameter * IWAMOTO Toshihiro <iwamoto@valinux.co.jp> 11b20a3503SChristoph Lameter * Hirokazu Takahashi <taka@valinux.co.jp> 12b20a3503SChristoph Lameter * Dave Hansen <haveblue@us.ibm.com> 13cde53535SChristoph Lameter * Christoph Lameter 14b20a3503SChristoph Lameter */ 15b20a3503SChristoph Lameter 16b20a3503SChristoph Lameter #include <linux/migrate.h> 17b95f1b31SPaul Gortmaker #include <linux/export.h> 18b20a3503SChristoph Lameter #include <linux/swap.h> 190697212aSChristoph Lameter #include <linux/swapops.h> 20b20a3503SChristoph Lameter #include <linux/pagemap.h> 21e23ca00bSChristoph Lameter #include <linux/buffer_head.h> 22b20a3503SChristoph Lameter #include <linux/mm_inline.h> 23b488893aSPavel Emelyanov #include <linux/nsproxy.h> 24b20a3503SChristoph Lameter #include <linux/pagevec.h> 25e9995ef9SHugh Dickins #include <linux/ksm.h> 26b20a3503SChristoph Lameter #include <linux/rmap.h> 27b20a3503SChristoph Lameter #include <linux/topology.h> 28b20a3503SChristoph Lameter #include <linux/cpu.h> 29b20a3503SChristoph Lameter #include <linux/cpuset.h> 3004e62a29SChristoph Lameter #include <linux/writeback.h> 31742755a1SChristoph Lameter #include <linux/mempolicy.h> 32742755a1SChristoph Lameter #include <linux/vmalloc.h> 3386c3a764SDavid Quigley #include <linux/security.h> 3442cb14b1SHugh Dickins #include <linux/backing-dev.h> 35bda807d4SMinchan Kim #include <linux/compaction.h> 364f5ca265SAdrian Bunk #include <linux/syscalls.h> 377addf443SDominik Brodowski #include <linux/compat.h> 38290408d4SNaoya Horiguchi #include <linux/hugetlb.h> 398e6ac7faSAneesh Kumar K.V #include <linux/hugetlb_cgroup.h> 405a0e3ad6STejun Heo #include <linux/gfp.h> 41df6ad698SJérôme Glisse #include <linux/pfn_t.h> 42a5430ddaSJérôme Glisse #include <linux/memremap.h> 438315ada7SJérôme Glisse #include <linux/userfaultfd_k.h> 44bf6bddf1SRafael Aquini #include <linux/balloon_compaction.h> 4533c3fc71SVladimir Davydov #include <linux/page_idle.h> 46d435edcaSVlastimil Babka #include <linux/page_owner.h> 476e84f315SIngo Molnar #include <linux/sched/mm.h> 48197e7e52SLinus Torvalds #include <linux/ptrace.h> 4934290e2cSRalph Campbell #include <linux/oom.h> 50884a6e5dSDave Hansen #include <linux/memory.h> 51ac16ec83SBaolin Wang #include <linux/random.h> 52c574bbe9SHuang Ying #include <linux/sched/sysctl.h> 53467b171aSAneesh Kumar K.V #include <linux/memory-tiers.h> 54b20a3503SChristoph Lameter 550d1836c3SMichal Nazarewicz #include <asm/tlbflush.h> 560d1836c3SMichal Nazarewicz 577b2a2d4aSMel Gorman #include <trace/events/migrate.h> 587b2a2d4aSMel Gorman 59b20a3503SChristoph Lameter #include "internal.h" 60b20a3503SChristoph Lameter 61cd775580SBaolin Wang bool isolate_movable_page(struct page *page, isolate_mode_t mode) 62bda807d4SMinchan Kim { 6319979497SVishal Moola (Oracle) struct folio *folio = folio_get_nontail_page(page); 6468f2736aSMatthew Wilcox (Oracle) const struct movable_operations *mops; 65bda807d4SMinchan Kim 66bda807d4SMinchan Kim /* 67bda807d4SMinchan Kim * Avoid 
burning cycles with pages that are yet under __free_pages(), 68bda807d4SMinchan Kim * or just got freed under us. 69bda807d4SMinchan Kim * 70bda807d4SMinchan Kim * In case we 'win' a race for a movable page being freed under us and 71bda807d4SMinchan Kim * raise its refcount preventing __free_pages() from doing its job 72bda807d4SMinchan Kim * the put_page() at the end of this block will take care of 73bda807d4SMinchan Kim * release this page, thus avoiding a nasty leakage. 74bda807d4SMinchan Kim */ 7519979497SVishal Moola (Oracle) if (!folio) 76bda807d4SMinchan Kim goto out; 77bda807d4SMinchan Kim 7819979497SVishal Moola (Oracle) if (unlikely(folio_test_slab(folio))) 7919979497SVishal Moola (Oracle) goto out_putfolio; 808b881763SVlastimil Babka /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */ 818b881763SVlastimil Babka smp_rmb(); 82bda807d4SMinchan Kim /* 838b881763SVlastimil Babka * Check movable flag before taking the page lock because 848b881763SVlastimil Babka * we use non-atomic bitops on newly allocated page flags so 858b881763SVlastimil Babka * unconditionally grabbing the lock ruins page's owner side. 86bda807d4SMinchan Kim */ 8719979497SVishal Moola (Oracle) if (unlikely(!__folio_test_movable(folio))) 8819979497SVishal Moola (Oracle) goto out_putfolio; 898b881763SVlastimil Babka /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */ 908b881763SVlastimil Babka smp_rmb(); 9119979497SVishal Moola (Oracle) if (unlikely(folio_test_slab(folio))) 9219979497SVishal Moola (Oracle) goto out_putfolio; 938b881763SVlastimil Babka 94bda807d4SMinchan Kim /* 95bda807d4SMinchan Kim * As movable pages are not isolated from LRU lists, concurrent 96bda807d4SMinchan Kim * compaction threads can race against page migration functions 97bda807d4SMinchan Kim * as well as race against the releasing a page. 98bda807d4SMinchan Kim * 99bda807d4SMinchan Kim * In order to avoid having an already isolated movable page 100bda807d4SMinchan Kim * being (wrongly) re-isolated while it is under migration, 101bda807d4SMinchan Kim * or to avoid attempting to isolate pages being released, 102bda807d4SMinchan Kim * lets be sure we have the page lock 103bda807d4SMinchan Kim * before proceeding with the movable page isolation steps. 
104bda807d4SMinchan Kim */ 10519979497SVishal Moola (Oracle) if (unlikely(!folio_trylock(folio))) 10619979497SVishal Moola (Oracle) goto out_putfolio; 107bda807d4SMinchan Kim 10819979497SVishal Moola (Oracle) if (!folio_test_movable(folio) || folio_test_isolated(folio)) 109bda807d4SMinchan Kim goto out_no_isolated; 110bda807d4SMinchan Kim 11119979497SVishal Moola (Oracle) mops = folio_movable_ops(folio); 11219979497SVishal Moola (Oracle) VM_BUG_ON_FOLIO(!mops, folio); 113bda807d4SMinchan Kim 11419979497SVishal Moola (Oracle) if (!mops->isolate_page(&folio->page, mode)) 115bda807d4SMinchan Kim goto out_no_isolated; 116bda807d4SMinchan Kim 117bda807d4SMinchan Kim /* Driver shouldn't use PG_isolated bit of page->flags */ 11819979497SVishal Moola (Oracle) WARN_ON_ONCE(folio_test_isolated(folio)); 11919979497SVishal Moola (Oracle) folio_set_isolated(folio); 12019979497SVishal Moola (Oracle) folio_unlock(folio); 121bda807d4SMinchan Kim 122cd775580SBaolin Wang return true; 123bda807d4SMinchan Kim 124bda807d4SMinchan Kim out_no_isolated: 12519979497SVishal Moola (Oracle) folio_unlock(folio); 12619979497SVishal Moola (Oracle) out_putfolio: 12719979497SVishal Moola (Oracle) folio_put(folio); 128bda807d4SMinchan Kim out: 129cd775580SBaolin Wang return false; 130bda807d4SMinchan Kim } 131bda807d4SMinchan Kim 132280d724aSVishal Moola (Oracle) static void putback_movable_folio(struct folio *folio) 133bda807d4SMinchan Kim { 134280d724aSVishal Moola (Oracle) const struct movable_operations *mops = folio_movable_ops(folio); 135bda807d4SMinchan Kim 136280d724aSVishal Moola (Oracle) mops->putback_page(&folio->page); 137280d724aSVishal Moola (Oracle) folio_clear_isolated(folio); 138bda807d4SMinchan Kim } 139bda807d4SMinchan Kim 140b20a3503SChristoph Lameter /* 1415733c7d1SRafael Aquini * Put previously isolated pages back onto the appropriate lists 1425733c7d1SRafael Aquini * from where they were once taken off for compaction/migration. 1435733c7d1SRafael Aquini * 14459c82b70SJoonsoo Kim * This function shall be used whenever the isolated pageset has been 14559c82b70SJoonsoo Kim * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range() 1467ce82f4cSMiaohe Lin * and isolate_hugetlb(). 1475733c7d1SRafael Aquini */ 1485733c7d1SRafael Aquini void putback_movable_pages(struct list_head *l) 1495733c7d1SRafael Aquini { 150280d724aSVishal Moola (Oracle) struct folio *folio; 151280d724aSVishal Moola (Oracle) struct folio *folio2; 1525733c7d1SRafael Aquini 153280d724aSVishal Moola (Oracle) list_for_each_entry_safe(folio, folio2, l, lru) { 154280d724aSVishal Moola (Oracle) if (unlikely(folio_test_hugetlb(folio))) { 155280d724aSVishal Moola (Oracle) folio_putback_active_hugetlb(folio); 15631caf665SNaoya Horiguchi continue; 15731caf665SNaoya Horiguchi } 158280d724aSVishal Moola (Oracle) list_del(&folio->lru); 159bda807d4SMinchan Kim /* 160280d724aSVishal Moola (Oracle) * We isolated non-lru movable folio so here we can use 161280d724aSVishal Moola (Oracle) * __PageMovable because LRU folio's mapping cannot have 162bda807d4SMinchan Kim * PAGE_MAPPING_MOVABLE. 
163bda807d4SMinchan Kim */ 164280d724aSVishal Moola (Oracle) if (unlikely(__folio_test_movable(folio))) { 165280d724aSVishal Moola (Oracle) VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio); 166280d724aSVishal Moola (Oracle) folio_lock(folio); 167280d724aSVishal Moola (Oracle) if (folio_test_movable(folio)) 168280d724aSVishal Moola (Oracle) putback_movable_folio(folio); 169bf6bddf1SRafael Aquini else 170280d724aSVishal Moola (Oracle) folio_clear_isolated(folio); 171280d724aSVishal Moola (Oracle) folio_unlock(folio); 172280d724aSVishal Moola (Oracle) folio_put(folio); 173bda807d4SMinchan Kim } else { 174280d724aSVishal Moola (Oracle) node_stat_mod_folio(folio, NR_ISOLATED_ANON + 175280d724aSVishal Moola (Oracle) folio_is_file_lru(folio), -folio_nr_pages(folio)); 176280d724aSVishal Moola (Oracle) folio_putback_lru(folio); 177b20a3503SChristoph Lameter } 178b20a3503SChristoph Lameter } 179bda807d4SMinchan Kim } 180b20a3503SChristoph Lameter 1810697212aSChristoph Lameter /* 1820697212aSChristoph Lameter * Restore a potential migration pte to a working pte entry 1830697212aSChristoph Lameter */ 1842f031c6fSMatthew Wilcox (Oracle) static bool remove_migration_pte(struct folio *folio, 1852f031c6fSMatthew Wilcox (Oracle) struct vm_area_struct *vma, unsigned long addr, void *old) 1860697212aSChristoph Lameter { 1874eecb8b9SMatthew Wilcox (Oracle) DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION); 1880697212aSChristoph Lameter 1893fe87967SKirill A. Shutemov while (page_vma_mapped_walk(&pvmw)) { 1906c287605SDavid Hildenbrand rmap_t rmap_flags = RMAP_NONE; 1910697212aSChristoph Lameter pte_t pte; 1920697212aSChristoph Lameter swp_entry_t entry; 1934eecb8b9SMatthew Wilcox (Oracle) struct page *new; 1944eecb8b9SMatthew Wilcox (Oracle) unsigned long idx = 0; 1950697212aSChristoph Lameter 1964eecb8b9SMatthew Wilcox (Oracle) /* pgoff is invalid for ksm pages, but they are never large */ 1974eecb8b9SMatthew Wilcox (Oracle) if (folio_test_large(folio) && !folio_test_hugetlb(folio)) 1984eecb8b9SMatthew Wilcox (Oracle) idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff; 1994eecb8b9SMatthew Wilcox (Oracle) new = folio_page(folio, idx); 2000697212aSChristoph Lameter 201616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 202616b8371SZi Yan /* PMD-mapped THP migration entry */ 203616b8371SZi Yan if (!pvmw.pte) { 2044eecb8b9SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || 2054eecb8b9SMatthew Wilcox (Oracle) !folio_test_pmd_mappable(folio), folio); 206616b8371SZi Yan remove_migration_pmd(&pvmw, new); 207616b8371SZi Yan continue; 208616b8371SZi Yan } 209616b8371SZi Yan #endif 210616b8371SZi Yan 2114eecb8b9SMatthew Wilcox (Oracle) folio_get(folio); 2122e346877SPeter Xu pte = mk_pte(new, READ_ONCE(vma->vm_page_prot)); 2133fe87967SKirill A. Shutemov if (pte_swp_soft_dirty(*pvmw.pte)) 214c3d16e16SCyrill Gorcunov pte = pte_mksoft_dirty(pte); 215d3cb8bf6SMel Gorman 2163fe87967SKirill A. Shutemov /* 2173fe87967SKirill A. Shutemov * Recheck VMA as permissions can change since migration started 2183fe87967SKirill A. Shutemov */ 2193fe87967SKirill A. 
Shutemov entry = pte_to_swp_entry(*pvmw.pte); 2202e346877SPeter Xu if (!is_migration_entry_young(entry)) 2212e346877SPeter Xu pte = pte_mkold(pte); 2222e346877SPeter Xu if (folio_test_dirty(folio) && is_migration_entry_dirty(entry)) 2232e346877SPeter Xu pte = pte_mkdirty(pte); 2244dd845b5SAlistair Popple if (is_writable_migration_entry(entry)) 225d3cb8bf6SMel Gorman pte = maybe_mkwrite(pte, vma); 226f45ec5ffSPeter Xu else if (pte_swp_uffd_wp(*pvmw.pte)) 227f45ec5ffSPeter Xu pte = pte_mkuffd_wp(pte); 22896a9c287SPeter Xu else 22996a9c287SPeter Xu pte = pte_wrprotect(pte); 230d3cb8bf6SMel Gorman 2316c287605SDavid Hildenbrand if (folio_test_anon(folio) && !is_readable_migration_entry(entry)) 2326c287605SDavid Hildenbrand rmap_flags |= RMAP_EXCLUSIVE; 2336c287605SDavid Hildenbrand 2346128763fSRalph Campbell if (unlikely(is_device_private_page(new))) { 2354dd845b5SAlistair Popple if (pte_write(pte)) 2364dd845b5SAlistair Popple entry = make_writable_device_private_entry( 2374dd845b5SAlistair Popple page_to_pfn(new)); 2384dd845b5SAlistair Popple else 2394dd845b5SAlistair Popple entry = make_readable_device_private_entry( 2404dd845b5SAlistair Popple page_to_pfn(new)); 241a5430ddaSJérôme Glisse pte = swp_entry_to_pte(entry); 2423d321bf8SRalph Campbell if (pte_swp_soft_dirty(*pvmw.pte)) 2433d321bf8SRalph Campbell pte = pte_swp_mksoft_dirty(pte); 244f45ec5ffSPeter Xu if (pte_swp_uffd_wp(*pvmw.pte)) 245ebdf8321SAlistair Popple pte = pte_swp_mkuffd_wp(pte); 246df6ad698SJérôme Glisse } 247a5430ddaSJérôme Glisse 2483ef8fd7fSAndi Kleen #ifdef CONFIG_HUGETLB_PAGE 2494eecb8b9SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio)) { 25079c1c594SChristophe Leroy unsigned int shift = huge_page_shift(hstate_vma(vma)); 25179c1c594SChristophe Leroy 252290408d4SNaoya Horiguchi pte = pte_mkhuge(pte); 25379c1c594SChristophe Leroy pte = arch_make_huge_pte(pte, shift, vma->vm_flags); 2544eecb8b9SMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 25528c5209dSDavid Hildenbrand hugepage_add_anon_rmap(new, vma, pvmw.address, 2566c287605SDavid Hildenbrand rmap_flags); 257290408d4SNaoya Horiguchi else 258fb3d824dSDavid Hildenbrand page_dup_file_rmap(new, true); 2591eba86c0SPasha Tatashin set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); 260383321abSAneesh Kumar K.V } else 261383321abSAneesh Kumar K.V #endif 262383321abSAneesh Kumar K.V { 2634eecb8b9SMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 264f1e2db12SDavid Hildenbrand page_add_anon_rmap(new, vma, pvmw.address, 2656c287605SDavid Hildenbrand rmap_flags); 26604e62a29SChristoph Lameter else 267cea86fe2SHugh Dickins page_add_file_rmap(new, vma, false); 2681eba86c0SPasha Tatashin set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); 269383321abSAneesh Kumar K.V } 270b7435507SHugh Dickins if (vma->vm_flags & VM_LOCKED) 27196f97c43SLorenzo Stoakes mlock_drain_local(); 272e125fe40SKirill A. Shutemov 2734cc79b33SAnshuman Khandual trace_remove_migration_pte(pvmw.address, pte_val(pte), 2744cc79b33SAnshuman Khandual compound_order(new)); 2754cc79b33SAnshuman Khandual 27604e62a29SChristoph Lameter /* No need to invalidate - it was non-present before */ 2773fe87967SKirill A. Shutemov update_mmu_cache(vma, pvmw.address, pvmw.pte); 2783fe87967SKirill A. Shutemov } 2793fe87967SKirill A. 
Shutemov 280e4b82222SMinchan Kim return true; 2810697212aSChristoph Lameter } 2820697212aSChristoph Lameter 2830697212aSChristoph Lameter /* 28404e62a29SChristoph Lameter * Get rid of all migration entries and replace them by 28504e62a29SChristoph Lameter * references to the indicated page. 28604e62a29SChristoph Lameter */ 2874eecb8b9SMatthew Wilcox (Oracle) void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked) 28804e62a29SChristoph Lameter { 289051ac83aSJoonsoo Kim struct rmap_walk_control rwc = { 290051ac83aSJoonsoo Kim .rmap_one = remove_migration_pte, 2914eecb8b9SMatthew Wilcox (Oracle) .arg = src, 292051ac83aSJoonsoo Kim }; 293051ac83aSJoonsoo Kim 294e388466dSKirill A. Shutemov if (locked) 2952f031c6fSMatthew Wilcox (Oracle) rmap_walk_locked(dst, &rwc); 296e388466dSKirill A. Shutemov else 2972f031c6fSMatthew Wilcox (Oracle) rmap_walk(dst, &rwc); 29804e62a29SChristoph Lameter } 29904e62a29SChristoph Lameter 30004e62a29SChristoph Lameter /* 3010697212aSChristoph Lameter * Something used the pte of a page under migration. We need to 3020697212aSChristoph Lameter * get to the page and wait until migration is finished. 3030697212aSChristoph Lameter * When we return from this function the fault will be retried. 3040697212aSChristoph Lameter */ 305e66f17ffSNaoya Horiguchi void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, 30630dad309SNaoya Horiguchi spinlock_t *ptl) 3070697212aSChristoph Lameter { 30830dad309SNaoya Horiguchi pte_t pte; 3090697212aSChristoph Lameter swp_entry_t entry; 3100697212aSChristoph Lameter 31130dad309SNaoya Horiguchi spin_lock(ptl); 3120697212aSChristoph Lameter pte = *ptep; 3130697212aSChristoph Lameter if (!is_swap_pte(pte)) 3140697212aSChristoph Lameter goto out; 3150697212aSChristoph Lameter 3160697212aSChristoph Lameter entry = pte_to_swp_entry(pte); 3170697212aSChristoph Lameter if (!is_migration_entry(entry)) 3180697212aSChristoph Lameter goto out; 3190697212aSChristoph Lameter 320ffa65753SAlistair Popple migration_entry_wait_on_locked(entry, ptep, ptl); 3210697212aSChristoph Lameter return; 3220697212aSChristoph Lameter out: 3230697212aSChristoph Lameter pte_unmap_unlock(ptep, ptl); 3240697212aSChristoph Lameter } 3250697212aSChristoph Lameter 32630dad309SNaoya Horiguchi void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 32730dad309SNaoya Horiguchi unsigned long address) 32830dad309SNaoya Horiguchi { 32930dad309SNaoya Horiguchi spinlock_t *ptl = pte_lockptr(mm, pmd); 33030dad309SNaoya Horiguchi pte_t *ptep = pte_offset_map(pmd, address); 33130dad309SNaoya Horiguchi __migration_entry_wait(mm, ptep, ptl); 33230dad309SNaoya Horiguchi } 33330dad309SNaoya Horiguchi 334ad1ac596SMiaohe Lin #ifdef CONFIG_HUGETLB_PAGE 335fcd48540SPeter Xu /* 336fcd48540SPeter Xu * The vma read lock must be held upon entry. Holding that lock prevents either 337fcd48540SPeter Xu * the pte or the ptl from being freed. 338fcd48540SPeter Xu * 339fcd48540SPeter Xu * This function will release the vma lock before returning. 
340fcd48540SPeter Xu */ 341fcd48540SPeter Xu void __migration_entry_wait_huge(struct vm_area_struct *vma, 342fcd48540SPeter Xu pte_t *ptep, spinlock_t *ptl) 34330dad309SNaoya Horiguchi { 344ad1ac596SMiaohe Lin pte_t pte; 345ad1ac596SMiaohe Lin 346fcd48540SPeter Xu hugetlb_vma_assert_locked(vma); 347ad1ac596SMiaohe Lin spin_lock(ptl); 348ad1ac596SMiaohe Lin pte = huge_ptep_get(ptep); 349ad1ac596SMiaohe Lin 350fcd48540SPeter Xu if (unlikely(!is_hugetlb_entry_migration(pte))) { 351ad1ac596SMiaohe Lin spin_unlock(ptl); 352fcd48540SPeter Xu hugetlb_vma_unlock_read(vma); 353fcd48540SPeter Xu } else { 354fcd48540SPeter Xu /* 355fcd48540SPeter Xu * If migration entry existed, safe to release vma lock 356fcd48540SPeter Xu * here because the pgtable page won't be freed without the 357fcd48540SPeter Xu * pgtable lock released. See comment right above pgtable 358fcd48540SPeter Xu * lock release in migration_entry_wait_on_locked(). 359fcd48540SPeter Xu */ 360fcd48540SPeter Xu hugetlb_vma_unlock_read(vma); 361ad1ac596SMiaohe Lin migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl); 36230dad309SNaoya Horiguchi } 363fcd48540SPeter Xu } 36430dad309SNaoya Horiguchi 365ad1ac596SMiaohe Lin void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) 366ad1ac596SMiaohe Lin { 367ad1ac596SMiaohe Lin spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte); 368ad1ac596SMiaohe Lin 369fcd48540SPeter Xu __migration_entry_wait_huge(vma, pte, ptl); 370ad1ac596SMiaohe Lin } 371ad1ac596SMiaohe Lin #endif 372ad1ac596SMiaohe Lin 373616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 374616b8371SZi Yan void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd) 375616b8371SZi Yan { 376616b8371SZi Yan spinlock_t *ptl; 377616b8371SZi Yan 378616b8371SZi Yan ptl = pmd_lock(mm, pmd); 379616b8371SZi Yan if (!is_pmd_migration_entry(*pmd)) 380616b8371SZi Yan goto unlock; 381ffa65753SAlistair Popple migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl); 382616b8371SZi Yan return; 383616b8371SZi Yan unlock: 384616b8371SZi Yan spin_unlock(ptl); 385616b8371SZi Yan } 386616b8371SZi Yan #endif 387616b8371SZi Yan 388108ca835SMatthew Wilcox (Oracle) static int folio_expected_refs(struct address_space *mapping, 389108ca835SMatthew Wilcox (Oracle) struct folio *folio) 3900b3901b3SJan Kara { 391108ca835SMatthew Wilcox (Oracle) int refs = 1; 392108ca835SMatthew Wilcox (Oracle) if (!mapping) 393108ca835SMatthew Wilcox (Oracle) return refs; 3940b3901b3SJan Kara 395108ca835SMatthew Wilcox (Oracle) refs += folio_nr_pages(folio); 396108ca835SMatthew Wilcox (Oracle) if (folio_test_private(folio)) 397108ca835SMatthew Wilcox (Oracle) refs++; 398108ca835SMatthew Wilcox (Oracle) 399108ca835SMatthew Wilcox (Oracle) return refs; 4000b3901b3SJan Kara } 4010b3901b3SJan Kara 402b20a3503SChristoph Lameter /* 403c3fcf8a5SChristoph Lameter * Replace the page in the mapping. 4045b5c7120SChristoph Lameter * 4055b5c7120SChristoph Lameter * The number of remaining references must be: 4065b5c7120SChristoph Lameter * 1 for anonymous pages without a mapping 4075b5c7120SChristoph Lameter * 2 for pages with a mapping 408266cf658SDavid Howells * 3 for pages with a mapping and PagePrivate/PagePrivate2 set. 
409b20a3503SChristoph Lameter */ 4103417013eSMatthew Wilcox (Oracle) int folio_migrate_mapping(struct address_space *mapping, 4113417013eSMatthew Wilcox (Oracle) struct folio *newfolio, struct folio *folio, int extra_count) 412b20a3503SChristoph Lameter { 4133417013eSMatthew Wilcox (Oracle) XA_STATE(xas, &mapping->i_pages, folio_index(folio)); 41442cb14b1SHugh Dickins struct zone *oldzone, *newzone; 41542cb14b1SHugh Dickins int dirty; 416108ca835SMatthew Wilcox (Oracle) int expected_count = folio_expected_refs(mapping, folio) + extra_count; 4173417013eSMatthew Wilcox (Oracle) long nr = folio_nr_pages(folio); 4188763cb45SJérôme Glisse 4196c5240aeSChristoph Lameter if (!mapping) { 4200e8c7d0fSChristoph Lameter /* Anonymous page without mapping */ 4213417013eSMatthew Wilcox (Oracle) if (folio_ref_count(folio) != expected_count) 4226c5240aeSChristoph Lameter return -EAGAIN; 423cf4b769aSHugh Dickins 424cf4b769aSHugh Dickins /* No turning back from here */ 4253417013eSMatthew Wilcox (Oracle) newfolio->index = folio->index; 4263417013eSMatthew Wilcox (Oracle) newfolio->mapping = folio->mapping; 4273417013eSMatthew Wilcox (Oracle) if (folio_test_swapbacked(folio)) 4283417013eSMatthew Wilcox (Oracle) __folio_set_swapbacked(newfolio); 429cf4b769aSHugh Dickins 43078bd5209SRafael Aquini return MIGRATEPAGE_SUCCESS; 4316c5240aeSChristoph Lameter } 4326c5240aeSChristoph Lameter 4333417013eSMatthew Wilcox (Oracle) oldzone = folio_zone(folio); 4343417013eSMatthew Wilcox (Oracle) newzone = folio_zone(newfolio); 43542cb14b1SHugh Dickins 43689eb946aSMatthew Wilcox xas_lock_irq(&xas); 4373417013eSMatthew Wilcox (Oracle) if (!folio_ref_freeze(folio, expected_count)) { 43889eb946aSMatthew Wilcox xas_unlock_irq(&xas); 439e286781dSNick Piggin return -EAGAIN; 440e286781dSNick Piggin } 441e286781dSNick Piggin 442b20a3503SChristoph Lameter /* 4433417013eSMatthew Wilcox (Oracle) * Now we know that no one else is looking at the folio: 444cf4b769aSHugh Dickins * no turning back from here. 445b20a3503SChristoph Lameter */ 4463417013eSMatthew Wilcox (Oracle) newfolio->index = folio->index; 4473417013eSMatthew Wilcox (Oracle) newfolio->mapping = folio->mapping; 4483417013eSMatthew Wilcox (Oracle) folio_ref_add(newfolio, nr); /* add cache reference */ 4493417013eSMatthew Wilcox (Oracle) if (folio_test_swapbacked(folio)) { 4503417013eSMatthew Wilcox (Oracle) __folio_set_swapbacked(newfolio); 4513417013eSMatthew Wilcox (Oracle) if (folio_test_swapcache(folio)) { 4523417013eSMatthew Wilcox (Oracle) folio_set_swapcache(newfolio); 4533417013eSMatthew Wilcox (Oracle) newfolio->private = folio_get_private(folio); 454b20a3503SChristoph Lameter } 4556326fec1SNicholas Piggin } else { 4563417013eSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio); 4576326fec1SNicholas Piggin } 458b20a3503SChristoph Lameter 45942cb14b1SHugh Dickins /* Move dirty while page refs frozen and newpage not yet exposed */ 4603417013eSMatthew Wilcox (Oracle) dirty = folio_test_dirty(folio); 46142cb14b1SHugh Dickins if (dirty) { 4623417013eSMatthew Wilcox (Oracle) folio_clear_dirty(folio); 4633417013eSMatthew Wilcox (Oracle) folio_set_dirty(newfolio); 46442cb14b1SHugh Dickins } 46542cb14b1SHugh Dickins 4663417013eSMatthew Wilcox (Oracle) xas_store(&xas, newfolio); 4677cf9c2c7SNick Piggin 4687cf9c2c7SNick Piggin /* 469937a94c9SJacobo Giralt * Drop cache reference from old page by unfreezing 470937a94c9SJacobo Giralt * to one less reference. 4717cf9c2c7SNick Piggin * We know this isn't the last reference. 
4727cf9c2c7SNick Piggin */ 4733417013eSMatthew Wilcox (Oracle) folio_ref_unfreeze(folio, expected_count - nr); 4747cf9c2c7SNick Piggin 47589eb946aSMatthew Wilcox xas_unlock(&xas); 47642cb14b1SHugh Dickins /* Leave irq disabled to prevent preemption while updating stats */ 47742cb14b1SHugh Dickins 4780e8c7d0fSChristoph Lameter /* 4790e8c7d0fSChristoph Lameter * If moved to a different zone then also account 4800e8c7d0fSChristoph Lameter * the page for that zone. Other VM counters will be 4810e8c7d0fSChristoph Lameter * taken care of when we establish references to the 4820e8c7d0fSChristoph Lameter * new page and drop references to the old page. 4830e8c7d0fSChristoph Lameter * 4840e8c7d0fSChristoph Lameter * Note that anonymous pages are accounted for 4854b9d0fabSMel Gorman * via NR_FILE_PAGES and NR_ANON_MAPPED if they 4860e8c7d0fSChristoph Lameter * are mapped to swap space. 4870e8c7d0fSChristoph Lameter */ 48842cb14b1SHugh Dickins if (newzone != oldzone) { 4890d1c2072SJohannes Weiner struct lruvec *old_lruvec, *new_lruvec; 4900d1c2072SJohannes Weiner struct mem_cgroup *memcg; 4910d1c2072SJohannes Weiner 4923417013eSMatthew Wilcox (Oracle) memcg = folio_memcg(folio); 4930d1c2072SJohannes Weiner old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat); 4940d1c2072SJohannes Weiner new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat); 4950d1c2072SJohannes Weiner 4965c447d27SShakeel Butt __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr); 4975c447d27SShakeel Butt __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr); 4983417013eSMatthew Wilcox (Oracle) if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) { 4995c447d27SShakeel Butt __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr); 5005c447d27SShakeel Butt __mod_lruvec_state(new_lruvec, NR_SHMEM, nr); 5014b02108aSKOSAKI Motohiro } 502b6038942SShakeel Butt #ifdef CONFIG_SWAP 5033417013eSMatthew Wilcox (Oracle) if (folio_test_swapcache(folio)) { 504b6038942SShakeel Butt __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr); 505b6038942SShakeel Butt __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr); 506b6038942SShakeel Butt } 507b6038942SShakeel Butt #endif 508f56753acSChristoph Hellwig if (dirty && mapping_can_writeback(mapping)) { 5095c447d27SShakeel Butt __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr); 5105c447d27SShakeel Butt __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr); 5115c447d27SShakeel Butt __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr); 5125c447d27SShakeel Butt __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr); 51342cb14b1SHugh Dickins } 51442cb14b1SHugh Dickins } 51542cb14b1SHugh Dickins local_irq_enable(); 516b20a3503SChristoph Lameter 51778bd5209SRafael Aquini return MIGRATEPAGE_SUCCESS; 518b20a3503SChristoph Lameter } 5193417013eSMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_migrate_mapping); 520b20a3503SChristoph Lameter 521b20a3503SChristoph Lameter /* 522290408d4SNaoya Horiguchi * The expected number of remaining references is the same as that 5233417013eSMatthew Wilcox (Oracle) * of folio_migrate_mapping(). 
524290408d4SNaoya Horiguchi */ 525290408d4SNaoya Horiguchi int migrate_huge_page_move_mapping(struct address_space *mapping, 526b890ec2aSMatthew Wilcox (Oracle) struct folio *dst, struct folio *src) 527290408d4SNaoya Horiguchi { 528b890ec2aSMatthew Wilcox (Oracle) XA_STATE(xas, &mapping->i_pages, folio_index(src)); 529290408d4SNaoya Horiguchi int expected_count; 530290408d4SNaoya Horiguchi 53189eb946aSMatthew Wilcox xas_lock_irq(&xas); 532b890ec2aSMatthew Wilcox (Oracle) expected_count = 2 + folio_has_private(src); 533b890ec2aSMatthew Wilcox (Oracle) if (!folio_ref_freeze(src, expected_count)) { 53489eb946aSMatthew Wilcox xas_unlock_irq(&xas); 535290408d4SNaoya Horiguchi return -EAGAIN; 536290408d4SNaoya Horiguchi } 537290408d4SNaoya Horiguchi 538b890ec2aSMatthew Wilcox (Oracle) dst->index = src->index; 539b890ec2aSMatthew Wilcox (Oracle) dst->mapping = src->mapping; 5406a93ca8fSJohannes Weiner 541b890ec2aSMatthew Wilcox (Oracle) folio_get(dst); 542290408d4SNaoya Horiguchi 543b890ec2aSMatthew Wilcox (Oracle) xas_store(&xas, dst); 544290408d4SNaoya Horiguchi 545b890ec2aSMatthew Wilcox (Oracle) folio_ref_unfreeze(src, expected_count - 1); 546290408d4SNaoya Horiguchi 54789eb946aSMatthew Wilcox xas_unlock_irq(&xas); 5486a93ca8fSJohannes Weiner 54978bd5209SRafael Aquini return MIGRATEPAGE_SUCCESS; 550290408d4SNaoya Horiguchi } 551290408d4SNaoya Horiguchi 552290408d4SNaoya Horiguchi /* 55319138349SMatthew Wilcox (Oracle) * Copy the flags and some other ancillary information 554b20a3503SChristoph Lameter */ 55519138349SMatthew Wilcox (Oracle) void folio_migrate_flags(struct folio *newfolio, struct folio *folio) 556b20a3503SChristoph Lameter { 5577851a45cSRik van Riel int cpupid; 5587851a45cSRik van Riel 55919138349SMatthew Wilcox (Oracle) if (folio_test_error(folio)) 56019138349SMatthew Wilcox (Oracle) folio_set_error(newfolio); 56119138349SMatthew Wilcox (Oracle) if (folio_test_referenced(folio)) 56219138349SMatthew Wilcox (Oracle) folio_set_referenced(newfolio); 56319138349SMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) 56419138349SMatthew Wilcox (Oracle) folio_mark_uptodate(newfolio); 56519138349SMatthew Wilcox (Oracle) if (folio_test_clear_active(folio)) { 56619138349SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio); 56719138349SMatthew Wilcox (Oracle) folio_set_active(newfolio); 56819138349SMatthew Wilcox (Oracle) } else if (folio_test_clear_unevictable(folio)) 56919138349SMatthew Wilcox (Oracle) folio_set_unevictable(newfolio); 57019138349SMatthew Wilcox (Oracle) if (folio_test_workingset(folio)) 57119138349SMatthew Wilcox (Oracle) folio_set_workingset(newfolio); 57219138349SMatthew Wilcox (Oracle) if (folio_test_checked(folio)) 57319138349SMatthew Wilcox (Oracle) folio_set_checked(newfolio); 5746c287605SDavid Hildenbrand /* 5756c287605SDavid Hildenbrand * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via 5766c287605SDavid Hildenbrand * migration entries. We can still have PG_anon_exclusive set on an 5776c287605SDavid Hildenbrand * effectively unmapped and unreferenced first sub-pages of an 5786c287605SDavid Hildenbrand * anonymous THP: we can simply copy it here via PG_mappedtodisk. 
5796c287605SDavid Hildenbrand */ 58019138349SMatthew Wilcox (Oracle) if (folio_test_mappedtodisk(folio)) 58119138349SMatthew Wilcox (Oracle) folio_set_mappedtodisk(newfolio); 582b20a3503SChristoph Lameter 5833417013eSMatthew Wilcox (Oracle) /* Move dirty on pages not done by folio_migrate_mapping() */ 58419138349SMatthew Wilcox (Oracle) if (folio_test_dirty(folio)) 58519138349SMatthew Wilcox (Oracle) folio_set_dirty(newfolio); 586b20a3503SChristoph Lameter 58719138349SMatthew Wilcox (Oracle) if (folio_test_young(folio)) 58819138349SMatthew Wilcox (Oracle) folio_set_young(newfolio); 58919138349SMatthew Wilcox (Oracle) if (folio_test_idle(folio)) 59019138349SMatthew Wilcox (Oracle) folio_set_idle(newfolio); 59133c3fc71SVladimir Davydov 5927851a45cSRik van Riel /* 5937851a45cSRik van Riel * Copy NUMA information to the new page, to prevent over-eager 5947851a45cSRik van Riel * future migrations of this same page. 5957851a45cSRik van Riel */ 59619138349SMatthew Wilcox (Oracle) cpupid = page_cpupid_xchg_last(&folio->page, -1); 59733024536SHuang Ying /* 59833024536SHuang Ying * For memory tiering mode, when migrate between slow and fast 59933024536SHuang Ying * memory node, reset cpupid, because that is used to record 60033024536SHuang Ying * page access time in slow memory node. 60133024536SHuang Ying */ 60233024536SHuang Ying if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) { 60333024536SHuang Ying bool f_toptier = node_is_toptier(page_to_nid(&folio->page)); 60433024536SHuang Ying bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page)); 60533024536SHuang Ying 60633024536SHuang Ying if (f_toptier != t_toptier) 60733024536SHuang Ying cpupid = -1; 60833024536SHuang Ying } 60919138349SMatthew Wilcox (Oracle) page_cpupid_xchg_last(&newfolio->page, cpupid); 6107851a45cSRik van Riel 61119138349SMatthew Wilcox (Oracle) folio_migrate_ksm(newfolio, folio); 612c8d6553bSHugh Dickins /* 613c8d6553bSHugh Dickins * Please do not reorder this without considering how mm/ksm.c's 614c8d6553bSHugh Dickins * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache(). 615c8d6553bSHugh Dickins */ 61619138349SMatthew Wilcox (Oracle) if (folio_test_swapcache(folio)) 61719138349SMatthew Wilcox (Oracle) folio_clear_swapcache(folio); 61819138349SMatthew Wilcox (Oracle) folio_clear_private(folio); 619ad2fa371SMuchun Song 620ad2fa371SMuchun Song /* page->private contains hugetlb specific flags */ 62119138349SMatthew Wilcox (Oracle) if (!folio_test_hugetlb(folio)) 62219138349SMatthew Wilcox (Oracle) folio->private = NULL; 623b20a3503SChristoph Lameter 624b20a3503SChristoph Lameter /* 625b20a3503SChristoph Lameter * If any waiters have accumulated on the new page then 626b20a3503SChristoph Lameter * wake them up. 627b20a3503SChristoph Lameter */ 62819138349SMatthew Wilcox (Oracle) if (folio_test_writeback(newfolio)) 62919138349SMatthew Wilcox (Oracle) folio_end_writeback(newfolio); 630d435edcaSVlastimil Babka 6316aeff241SYang Shi /* 6326aeff241SYang Shi * PG_readahead shares the same bit with PG_reclaim. The above 6336aeff241SYang Shi * end_page_writeback() may clear PG_readahead mistakenly, so set the 6346aeff241SYang Shi * bit after that. 
6356aeff241SYang Shi */ 63619138349SMatthew Wilcox (Oracle) if (folio_test_readahead(folio)) 63719138349SMatthew Wilcox (Oracle) folio_set_readahead(newfolio); 6386aeff241SYang Shi 63919138349SMatthew Wilcox (Oracle) folio_copy_owner(newfolio, folio); 64074485cf2SJohannes Weiner 64119138349SMatthew Wilcox (Oracle) if (!folio_test_hugetlb(folio)) 642d21bba2bSMatthew Wilcox (Oracle) mem_cgroup_migrate(folio, newfolio); 643b20a3503SChristoph Lameter } 64419138349SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_migrate_flags); 6452916ecc0SJérôme Glisse 646715cbfd6SMatthew Wilcox (Oracle) void folio_migrate_copy(struct folio *newfolio, struct folio *folio) 6472916ecc0SJérôme Glisse { 648715cbfd6SMatthew Wilcox (Oracle) folio_copy(newfolio, folio); 649715cbfd6SMatthew Wilcox (Oracle) folio_migrate_flags(newfolio, folio); 6502916ecc0SJérôme Glisse } 651715cbfd6SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_migrate_copy); 652b20a3503SChristoph Lameter 6531d8b85ccSChristoph Lameter /************************************************************ 6541d8b85ccSChristoph Lameter * Migration functions 6551d8b85ccSChristoph Lameter ***********************************************************/ 6561d8b85ccSChristoph Lameter 65716ce101dSAlistair Popple int migrate_folio_extra(struct address_space *mapping, struct folio *dst, 65816ce101dSAlistair Popple struct folio *src, enum migrate_mode mode, int extra_count) 65916ce101dSAlistair Popple { 66016ce101dSAlistair Popple int rc; 66116ce101dSAlistair Popple 66216ce101dSAlistair Popple BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */ 66316ce101dSAlistair Popple 66416ce101dSAlistair Popple rc = folio_migrate_mapping(mapping, dst, src, extra_count); 66516ce101dSAlistair Popple 66616ce101dSAlistair Popple if (rc != MIGRATEPAGE_SUCCESS) 66716ce101dSAlistair Popple return rc; 66816ce101dSAlistair Popple 66916ce101dSAlistair Popple if (mode != MIGRATE_SYNC_NO_COPY) 67016ce101dSAlistair Popple folio_migrate_copy(dst, src); 67116ce101dSAlistair Popple else 67216ce101dSAlistair Popple folio_migrate_flags(dst, src); 67316ce101dSAlistair Popple return MIGRATEPAGE_SUCCESS; 67416ce101dSAlistair Popple } 67516ce101dSAlistair Popple 67654184650SMatthew Wilcox (Oracle) /** 67754184650SMatthew Wilcox (Oracle) * migrate_folio() - Simple folio migration. 67854184650SMatthew Wilcox (Oracle) * @mapping: The address_space containing the folio. 67954184650SMatthew Wilcox (Oracle) * @dst: The folio to migrate the data to. 68054184650SMatthew Wilcox (Oracle) * @src: The folio containing the current data. 68154184650SMatthew Wilcox (Oracle) * @mode: How to migrate the page. 682b20a3503SChristoph Lameter * 68354184650SMatthew Wilcox (Oracle) * Common logic to directly migrate a single LRU folio suitable for 68454184650SMatthew Wilcox (Oracle) * folios that do not use PagePrivate/PagePrivate2. 68554184650SMatthew Wilcox (Oracle) * 68654184650SMatthew Wilcox (Oracle) * Folios are locked upon entry and exit. 
687b20a3503SChristoph Lameter */ 68854184650SMatthew Wilcox (Oracle) int migrate_folio(struct address_space *mapping, struct folio *dst, 68954184650SMatthew Wilcox (Oracle) struct folio *src, enum migrate_mode mode) 690b20a3503SChristoph Lameter { 69116ce101dSAlistair Popple return migrate_folio_extra(mapping, dst, src, mode, 0); 692b20a3503SChristoph Lameter } 69354184650SMatthew Wilcox (Oracle) EXPORT_SYMBOL(migrate_folio); 694b20a3503SChristoph Lameter 6959361401eSDavid Howells #ifdef CONFIG_BLOCK 69684ade7c1SJan Kara /* Returns true if all buffers are successfully locked */ 69784ade7c1SJan Kara static bool buffer_migrate_lock_buffers(struct buffer_head *head, 69884ade7c1SJan Kara enum migrate_mode mode) 69984ade7c1SJan Kara { 70084ade7c1SJan Kara struct buffer_head *bh = head; 70184ade7c1SJan Kara 70284ade7c1SJan Kara /* Simple case, sync compaction */ 70384ade7c1SJan Kara if (mode != MIGRATE_ASYNC) { 70484ade7c1SJan Kara do { 70584ade7c1SJan Kara lock_buffer(bh); 70684ade7c1SJan Kara bh = bh->b_this_page; 70784ade7c1SJan Kara 70884ade7c1SJan Kara } while (bh != head); 70984ade7c1SJan Kara 71084ade7c1SJan Kara return true; 71184ade7c1SJan Kara } 71284ade7c1SJan Kara 71384ade7c1SJan Kara /* async case, we cannot block on lock_buffer so use trylock_buffer */ 71484ade7c1SJan Kara do { 71584ade7c1SJan Kara if (!trylock_buffer(bh)) { 71684ade7c1SJan Kara /* 71784ade7c1SJan Kara * We failed to lock the buffer and cannot stall in 71884ade7c1SJan Kara * async migration. Release the taken locks 71984ade7c1SJan Kara */ 72084ade7c1SJan Kara struct buffer_head *failed_bh = bh; 72184ade7c1SJan Kara bh = head; 72284ade7c1SJan Kara while (bh != failed_bh) { 72384ade7c1SJan Kara unlock_buffer(bh); 72484ade7c1SJan Kara bh = bh->b_this_page; 72584ade7c1SJan Kara } 72684ade7c1SJan Kara return false; 72784ade7c1SJan Kara } 72884ade7c1SJan Kara 72984ade7c1SJan Kara bh = bh->b_this_page; 73084ade7c1SJan Kara } while (bh != head); 73184ade7c1SJan Kara return true; 73284ade7c1SJan Kara } 73384ade7c1SJan Kara 73467235182SMatthew Wilcox (Oracle) static int __buffer_migrate_folio(struct address_space *mapping, 73567235182SMatthew Wilcox (Oracle) struct folio *dst, struct folio *src, enum migrate_mode mode, 73689cb0888SJan Kara bool check_refs) 7371d8b85ccSChristoph Lameter { 7381d8b85ccSChristoph Lameter struct buffer_head *bh, *head; 7391d8b85ccSChristoph Lameter int rc; 740cc4f11e6SJan Kara int expected_count; 7411d8b85ccSChristoph Lameter 74267235182SMatthew Wilcox (Oracle) head = folio_buffers(src); 74367235182SMatthew Wilcox (Oracle) if (!head) 74454184650SMatthew Wilcox (Oracle) return migrate_folio(mapping, dst, src, mode); 7451d8b85ccSChristoph Lameter 746cc4f11e6SJan Kara /* Check whether page does not have extra refs before we do more work */ 747108ca835SMatthew Wilcox (Oracle) expected_count = folio_expected_refs(mapping, src); 74867235182SMatthew Wilcox (Oracle) if (folio_ref_count(src) != expected_count) 749cc4f11e6SJan Kara return -EAGAIN; 750cc4f11e6SJan Kara 751cc4f11e6SJan Kara if (!buffer_migrate_lock_buffers(head, mode)) 752cc4f11e6SJan Kara return -EAGAIN; 7531d8b85ccSChristoph Lameter 75489cb0888SJan Kara if (check_refs) { 75589cb0888SJan Kara bool busy; 75689cb0888SJan Kara bool invalidated = false; 75789cb0888SJan Kara 75889cb0888SJan Kara recheck_buffers: 75989cb0888SJan Kara busy = false; 76089cb0888SJan Kara spin_lock(&mapping->private_lock); 76189cb0888SJan Kara bh = head; 76289cb0888SJan Kara do { 76389cb0888SJan Kara if (atomic_read(&bh->b_count)) { 76489cb0888SJan Kara busy = true; 
76589cb0888SJan Kara break; 76689cb0888SJan Kara } 76789cb0888SJan Kara bh = bh->b_this_page; 76889cb0888SJan Kara } while (bh != head); 76989cb0888SJan Kara if (busy) { 77089cb0888SJan Kara if (invalidated) { 77189cb0888SJan Kara rc = -EAGAIN; 77289cb0888SJan Kara goto unlock_buffers; 77389cb0888SJan Kara } 774ebdf4de5SJan Kara spin_unlock(&mapping->private_lock); 77589cb0888SJan Kara invalidate_bh_lrus(); 77689cb0888SJan Kara invalidated = true; 77789cb0888SJan Kara goto recheck_buffers; 77889cb0888SJan Kara } 77989cb0888SJan Kara } 78089cb0888SJan Kara 78167235182SMatthew Wilcox (Oracle) rc = folio_migrate_mapping(mapping, dst, src, 0); 78278bd5209SRafael Aquini if (rc != MIGRATEPAGE_SUCCESS) 783cc4f11e6SJan Kara goto unlock_buffers; 7841d8b85ccSChristoph Lameter 78567235182SMatthew Wilcox (Oracle) folio_attach_private(dst, folio_detach_private(src)); 7861d8b85ccSChristoph Lameter 7871d8b85ccSChristoph Lameter bh = head; 7881d8b85ccSChristoph Lameter do { 78967235182SMatthew Wilcox (Oracle) set_bh_page(bh, &dst->page, bh_offset(bh)); 7901d8b85ccSChristoph Lameter bh = bh->b_this_page; 7911d8b85ccSChristoph Lameter } while (bh != head); 7921d8b85ccSChristoph Lameter 7932916ecc0SJérôme Glisse if (mode != MIGRATE_SYNC_NO_COPY) 79467235182SMatthew Wilcox (Oracle) folio_migrate_copy(dst, src); 7952916ecc0SJérôme Glisse else 79667235182SMatthew Wilcox (Oracle) folio_migrate_flags(dst, src); 7971d8b85ccSChristoph Lameter 798cc4f11e6SJan Kara rc = MIGRATEPAGE_SUCCESS; 799cc4f11e6SJan Kara unlock_buffers: 800ebdf4de5SJan Kara if (check_refs) 801ebdf4de5SJan Kara spin_unlock(&mapping->private_lock); 8021d8b85ccSChristoph Lameter bh = head; 8031d8b85ccSChristoph Lameter do { 8041d8b85ccSChristoph Lameter unlock_buffer(bh); 8051d8b85ccSChristoph Lameter bh = bh->b_this_page; 8061d8b85ccSChristoph Lameter } while (bh != head); 8071d8b85ccSChristoph Lameter 808cc4f11e6SJan Kara return rc; 8091d8b85ccSChristoph Lameter } 81089cb0888SJan Kara 81167235182SMatthew Wilcox (Oracle) /** 81267235182SMatthew Wilcox (Oracle) * buffer_migrate_folio() - Migration function for folios with buffers. 81367235182SMatthew Wilcox (Oracle) * @mapping: The address space containing @src. 81467235182SMatthew Wilcox (Oracle) * @dst: The folio to migrate to. 81567235182SMatthew Wilcox (Oracle) * @src: The folio to migrate from. 81667235182SMatthew Wilcox (Oracle) * @mode: How to migrate the folio. 81767235182SMatthew Wilcox (Oracle) * 81867235182SMatthew Wilcox (Oracle) * This function can only be used if the underlying filesystem guarantees 81967235182SMatthew Wilcox (Oracle) * that no other references to @src exist. For example attached buffer 82067235182SMatthew Wilcox (Oracle) * heads are accessed only under the folio lock. If your filesystem cannot 82167235182SMatthew Wilcox (Oracle) * provide this guarantee, buffer_migrate_folio_norefs() may be more 82267235182SMatthew Wilcox (Oracle) * appropriate. 82367235182SMatthew Wilcox (Oracle) * 82467235182SMatthew Wilcox (Oracle) * Return: 0 on success or a negative errno on failure. 
82589cb0888SJan Kara */ 82667235182SMatthew Wilcox (Oracle) int buffer_migrate_folio(struct address_space *mapping, 82767235182SMatthew Wilcox (Oracle) struct folio *dst, struct folio *src, enum migrate_mode mode) 82889cb0888SJan Kara { 82967235182SMatthew Wilcox (Oracle) return __buffer_migrate_folio(mapping, dst, src, mode, false); 83089cb0888SJan Kara } 83167235182SMatthew Wilcox (Oracle) EXPORT_SYMBOL(buffer_migrate_folio); 83289cb0888SJan Kara 83367235182SMatthew Wilcox (Oracle) /** 83467235182SMatthew Wilcox (Oracle) * buffer_migrate_folio_norefs() - Migration function for folios with buffers. 83567235182SMatthew Wilcox (Oracle) * @mapping: The address space containing @src. 83667235182SMatthew Wilcox (Oracle) * @dst: The folio to migrate to. 83767235182SMatthew Wilcox (Oracle) * @src: The folio to migrate from. 83867235182SMatthew Wilcox (Oracle) * @mode: How to migrate the folio. 83967235182SMatthew Wilcox (Oracle) * 84067235182SMatthew Wilcox (Oracle) * Like buffer_migrate_folio() except that this variant is more careful 84167235182SMatthew Wilcox (Oracle) * and checks that there are also no buffer head references. This function 84267235182SMatthew Wilcox (Oracle) * is the right one for mappings where buffer heads are directly looked 84367235182SMatthew Wilcox (Oracle) * up and referenced (such as block device mappings). 84467235182SMatthew Wilcox (Oracle) * 84567235182SMatthew Wilcox (Oracle) * Return: 0 on success or a negative errno on failure. 84689cb0888SJan Kara */ 84767235182SMatthew Wilcox (Oracle) int buffer_migrate_folio_norefs(struct address_space *mapping, 84867235182SMatthew Wilcox (Oracle) struct folio *dst, struct folio *src, enum migrate_mode mode) 84989cb0888SJan Kara { 85067235182SMatthew Wilcox (Oracle) return __buffer_migrate_folio(mapping, dst, src, mode, true); 85189cb0888SJan Kara } 852e26355e2SJan Kara EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs); 8539361401eSDavid Howells #endif 8541d8b85ccSChristoph Lameter 8552ec810d5SMatthew Wilcox (Oracle) int filemap_migrate_folio(struct address_space *mapping, 8562ec810d5SMatthew Wilcox (Oracle) struct folio *dst, struct folio *src, enum migrate_mode mode) 8572ec810d5SMatthew Wilcox (Oracle) { 8582ec810d5SMatthew Wilcox (Oracle) int ret; 8592ec810d5SMatthew Wilcox (Oracle) 8602ec810d5SMatthew Wilcox (Oracle) ret = folio_migrate_mapping(mapping, dst, src, 0); 8612ec810d5SMatthew Wilcox (Oracle) if (ret != MIGRATEPAGE_SUCCESS) 8622ec810d5SMatthew Wilcox (Oracle) return ret; 8632ec810d5SMatthew Wilcox (Oracle) 8642ec810d5SMatthew Wilcox (Oracle) if (folio_get_private(src)) 8652ec810d5SMatthew Wilcox (Oracle) folio_attach_private(dst, folio_detach_private(src)); 8662ec810d5SMatthew Wilcox (Oracle) 8672ec810d5SMatthew Wilcox (Oracle) if (mode != MIGRATE_SYNC_NO_COPY) 8682ec810d5SMatthew Wilcox (Oracle) folio_migrate_copy(dst, src); 8692ec810d5SMatthew Wilcox (Oracle) else 8702ec810d5SMatthew Wilcox (Oracle) folio_migrate_flags(dst, src); 8712ec810d5SMatthew Wilcox (Oracle) return MIGRATEPAGE_SUCCESS; 8722ec810d5SMatthew Wilcox (Oracle) } 8732ec810d5SMatthew Wilcox (Oracle) EXPORT_SYMBOL_GPL(filemap_migrate_folio); 8742ec810d5SMatthew Wilcox (Oracle) 87504e62a29SChristoph Lameter /* 8762be7fa10SMatthew Wilcox (Oracle) * Writeback a folio to clean the dirty state 87704e62a29SChristoph Lameter */ 8782be7fa10SMatthew Wilcox (Oracle) static int writeout(struct address_space *mapping, struct folio *folio) 87904e62a29SChristoph Lameter { 88004e62a29SChristoph Lameter struct writeback_control wbc = { 88104e62a29SChristoph 
Lameter .sync_mode = WB_SYNC_NONE, 88204e62a29SChristoph Lameter .nr_to_write = 1, 88304e62a29SChristoph Lameter .range_start = 0, 88404e62a29SChristoph Lameter .range_end = LLONG_MAX, 88504e62a29SChristoph Lameter .for_reclaim = 1 88604e62a29SChristoph Lameter }; 88704e62a29SChristoph Lameter int rc; 88804e62a29SChristoph Lameter 88904e62a29SChristoph Lameter if (!mapping->a_ops->writepage) 89004e62a29SChristoph Lameter /* No write method for the address space */ 89104e62a29SChristoph Lameter return -EINVAL; 89204e62a29SChristoph Lameter 8932be7fa10SMatthew Wilcox (Oracle) if (!folio_clear_dirty_for_io(folio)) 89404e62a29SChristoph Lameter /* Someone else already triggered a write */ 89504e62a29SChristoph Lameter return -EAGAIN; 89604e62a29SChristoph Lameter 89704e62a29SChristoph Lameter /* 8982be7fa10SMatthew Wilcox (Oracle) * A dirty folio may imply that the underlying filesystem has 8992be7fa10SMatthew Wilcox (Oracle) * the folio on some queue. So the folio must be clean for 9002be7fa10SMatthew Wilcox (Oracle) * migration. Writeout may mean we lose the lock and the 9012be7fa10SMatthew Wilcox (Oracle) * folio state is no longer what we checked for earlier. 90204e62a29SChristoph Lameter * At this point we know that the migration attempt cannot 90304e62a29SChristoph Lameter * be successful. 90404e62a29SChristoph Lameter */ 9054eecb8b9SMatthew Wilcox (Oracle) remove_migration_ptes(folio, folio, false); 90604e62a29SChristoph Lameter 9072be7fa10SMatthew Wilcox (Oracle) rc = mapping->a_ops->writepage(&folio->page, &wbc); 90804e62a29SChristoph Lameter 90904e62a29SChristoph Lameter if (rc != AOP_WRITEPAGE_ACTIVATE) 91004e62a29SChristoph Lameter /* unlocked. Relock */ 9112be7fa10SMatthew Wilcox (Oracle) folio_lock(folio); 91204e62a29SChristoph Lameter 913bda8550dSHugh Dickins return (rc < 0) ? -EIO : -EAGAIN; 91404e62a29SChristoph Lameter } 91504e62a29SChristoph Lameter 91604e62a29SChristoph Lameter /* 91704e62a29SChristoph Lameter * Default handling if a filesystem does not provide a migration function. 91804e62a29SChristoph Lameter */ 9198faa8ef5SMatthew Wilcox (Oracle) static int fallback_migrate_folio(struct address_space *mapping, 9208faa8ef5SMatthew Wilcox (Oracle) struct folio *dst, struct folio *src, enum migrate_mode mode) 9218351a6e4SChristoph Lameter { 9228faa8ef5SMatthew Wilcox (Oracle) if (folio_test_dirty(src)) { 9238faa8ef5SMatthew Wilcox (Oracle) /* Only writeback folios in full synchronous migration */ 9242916ecc0SJérôme Glisse switch (mode) { 9252916ecc0SJérôme Glisse case MIGRATE_SYNC: 9262916ecc0SJérôme Glisse case MIGRATE_SYNC_NO_COPY: 9272916ecc0SJérôme Glisse break; 9282916ecc0SJérôme Glisse default: 929b969c4abSMel Gorman return -EBUSY; 9302916ecc0SJérôme Glisse } 9312be7fa10SMatthew Wilcox (Oracle) return writeout(mapping, src); 932b969c4abSMel Gorman } 9338351a6e4SChristoph Lameter 9348351a6e4SChristoph Lameter /* 9358351a6e4SChristoph Lameter * Buffers may be managed in a filesystem specific way. 9368351a6e4SChristoph Lameter * We must have no buffers or drop them. 9378351a6e4SChristoph Lameter */ 9388faa8ef5SMatthew Wilcox (Oracle) if (folio_test_private(src) && 9398faa8ef5SMatthew Wilcox (Oracle) !filemap_release_folio(src, GFP_KERNEL)) 940806031bbSMel Gorman return mode == MIGRATE_SYNC ? 
-EAGAIN : -EBUSY; 9418351a6e4SChristoph Lameter 94254184650SMatthew Wilcox (Oracle) return migrate_folio(mapping, dst, src, mode); 9438351a6e4SChristoph Lameter } 9448351a6e4SChristoph Lameter 9451d8b85ccSChristoph Lameter /* 946e24f0b8fSChristoph Lameter * Move a page to a newly allocated page 947e24f0b8fSChristoph Lameter * The page is locked and all ptes have been successfully removed. 948b20a3503SChristoph Lameter * 949e24f0b8fSChristoph Lameter * The new page will have replaced the old page if this function 950e24f0b8fSChristoph Lameter * is successful. 951894bc310SLee Schermerhorn * 952894bc310SLee Schermerhorn * Return value: 953894bc310SLee Schermerhorn * < 0 - error code 95478bd5209SRafael Aquini * MIGRATEPAGE_SUCCESS - success 955b20a3503SChristoph Lameter */ 956e7e3ffebSMatthew Wilcox (Oracle) static int move_to_new_folio(struct folio *dst, struct folio *src, 9575c3f9a67SHugh Dickins enum migrate_mode mode) 958b20a3503SChristoph Lameter { 959bda807d4SMinchan Kim int rc = -EAGAIN; 960e7e3ffebSMatthew Wilcox (Oracle) bool is_lru = !__PageMovable(&src->page); 961b20a3503SChristoph Lameter 962e7e3ffebSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(src), src); 963e7e3ffebSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst); 964b20a3503SChristoph Lameter 965bda807d4SMinchan Kim if (likely(is_lru)) { 96668f2736aSMatthew Wilcox (Oracle) struct address_space *mapping = folio_mapping(src); 96768f2736aSMatthew Wilcox (Oracle) 968b20a3503SChristoph Lameter if (!mapping) 96954184650SMatthew Wilcox (Oracle) rc = migrate_folio(mapping, dst, src, mode); 9705490da4fSMatthew Wilcox (Oracle) else if (mapping->a_ops->migrate_folio) 971b20a3503SChristoph Lameter /* 9725490da4fSMatthew Wilcox (Oracle) * Most folios have a mapping and most filesystems 9735490da4fSMatthew Wilcox (Oracle) * provide a migrate_folio callback. Anonymous folios 974bda807d4SMinchan Kim * are part of swap space which also has its own 9755490da4fSMatthew Wilcox (Oracle) * migrate_folio callback. This is the most common path 976bda807d4SMinchan Kim * for page migration. 977b20a3503SChristoph Lameter */ 9785490da4fSMatthew Wilcox (Oracle) rc = mapping->a_ops->migrate_folio(mapping, dst, src, 9795490da4fSMatthew Wilcox (Oracle) mode); 9808351a6e4SChristoph Lameter else 9818faa8ef5SMatthew Wilcox (Oracle) rc = fallback_migrate_folio(mapping, dst, src, mode); 982bda807d4SMinchan Kim } else { 98368f2736aSMatthew Wilcox (Oracle) const struct movable_operations *mops; 98468f2736aSMatthew Wilcox (Oracle) 985bda807d4SMinchan Kim /* 986bda807d4SMinchan Kim * In case of non-lru page, it could be released after 987bda807d4SMinchan Kim * isolation step. In that case, we shouldn't try migration. 
988bda807d4SMinchan Kim */ 989e7e3ffebSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_isolated(src), src); 990e7e3ffebSMatthew Wilcox (Oracle) if (!folio_test_movable(src)) { 991bda807d4SMinchan Kim rc = MIGRATEPAGE_SUCCESS; 992e7e3ffebSMatthew Wilcox (Oracle) folio_clear_isolated(src); 993bda807d4SMinchan Kim goto out; 994bda807d4SMinchan Kim } 995bda807d4SMinchan Kim 996da707a6dSVishal Moola (Oracle) mops = folio_movable_ops(src); 99768f2736aSMatthew Wilcox (Oracle) rc = mops->migrate_page(&dst->page, &src->page, mode); 998bda807d4SMinchan Kim WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS && 999e7e3ffebSMatthew Wilcox (Oracle) !folio_test_isolated(src)); 1000bda807d4SMinchan Kim } 1001b20a3503SChristoph Lameter 10025c3f9a67SHugh Dickins /* 1003e7e3ffebSMatthew Wilcox (Oracle) * When successful, old pagecache src->mapping must be cleared before 1004e7e3ffebSMatthew Wilcox (Oracle) * src is freed; but stats require that PageAnon be left as PageAnon. 10055c3f9a67SHugh Dickins */ 10065c3f9a67SHugh Dickins if (rc == MIGRATEPAGE_SUCCESS) { 1007e7e3ffebSMatthew Wilcox (Oracle) if (__PageMovable(&src->page)) { 1008e7e3ffebSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_isolated(src), src); 1009bda807d4SMinchan Kim 1010bda807d4SMinchan Kim /* 1011bda807d4SMinchan Kim * We clear PG_movable under page_lock so any compactor 1012bda807d4SMinchan Kim * cannot try to migrate this page. 1013bda807d4SMinchan Kim */ 1014e7e3ffebSMatthew Wilcox (Oracle) folio_clear_isolated(src); 1015bda807d4SMinchan Kim } 1016bda807d4SMinchan Kim 1017bda807d4SMinchan Kim /* 1018e7e3ffebSMatthew Wilcox (Oracle) * Anonymous and movable src->mapping will be cleared by 1019bda807d4SMinchan Kim * free_pages_prepare so don't reset it here for keeping 1020bda807d4SMinchan Kim * the type to work PageAnon, for example. 1021bda807d4SMinchan Kim */ 1022e7e3ffebSMatthew Wilcox (Oracle) if (!folio_mapping_flags(src)) 1023e7e3ffebSMatthew Wilcox (Oracle) src->mapping = NULL; 1024d2b2c6ddSLars Persson 1025e7e3ffebSMatthew Wilcox (Oracle) if (likely(!folio_is_zone_device(dst))) 1026e7e3ffebSMatthew Wilcox (Oracle) flush_dcache_folio(dst); 10273fe2011fSMel Gorman } 1028bda807d4SMinchan Kim out: 1029e24f0b8fSChristoph Lameter return rc; 1030e24f0b8fSChristoph Lameter } 1031e24f0b8fSChristoph Lameter 103264c8902eSHuang Ying /* 103364c8902eSHuang Ying * To record some information during migration, we use some unused 103464c8902eSHuang Ying * fields (mapping and private) of struct folio of the newly allocated 103564c8902eSHuang Ying * destination folio. This is safe because nobody is using them 103664c8902eSHuang Ying * except us. 
103764c8902eSHuang Ying */ 1038e77d587aSLinus Torvalds union migration_ptr { 1039e77d587aSLinus Torvalds struct anon_vma *anon_vma; 1040e77d587aSLinus Torvalds struct address_space *mapping; 1041e77d587aSLinus Torvalds }; 104264c8902eSHuang Ying static void __migrate_folio_record(struct folio *dst, 104364c8902eSHuang Ying unsigned long page_was_mapped, 104464c8902eSHuang Ying struct anon_vma *anon_vma) 1045e24f0b8fSChristoph Lameter { 1046e77d587aSLinus Torvalds union migration_ptr ptr = { .anon_vma = anon_vma }; 1047e77d587aSLinus Torvalds dst->mapping = ptr.mapping; 104864c8902eSHuang Ying dst->private = (void *)page_was_mapped; 104964c8902eSHuang Ying } 105064c8902eSHuang Ying 105164c8902eSHuang Ying static void __migrate_folio_extract(struct folio *dst, 105264c8902eSHuang Ying int *page_was_mappedp, 105364c8902eSHuang Ying struct anon_vma **anon_vmap) 105464c8902eSHuang Ying { 1055e77d587aSLinus Torvalds union migration_ptr ptr = { .mapping = dst->mapping }; 1056e77d587aSLinus Torvalds *anon_vmap = ptr.anon_vma; 105764c8902eSHuang Ying *page_was_mappedp = (unsigned long)dst->private; 105864c8902eSHuang Ying dst->mapping = NULL; 105964c8902eSHuang Ying dst->private = NULL; 106064c8902eSHuang Ying } 106164c8902eSHuang Ying 10625dfab109SHuang Ying /* Restore the source folio to the original state upon failure */ 10635dfab109SHuang Ying static void migrate_folio_undo_src(struct folio *src, 10645dfab109SHuang Ying int page_was_mapped, 10655dfab109SHuang Ying struct anon_vma *anon_vma, 1066ebe75e47SHuang Ying bool locked, 10675dfab109SHuang Ying struct list_head *ret) 10685dfab109SHuang Ying { 10695dfab109SHuang Ying if (page_was_mapped) 10705dfab109SHuang Ying remove_migration_ptes(src, src, false); 10715dfab109SHuang Ying /* Drop an anon_vma reference if we took one */ 10725dfab109SHuang Ying if (anon_vma) 10735dfab109SHuang Ying put_anon_vma(anon_vma); 1074ebe75e47SHuang Ying if (locked) 10755dfab109SHuang Ying folio_unlock(src); 1076ebe75e47SHuang Ying if (ret) 10775dfab109SHuang Ying list_move_tail(&src->lru, ret); 10785dfab109SHuang Ying } 10795dfab109SHuang Ying 10805dfab109SHuang Ying /* Restore the destination folio to the original state upon failure */ 10815dfab109SHuang Ying static void migrate_folio_undo_dst(struct folio *dst, 1082ebe75e47SHuang Ying bool locked, 10835dfab109SHuang Ying free_page_t put_new_page, 10845dfab109SHuang Ying unsigned long private) 10855dfab109SHuang Ying { 1086ebe75e47SHuang Ying if (locked) 10875dfab109SHuang Ying folio_unlock(dst); 10885dfab109SHuang Ying if (put_new_page) 10895dfab109SHuang Ying put_new_page(&dst->page, private); 10905dfab109SHuang Ying else 10915dfab109SHuang Ying folio_put(dst); 10925dfab109SHuang Ying } 10935dfab109SHuang Ying 109464c8902eSHuang Ying /* Cleanup src folio upon migration success */ 109564c8902eSHuang Ying static void migrate_folio_done(struct folio *src, 109664c8902eSHuang Ying enum migrate_reason reason) 109764c8902eSHuang Ying { 109864c8902eSHuang Ying /* 109964c8902eSHuang Ying * Compaction can migrate also non-LRU pages which are 110064c8902eSHuang Ying * not accounted to NR_ISOLATED_*. 
They can be recognized 110164c8902eSHuang Ying * as __PageMovable 110264c8902eSHuang Ying */ 110364c8902eSHuang Ying if (likely(!__folio_test_movable(src))) 110464c8902eSHuang Ying mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON + 110564c8902eSHuang Ying folio_is_file_lru(src), -folio_nr_pages(src)); 110664c8902eSHuang Ying 110764c8902eSHuang Ying if (reason != MR_MEMORY_FAILURE) 110864c8902eSHuang Ying /* We release the page in page_handle_poison. */ 110964c8902eSHuang Ying folio_put(src); 111064c8902eSHuang Ying } 111164c8902eSHuang Ying 1112ebe75e47SHuang Ying /* Obtain the lock on page, remove all ptes. */ 1113ebe75e47SHuang Ying static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page, 1114ebe75e47SHuang Ying unsigned long private, struct folio *src, 1115*fb3592c4SHuang Ying struct folio **dstp, int force, 1116ebe75e47SHuang Ying enum migrate_mode mode, enum migrate_reason reason, 1117ebe75e47SHuang Ying struct list_head *ret) 1118e24f0b8fSChristoph Lameter { 1119ebe75e47SHuang Ying struct folio *dst; 11200dabec93SMinchan Kim int rc = -EAGAIN; 1121ebe75e47SHuang Ying struct page *newpage = NULL; 112264c8902eSHuang Ying int page_was_mapped = 0; 11233f6c8272SMel Gorman struct anon_vma *anon_vma = NULL; 1124682a71a1SMatthew Wilcox (Oracle) bool is_lru = !__PageMovable(&src->page); 1125ebe75e47SHuang Ying bool locked = false; 1126ebe75e47SHuang Ying bool dst_locked = false; 1127ebe75e47SHuang Ying 1128ebe75e47SHuang Ying if (folio_ref_count(src) == 1) { 1129ebe75e47SHuang Ying /* Folio was freed from under us. So we are done. */ 1130ebe75e47SHuang Ying folio_clear_active(src); 1131ebe75e47SHuang Ying folio_clear_unevictable(src); 1132ebe75e47SHuang Ying /* free_pages_prepare() will clear PG_isolated. */ 1133ebe75e47SHuang Ying list_del(&src->lru); 1134ebe75e47SHuang Ying migrate_folio_done(src, reason); 1135ebe75e47SHuang Ying return MIGRATEPAGE_SUCCESS; 1136ebe75e47SHuang Ying } 1137ebe75e47SHuang Ying 1138ebe75e47SHuang Ying newpage = get_new_page(&src->page, private); 1139ebe75e47SHuang Ying if (!newpage) 1140ebe75e47SHuang Ying return -ENOMEM; 1141ebe75e47SHuang Ying dst = page_folio(newpage); 1142ebe75e47SHuang Ying *dstp = dst; 1143ebe75e47SHuang Ying 1144ebe75e47SHuang Ying dst->private = NULL; 114595a402c3SChristoph Lameter 1146682a71a1SMatthew Wilcox (Oracle) if (!folio_trylock(src)) { 1147a6bc32b8SMel Gorman if (!force || mode == MIGRATE_ASYNC) 11480dabec93SMinchan Kim goto out; 11493e7d3449SMel Gorman 11503e7d3449SMel Gorman /* 11513e7d3449SMel Gorman * It's not safe for direct compaction to call lock_page. 11523e7d3449SMel Gorman * For example, during page readahead pages are added locked 11533e7d3449SMel Gorman * to the LRU. Later, when the IO completes the pages are 11543e7d3449SMel Gorman * marked uptodate and unlocked. However, the queueing 11553e7d3449SMel Gorman * could be merging multiple pages for one bio (e.g. 1156d4388340SMatthew Wilcox (Oracle) * mpage_readahead). If an allocation happens for the 11573e7d3449SMel Gorman * second or third page, the process can end up locking 11583e7d3449SMel Gorman * the same page twice and deadlocking. Rather than 11593e7d3449SMel Gorman * trying to be clever about what pages can be locked, 11603e7d3449SMel Gorman * avoid the use of lock_page for direct compaction 11613e7d3449SMel Gorman * altogether. 
11623e7d3449SMel Gorman */ 11633e7d3449SMel Gorman if (current->flags & PF_MEMALLOC) 11640dabec93SMinchan Kim goto out; 11653e7d3449SMel Gorman 1166682a71a1SMatthew Wilcox (Oracle) folio_lock(src); 1167e24f0b8fSChristoph Lameter } 1168ebe75e47SHuang Ying locked = true; 1169e24f0b8fSChristoph Lameter 1170682a71a1SMatthew Wilcox (Oracle) if (folio_test_writeback(src)) { 117111bc82d6SAndrea Arcangeli /* 1172fed5b64aSJianguo Wu * Only in the case of a full synchronous migration is it 1173a6bc32b8SMel Gorman * necessary to wait for PageWriteback. In the async case, 1174a6bc32b8SMel Gorman * the retry loop is too short and in the sync-light case, 1175a6bc32b8SMel Gorman * the overhead of stalling is too much 117611bc82d6SAndrea Arcangeli */ 11772916ecc0SJérôme Glisse switch (mode) { 11782916ecc0SJérôme Glisse case MIGRATE_SYNC: 11792916ecc0SJérôme Glisse case MIGRATE_SYNC_NO_COPY: 11802916ecc0SJérôme Glisse break; 11812916ecc0SJérôme Glisse default: 118211bc82d6SAndrea Arcangeli rc = -EBUSY; 1183ebe75e47SHuang Ying goto out; 118411bc82d6SAndrea Arcangeli } 118511bc82d6SAndrea Arcangeli if (!force) 1186ebe75e47SHuang Ying goto out; 1187682a71a1SMatthew Wilcox (Oracle) folio_wait_writeback(src); 1188e24f0b8fSChristoph Lameter } 118903f15c86SHugh Dickins 1190e24f0b8fSChristoph Lameter /* 1191682a71a1SMatthew Wilcox (Oracle) * By try_to_migrate(), src->mapcount goes down to 0 here. In this case, 1192682a71a1SMatthew Wilcox (Oracle) * we cannot notice that anon_vma is freed while we migrate a page. 11931ce82b69SHugh Dickins * This get_anon_vma() delays freeing anon_vma pointer until the end 1194dc386d4dSKAMEZAWA Hiroyuki * of migration. File cache pages are no problem because of page_lock() 1195989f89c5SKAMEZAWA Hiroyuki * File Caches may use write_page() or lock_page() in migration, then, 1196989f89c5SKAMEZAWA Hiroyuki * just care Anon page here. 11973fe2011fSMel Gorman * 119829eea9b5SMatthew Wilcox (Oracle) * Only folio_get_anon_vma() understands the subtleties of 119903f15c86SHugh Dickins * getting a hold on an anon_vma from outside one of its mms. 120003f15c86SHugh Dickins * But if we cannot get anon_vma, then we won't need it anyway, 120103f15c86SHugh Dickins * because that implies that the anon page is no longer mapped 120203f15c86SHugh Dickins * (and cannot be remapped so long as we hold the page lock). 12033fe2011fSMel Gorman */ 1204682a71a1SMatthew Wilcox (Oracle) if (folio_test_anon(src) && !folio_test_ksm(src)) 120529eea9b5SMatthew Wilcox (Oracle) anon_vma = folio_get_anon_vma(src); 120662e1c553SShaohua Li 12077db7671fSHugh Dickins /* 12087db7671fSHugh Dickins * Block others from accessing the new page when we get around to 12097db7671fSHugh Dickins * establishing additional references. We are usually the only one 1210682a71a1SMatthew Wilcox (Oracle) * holding a reference to dst at this point. We used to have a BUG 1211682a71a1SMatthew Wilcox (Oracle) * here if folio_trylock(dst) fails, but would like to allow for 1212682a71a1SMatthew Wilcox (Oracle) * cases where there might be a race with the previous use of dst. 12137db7671fSHugh Dickins * This is much like races on refcount of oldpage: just don't BUG(). 
12147db7671fSHugh Dickins */ 1215682a71a1SMatthew Wilcox (Oracle) if (unlikely(!folio_trylock(dst))) 1216ebe75e47SHuang Ying goto out; 1217ebe75e47SHuang Ying dst_locked = true; 12187db7671fSHugh Dickins 1219bda807d4SMinchan Kim if (unlikely(!is_lru)) { 122064c8902eSHuang Ying __migrate_folio_record(dst, page_was_mapped, anon_vma); 122164c8902eSHuang Ying return MIGRATEPAGE_UNMAP; 1222bda807d4SMinchan Kim } 1223bda807d4SMinchan Kim 1224dc386d4dSKAMEZAWA Hiroyuki /* 122562e1c553SShaohua Li * Corner case handling: 122662e1c553SShaohua Li * 1. When a new swap-cache page is read into, it is added to the LRU 122762e1c553SShaohua Li * and treated as swapcache but it has no rmap yet. 1228682a71a1SMatthew Wilcox (Oracle) * Calling try_to_unmap() against a src->mapping==NULL page will 122962e1c553SShaohua Li * trigger a BUG. So handle it here. 1230d12b8951SYang Shi * 2. An orphaned page (see truncate_cleanup_page) might have 123162e1c553SShaohua Li * fs-private metadata. The page can be picked up due to memory 123262e1c553SShaohua Li * offlining. Everywhere else except page reclaim, the page is 123362e1c553SShaohua Li * invisible to the vm, so the page can not be migrated. So try to 123462e1c553SShaohua Li * free the metadata, so the page can be freed. 1235dc386d4dSKAMEZAWA Hiroyuki */ 1236682a71a1SMatthew Wilcox (Oracle) if (!src->mapping) { 1237682a71a1SMatthew Wilcox (Oracle) if (folio_test_private(src)) { 1238682a71a1SMatthew Wilcox (Oracle) try_to_free_buffers(src); 1239ebe75e47SHuang Ying goto out; 124062e1c553SShaohua Li } 1241682a71a1SMatthew Wilcox (Oracle) } else if (folio_mapped(src)) { 12427db7671fSHugh Dickins /* Establish migration ptes */ 1243682a71a1SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_anon(src) && 1244682a71a1SMatthew Wilcox (Oracle) !folio_test_ksm(src) && !anon_vma, src); 1245*fb3592c4SHuang Ying try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0); 124664c8902eSHuang Ying page_was_mapped = 1; 12472ebba6b7SHugh Dickins } 1248dc386d4dSKAMEZAWA Hiroyuki 124964c8902eSHuang Ying if (!folio_mapped(src)) { 125064c8902eSHuang Ying __migrate_folio_record(dst, page_was_mapped, anon_vma); 125164c8902eSHuang Ying return MIGRATEPAGE_UNMAP; 125264c8902eSHuang Ying } 125364c8902eSHuang Ying 125464c8902eSHuang Ying out: 125580562ba0SHuang Ying /* 125680562ba0SHuang Ying * A folio that has not been unmapped will be restored to 125780562ba0SHuang Ying * right list unless we want to retry. 125880562ba0SHuang Ying */ 1259*fb3592c4SHuang Ying if (rc == -EAGAIN) 1260ebe75e47SHuang Ying ret = NULL; 126180562ba0SHuang Ying 1262ebe75e47SHuang Ying migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret); 1263ebe75e47SHuang Ying migrate_folio_undo_dst(dst, dst_locked, put_new_page, private); 126480562ba0SHuang Ying 126580562ba0SHuang Ying return rc; 126680562ba0SHuang Ying } 126780562ba0SHuang Ying 1268ebe75e47SHuang Ying /* Migrate the folio to the newly allocated folio in dst. 
*/ 1269ebe75e47SHuang Ying static int migrate_folio_move(free_page_t put_new_page, unsigned long private, 1270ebe75e47SHuang Ying struct folio *src, struct folio *dst, 1271ebe75e47SHuang Ying enum migrate_mode mode, enum migrate_reason reason, 1272ebe75e47SHuang Ying struct list_head *ret) 127364c8902eSHuang Ying { 127464c8902eSHuang Ying int rc; 127564c8902eSHuang Ying int page_was_mapped = 0; 127664c8902eSHuang Ying struct anon_vma *anon_vma = NULL; 127764c8902eSHuang Ying bool is_lru = !__PageMovable(&src->page); 12785dfab109SHuang Ying struct list_head *prev; 127964c8902eSHuang Ying 128064c8902eSHuang Ying __migrate_folio_extract(dst, &page_was_mapped, &anon_vma); 12815dfab109SHuang Ying prev = dst->lru.prev; 12825dfab109SHuang Ying list_del(&dst->lru); 128364c8902eSHuang Ying 1284682a71a1SMatthew Wilcox (Oracle) rc = move_to_new_folio(dst, src, mode); 1285ebe75e47SHuang Ying if (rc) 1286ebe75e47SHuang Ying goto out; 12875dfab109SHuang Ying 128864c8902eSHuang Ying if (unlikely(!is_lru)) 128964c8902eSHuang Ying goto out_unlock_both; 1290e24f0b8fSChristoph Lameter 1291c3096e67SHugh Dickins /* 1292682a71a1SMatthew Wilcox (Oracle) * When successful, push dst to LRU immediately: so that if it 1293c3096e67SHugh Dickins * turns out to be an mlocked page, remove_migration_ptes() will 1294682a71a1SMatthew Wilcox (Oracle) * automatically build up the correct dst->mlock_count for it. 1295c3096e67SHugh Dickins * 1296c3096e67SHugh Dickins * We would like to do something similar for the old page, when 1297c3096e67SHugh Dickins * unsuccessful, and other cases when a page has been temporarily 1298c3096e67SHugh Dickins * isolated from the unevictable LRU: but this case is the easiest. 1299c3096e67SHugh Dickins */ 1300682a71a1SMatthew Wilcox (Oracle) folio_add_lru(dst); 13015c3f9a67SHugh Dickins if (page_was_mapped) 1302c3096e67SHugh Dickins lru_add_drain(); 1303c3096e67SHugh Dickins 13045c3f9a67SHugh Dickins if (page_was_mapped) 1305ebe75e47SHuang Ying remove_migration_ptes(src, dst, false); 13063f6c8272SMel Gorman 13077db7671fSHugh Dickins out_unlock_both: 1308682a71a1SMatthew Wilcox (Oracle) folio_unlock(dst); 1309ebe75e47SHuang Ying set_page_owner_migrate_reason(&dst->page, reason); 1310c6c919ebSMinchan Kim /* 1311682a71a1SMatthew Wilcox (Oracle) * If migration is successful, decrease refcount of dst, 1312c6c919ebSMinchan Kim * which will not free the page because new page owner increased 1313c3096e67SHugh Dickins * refcounter. 1314c6c919ebSMinchan Kim */ 1315682a71a1SMatthew Wilcox (Oracle) folio_put(dst); 1316c6c919ebSMinchan Kim 1317ebe75e47SHuang Ying /* 1318ebe75e47SHuang Ying * A folio that has been migrated has all references removed 1319ebe75e47SHuang Ying * and will be freed. 1320ebe75e47SHuang Ying */ 1321ebe75e47SHuang Ying list_del(&src->lru); 1322ebe75e47SHuang Ying /* Drop an anon_vma reference if we took one */ 1323ebe75e47SHuang Ying if (anon_vma) 1324ebe75e47SHuang Ying put_anon_vma(anon_vma); 1325ebe75e47SHuang Ying folio_unlock(src); 1326ebe75e47SHuang Ying migrate_folio_done(src, reason); 1327ebe75e47SHuang Ying 1328ebe75e47SHuang Ying return rc; 1329ebe75e47SHuang Ying out: 1330ebe75e47SHuang Ying /* 1331ebe75e47SHuang Ying * A folio that has not been migrated will be restored to 1332ebe75e47SHuang Ying * right list unless we want to retry. 
1333ebe75e47SHuang Ying */ 1334ebe75e47SHuang Ying if (rc == -EAGAIN) { 1335ebe75e47SHuang Ying list_add(&dst->lru, prev); 1336ebe75e47SHuang Ying __migrate_folio_record(dst, page_was_mapped, anon_vma); 13370dabec93SMinchan Kim return rc; 13380dabec93SMinchan Kim } 133995a402c3SChristoph Lameter 1340ebe75e47SHuang Ying migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret); 1341ebe75e47SHuang Ying migrate_folio_undo_dst(dst, true, put_new_page, private); 134268711a74SDavid Rientjes 1343e24f0b8fSChristoph Lameter return rc; 1344e24f0b8fSChristoph Lameter } 1345b20a3503SChristoph Lameter 1346e24f0b8fSChristoph Lameter /* 1347290408d4SNaoya Horiguchi * Counterpart of unmap_and_move_page() for hugepage migration. 1348290408d4SNaoya Horiguchi * 1349290408d4SNaoya Horiguchi * This function doesn't wait the completion of hugepage I/O 1350290408d4SNaoya Horiguchi * because there is no race between I/O and migration for hugepage. 1351290408d4SNaoya Horiguchi * Note that currently hugepage I/O occurs only in direct I/O 1352290408d4SNaoya Horiguchi * where no lock is held and PG_writeback is irrelevant, 1353290408d4SNaoya Horiguchi * and writeback status of all subpages are counted in the reference 1354290408d4SNaoya Horiguchi * count of the head page (i.e. if all subpages of a 2MB hugepage are 1355290408d4SNaoya Horiguchi * under direct I/O, the reference of the head page is 512 and a bit more.) 1356290408d4SNaoya Horiguchi * This means that when we try to migrate hugepage whose subpages are 1357290408d4SNaoya Horiguchi * doing direct I/O, some references remain after try_to_unmap() and 1358290408d4SNaoya Horiguchi * hugepage migration fails without data corruption. 1359290408d4SNaoya Horiguchi * 1360290408d4SNaoya Horiguchi * There is also no race when direct I/O is issued on the page under migration, 1361290408d4SNaoya Horiguchi * because then pte is replaced with migration swap entry and direct I/O code 1362290408d4SNaoya Horiguchi * will wait in the page fault for migration to complete. 1363290408d4SNaoya Horiguchi */ 1364290408d4SNaoya Horiguchi static int unmap_and_move_huge_page(new_page_t get_new_page, 136568711a74SDavid Rientjes free_page_t put_new_page, unsigned long private, 136668711a74SDavid Rientjes struct page *hpage, int force, 1367dd4ae78aSYang Shi enum migrate_mode mode, int reason, 1368dd4ae78aSYang Shi struct list_head *ret) 1369290408d4SNaoya Horiguchi { 13704eecb8b9SMatthew Wilcox (Oracle) struct folio *dst, *src = page_folio(hpage); 13712def7424SHugh Dickins int rc = -EAGAIN; 13722ebba6b7SHugh Dickins int page_was_mapped = 0; 137332665f2bSJoonsoo Kim struct page *new_hpage; 1374290408d4SNaoya Horiguchi struct anon_vma *anon_vma = NULL; 1375c0d0381aSMike Kravetz struct address_space *mapping = NULL; 1376290408d4SNaoya Horiguchi 1377c33db292SMatthew Wilcox (Oracle) if (folio_ref_count(src) == 1) { 137871a64f61SMuchun Song /* page was freed from under us. So we are done. 
*/ 1379ea8e72f4SSidhartha Kumar folio_putback_active_hugetlb(src); 138071a64f61SMuchun Song return MIGRATEPAGE_SUCCESS; 138171a64f61SMuchun Song } 138271a64f61SMuchun Song 1383666feb21SMichal Hocko new_hpage = get_new_page(hpage, private); 1384290408d4SNaoya Horiguchi if (!new_hpage) 1385290408d4SNaoya Horiguchi return -ENOMEM; 13864eecb8b9SMatthew Wilcox (Oracle) dst = page_folio(new_hpage); 1387290408d4SNaoya Horiguchi 1388c33db292SMatthew Wilcox (Oracle) if (!folio_trylock(src)) { 13892916ecc0SJérôme Glisse if (!force) 1390290408d4SNaoya Horiguchi goto out; 13912916ecc0SJérôme Glisse switch (mode) { 13922916ecc0SJérôme Glisse case MIGRATE_SYNC: 13932916ecc0SJérôme Glisse case MIGRATE_SYNC_NO_COPY: 13942916ecc0SJérôme Glisse break; 13952916ecc0SJérôme Glisse default: 13962916ecc0SJérôme Glisse goto out; 13972916ecc0SJérôme Glisse } 1398c33db292SMatthew Wilcox (Oracle) folio_lock(src); 1399290408d4SNaoya Horiguchi } 1400290408d4SNaoya Horiguchi 1401cb6acd01SMike Kravetz /* 1402cb6acd01SMike Kravetz * Check for pages which are in the process of being freed. Without 1403c33db292SMatthew Wilcox (Oracle) * folio_mapping() set, hugetlbfs specific move page routine will not 1404cb6acd01SMike Kravetz * be called and we could leak usage counts for subpools. 1405cb6acd01SMike Kravetz */ 1406345c62d1SSidhartha Kumar if (hugetlb_folio_subpool(src) && !folio_mapping(src)) { 1407cb6acd01SMike Kravetz rc = -EBUSY; 1408cb6acd01SMike Kravetz goto out_unlock; 1409cb6acd01SMike Kravetz } 1410cb6acd01SMike Kravetz 1411c33db292SMatthew Wilcox (Oracle) if (folio_test_anon(src)) 141229eea9b5SMatthew Wilcox (Oracle) anon_vma = folio_get_anon_vma(src); 1413290408d4SNaoya Horiguchi 1414c33db292SMatthew Wilcox (Oracle) if (unlikely(!folio_trylock(dst))) 14157db7671fSHugh Dickins goto put_anon; 14167db7671fSHugh Dickins 1417c33db292SMatthew Wilcox (Oracle) if (folio_mapped(src)) { 1418a98a2f0cSAlistair Popple enum ttu_flags ttu = 0; 1419336bf30eSMike Kravetz 1420c33db292SMatthew Wilcox (Oracle) if (!folio_test_anon(src)) { 1421c0d0381aSMike Kravetz /* 1422336bf30eSMike Kravetz * In shared mappings, try_to_unmap could potentially 1423336bf30eSMike Kravetz * call huge_pmd_unshare. Because of this, take 1424336bf30eSMike Kravetz * semaphore in write mode here and set TTU_RMAP_LOCKED 1425336bf30eSMike Kravetz * to let lower levels know we have taken the lock. 1426c0d0381aSMike Kravetz */ 1427c0d0381aSMike Kravetz mapping = hugetlb_page_mapping_lock_write(hpage); 1428c0d0381aSMike Kravetz if (unlikely(!mapping)) 1429c0d0381aSMike Kravetz goto unlock_put_anon; 1430c0d0381aSMike Kravetz 14315202978bSMiaohe Lin ttu = TTU_RMAP_LOCKED; 1432336bf30eSMike Kravetz } 1433336bf30eSMike Kravetz 14344b8554c5SMatthew Wilcox (Oracle) try_to_migrate(src, ttu); 14352ebba6b7SHugh Dickins page_was_mapped = 1; 1436336bf30eSMike Kravetz 14375202978bSMiaohe Lin if (ttu & TTU_RMAP_LOCKED) 1438336bf30eSMike Kravetz i_mmap_unlock_write(mapping); 14392ebba6b7SHugh Dickins } 1440290408d4SNaoya Horiguchi 1441c33db292SMatthew Wilcox (Oracle) if (!folio_mapped(src)) 1442e7e3ffebSMatthew Wilcox (Oracle) rc = move_to_new_folio(dst, src, mode); 1443290408d4SNaoya Horiguchi 1444336bf30eSMike Kravetz if (page_was_mapped) 14454eecb8b9SMatthew Wilcox (Oracle) remove_migration_ptes(src, 14464eecb8b9SMatthew Wilcox (Oracle) rc == MIGRATEPAGE_SUCCESS ? 
dst : src, false); 1447290408d4SNaoya Horiguchi 1448c0d0381aSMike Kravetz unlock_put_anon: 1449c33db292SMatthew Wilcox (Oracle) folio_unlock(dst); 14507db7671fSHugh Dickins 14517db7671fSHugh Dickins put_anon: 1452fd4a4663SHugh Dickins if (anon_vma) 14539e60109fSPeter Zijlstra put_anon_vma(anon_vma); 14548e6ac7faSAneesh Kumar K.V 14552def7424SHugh Dickins if (rc == MIGRATEPAGE_SUCCESS) { 1456345c62d1SSidhartha Kumar move_hugetlb_state(src, dst, reason); 14572def7424SHugh Dickins put_new_page = NULL; 14582def7424SHugh Dickins } 14598e6ac7faSAneesh Kumar K.V 1460cb6acd01SMike Kravetz out_unlock: 1461c33db292SMatthew Wilcox (Oracle) folio_unlock(src); 146209761333SHillf Danton out: 1463dd4ae78aSYang Shi if (rc == MIGRATEPAGE_SUCCESS) 1464ea8e72f4SSidhartha Kumar folio_putback_active_hugetlb(src); 1465a04840c6SMiaohe Lin else if (rc != -EAGAIN) 1466c33db292SMatthew Wilcox (Oracle) list_move_tail(&src->lru, ret); 146768711a74SDavid Rientjes 146868711a74SDavid Rientjes /* 146968711a74SDavid Rientjes * If migration was not successful and there's a freeing callback, use 147068711a74SDavid Rientjes * it. Otherwise, put_page() will drop the reference grabbed during 147168711a74SDavid Rientjes * isolation. 147268711a74SDavid Rientjes */ 14732def7424SHugh Dickins if (put_new_page) 147468711a74SDavid Rientjes put_new_page(new_hpage, private); 147568711a74SDavid Rientjes else 1476ea8e72f4SSidhartha Kumar folio_putback_active_hugetlb(dst); 147768711a74SDavid Rientjes 1478290408d4SNaoya Horiguchi return rc; 1479290408d4SNaoya Horiguchi } 1480290408d4SNaoya Horiguchi 1481eaec4e63SHuang Ying static inline int try_split_folio(struct folio *folio, struct list_head *split_folios) 1482d532e2e5SYang Shi { 14839c62ff00SHuang Ying int rc; 1484d532e2e5SYang Shi 1485eaec4e63SHuang Ying folio_lock(folio); 1486eaec4e63SHuang Ying rc = split_folio_to_list(folio, split_folios); 1487eaec4e63SHuang Ying folio_unlock(folio); 1488e6fa8a79SHuang Ying if (!rc) 1489eaec4e63SHuang Ying list_move_tail(&folio->lru, split_folios); 1490d532e2e5SYang Shi 1491d532e2e5SYang Shi return rc; 1492d532e2e5SYang Shi } 1493d532e2e5SYang Shi 149442012e04SHuang Ying #ifdef CONFIG_TRANSPARENT_HUGEPAGE 149542012e04SHuang Ying #define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR 149642012e04SHuang Ying #else 149742012e04SHuang Ying #define NR_MAX_BATCHED_MIGRATION 512 149842012e04SHuang Ying #endif 1499e5bfff8bSHuang Ying #define NR_MAX_MIGRATE_PAGES_RETRY 10 1500e5bfff8bSHuang Ying 15015b855937SHuang Ying struct migrate_pages_stats { 15025b855937SHuang Ying int nr_succeeded; /* Normal and large folios migrated successfully, in 15035b855937SHuang Ying units of base pages */ 15045b855937SHuang Ying int nr_failed_pages; /* Normal and large folios failed to be migrated, in 15055b855937SHuang Ying units of base pages. Untried folios aren't counted */ 15065b855937SHuang Ying int nr_thp_succeeded; /* THP migrated successfully */ 15075b855937SHuang Ying int nr_thp_failed; /* THP failed to be migrated */ 15085b855937SHuang Ying int nr_thp_split; /* THP split before migrating */ 15095b855937SHuang Ying }; 15105b855937SHuang Ying 1511290408d4SNaoya Horiguchi /* 1512e5bfff8bSHuang Ying * Returns the number of hugetlb folios that were not migrated, or an error code 1513e5bfff8bSHuang Ying * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable 1514e5bfff8bSHuang Ying * any more because the list has become empty or no retryable hugetlb folios 1515e5bfff8bSHuang Ying * exist any more. 
It is caller's responsibility to call putback_movable_pages() 1516e5bfff8bSHuang Ying * only if ret != 0. 1517e5bfff8bSHuang Ying */ 1518e5bfff8bSHuang Ying static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page, 1519e5bfff8bSHuang Ying free_page_t put_new_page, unsigned long private, 1520e5bfff8bSHuang Ying enum migrate_mode mode, int reason, 1521e5bfff8bSHuang Ying struct migrate_pages_stats *stats, 1522e5bfff8bSHuang Ying struct list_head *ret_folios) 1523e5bfff8bSHuang Ying { 1524e5bfff8bSHuang Ying int retry = 1; 1525e5bfff8bSHuang Ying int nr_failed = 0; 1526e5bfff8bSHuang Ying int nr_retry_pages = 0; 1527e5bfff8bSHuang Ying int pass = 0; 1528e5bfff8bSHuang Ying struct folio *folio, *folio2; 1529e5bfff8bSHuang Ying int rc, nr_pages; 1530e5bfff8bSHuang Ying 1531e5bfff8bSHuang Ying for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) { 1532e5bfff8bSHuang Ying retry = 0; 1533e5bfff8bSHuang Ying nr_retry_pages = 0; 1534e5bfff8bSHuang Ying 1535e5bfff8bSHuang Ying list_for_each_entry_safe(folio, folio2, from, lru) { 1536e5bfff8bSHuang Ying if (!folio_test_hugetlb(folio)) 1537e5bfff8bSHuang Ying continue; 1538e5bfff8bSHuang Ying 1539e5bfff8bSHuang Ying nr_pages = folio_nr_pages(folio); 1540e5bfff8bSHuang Ying 1541e5bfff8bSHuang Ying cond_resched(); 1542e5bfff8bSHuang Ying 15436f7d760eSHuang Ying /* 15446f7d760eSHuang Ying * Migratability of hugepages depends on architectures and 15456f7d760eSHuang Ying * their size. This check is necessary because some callers 15466f7d760eSHuang Ying * of hugepage migration like soft offline and memory 15476f7d760eSHuang Ying * hotremove don't walk through page tables or check whether 15486f7d760eSHuang Ying * the hugepage is pmd-based or not before kicking migration. 15496f7d760eSHuang Ying */ 15506f7d760eSHuang Ying if (!hugepage_migration_supported(folio_hstate(folio))) { 15516f7d760eSHuang Ying nr_failed++; 15526f7d760eSHuang Ying stats->nr_failed_pages += nr_pages; 15536f7d760eSHuang Ying list_move_tail(&folio->lru, ret_folios); 15546f7d760eSHuang Ying continue; 15556f7d760eSHuang Ying } 15566f7d760eSHuang Ying 1557e5bfff8bSHuang Ying rc = unmap_and_move_huge_page(get_new_page, 1558e5bfff8bSHuang Ying put_new_page, private, 1559e5bfff8bSHuang Ying &folio->page, pass > 2, mode, 1560e5bfff8bSHuang Ying reason, ret_folios); 1561e5bfff8bSHuang Ying /* 1562e5bfff8bSHuang Ying * The rules are: 1563e5bfff8bSHuang Ying * Success: hugetlb folio will be put back 1564e5bfff8bSHuang Ying * -EAGAIN: stay on the from list 1565e5bfff8bSHuang Ying * -ENOMEM: stay on the from list 1566e5bfff8bSHuang Ying * Other errno: put on ret_folios list 1567e5bfff8bSHuang Ying */ 1568e5bfff8bSHuang Ying switch(rc) { 1569e5bfff8bSHuang Ying case -ENOMEM: 1570e5bfff8bSHuang Ying /* 1571e5bfff8bSHuang Ying * When memory is low, don't bother to try to migrate 1572e5bfff8bSHuang Ying * other folios, just exit. 
1573e5bfff8bSHuang Ying */ 1574e5bfff8bSHuang Ying stats->nr_failed_pages += nr_pages + nr_retry_pages; 1575e5bfff8bSHuang Ying return -ENOMEM; 1576e5bfff8bSHuang Ying case -EAGAIN: 1577e5bfff8bSHuang Ying retry++; 1578e5bfff8bSHuang Ying nr_retry_pages += nr_pages; 1579e5bfff8bSHuang Ying break; 1580e5bfff8bSHuang Ying case MIGRATEPAGE_SUCCESS: 1581e5bfff8bSHuang Ying stats->nr_succeeded += nr_pages; 1582e5bfff8bSHuang Ying break; 1583e5bfff8bSHuang Ying default: 1584e5bfff8bSHuang Ying /* 1585e5bfff8bSHuang Ying * Permanent failure (-EBUSY, etc.): 1586e5bfff8bSHuang Ying * unlike -EAGAIN case, the failed folio is 1587e5bfff8bSHuang Ying * removed from migration folio list and not 1588e5bfff8bSHuang Ying * retried in the next outer loop. 1589e5bfff8bSHuang Ying */ 1590e5bfff8bSHuang Ying nr_failed++; 1591e5bfff8bSHuang Ying stats->nr_failed_pages += nr_pages; 1592e5bfff8bSHuang Ying break; 1593e5bfff8bSHuang Ying } 1594e5bfff8bSHuang Ying } 1595e5bfff8bSHuang Ying } 1596e5bfff8bSHuang Ying /* 1597e5bfff8bSHuang Ying * nr_failed is number of hugetlb folios failed to be migrated. After 1598e5bfff8bSHuang Ying * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb 1599e5bfff8bSHuang Ying * folios as failed. 1600e5bfff8bSHuang Ying */ 1601e5bfff8bSHuang Ying nr_failed += retry; 1602e5bfff8bSHuang Ying stats->nr_failed_pages += nr_retry_pages; 1603e5bfff8bSHuang Ying 1604e5bfff8bSHuang Ying return nr_failed; 1605e5bfff8bSHuang Ying } 1606e5bfff8bSHuang Ying 16075dfab109SHuang Ying /* 16085dfab109SHuang Ying * migrate_pages_batch() first unmaps folios in the from list as many as 16095dfab109SHuang Ying * possible, then move the unmapped folios. 1610*fb3592c4SHuang Ying * 1611*fb3592c4SHuang Ying * We only batch migration if mode == MIGRATE_ASYNC to avoid to wait a 1612*fb3592c4SHuang Ying * lock or bit when we have locked more than one folio. Which may cause 1613*fb3592c4SHuang Ying * deadlock (e.g., for loop device). So, if mode != MIGRATE_ASYNC, the 1614*fb3592c4SHuang Ying * length of the from list must be <= 1. 
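/*
 * Illustrative aside - a standalone userspace sketch, not kernel code.
 * migrate_hugetlbs() above and migrate_pages_batch() below share the same
 * control structure: a bounded number of passes over a work list, where
 * -EAGAIN keeps an item on the list for the next pass, -ENOMEM aborts the
 * whole run, any other error retires the item permanently, and items still
 * retrying after the final pass are counted as failures.  The sketch below
 * models only that retry policy; the item type, the fake error pattern and
 * the pass limit are assumptions, not kernel values.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_PASSES 10

struct stats { int succeeded; int failed; };

/* Pretend migration: multiples of 3 need one extra pass, item 7 fails hard. */
static int try_one(int item, int pass)
{
	if (item == 7)
		return -EBUSY;
	if (item % 3 == 0 && pass == 0)
		return -EAGAIN;
	return 0;
}

static int run_passes(int *items, int n, struct stats *st)
{
	int pass, i, retry = 1;

	for (pass = 0; pass < MAX_PASSES && retry; pass++) {
		retry = 0;
		for (i = 0; i < n; i++) {
			int rc;

			if (items[i] < 0)	/* already retired */
				continue;
			rc = try_one(items[i], pass);
			switch (rc) {
			case -ENOMEM:		/* fatal: give up on everything */
				return -ENOMEM;
			case -EAGAIN:		/* stay on the list for next pass */
				retry++;
				break;
			case 0:			/* success: retire the item */
				st->succeeded++;
				items[i] = -1;
				break;
			default:		/* permanent failure: retire it */
				st->failed++;
				items[i] = -1;
				break;
			}
		}
	}
	/* Items still retrying after the last pass count as failures. */
	st->failed += retry;
	return st->failed;
}

int main(void)
{
	int items[] = { 1, 3, 6, 7, 8 };
	struct stats st = { 0 };
	int nr_failed = run_passes(items, 5, &st);

	printf("succeeded=%d failed=%d (return %d)\n",
	       st.succeeded, st.failed, nr_failed);
	return 0;
}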
16155dfab109SHuang Ying */ 161642012e04SHuang Ying static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, 161742012e04SHuang Ying free_page_t put_new_page, unsigned long private, 161842012e04SHuang Ying enum migrate_mode mode, int reason, struct list_head *ret_folios, 161942012e04SHuang Ying struct migrate_pages_stats *stats) 162042012e04SHuang Ying { 16215dfab109SHuang Ying int retry; 162242012e04SHuang Ying int large_retry = 1; 162342012e04SHuang Ying int thp_retry = 1; 162442012e04SHuang Ying int nr_failed = 0; 162542012e04SHuang Ying int nr_retry_pages = 0; 162642012e04SHuang Ying int nr_large_failed = 0; 162742012e04SHuang Ying int pass = 0; 162842012e04SHuang Ying bool is_large = false; 162942012e04SHuang Ying bool is_thp = false; 16305dfab109SHuang Ying struct folio *folio, *folio2, *dst = NULL, *dst2; 16315dfab109SHuang Ying int rc, rc_saved, nr_pages; 163242012e04SHuang Ying LIST_HEAD(split_folios); 16335dfab109SHuang Ying LIST_HEAD(unmap_folios); 16345dfab109SHuang Ying LIST_HEAD(dst_folios); 163542012e04SHuang Ying bool nosplit = (reason == MR_NUMA_MISPLACED); 163642012e04SHuang Ying bool no_split_folio_counting = false; 163742012e04SHuang Ying 1638*fb3592c4SHuang Ying VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC && 1639*fb3592c4SHuang Ying !list_empty(from) && !list_is_singular(from)); 16405dfab109SHuang Ying retry: 16415dfab109SHuang Ying rc_saved = 0; 16425dfab109SHuang Ying retry = 1; 164342012e04SHuang Ying for (pass = 0; 164442012e04SHuang Ying pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry); 164542012e04SHuang Ying pass++) { 164642012e04SHuang Ying retry = 0; 164742012e04SHuang Ying large_retry = 0; 164842012e04SHuang Ying thp_retry = 0; 164942012e04SHuang Ying nr_retry_pages = 0; 165042012e04SHuang Ying 165142012e04SHuang Ying list_for_each_entry_safe(folio, folio2, from, lru) { 165242012e04SHuang Ying /* 165342012e04SHuang Ying * Large folio statistics is based on the source large 165442012e04SHuang Ying * folio. Capture required information that might get 165542012e04SHuang Ying * lost during migration. 165642012e04SHuang Ying */ 165742012e04SHuang Ying is_large = folio_test_large(folio); 165842012e04SHuang Ying is_thp = is_large && folio_test_pmd_mappable(folio); 165942012e04SHuang Ying nr_pages = folio_nr_pages(folio); 166042012e04SHuang Ying 166142012e04SHuang Ying cond_resched(); 166242012e04SHuang Ying 16636f7d760eSHuang Ying /* 16646f7d760eSHuang Ying * Large folio migration might be unsupported or 16656f7d760eSHuang Ying * the allocation might be failed so we should retry 16666f7d760eSHuang Ying * on the same folio with the large folio split 16676f7d760eSHuang Ying * to normal folios. 16686f7d760eSHuang Ying * 16696f7d760eSHuang Ying * Split folios are put in split_folios, and 16706f7d760eSHuang Ying * we will migrate them after the rest of the 16716f7d760eSHuang Ying * list is processed. 
16726f7d760eSHuang Ying */ 16736f7d760eSHuang Ying if (!thp_migration_supported() && is_thp) { 16746f7d760eSHuang Ying nr_large_failed++; 16756f7d760eSHuang Ying stats->nr_thp_failed++; 16766f7d760eSHuang Ying if (!try_split_folio(folio, &split_folios)) { 16776f7d760eSHuang Ying stats->nr_thp_split++; 16786f7d760eSHuang Ying continue; 16796f7d760eSHuang Ying } 16806f7d760eSHuang Ying stats->nr_failed_pages += nr_pages; 16816f7d760eSHuang Ying list_move_tail(&folio->lru, ret_folios); 16826f7d760eSHuang Ying continue; 16836f7d760eSHuang Ying } 16846f7d760eSHuang Ying 168564c8902eSHuang Ying rc = migrate_folio_unmap(get_new_page, put_new_page, private, 1686*fb3592c4SHuang Ying folio, &dst, pass > 2, mode, 1687*fb3592c4SHuang Ying reason, ret_folios); 168842012e04SHuang Ying /* 168942012e04SHuang Ying * The rules are: 169042012e04SHuang Ying * Success: folio will be freed 16915dfab109SHuang Ying * Unmap: folio will be put on unmap_folios list, 16925dfab109SHuang Ying * dst folio put on dst_folios list 169342012e04SHuang Ying * -EAGAIN: stay on the from list 169442012e04SHuang Ying * -ENOMEM: stay on the from list 169542012e04SHuang Ying * Other errno: put on ret_folios list 169642012e04SHuang Ying */ 169742012e04SHuang Ying switch(rc) { 169842012e04SHuang Ying case -ENOMEM: 169942012e04SHuang Ying /* 170042012e04SHuang Ying * When memory is low, don't bother to try to migrate 17015dfab109SHuang Ying * other folios, move unmapped folios, then exit. 170242012e04SHuang Ying */ 170342012e04SHuang Ying if (is_large) { 170442012e04SHuang Ying nr_large_failed++; 170542012e04SHuang Ying stats->nr_thp_failed += is_thp; 170642012e04SHuang Ying /* Large folio NUMA faulting doesn't split to retry. */ 170742012e04SHuang Ying if (!nosplit) { 170842012e04SHuang Ying int ret = try_split_folio(folio, &split_folios); 170942012e04SHuang Ying 171042012e04SHuang Ying if (!ret) { 171142012e04SHuang Ying stats->nr_thp_split += is_thp; 171242012e04SHuang Ying break; 171342012e04SHuang Ying } else if (reason == MR_LONGTERM_PIN && 171442012e04SHuang Ying ret == -EAGAIN) { 171542012e04SHuang Ying /* 171642012e04SHuang Ying * Try again to split large folio to 171742012e04SHuang Ying * mitigate the failure of longterm pinning. 171842012e04SHuang Ying */ 171942012e04SHuang Ying large_retry++; 172042012e04SHuang Ying thp_retry += is_thp; 172142012e04SHuang Ying nr_retry_pages += nr_pages; 172242012e04SHuang Ying break; 172342012e04SHuang Ying } 172442012e04SHuang Ying } 172542012e04SHuang Ying } else if (!no_split_folio_counting) { 172642012e04SHuang Ying nr_failed++; 172742012e04SHuang Ying } 172842012e04SHuang Ying 172942012e04SHuang Ying stats->nr_failed_pages += nr_pages + nr_retry_pages; 173042012e04SHuang Ying /* 173142012e04SHuang Ying * There might be some split folios of fail-to-migrate large 173242012e04SHuang Ying * folios left in split_folios list. Move them to ret_folios 173342012e04SHuang Ying * list so that they could be put back to the right list by 173442012e04SHuang Ying * the caller otherwise the folio refcnt will be leaked. 
173542012e04SHuang Ying */ 173642012e04SHuang Ying list_splice_init(&split_folios, ret_folios); 173742012e04SHuang Ying /* nr_failed isn't updated for not used */ 173842012e04SHuang Ying nr_large_failed += large_retry; 173942012e04SHuang Ying stats->nr_thp_failed += thp_retry; 17405dfab109SHuang Ying rc_saved = rc; 17415dfab109SHuang Ying if (list_empty(&unmap_folios)) 174242012e04SHuang Ying goto out; 17435dfab109SHuang Ying else 17445dfab109SHuang Ying goto move; 174542012e04SHuang Ying case -EAGAIN: 174642012e04SHuang Ying if (is_large) { 174742012e04SHuang Ying large_retry++; 174842012e04SHuang Ying thp_retry += is_thp; 174942012e04SHuang Ying } else if (!no_split_folio_counting) { 175042012e04SHuang Ying retry++; 175142012e04SHuang Ying } 175242012e04SHuang Ying nr_retry_pages += nr_pages; 175342012e04SHuang Ying break; 175442012e04SHuang Ying case MIGRATEPAGE_SUCCESS: 175542012e04SHuang Ying stats->nr_succeeded += nr_pages; 175642012e04SHuang Ying stats->nr_thp_succeeded += is_thp; 175742012e04SHuang Ying break; 17585dfab109SHuang Ying case MIGRATEPAGE_UNMAP: 17595dfab109SHuang Ying list_move_tail(&folio->lru, &unmap_folios); 17605dfab109SHuang Ying list_add_tail(&dst->lru, &dst_folios); 17615dfab109SHuang Ying break; 176242012e04SHuang Ying default: 176342012e04SHuang Ying /* 176442012e04SHuang Ying * Permanent failure (-EBUSY, etc.): 176542012e04SHuang Ying * unlike -EAGAIN case, the failed folio is 176642012e04SHuang Ying * removed from migration folio list and not 176742012e04SHuang Ying * retried in the next outer loop. 176842012e04SHuang Ying */ 176942012e04SHuang Ying if (is_large) { 177042012e04SHuang Ying nr_large_failed++; 177142012e04SHuang Ying stats->nr_thp_failed += is_thp; 177242012e04SHuang Ying } else if (!no_split_folio_counting) { 177342012e04SHuang Ying nr_failed++; 177442012e04SHuang Ying } 177542012e04SHuang Ying 177642012e04SHuang Ying stats->nr_failed_pages += nr_pages; 177742012e04SHuang Ying break; 177842012e04SHuang Ying } 177942012e04SHuang Ying } 178042012e04SHuang Ying } 178142012e04SHuang Ying nr_failed += retry; 178242012e04SHuang Ying nr_large_failed += large_retry; 178342012e04SHuang Ying stats->nr_thp_failed += thp_retry; 178442012e04SHuang Ying stats->nr_failed_pages += nr_retry_pages; 17855dfab109SHuang Ying move: 17867e12beb8SHuang Ying /* Flush TLBs for all unmapped folios */ 17877e12beb8SHuang Ying try_to_unmap_flush(); 17887e12beb8SHuang Ying 17895dfab109SHuang Ying retry = 1; 17905dfab109SHuang Ying for (pass = 0; 17915dfab109SHuang Ying pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry); 17925dfab109SHuang Ying pass++) { 17935dfab109SHuang Ying retry = 0; 17945dfab109SHuang Ying large_retry = 0; 17955dfab109SHuang Ying thp_retry = 0; 17965dfab109SHuang Ying nr_retry_pages = 0; 17975dfab109SHuang Ying 17985dfab109SHuang Ying dst = list_first_entry(&dst_folios, struct folio, lru); 17995dfab109SHuang Ying dst2 = list_next_entry(dst, lru); 18005dfab109SHuang Ying list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) { 18015dfab109SHuang Ying is_large = folio_test_large(folio); 18025dfab109SHuang Ying is_thp = is_large && folio_test_pmd_mappable(folio); 18035dfab109SHuang Ying nr_pages = folio_nr_pages(folio); 18045dfab109SHuang Ying 18055dfab109SHuang Ying cond_resched(); 18065dfab109SHuang Ying 18075dfab109SHuang Ying rc = migrate_folio_move(put_new_page, private, 18085dfab109SHuang Ying folio, dst, mode, 18095dfab109SHuang Ying reason, ret_folios); 18105dfab109SHuang Ying /* 18115dfab109SHuang Ying * The rules are: 
18125dfab109SHuang Ying * Success: folio will be freed 18135dfab109SHuang Ying * -EAGAIN: stay on the unmap_folios list 18145dfab109SHuang Ying * Other errno: put on ret_folios list 18155dfab109SHuang Ying */ 18165dfab109SHuang Ying switch(rc) { 18175dfab109SHuang Ying case -EAGAIN: 18185dfab109SHuang Ying if (is_large) { 18195dfab109SHuang Ying large_retry++; 18205dfab109SHuang Ying thp_retry += is_thp; 18215dfab109SHuang Ying } else if (!no_split_folio_counting) { 18225dfab109SHuang Ying retry++; 18235dfab109SHuang Ying } 18245dfab109SHuang Ying nr_retry_pages += nr_pages; 18255dfab109SHuang Ying break; 18265dfab109SHuang Ying case MIGRATEPAGE_SUCCESS: 18275dfab109SHuang Ying stats->nr_succeeded += nr_pages; 18285dfab109SHuang Ying stats->nr_thp_succeeded += is_thp; 18295dfab109SHuang Ying break; 18305dfab109SHuang Ying default: 18315dfab109SHuang Ying if (is_large) { 18325dfab109SHuang Ying nr_large_failed++; 18335dfab109SHuang Ying stats->nr_thp_failed += is_thp; 18345dfab109SHuang Ying } else if (!no_split_folio_counting) { 18355dfab109SHuang Ying nr_failed++; 18365dfab109SHuang Ying } 18375dfab109SHuang Ying 18385dfab109SHuang Ying stats->nr_failed_pages += nr_pages; 18395dfab109SHuang Ying break; 18405dfab109SHuang Ying } 18415dfab109SHuang Ying dst = dst2; 18425dfab109SHuang Ying dst2 = list_next_entry(dst, lru); 18435dfab109SHuang Ying } 18445dfab109SHuang Ying } 18455dfab109SHuang Ying nr_failed += retry; 18465dfab109SHuang Ying nr_large_failed += large_retry; 18475dfab109SHuang Ying stats->nr_thp_failed += thp_retry; 18485dfab109SHuang Ying stats->nr_failed_pages += nr_retry_pages; 18495dfab109SHuang Ying 18505dfab109SHuang Ying if (rc_saved) 18515dfab109SHuang Ying rc = rc_saved; 18525dfab109SHuang Ying else 18535dfab109SHuang Ying rc = nr_failed + nr_large_failed; 18545dfab109SHuang Ying out: 18555dfab109SHuang Ying /* Cleanup remaining folios */ 18565dfab109SHuang Ying dst = list_first_entry(&dst_folios, struct folio, lru); 18575dfab109SHuang Ying dst2 = list_next_entry(dst, lru); 18585dfab109SHuang Ying list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) { 18595dfab109SHuang Ying int page_was_mapped = 0; 18605dfab109SHuang Ying struct anon_vma *anon_vma = NULL; 18615dfab109SHuang Ying 18625dfab109SHuang Ying __migrate_folio_extract(dst, &page_was_mapped, &anon_vma); 18635dfab109SHuang Ying migrate_folio_undo_src(folio, page_was_mapped, anon_vma, 1864ebe75e47SHuang Ying true, ret_folios); 18655dfab109SHuang Ying list_del(&dst->lru); 1866ebe75e47SHuang Ying migrate_folio_undo_dst(dst, true, put_new_page, private); 18675dfab109SHuang Ying dst = dst2; 18685dfab109SHuang Ying dst2 = list_next_entry(dst, lru); 18695dfab109SHuang Ying } 18705dfab109SHuang Ying 187142012e04SHuang Ying /* 187242012e04SHuang Ying * Try to migrate split folios of fail-to-migrate large folios, no 187342012e04SHuang Ying * nr_failed counting in this round, since all split folios of a 187442012e04SHuang Ying * large folio is counted as 1 failure in the first round. 187542012e04SHuang Ying */ 18765dfab109SHuang Ying if (rc >= 0 && !list_empty(&split_folios)) { 187742012e04SHuang Ying /* 187842012e04SHuang Ying * Move non-migrated folios (after NR_MAX_MIGRATE_PAGES_RETRY 187942012e04SHuang Ying * retries) to ret_folios to avoid migrating them again. 
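/*
 * Illustrative aside - a standalone userspace sketch, not kernel code.
 * The batch path above works in two phases: migrate_folio_unmap() queues
 * each folio on unmap_folios with its destination on dst_folios, a single
 * try_to_unmap_flush() pays the TLB-flush cost once for the whole batch,
 * and only then does migrate_folio_move() finish each pair.  The sketch
 * below models that "prepare all, flush once, then complete all" shape
 * with hypothetical names; it is a sketch of the control flow only.
 */
#include <stdio.h>

#define BATCH 4

struct work { int id; int prepared; int done; };

static int flushes;			/* counts the expensive global step */

static void prepare(struct work *w)	/* analogue of the unmap phase */
{
	w->prepared = 1;
}

static void global_flush(void)		/* analogue of try_to_unmap_flush() */
{
	flushes++;
}

static void complete(struct work *w)	/* analogue of the move phase */
{
	if (w->prepared)
		w->done = 1;
}

int main(void)
{
	struct work batch[BATCH] = {
		{ .id = 1 }, { .id = 2 }, { .id = 3 }, { .id = 4 },
	};
	int i, done = 0;

	for (i = 0; i < BATCH; i++)	/* phase 1: prepare every item */
		prepare(&batch[i]);

	global_flush();			/* one flush for the whole batch */

	for (i = 0; i < BATCH; i++) {	/* phase 2: complete every item */
		complete(&batch[i]);
		done += batch[i].done;
	}

	printf("completed %d items with %d flush(es)\n", done, flushes);
	return 0;
}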
188042012e04SHuang Ying */ 188142012e04SHuang Ying list_splice_init(from, ret_folios); 188242012e04SHuang Ying list_splice_init(&split_folios, from); 1883*fb3592c4SHuang Ying /* 1884*fb3592c4SHuang Ying * Force async mode to avoid to wait lock or bit when we have 1885*fb3592c4SHuang Ying * locked more than one folios. 1886*fb3592c4SHuang Ying */ 1887*fb3592c4SHuang Ying mode = MIGRATE_ASYNC; 188842012e04SHuang Ying no_split_folio_counting = true; 18895dfab109SHuang Ying goto retry; 189042012e04SHuang Ying } 189142012e04SHuang Ying 189242012e04SHuang Ying return rc; 189342012e04SHuang Ying } 189442012e04SHuang Ying 1895e24f0b8fSChristoph Lameter /* 1896eaec4e63SHuang Ying * migrate_pages - migrate the folios specified in a list, to the free folios 1897c73e5c9cSSrivatsa S. Bhat * supplied as the target for the page migration 1898e24f0b8fSChristoph Lameter * 1899eaec4e63SHuang Ying * @from: The list of folios to be migrated. 1900eaec4e63SHuang Ying * @get_new_page: The function used to allocate free folios to be used 1901eaec4e63SHuang Ying * as the target of the folio migration. 1902eaec4e63SHuang Ying * @put_new_page: The function used to free target folios if migration 190368711a74SDavid Rientjes * fails, or NULL if no special handling is necessary. 1904c73e5c9cSSrivatsa S. Bhat * @private: Private data to be passed on to get_new_page() 1905c73e5c9cSSrivatsa S. Bhat * @mode: The migration mode that specifies the constraints for 1906eaec4e63SHuang Ying * folio migration, if any. 1907eaec4e63SHuang Ying * @reason: The reason for folio migration. 1908eaec4e63SHuang Ying * @ret_succeeded: Set to the number of folios migrated successfully if 19095ac95884SYang Shi * the caller passes a non-NULL pointer. 1910e24f0b8fSChristoph Lameter * 1911e5bfff8bSHuang Ying * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios 1912e5bfff8bSHuang Ying * are movable any more because the list has become empty or no retryable folios 1913e5bfff8bSHuang Ying * exist any more. It is caller's responsibility to call putback_movable_pages() 1914e5bfff8bSHuang Ying * only if ret != 0. 1915e24f0b8fSChristoph Lameter * 1916eaec4e63SHuang Ying * Returns the number of {normal folio, large folio, hugetlb} that were not 1917eaec4e63SHuang Ying * migrated, or an error code. The number of large folio splits will be 1918eaec4e63SHuang Ying * considered as the number of non-migrated large folio, no matter how many 1919eaec4e63SHuang Ying * split folios of the large folio are migrated successfully. 
1920e24f0b8fSChristoph Lameter */ 19219c620e2bSHugh Dickins int migrate_pages(struct list_head *from, new_page_t get_new_page, 192268711a74SDavid Rientjes free_page_t put_new_page, unsigned long private, 19235ac95884SYang Shi enum migrate_mode mode, int reason, unsigned int *ret_succeeded) 1924e24f0b8fSChristoph Lameter { 192542012e04SHuang Ying int rc, rc_gather; 1926*fb3592c4SHuang Ying int nr_pages, batch; 1927eaec4e63SHuang Ying struct folio *folio, *folio2; 192842012e04SHuang Ying LIST_HEAD(folios); 1929eaec4e63SHuang Ying LIST_HEAD(ret_folios); 19305b855937SHuang Ying struct migrate_pages_stats stats; 19312d1db3b1SChristoph Lameter 19327bc1aec5SLiam Mark trace_mm_migrate_pages_start(mode, reason); 19337bc1aec5SLiam Mark 19345b855937SHuang Ying memset(&stats, 0, sizeof(stats)); 1935e24f0b8fSChristoph Lameter 193642012e04SHuang Ying rc_gather = migrate_hugetlbs(from, get_new_page, put_new_page, private, 193742012e04SHuang Ying mode, reason, &stats, &ret_folios); 193842012e04SHuang Ying if (rc_gather < 0) 193995a402c3SChristoph Lameter goto out; 1940*fb3592c4SHuang Ying 1941*fb3592c4SHuang Ying if (mode == MIGRATE_ASYNC) 1942*fb3592c4SHuang Ying batch = NR_MAX_BATCHED_MIGRATION; 1943*fb3592c4SHuang Ying else 1944*fb3592c4SHuang Ying batch = 1; 194542012e04SHuang Ying again: 194642012e04SHuang Ying nr_pages = 0; 1947b20a3503SChristoph Lameter list_for_each_entry_safe(folio, folio2, from, lru) { 1948e5bfff8bSHuang Ying /* Retried hugetlb folios will be kept in list */ 1949e5bfff8bSHuang Ying if (folio_test_hugetlb(folio)) { 1950e5bfff8bSHuang Ying list_move_tail(&folio->lru, &ret_folios); 1951e5bfff8bSHuang Ying continue; 1952eaec4e63SHuang Ying } 1953f430893bSMiaohe Lin 195442012e04SHuang Ying nr_pages += folio_nr_pages(folio); 1955*fb3592c4SHuang Ying if (nr_pages >= batch) 1956e24f0b8fSChristoph Lameter break; 1957b20a3503SChristoph Lameter } 1958*fb3592c4SHuang Ying if (nr_pages >= batch) 1959*fb3592c4SHuang Ying list_cut_before(&folios, from, &folio2->lru); 196042012e04SHuang Ying else 196142012e04SHuang Ying list_splice_init(from, &folios); 196242012e04SHuang Ying rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private, 196342012e04SHuang Ying mode, reason, &ret_folios, &stats); 196442012e04SHuang Ying list_splice_tail_init(&folios, &ret_folios); 196542012e04SHuang Ying if (rc < 0) { 196642012e04SHuang Ying rc_gather = rc; 1967b20a3503SChristoph Lameter goto out; 1968b20a3503SChristoph Lameter } 196942012e04SHuang Ying rc_gather += rc; 197042012e04SHuang Ying if (!list_empty(from)) 197142012e04SHuang Ying goto again; 197295a402c3SChristoph Lameter out: 1973dd4ae78aSYang Shi /* 1974eaec4e63SHuang Ying * Put the permanent failure folio back to migration list, they 1975dd4ae78aSYang Shi * will be put back to the right list by the caller. 1976dd4ae78aSYang Shi */ 1977eaec4e63SHuang Ying list_splice(&ret_folios, from); 1978dd4ae78aSYang Shi 197903e5f82eSBaolin Wang /* 1980eaec4e63SHuang Ying * Return 0 in case all split folios of fail-to-migrate large folios 1981eaec4e63SHuang Ying * are migrated successfully. 
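/*
 * Illustrative aside - a standalone userspace sketch, not kernel code.
 * migrate_pages() above carves the source list into chunks by cumulative
 * page count - up to NR_MAX_BATCHED_MIGRATION base pages per chunk in
 * MIGRATE_ASYNC mode, effectively one folio at a time otherwise - and
 * feeds each chunk to migrate_pages_batch().  The sketch below shows the
 * same cut-by-weight loop over a plain array; the weights and the batch
 * limit are made-up numbers, not kernel values.
 */
#include <stdio.h>

static int process_chunk(const int *weights, int start, int end)
{
	int total = 0, i;

	for (i = start; i < end; i++)
		total += weights[i];
	printf("chunk [%d,%d) -> %d pages\n", start, end, total);
	return total;
}

int main(void)
{
	/* Per-folio page counts: a mix of base pages and large folios. */
	int weights[] = { 1, 512, 1, 1, 256, 1, 512 };
	int n = sizeof(weights) / sizeof(weights[0]);
	int batch = 512;	/* analogue of NR_MAX_BATCHED_MIGRATION */
	int start = 0, i, nr_pages = 0, migrated = 0;

	for (i = 0; i < n; i++) {
		nr_pages += weights[i];
		if (nr_pages >= batch) {	/* cut the list here */
			migrated += process_chunk(weights, start, i + 1);
			start = i + 1;
			nr_pages = 0;
		}
	}
	if (start < n)				/* whatever is left over */
		migrated += process_chunk(weights, start, n);

	printf("migrated %d pages in total\n", migrated);
	return 0;
}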
198203e5f82eSBaolin Wang */ 198303e5f82eSBaolin Wang if (list_empty(from)) 198442012e04SHuang Ying rc_gather = 0; 198503e5f82eSBaolin Wang 19865b855937SHuang Ying count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded); 19875b855937SHuang Ying count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages); 19885b855937SHuang Ying count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded); 19895b855937SHuang Ying count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed); 19905b855937SHuang Ying count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split); 19915b855937SHuang Ying trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages, 19925b855937SHuang Ying stats.nr_thp_succeeded, stats.nr_thp_failed, 19935b855937SHuang Ying stats.nr_thp_split, mode, reason); 19947b2a2d4aSMel Gorman 19955ac95884SYang Shi if (ret_succeeded) 19965b855937SHuang Ying *ret_succeeded = stats.nr_succeeded; 19975ac95884SYang Shi 199842012e04SHuang Ying return rc_gather; 1999b20a3503SChristoph Lameter } 2000b20a3503SChristoph Lameter 200119fc7bedSJoonsoo Kim struct page *alloc_migration_target(struct page *page, unsigned long private) 2002b4b38223SJoonsoo Kim { 2003ffe06786SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 200419fc7bedSJoonsoo Kim struct migration_target_control *mtc; 200519fc7bedSJoonsoo Kim gfp_t gfp_mask; 2006b4b38223SJoonsoo Kim unsigned int order = 0; 2007e37d3e83SSidhartha Kumar struct folio *hugetlb_folio = NULL; 2008ffe06786SMatthew Wilcox (Oracle) struct folio *new_folio = NULL; 200919fc7bedSJoonsoo Kim int nid; 201019fc7bedSJoonsoo Kim int zidx; 201119fc7bedSJoonsoo Kim 201219fc7bedSJoonsoo Kim mtc = (struct migration_target_control *)private; 201319fc7bedSJoonsoo Kim gfp_mask = mtc->gfp_mask; 201419fc7bedSJoonsoo Kim nid = mtc->nid; 201519fc7bedSJoonsoo Kim if (nid == NUMA_NO_NODE) 2016ffe06786SMatthew Wilcox (Oracle) nid = folio_nid(folio); 2017b4b38223SJoonsoo Kim 2018ffe06786SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio)) { 2019e51da3a9SSidhartha Kumar struct hstate *h = folio_hstate(folio); 2020d92bbc27SJoonsoo Kim 202119fc7bedSJoonsoo Kim gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); 2022e37d3e83SSidhartha Kumar hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid, 2023e37d3e83SSidhartha Kumar mtc->nmask, gfp_mask); 2024e37d3e83SSidhartha Kumar return &hugetlb_folio->page; 2025d92bbc27SJoonsoo Kim } 2026b4b38223SJoonsoo Kim 2027ffe06786SMatthew Wilcox (Oracle) if (folio_test_large(folio)) { 20289933a0c8SJoonsoo Kim /* 20299933a0c8SJoonsoo Kim * clear __GFP_RECLAIM to make the migration callback 20309933a0c8SJoonsoo Kim * consistent with regular THP allocations. 
20319933a0c8SJoonsoo Kim */ 20329933a0c8SJoonsoo Kim gfp_mask &= ~__GFP_RECLAIM; 2033b4b38223SJoonsoo Kim gfp_mask |= GFP_TRANSHUGE; 2034ffe06786SMatthew Wilcox (Oracle) order = folio_order(folio); 2035b4b38223SJoonsoo Kim } 2036ffe06786SMatthew Wilcox (Oracle) zidx = zone_idx(folio_zone(folio)); 203719fc7bedSJoonsoo Kim if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE) 2038b4b38223SJoonsoo Kim gfp_mask |= __GFP_HIGHMEM; 2039b4b38223SJoonsoo Kim 2040ffe06786SMatthew Wilcox (Oracle) new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask); 2041b4b38223SJoonsoo Kim 2042ffe06786SMatthew Wilcox (Oracle) return &new_folio->page; 2043b4b38223SJoonsoo Kim } 2044b4b38223SJoonsoo Kim 2045742755a1SChristoph Lameter #ifdef CONFIG_NUMA 2046742755a1SChristoph Lameter 2047a49bd4d7SMichal Hocko static int store_status(int __user *status, int start, int value, int nr) 2048742755a1SChristoph Lameter { 2049a49bd4d7SMichal Hocko while (nr-- > 0) { 2050a49bd4d7SMichal Hocko if (put_user(value, status + start)) 2051a49bd4d7SMichal Hocko return -EFAULT; 2052a49bd4d7SMichal Hocko start++; 2053a49bd4d7SMichal Hocko } 2054742755a1SChristoph Lameter 2055a49bd4d7SMichal Hocko return 0; 2056a49bd4d7SMichal Hocko } 2057742755a1SChristoph Lameter 2058a49bd4d7SMichal Hocko static int do_move_pages_to_node(struct mm_struct *mm, 2059a49bd4d7SMichal Hocko struct list_head *pagelist, int node) 2060a49bd4d7SMichal Hocko { 2061a49bd4d7SMichal Hocko int err; 2062a0976311SJoonsoo Kim struct migration_target_control mtc = { 2063a0976311SJoonsoo Kim .nid = node, 2064a0976311SJoonsoo Kim .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 2065a0976311SJoonsoo Kim }; 2066742755a1SChristoph Lameter 2067a0976311SJoonsoo Kim err = migrate_pages(pagelist, alloc_migration_target, NULL, 20685ac95884SYang Shi (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL); 2069a49bd4d7SMichal Hocko if (err) 2070a49bd4d7SMichal Hocko putback_movable_pages(pagelist); 2071a49bd4d7SMichal Hocko return err; 2072742755a1SChristoph Lameter } 2073742755a1SChristoph Lameter 2074742755a1SChristoph Lameter /* 2075a49bd4d7SMichal Hocko * Resolves the given address to a struct page, isolates it from the LRU and 2076a49bd4d7SMichal Hocko * puts it to the given pagelist. 2077e0153fc2SYang Shi * Returns: 2078e0153fc2SYang Shi * errno - if the page cannot be found/isolated 2079e0153fc2SYang Shi * 0 - when it doesn't have to be migrated because it is already on the 2080e0153fc2SYang Shi * target node 2081e0153fc2SYang Shi * 1 - when it has been queued 2082742755a1SChristoph Lameter */ 2083a49bd4d7SMichal Hocko static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, 2084a49bd4d7SMichal Hocko int node, struct list_head *pagelist, bool migrate_all) 2085742755a1SChristoph Lameter { 2086742755a1SChristoph Lameter struct vm_area_struct *vma; 2087742755a1SChristoph Lameter struct page *page; 2088a49bd4d7SMichal Hocko int err; 20899747b9e9SBaolin Wang bool isolated; 2090742755a1SChristoph Lameter 2091d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 2092742755a1SChristoph Lameter err = -EFAULT; 2093cb1c37b1SMiaohe Lin vma = vma_lookup(mm, addr); 2094cb1c37b1SMiaohe Lin if (!vma || !vma_migratable(vma)) 2095a49bd4d7SMichal Hocko goto out; 2096742755a1SChristoph Lameter 2097d899844eSKirill A. 
Shutemov /* FOLL_DUMP to ignore special (like zero) pages */ 209887d2762eSMiaohe Lin page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); 209989f5b7daSLinus Torvalds 210089f5b7daSLinus Torvalds err = PTR_ERR(page); 210189f5b7daSLinus Torvalds if (IS_ERR(page)) 2102a49bd4d7SMichal Hocko goto out; 210389f5b7daSLinus Torvalds 2104742755a1SChristoph Lameter err = -ENOENT; 2105f7091ed6SHaiyue Wang if (!page) 2106a49bd4d7SMichal Hocko goto out; 2107742755a1SChristoph Lameter 2108f7091ed6SHaiyue Wang if (is_zone_device_page(page)) 2109f7091ed6SHaiyue Wang goto out_putpage; 2110f7091ed6SHaiyue Wang 2111a49bd4d7SMichal Hocko err = 0; 2112a49bd4d7SMichal Hocko if (page_to_nid(page) == node) 2113a49bd4d7SMichal Hocko goto out_putpage; 2114742755a1SChristoph Lameter 2115742755a1SChristoph Lameter err = -EACCES; 2116a49bd4d7SMichal Hocko if (page_mapcount(page) > 1 && !migrate_all) 2117a49bd4d7SMichal Hocko goto out_putpage; 2118742755a1SChristoph Lameter 2119e632a938SNaoya Horiguchi if (PageHuge(page)) { 2120e8db67ebSNaoya Horiguchi if (PageHead(page)) { 21219747b9e9SBaolin Wang isolated = isolate_hugetlb(page_folio(page), pagelist); 21229747b9e9SBaolin Wang err = isolated ? 1 : -EBUSY; 2123e8db67ebSNaoya Horiguchi } 2124a49bd4d7SMichal Hocko } else { 2125a49bd4d7SMichal Hocko struct page *head; 2126e632a938SNaoya Horiguchi 2127e8db67ebSNaoya Horiguchi head = compound_head(page); 2128f7f9c00dSBaolin Wang isolated = isolate_lru_page(head); 2129f7f9c00dSBaolin Wang if (!isolated) { 2130f7f9c00dSBaolin Wang err = -EBUSY; 2131a49bd4d7SMichal Hocko goto out_putpage; 2132f7f9c00dSBaolin Wang } 2133a49bd4d7SMichal Hocko 2134e0153fc2SYang Shi err = 1; 2135a49bd4d7SMichal Hocko list_add_tail(&head->lru, pagelist); 2136e8db67ebSNaoya Horiguchi mod_node_page_state(page_pgdat(head), 21379de4f22aSHuang Ying NR_ISOLATED_ANON + page_is_file_lru(head), 21386c357848SMatthew Wilcox (Oracle) thp_nr_pages(head)); 21396d9c285aSKOSAKI Motohiro } 2140a49bd4d7SMichal Hocko out_putpage: 2141742755a1SChristoph Lameter /* 2142742755a1SChristoph Lameter * Either remove the duplicate refcount from 2143742755a1SChristoph Lameter * isolate_lru_page() or drop the page ref if it was 2144742755a1SChristoph Lameter * not isolated. 2145742755a1SChristoph Lameter */ 2146742755a1SChristoph Lameter put_page(page); 2147a49bd4d7SMichal Hocko out: 2148d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 2149742755a1SChristoph Lameter return err; 2150742755a1SChristoph Lameter } 2151742755a1SChristoph Lameter 21527ca8783aSWei Yang static int move_pages_and_store_status(struct mm_struct *mm, int node, 21537ca8783aSWei Yang struct list_head *pagelist, int __user *status, 21547ca8783aSWei Yang int start, int i, unsigned long nr_pages) 21557ca8783aSWei Yang { 21567ca8783aSWei Yang int err; 21577ca8783aSWei Yang 21585d7ae891SWei Yang if (list_empty(pagelist)) 21595d7ae891SWei Yang return 0; 21605d7ae891SWei Yang 21617ca8783aSWei Yang err = do_move_pages_to_node(mm, pagelist, node); 21627ca8783aSWei Yang if (err) { 21637ca8783aSWei Yang /* 21647ca8783aSWei Yang * Positive err means the number of failed 21657ca8783aSWei Yang * pages to migrate. Since we are going to 21667ca8783aSWei Yang * abort and return the number of non-migrated 2167ab9dd4f8SLong Li * pages, so need to include the rest of the 21687ca8783aSWei Yang * nr_pages that have not been attempted as 21697ca8783aSWei Yang * well. 
21707ca8783aSWei Yang */ 21717ca8783aSWei Yang if (err > 0) 2172a7504ed1SHuang Ying err += nr_pages - i; 21737ca8783aSWei Yang return err; 21747ca8783aSWei Yang } 21757ca8783aSWei Yang return store_status(status, start, node, i - start); 21767ca8783aSWei Yang } 21777ca8783aSWei Yang 2178742755a1SChristoph Lameter /* 21795e9a0f02SBrice Goglin * Migrate an array of page address onto an array of nodes and fill 21805e9a0f02SBrice Goglin * the corresponding array of status. 21815e9a0f02SBrice Goglin */ 21823268c63eSChristoph Lameter static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, 21835e9a0f02SBrice Goglin unsigned long nr_pages, 21845e9a0f02SBrice Goglin const void __user * __user *pages, 21855e9a0f02SBrice Goglin const int __user *nodes, 21865e9a0f02SBrice Goglin int __user *status, int flags) 21875e9a0f02SBrice Goglin { 2188a49bd4d7SMichal Hocko int current_node = NUMA_NO_NODE; 2189a49bd4d7SMichal Hocko LIST_HEAD(pagelist); 2190a49bd4d7SMichal Hocko int start, i; 2191a49bd4d7SMichal Hocko int err = 0, err1; 219235282a2dSBrice Goglin 2193361a2a22SMinchan Kim lru_cache_disable(); 219435282a2dSBrice Goglin 2195a49bd4d7SMichal Hocko for (i = start = 0; i < nr_pages; i++) { 21965e9a0f02SBrice Goglin const void __user *p; 2197a49bd4d7SMichal Hocko unsigned long addr; 21985e9a0f02SBrice Goglin int node; 21995e9a0f02SBrice Goglin 22003140a227SBrice Goglin err = -EFAULT; 2201a49bd4d7SMichal Hocko if (get_user(p, pages + i)) 2202a49bd4d7SMichal Hocko goto out_flush; 2203a49bd4d7SMichal Hocko if (get_user(node, nodes + i)) 2204a49bd4d7SMichal Hocko goto out_flush; 2205057d3389SAndrey Konovalov addr = (unsigned long)untagged_addr(p); 22065e9a0f02SBrice Goglin 22075e9a0f02SBrice Goglin err = -ENODEV; 22086f5a55f1SLinus Torvalds if (node < 0 || node >= MAX_NUMNODES) 2209a49bd4d7SMichal Hocko goto out_flush; 2210389162c2SLai Jiangshan if (!node_state(node, N_MEMORY)) 2211a49bd4d7SMichal Hocko goto out_flush; 22125e9a0f02SBrice Goglin 22135e9a0f02SBrice Goglin err = -EACCES; 22145e9a0f02SBrice Goglin if (!node_isset(node, task_nodes)) 2215a49bd4d7SMichal Hocko goto out_flush; 22165e9a0f02SBrice Goglin 2217a49bd4d7SMichal Hocko if (current_node == NUMA_NO_NODE) { 2218a49bd4d7SMichal Hocko current_node = node; 2219a49bd4d7SMichal Hocko start = i; 2220a49bd4d7SMichal Hocko } else if (node != current_node) { 22217ca8783aSWei Yang err = move_pages_and_store_status(mm, current_node, 22227ca8783aSWei Yang &pagelist, status, start, i, nr_pages); 2223a49bd4d7SMichal Hocko if (err) 2224a49bd4d7SMichal Hocko goto out; 2225a49bd4d7SMichal Hocko start = i; 2226a49bd4d7SMichal Hocko current_node = node; 22275e9a0f02SBrice Goglin } 22285e9a0f02SBrice Goglin 2229a49bd4d7SMichal Hocko /* 2230a49bd4d7SMichal Hocko * Errors in the page lookup or isolation are not fatal and we simply 2231a49bd4d7SMichal Hocko * report them via status 2232a49bd4d7SMichal Hocko */ 2233a49bd4d7SMichal Hocko err = add_page_for_migration(mm, addr, current_node, 2234a49bd4d7SMichal Hocko &pagelist, flags & MPOL_MF_MOVE_ALL); 2235e0153fc2SYang Shi 2236d08221a0SWei Yang if (err > 0) { 2237e0153fc2SYang Shi /* The page is successfully queued for migration */ 2238e0153fc2SYang Shi continue; 2239e0153fc2SYang Shi } 22403140a227SBrice Goglin 2241d08221a0SWei Yang /* 224265462462SJohn Hubbard * The move_pages() man page does not have an -EEXIST choice, so 224365462462SJohn Hubbard * use -EFAULT instead. 
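/*
 * Illustrative aside - a standalone userspace sketch, not kernel code.
 * do_pages_move() above batches consecutive requests that target the same
 * node onto one pagelist and only calls move_pages_and_store_status() when
 * the target node changes or the array ends.  The sketch below shows that
 * run-length batching over an array of (address, node) requests; the
 * request format and the flush function are hypothetical stand-ins.
 */
#include <stdio.h>

struct req { unsigned long addr; int node; };

/* Analogue of move_pages_and_store_status(): handle one same-node run. */
static void flush_run(const struct req *reqs, int start, int end, int node)
{
	printf("move %d page(s) [%d..%d) to node %d\n",
	       end - start, start, end, node);
}

int main(void)
{
	struct req reqs[] = {
		{ 0x1000, 0 }, { 0x2000, 0 }, { 0x3000, 1 },
		{ 0x4000, 1 }, { 0x5000, 1 }, { 0x6000, 0 },
	};
	int n = sizeof(reqs) / sizeof(reqs[0]);
	int i, start = 0, current_node = -1;	/* -1 plays NUMA_NO_NODE */

	for (i = 0; i < n; i++) {
		if (current_node == -1) {
			current_node = reqs[i].node;
			start = i;
		} else if (reqs[i].node != current_node) {
			flush_run(reqs, start, i, current_node);
			start = i;
			current_node = reqs[i].node;
		}
	}
	if (current_node != -1)		/* flush the final run */
		flush_run(reqs, start, n, current_node);
	return 0;
}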
224465462462SJohn Hubbard */
224565462462SJohn Hubbard if (err == -EEXIST)
224665462462SJohn Hubbard err = -EFAULT;
224765462462SJohn Hubbard
224865462462SJohn Hubbard /*
2249d08221a0SWei Yang * If the page is already on the target node (!err), store the
2250d08221a0SWei Yang * node, otherwise, store the err.
2251d08221a0SWei Yang */
2252d08221a0SWei Yang err = store_status(status, i, err ? : current_node, 1);
2253a49bd4d7SMichal Hocko if (err)
2254a49bd4d7SMichal Hocko goto out_flush;
22553140a227SBrice Goglin
22567ca8783aSWei Yang err = move_pages_and_store_status(mm, current_node, &pagelist,
22577ca8783aSWei Yang status, start, i, nr_pages);
2258a7504ed1SHuang Ying if (err) {
2259a7504ed1SHuang Ying /* We have accounted for page i */
2260a7504ed1SHuang Ying if (err > 0)
2261a7504ed1SHuang Ying err--;
2262a49bd4d7SMichal Hocko goto out;
2263a7504ed1SHuang Ying }
2264a49bd4d7SMichal Hocko current_node = NUMA_NO_NODE;
22653140a227SBrice Goglin }
2266a49bd4d7SMichal Hocko out_flush:
2267a49bd4d7SMichal Hocko /* Make sure we do not overwrite the existing error */
22687ca8783aSWei Yang err1 = move_pages_and_store_status(mm, current_node, &pagelist,
22697ca8783aSWei Yang status, start, i, nr_pages);
2270dfe9aa23SWei Yang if (err >= 0)
2271a49bd4d7SMichal Hocko err = err1;
22725e9a0f02SBrice Goglin out:
2273361a2a22SMinchan Kim lru_cache_enable();
22745e9a0f02SBrice Goglin return err;
22755e9a0f02SBrice Goglin }
22765e9a0f02SBrice Goglin
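/*
 * Illustrative userspace sketch (not part of this kernel source): the
 * do_pages_move() loop above backs the nodes != NULL case of the
 * move_pages(2) system call.  It batches consecutive pages headed for the
 * same node and reports a per-page result in status[] (the node the page
 * ended up on, or a negative errno such as -EFAULT, -ENOENT, -EACCES or
 * -EBUSY).  A minimal caller, assuming a system with a node 1 and linking
 * against libnuma for the <numaif.h> move_pages() wrapper:
 */
#include <numaif.h>		/* move_pages(), MPOL_MF_MOVE */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	enum { NPAGES = 4 };
	void *pages[NPAGES];
	int nodes[NPAGES], status[NPAGES];
	char *buf = aligned_alloc(page_size, NPAGES * page_size);

	if (!buf)
		return 1;
	memset(buf, 0, NPAGES * page_size);	/* fault the pages in first */
	for (int i = 0; i < NPAGES; i++) {
		pages[i] = buf + i * page_size;
		nodes[i] = 1;			/* hypothetical target node */
	}

	/* pid 0 means "the calling process"; see kernel_move_pages() below. */
	long ret = move_pages(0, NPAGES, pages, nodes, status, MPOL_MF_MOVE);
	if (ret < 0) {
		perror("move_pages");
		return 1;
	}
	for (int i = 0; i < NPAGES; i++)
		printf("page %d: status %d (node on success, -errno on failure)\n",
		       i, status[i]);
	return 0;
}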
22775e9a0f02SBrice Goglin /*
22782f007e74SBrice Goglin * Determine the nodes of an array of pages and store them in an array of status.
2279742755a1SChristoph Lameter */
228080bba129SBrice Goglin static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
228180bba129SBrice Goglin const void __user **pages, int *status)
2282742755a1SChristoph Lameter {
22832f007e74SBrice Goglin unsigned long i;
2284742755a1SChristoph Lameter
2285d8ed45c5SMichel Lespinasse mmap_read_lock(mm);
22862f007e74SBrice Goglin
22872f007e74SBrice Goglin for (i = 0; i < nr_pages; i++) {
228880bba129SBrice Goglin unsigned long addr = (unsigned long)(*pages);
22892f007e74SBrice Goglin struct vm_area_struct *vma;
22902f007e74SBrice Goglin struct page *page;
2291c095adbcSKOSAKI Motohiro int err = -EFAULT;
22922f007e74SBrice Goglin
2293059b8b48SLiam Howlett vma = vma_lookup(mm, addr);
2294059b8b48SLiam Howlett if (!vma)
2295742755a1SChristoph Lameter goto set_status;
2296742755a1SChristoph Lameter
2297d899844eSKirill A. Shutemov /* FOLL_DUMP to ignore special (like zero) pages */
229816fd6b31SBaolin Wang page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
229989f5b7daSLinus Torvalds
230089f5b7daSLinus Torvalds err = PTR_ERR(page);
230189f5b7daSLinus Torvalds if (IS_ERR(page))
230289f5b7daSLinus Torvalds goto set_status;
230389f5b7daSLinus Torvalds
2304f7091ed6SHaiyue Wang err = -ENOENT;
2305f7091ed6SHaiyue Wang if (!page)
2306f7091ed6SHaiyue Wang goto set_status;
2307f7091ed6SHaiyue Wang
2308f7091ed6SHaiyue Wang if (!is_zone_device_page(page))
23094cd61484SMiaohe Lin err = page_to_nid(page);
2310f7091ed6SHaiyue Wang
23114cd61484SMiaohe Lin put_page(page);
2312742755a1SChristoph Lameter set_status:
231380bba129SBrice Goglin *status = err;
231480bba129SBrice Goglin
231580bba129SBrice Goglin pages++;
231680bba129SBrice Goglin status++;
231780bba129SBrice Goglin }
231880bba129SBrice Goglin
2319d8ed45c5SMichel Lespinasse mmap_read_unlock(mm);
232080bba129SBrice Goglin }
232180bba129SBrice Goglin
23225b1b561bSArnd Bergmann static int get_compat_pages_array(const void __user *chunk_pages[],
23235b1b561bSArnd Bergmann const void __user * __user *pages,
23245b1b561bSArnd Bergmann unsigned long chunk_nr)
23255b1b561bSArnd Bergmann {
23265b1b561bSArnd Bergmann compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
23275b1b561bSArnd Bergmann compat_uptr_t p;
23285b1b561bSArnd Bergmann int i;
23295b1b561bSArnd Bergmann
23305b1b561bSArnd Bergmann for (i = 0; i < chunk_nr; i++) {
23315b1b561bSArnd Bergmann if (get_user(p, pages32 + i))
23325b1b561bSArnd Bergmann return -EFAULT;
23335b1b561bSArnd Bergmann chunk_pages[i] = compat_ptr(p);
23345b1b561bSArnd Bergmann }
23355b1b561bSArnd Bergmann
23365b1b561bSArnd Bergmann return 0;
23375b1b561bSArnd Bergmann }
23385b1b561bSArnd Bergmann
233980bba129SBrice Goglin /*
234080bba129SBrice Goglin * Determine the nodes of a user array of pages and store them in
234180bba129SBrice Goglin * a user array of status.
234280bba129SBrice Goglin */
234380bba129SBrice Goglin static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
234480bba129SBrice Goglin const void __user * __user *pages,
234580bba129SBrice Goglin int __user *status)
234680bba129SBrice Goglin {
23473eefb826SMiaohe Lin #define DO_PAGES_STAT_CHUNK_NR 16UL
234880bba129SBrice Goglin const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
234980bba129SBrice Goglin int chunk_status[DO_PAGES_STAT_CHUNK_NR];
235080bba129SBrice Goglin
235187b8d1adSH. Peter Anvin while (nr_pages) {
23523eefb826SMiaohe Lin unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
235387b8d1adSH. Peter Anvin
23545b1b561bSArnd Bergmann if (in_compat_syscall()) {
23555b1b561bSArnd Bergmann if (get_compat_pages_array(chunk_pages, pages,
23565b1b561bSArnd Bergmann chunk_nr))
235787b8d1adSH. Peter Anvin break;
23585b1b561bSArnd Bergmann } else {
23595b1b561bSArnd Bergmann if (copy_from_user(chunk_pages, pages,
23605b1b561bSArnd Bergmann chunk_nr * sizeof(*chunk_pages)))
23615b1b561bSArnd Bergmann break;
23625b1b561bSArnd Bergmann }
236380bba129SBrice Goglin
236480bba129SBrice Goglin do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
236580bba129SBrice Goglin
236687b8d1adSH. Peter Anvin if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
236787b8d1adSH. Peter Anvin break;
2368742755a1SChristoph Lameter
236987b8d1adSH. Peter Anvin pages += chunk_nr;
237087b8d1adSH. Peter Anvin status += chunk_nr;
237187b8d1adSH. Peter Anvin nr_pages -= chunk_nr;
237287b8d1adSH. Peter Anvin }
237387b8d1adSH. Peter Anvin return nr_pages ? -EFAULT : 0;
2374742755a1SChristoph Lameter }
2375742755a1SChristoph Lameter
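/*
 * Illustrative userspace sketch (not part of this kernel source): when the
 * nodes argument of move_pages(2) is NULL, kernel_move_pages() below takes
 * the do_pages_stat() path above and, without migrating anything, reports
 * for every address the node its backing page currently resides on (or a
 * negative errno).  A minimal query, assuming the libnuma <numaif.h>
 * wrapper:
 */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	char *buf = aligned_alloc(page_size, page_size);
	void *pages[1] = { buf };
	int status[1];

	if (!buf)
		return 1;
	buf[0] = 1;	/* touch the page so it is actually allocated */

	/* nodes == NULL: no migration, just report placement via status[]. */
	if (move_pages(0, 1, pages, NULL, status, 0) == 0)
		printf("page resides on node %d (negative means -errno)\n",
		       status[0]);
	else
		perror("move_pages");
	return 0;
}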
23764dc200ceSMiaohe Lin static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
23774dc200ceSMiaohe Lin {
23784dc200ceSMiaohe Lin struct task_struct *task;
23794dc200ceSMiaohe Lin struct mm_struct *mm;
23804dc200ceSMiaohe Lin
23814dc200ceSMiaohe Lin /*
23824dc200ceSMiaohe Lin * There is no need to check if current process has the right to modify
23834dc200ceSMiaohe Lin * the specified process when they are the same.
23844dc200ceSMiaohe Lin */
23854dc200ceSMiaohe Lin if (!pid) {
23864dc200ceSMiaohe Lin mmget(current->mm);
23874dc200ceSMiaohe Lin *mem_nodes = cpuset_mems_allowed(current);
23884dc200ceSMiaohe Lin return current->mm;
23894dc200ceSMiaohe Lin }
23904dc200ceSMiaohe Lin
23914dc200ceSMiaohe Lin /* Find the mm_struct */
23924dc200ceSMiaohe Lin rcu_read_lock();
23934dc200ceSMiaohe Lin task = find_task_by_vpid(pid);
23944dc200ceSMiaohe Lin if (!task) {
23954dc200ceSMiaohe Lin rcu_read_unlock();
23964dc200ceSMiaohe Lin return ERR_PTR(-ESRCH);
23974dc200ceSMiaohe Lin }
23984dc200ceSMiaohe Lin get_task_struct(task);
23994dc200ceSMiaohe Lin
24004dc200ceSMiaohe Lin /*
24014dc200ceSMiaohe Lin * Check if this process has the right to modify the specified
24024dc200ceSMiaohe Lin * process. Use the regular "ptrace_may_access()" checks.
24034dc200ceSMiaohe Lin */
24044dc200ceSMiaohe Lin if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
24054dc200ceSMiaohe Lin rcu_read_unlock();
24064dc200ceSMiaohe Lin mm = ERR_PTR(-EPERM);
24074dc200ceSMiaohe Lin goto out;
24084dc200ceSMiaohe Lin }
24094dc200ceSMiaohe Lin rcu_read_unlock();
24104dc200ceSMiaohe Lin
24114dc200ceSMiaohe Lin mm = ERR_PTR(security_task_movememory(task));
24124dc200ceSMiaohe Lin if (IS_ERR(mm))
24134dc200ceSMiaohe Lin goto out;
24144dc200ceSMiaohe Lin *mem_nodes = cpuset_mems_allowed(task);
24154dc200ceSMiaohe Lin mm = get_task_mm(task);
24164dc200ceSMiaohe Lin out:
24174dc200ceSMiaohe Lin put_task_struct(task);
24184dc200ceSMiaohe Lin if (!mm)
24194dc200ceSMiaohe Lin mm = ERR_PTR(-EINVAL);
24204dc200ceSMiaohe Lin return mm;
24214dc200ceSMiaohe Lin }
24224dc200ceSMiaohe Lin
2423742755a1SChristoph Lameter /*
2424742755a1SChristoph Lameter * Move a list of pages in the address space of the currently executing
2425742755a1SChristoph Lameter * process.
2426742755a1SChristoph Lameter */
24277addf443SDominik Brodowski static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
24287addf443SDominik Brodowski const void __user * __user *pages,
24297addf443SDominik Brodowski const int __user *nodes,
24307addf443SDominik Brodowski int __user *status, int flags)
2431742755a1SChristoph Lameter {
2432742755a1SChristoph Lameter struct mm_struct *mm;
24335e9a0f02SBrice Goglin int err;
24343268c63eSChristoph Lameter nodemask_t task_nodes;
2435742755a1SChristoph Lameter
2436742755a1SChristoph Lameter /* Check flags */
2437742755a1SChristoph Lameter if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2438742755a1SChristoph Lameter return -EINVAL;
2439742755a1SChristoph Lameter
2440742755a1SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2441742755a1SChristoph Lameter return -EPERM;
2442742755a1SChristoph Lameter
24434dc200ceSMiaohe Lin mm = find_mm_struct(pid, &task_nodes);
24444dc200ceSMiaohe Lin if (IS_ERR(mm))
24454dc200ceSMiaohe Lin return PTR_ERR(mm);
24466e8b09eaSSasha Levin
24473268c63eSChristoph Lameter if (nodes)
24483268c63eSChristoph Lameter err = do_pages_move(mm, task_nodes, nr_pages, pages,
24493268c63eSChristoph Lameter nodes, status, flags);
24503268c63eSChristoph Lameter else
24515e9a0f02SBrice Goglin err = do_pages_stat(mm, nr_pages, pages, status);
24523268c63eSChristoph Lameter
24533268c63eSChristoph Lameter mmput(mm);
24543268c63eSChristoph Lameter return err;
2455742755a1SChristoph Lameter }
2456742755a1SChristoph Lameter
24577addf443SDominik Brodowski SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
24587addf443SDominik Brodowski const void __user * __user *, pages,
24597addf443SDominik Brodowski const int __user *, nodes,
24607addf443SDominik Brodowski int __user *, status, int, flags)
24617addf443SDominik Brodowski {
24627addf443SDominik Brodowski return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
24637addf443SDominik Brodowski }
24647addf443SDominik Brodowski
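/*
 * Illustrative userspace sketch (not part of this kernel source): the
 * SYSCALL_DEFINE6() above is also reachable without libnuma through
 * syscall(2).  Moving pages that are mapped by more than one process needs
 * MPOL_MF_MOVE_ALL, which kernel_move_pages() only accepts from callers
 * with CAP_SYS_NICE; the pid, address and destination node below are
 * made-up values for illustration only.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#ifndef MPOL_MF_MOVE_ALL
#define MPOL_MF_MOVE_ALL (1 << 2)	/* matches the uapi <linux/mempolicy.h> value */
#endif

int main(void)
{
	pid_t target_pid = 1234;			/* hypothetical target process */
	void *pages[1] = { (void *)0x7f0000000000UL };	/* hypothetical address in it */
	int nodes[1] = { 0 };				/* hypothetical destination node */
	int status[1];

	long ret = syscall(SYS_move_pages, (long)target_pid, 1UL, pages, nodes,
			   status, MPOL_MF_MOVE_ALL);
	if (ret < 0)
		fprintf(stderr, "move_pages: %s (expect EPERM without CAP_SYS_NICE)\n",
			strerror(errno));
	else
		printf("status[0] = %d\n", status[0]);
	return 0;
}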
24657039e1dbSPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
24667039e1dbSPeter Zijlstra /*
24677039e1dbSPeter Zijlstra * Returns true if this is a safe migration target node for misplaced NUMA
2468bc53008eSWei Yang * pages. Currently it only checks the watermarks which is crude.
24697039e1dbSPeter Zijlstra */
24707039e1dbSPeter Zijlstra static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
24713abef4e6SMel Gorman unsigned long nr_migrate_pages)
24727039e1dbSPeter Zijlstra {
24737039e1dbSPeter Zijlstra int z;
2474599d0c95SMel Gorman
24757039e1dbSPeter Zijlstra for (z = pgdat->nr_zones - 1; z >= 0; z--) {
24767039e1dbSPeter Zijlstra struct zone *zone = pgdat->node_zones + z;
24777039e1dbSPeter Zijlstra
2478bc53008eSWei Yang if (!managed_zone(zone))
24797039e1dbSPeter Zijlstra continue;
24807039e1dbSPeter Zijlstra
24817039e1dbSPeter Zijlstra /* Avoid waking kswapd by allocating pages_to_migrate pages. */
24827039e1dbSPeter Zijlstra if (!zone_watermark_ok(zone, 0,
24837039e1dbSPeter Zijlstra high_wmark_pages(zone) +
24847039e1dbSPeter Zijlstra nr_migrate_pages,
2485bfe9d006SHuang Ying ZONE_MOVABLE, 0))
24867039e1dbSPeter Zijlstra continue;
24877039e1dbSPeter Zijlstra return true;
24887039e1dbSPeter Zijlstra }
24897039e1dbSPeter Zijlstra return false;
24907039e1dbSPeter Zijlstra }
24917039e1dbSPeter Zijlstra
24927039e1dbSPeter Zijlstra static struct page *alloc_misplaced_dst_page(struct page *page,
2493666feb21SMichal Hocko unsigned long data)
24947039e1dbSPeter Zijlstra {
24957039e1dbSPeter Zijlstra int nid = (int) data;
2496c185e494SMatthew Wilcox (Oracle) int order = compound_order(page);
2497c185e494SMatthew Wilcox (Oracle) gfp_t gfp = __GFP_THISNODE;
2498c185e494SMatthew Wilcox (Oracle) struct folio *new;
24997039e1dbSPeter Zijlstra
2500c185e494SMatthew Wilcox (Oracle) if (order > 0)
2501c185e494SMatthew Wilcox (Oracle) gfp |= GFP_TRANSHUGE_LIGHT;
2502c185e494SMatthew Wilcox (Oracle) else {
2503c185e494SMatthew Wilcox (Oracle) gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2504c185e494SMatthew Wilcox (Oracle) __GFP_NOWARN;
2505c185e494SMatthew Wilcox (Oracle) gfp &= ~__GFP_RECLAIM;
25067039e1dbSPeter Zijlstra }
2507c185e494SMatthew Wilcox (Oracle) new = __folio_alloc_node(gfp, order, nid);
25087039e1dbSPeter Zijlstra
2509c185e494SMatthew Wilcox (Oracle) return &new->page;
2510c5b5a3ddSYang Shi }
2511c5b5a3ddSYang Shi
25121c30e017SMel Gorman static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2513b32967ffSMel Gorman {
25142b9b624fSBaolin Wang int nr_pages = thp_nr_pages(page);
2515c574bbe9SHuang Ying int order = compound_order(page);
2516b32967ffSMel Gorman
2517c574bbe9SHuang Ying VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
25183abef4e6SMel Gorman
2519662aeea7SYang Shi /* Do not migrate THP mapped by multiple processes */
2520662aeea7SYang Shi if (PageTransHuge(page) && total_mapcount(page) > 1)
2521662aeea7SYang Shi return 0;
2522662aeea7SYang Shi
2523b32967ffSMel Gorman /* Avoid migrating to a node that is nearly full */
2524c574bbe9SHuang Ying if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2525c574bbe9SHuang Ying int z;
2526c574bbe9SHuang Ying
2527c574bbe9SHuang Ying if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2528340ef390SHugh Dickins return 0;
2529c574bbe9SHuang Ying for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2530bc53008eSWei Yang if (managed_zone(pgdat->node_zones + z))
2531c574bbe9SHuang Ying break;
2532c574bbe9SHuang Ying }
2533c574bbe9SHuang Ying wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
2534c574bbe9SHuang Ying return 0;
2535c574bbe9SHuang Ying }
2536b32967ffSMel Gorman
2537f7f9c00dSBaolin Wang if (!isolate_lru_page(page))
2538340ef390SHugh Dickins return 0;
2539340ef390SHugh Dickins
2540b75454e1SMiaohe Lin mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
25412b9b624fSBaolin Wang nr_pages);
2542b32967ffSMel Gorman
2543b32967ffSMel Gorman /*
2544340ef390SHugh Dickins * Isolating the page has taken another reference, so the
2545340ef390SHugh Dickins * caller's reference can be safely dropped without the page
2546340ef390SHugh Dickins * disappearing underneath us during migration.
2547b32967ffSMel Gorman */
2548b32967ffSMel Gorman put_page(page);
2549340ef390SHugh Dickins return 1;
2550b32967ffSMel Gorman }
2551b32967ffSMel Gorman
2552a8f60772SMel Gorman /*
25537039e1dbSPeter Zijlstra * Attempt to migrate a misplaced page to the specified destination
25547039e1dbSPeter Zijlstra * node. Caller is expected to have an elevated reference count on
25557039e1dbSPeter Zijlstra * the page that will be dropped by this function before returning.
25567039e1dbSPeter Zijlstra */
25571bc115d8SMel Gorman int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
25581bc115d8SMel Gorman int node)
25597039e1dbSPeter Zijlstra {
2560a8f60772SMel Gorman pg_data_t *pgdat = NODE_DATA(node);
2561340ef390SHugh Dickins int isolated;
2562b32967ffSMel Gorman int nr_remaining;
2563e39bb6beSHuang Ying unsigned int nr_succeeded;
25647039e1dbSPeter Zijlstra LIST_HEAD(migratepages);
2565b5916c02SAneesh Kumar K.V int nr_pages = thp_nr_pages(page);
2566c5b5a3ddSYang Shi
2567c5b5a3ddSYang Shi /*
25681bc115d8SMel Gorman * Don't migrate file pages that are mapped in multiple processes
25691bc115d8SMel Gorman * with execute permissions as they are probably shared libraries.
25707039e1dbSPeter Zijlstra */
25717ee820eeSMiaohe Lin if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
25727ee820eeSMiaohe Lin (vma->vm_flags & VM_EXEC))
25737039e1dbSPeter Zijlstra goto out;
25747039e1dbSPeter Zijlstra
2575a8f60772SMel Gorman /*
257609a913a7SMel Gorman * Also do not migrate dirty pages as not all filesystems can move
257709a913a7SMel Gorman * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
257809a913a7SMel Gorman */
25799de4f22aSHuang Ying if (page_is_file_lru(page) && PageDirty(page))
258009a913a7SMel Gorman goto out;
258109a913a7SMel Gorman
2582b32967ffSMel Gorman isolated = numamigrate_isolate_page(pgdat, page);
2583b32967ffSMel Gorman if (!isolated)
25847039e1dbSPeter Zijlstra goto out;
25857039e1dbSPeter Zijlstra
25867039e1dbSPeter Zijlstra list_add(&page->lru, &migratepages);
2587c185e494SMatthew Wilcox (Oracle) nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
2588c185e494SMatthew Wilcox (Oracle) NULL, node, MIGRATE_ASYNC,
2589c185e494SMatthew Wilcox (Oracle) MR_NUMA_MISPLACED, &nr_succeeded);
25907039e1dbSPeter Zijlstra if (nr_remaining) {
259159c82b70SJoonsoo Kim if (!list_empty(&migratepages)) {
259259c82b70SJoonsoo Kim list_del(&page->lru);
2593c5fc5c3aSYang Shi mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2594c5fc5c3aSYang Shi page_is_file_lru(page), -nr_pages);
259559c82b70SJoonsoo Kim putback_lru_page(page);
259659c82b70SJoonsoo Kim }
25977039e1dbSPeter Zijlstra isolated = 0;
2598e39bb6beSHuang Ying }
2599e39bb6beSHuang Ying if (nr_succeeded) {
2600e39bb6beSHuang Ying count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2601e39bb6beSHuang Ying if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
2602e39bb6beSHuang Ying mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2603e39bb6beSHuang Ying nr_succeeded);
2604e39bb6beSHuang Ying }
26057039e1dbSPeter Zijlstra BUG_ON(!list_empty(&migratepages));
26067039e1dbSPeter Zijlstra return isolated;
2607340ef390SHugh Dickins
2608340ef390SHugh Dickins out:
2609340ef390SHugh Dickins put_page(page);
2610340ef390SHugh Dickins return 0;
26117039e1dbSPeter Zijlstra }
2612220018d3SMel Gorman #endif /* CONFIG_NUMA_BALANCING */
26137d6e2d96SOscar Salvador #endif /* CONFIG_NUMA */
2614
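/*
 * Illustrative userspace sketch (not part of this kernel source): the
 * NUMA-balancing path above accounts successful migrations with
 * count_vm_numa_events(NUMA_PAGE_MIGRATE, ...) and, for promotions to a
 * top-tier node, PGPROMOTE_SUCCESS.  On a kernel built with
 * CONFIG_NUMA_BALANCING both counters appear in /proc/vmstat, so the effect
 * of migrate_misplaced_page() can be observed from userspace:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "numa_pages_migrated", 19) ||
		    !strncmp(line, "pgpromote_success", 17))
			fputs(line, stdout);	/* print the two migration counters */
	}
	fclose(f);
	return 0;
}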