1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0 2b20a3503SChristoph Lameter /* 314e0f9bcSHugh Dickins * Memory Migration functionality - linux/mm/migrate.c 4b20a3503SChristoph Lameter * 5b20a3503SChristoph Lameter * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter 6b20a3503SChristoph Lameter * 7b20a3503SChristoph Lameter * Page migration was first developed in the context of the memory hotplug 8b20a3503SChristoph Lameter * project. The main authors of the migration code are: 9b20a3503SChristoph Lameter * 10b20a3503SChristoph Lameter * IWAMOTO Toshihiro <iwamoto@valinux.co.jp> 11b20a3503SChristoph Lameter * Hirokazu Takahashi <taka@valinux.co.jp> 12b20a3503SChristoph Lameter * Dave Hansen <haveblue@us.ibm.com> 13cde53535SChristoph Lameter * Christoph Lameter 14b20a3503SChristoph Lameter */ 15b20a3503SChristoph Lameter 16b20a3503SChristoph Lameter #include <linux/migrate.h> 17b95f1b31SPaul Gortmaker #include <linux/export.h> 18b20a3503SChristoph Lameter #include <linux/swap.h> 190697212aSChristoph Lameter #include <linux/swapops.h> 20b20a3503SChristoph Lameter #include <linux/pagemap.h> 21e23ca00bSChristoph Lameter #include <linux/buffer_head.h> 22b20a3503SChristoph Lameter #include <linux/mm_inline.h> 23b488893aSPavel Emelyanov #include <linux/nsproxy.h> 24b20a3503SChristoph Lameter #include <linux/pagevec.h> 25e9995ef9SHugh Dickins #include <linux/ksm.h> 26b20a3503SChristoph Lameter #include <linux/rmap.h> 27b20a3503SChristoph Lameter #include <linux/topology.h> 28b20a3503SChristoph Lameter #include <linux/cpu.h> 29b20a3503SChristoph Lameter #include <linux/cpuset.h> 3004e62a29SChristoph Lameter #include <linux/writeback.h> 31742755a1SChristoph Lameter #include <linux/mempolicy.h> 32742755a1SChristoph Lameter #include <linux/vmalloc.h> 3386c3a764SDavid Quigley #include <linux/security.h> 3442cb14b1SHugh Dickins #include <linux/backing-dev.h> 35bda807d4SMinchan Kim #include <linux/compaction.h> 364f5ca265SAdrian Bunk #include <linux/syscalls.h> 377addf443SDominik Brodowski #include <linux/compat.h> 38290408d4SNaoya Horiguchi #include <linux/hugetlb.h> 398e6ac7faSAneesh Kumar K.V #include <linux/hugetlb_cgroup.h> 405a0e3ad6STejun Heo #include <linux/gfp.h> 41df6ad698SJérôme Glisse #include <linux/pfn_t.h> 42a5430ddaSJérôme Glisse #include <linux/memremap.h> 438315ada7SJérôme Glisse #include <linux/userfaultfd_k.h> 44bf6bddf1SRafael Aquini #include <linux/balloon_compaction.h> 4533c3fc71SVladimir Davydov #include <linux/page_idle.h> 46d435edcaSVlastimil Babka #include <linux/page_owner.h> 476e84f315SIngo Molnar #include <linux/sched/mm.h> 48197e7e52SLinus Torvalds #include <linux/ptrace.h> 4934290e2cSRalph Campbell #include <linux/oom.h> 50884a6e5dSDave Hansen #include <linux/memory.h> 51ac16ec83SBaolin Wang #include <linux/random.h> 52c574bbe9SHuang Ying #include <linux/sched/sysctl.h> 53b20a3503SChristoph Lameter 540d1836c3SMichal Nazarewicz #include <asm/tlbflush.h> 550d1836c3SMichal Nazarewicz 567b2a2d4aSMel Gorman #include <trace/events/migrate.h> 577b2a2d4aSMel Gorman 58b20a3503SChristoph Lameter #include "internal.h" 59b20a3503SChristoph Lameter 609e5bcd61SYisheng Xie int isolate_movable_page(struct page *page, isolate_mode_t mode) 61bda807d4SMinchan Kim { 62bda807d4SMinchan Kim struct address_space *mapping; 63bda807d4SMinchan Kim 64bda807d4SMinchan Kim /* 65bda807d4SMinchan Kim * Avoid burning cycles with pages that are yet under __free_pages(), 66bda807d4SMinchan Kim * or just got freed under us. 
67bda807d4SMinchan Kim * 68bda807d4SMinchan Kim * In case we 'win' a race for a movable page being freed under us and 69bda807d4SMinchan Kim * raise its refcount preventing __free_pages() from doing its job, 70bda807d4SMinchan Kim * the put_page() at the end of this block will take care of 71bda807d4SMinchan Kim * releasing this page, thus avoiding a nasty leakage. 72bda807d4SMinchan Kim */ 73bda807d4SMinchan Kim if (unlikely(!get_page_unless_zero(page))) 74bda807d4SMinchan Kim goto out; 75bda807d4SMinchan Kim 76bda807d4SMinchan Kim /* 77bda807d4SMinchan Kim * Check PageMovable before holding a PG_lock because page's owner 78bda807d4SMinchan Kim * assumes nobody touches the PG_lock of a newly allocated page, 798bb4e7a2SWei Yang * so unconditionally grabbing the lock ruins page's owner side. 80bda807d4SMinchan Kim */ 81bda807d4SMinchan Kim if (unlikely(!__PageMovable(page))) 82bda807d4SMinchan Kim goto out_putpage; 83bda807d4SMinchan Kim /* 84bda807d4SMinchan Kim * As movable pages are not isolated from LRU lists, concurrent 85bda807d4SMinchan Kim * compaction threads can race against page migration functions 86bda807d4SMinchan Kim * as well as against a page being released. 87bda807d4SMinchan Kim * 88bda807d4SMinchan Kim * In order to avoid having an already isolated movable page 89bda807d4SMinchan Kim * being (wrongly) re-isolated while it is under migration, 90bda807d4SMinchan Kim * or to avoid attempting to isolate pages being released, 91bda807d4SMinchan Kim * let's be sure we have the page lock 92bda807d4SMinchan Kim * before proceeding with the movable page isolation steps. 93bda807d4SMinchan Kim */ 94bda807d4SMinchan Kim if (unlikely(!trylock_page(page))) 95bda807d4SMinchan Kim goto out_putpage; 96bda807d4SMinchan Kim 97bda807d4SMinchan Kim if (!PageMovable(page) || PageIsolated(page)) 98bda807d4SMinchan Kim goto out_no_isolated; 99bda807d4SMinchan Kim 100bda807d4SMinchan Kim mapping = page_mapping(page); 101bda807d4SMinchan Kim VM_BUG_ON_PAGE(!mapping, page); 102bda807d4SMinchan Kim 103bda807d4SMinchan Kim if (!mapping->a_ops->isolate_page(page, mode)) 104bda807d4SMinchan Kim goto out_no_isolated; 105bda807d4SMinchan Kim 106bda807d4SMinchan Kim /* Driver shouldn't use PG_isolated bit of page->flags */ 107bda807d4SMinchan Kim WARN_ON_ONCE(PageIsolated(page)); 108356ea386Sandrew.yang SetPageIsolated(page); 109bda807d4SMinchan Kim unlock_page(page); 110bda807d4SMinchan Kim 1119e5bcd61SYisheng Xie return 0; 112bda807d4SMinchan Kim 113bda807d4SMinchan Kim out_no_isolated: 114bda807d4SMinchan Kim unlock_page(page); 115bda807d4SMinchan Kim out_putpage: 116bda807d4SMinchan Kim put_page(page); 117bda807d4SMinchan Kim out: 1189e5bcd61SYisheng Xie return -EBUSY; 119bda807d4SMinchan Kim } 120bda807d4SMinchan Kim 121606a6f71SMiaohe Lin static void putback_movable_page(struct page *page) 122bda807d4SMinchan Kim { 123bda807d4SMinchan Kim struct address_space *mapping; 124bda807d4SMinchan Kim 125bda807d4SMinchan Kim mapping = page_mapping(page); 126bda807d4SMinchan Kim mapping->a_ops->putback_page(page); 127356ea386Sandrew.yang ClearPageIsolated(page); 128bda807d4SMinchan Kim } 129bda807d4SMinchan Kim 130b20a3503SChristoph Lameter /* 1315733c7d1SRafael Aquini * Put previously isolated pages back onto the appropriate lists 1325733c7d1SRafael Aquini * from where they were once taken off for compaction/migration. 1335733c7d1SRafael Aquini * 13459c82b70SJoonsoo Kim * This function shall be used whenever the isolated pageset has been 13559c82b70SJoonsoo Kim * built from lru, balloon and hugetlbfs pages.
See isolate_migratepages_range() 1367ce82f4cSMiaohe Lin * and isolate_hugetlb(). 1375733c7d1SRafael Aquini */ 1385733c7d1SRafael Aquini void putback_movable_pages(struct list_head *l) 1395733c7d1SRafael Aquini { 1405733c7d1SRafael Aquini struct page *page; 1415733c7d1SRafael Aquini struct page *page2; 1425733c7d1SRafael Aquini 1435733c7d1SRafael Aquini list_for_each_entry_safe(page, page2, l, lru) { 14431caf665SNaoya Horiguchi if (unlikely(PageHuge(page))) { 14531caf665SNaoya Horiguchi putback_active_hugepage(page); 14631caf665SNaoya Horiguchi continue; 14731caf665SNaoya Horiguchi } 1485733c7d1SRafael Aquini list_del(&page->lru); 149bda807d4SMinchan Kim /* 150bda807d4SMinchan Kim * We isolated non-lru movable page so here we can use 151bda807d4SMinchan Kim * __PageMovable because LRU page's mapping cannot have 152bda807d4SMinchan Kim * PAGE_MAPPING_MOVABLE. 153bda807d4SMinchan Kim */ 154b1123ea6SMinchan Kim if (unlikely(__PageMovable(page))) { 155bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageIsolated(page), page); 156bda807d4SMinchan Kim lock_page(page); 157bda807d4SMinchan Kim if (PageMovable(page)) 158bda807d4SMinchan Kim putback_movable_page(page); 159bf6bddf1SRafael Aquini else 160356ea386Sandrew.yang ClearPageIsolated(page); 161bda807d4SMinchan Kim unlock_page(page); 162bda807d4SMinchan Kim put_page(page); 163bda807d4SMinchan Kim } else { 164e8db67ebSNaoya Horiguchi mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + 1656c357848SMatthew Wilcox (Oracle) page_is_file_lru(page), -thp_nr_pages(page)); 166fc280fe8SRabin Vincent putback_lru_page(page); 167b20a3503SChristoph Lameter } 168b20a3503SChristoph Lameter } 169bda807d4SMinchan Kim } 170b20a3503SChristoph Lameter 1710697212aSChristoph Lameter /* 1720697212aSChristoph Lameter * Restore a potential migration pte to a working pte entry 1730697212aSChristoph Lameter */ 1742f031c6fSMatthew Wilcox (Oracle) static bool remove_migration_pte(struct folio *folio, 1752f031c6fSMatthew Wilcox (Oracle) struct vm_area_struct *vma, unsigned long addr, void *old) 1760697212aSChristoph Lameter { 1774eecb8b9SMatthew Wilcox (Oracle) DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION); 1780697212aSChristoph Lameter 1793fe87967SKirill A. 
Shutemov while (page_vma_mapped_walk(&pvmw)) { 1806c287605SDavid Hildenbrand rmap_t rmap_flags = RMAP_NONE; 1810697212aSChristoph Lameter pte_t pte; 1820697212aSChristoph Lameter swp_entry_t entry; 1834eecb8b9SMatthew Wilcox (Oracle) struct page *new; 1844eecb8b9SMatthew Wilcox (Oracle) unsigned long idx = 0; 1850697212aSChristoph Lameter 1864eecb8b9SMatthew Wilcox (Oracle) /* pgoff is invalid for ksm pages, but they are never large */ 1874eecb8b9SMatthew Wilcox (Oracle) if (folio_test_large(folio) && !folio_test_hugetlb(folio)) 1884eecb8b9SMatthew Wilcox (Oracle) idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff; 1894eecb8b9SMatthew Wilcox (Oracle) new = folio_page(folio, idx); 1900697212aSChristoph Lameter 191616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 192616b8371SZi Yan /* PMD-mapped THP migration entry */ 193616b8371SZi Yan if (!pvmw.pte) { 1944eecb8b9SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || 1954eecb8b9SMatthew Wilcox (Oracle) !folio_test_pmd_mappable(folio), folio); 196616b8371SZi Yan remove_migration_pmd(&pvmw, new); 197616b8371SZi Yan continue; 198616b8371SZi Yan } 199616b8371SZi Yan #endif 200616b8371SZi Yan 2014eecb8b9SMatthew Wilcox (Oracle) folio_get(folio); 2026d2329f8SAndrea Arcangeli pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot))); 2033fe87967SKirill A. Shutemov if (pte_swp_soft_dirty(*pvmw.pte)) 204c3d16e16SCyrill Gorcunov pte = pte_mksoft_dirty(pte); 205d3cb8bf6SMel Gorman 2063fe87967SKirill A. Shutemov /* 2073fe87967SKirill A. Shutemov * Recheck VMA as permissions can change since migration started 2083fe87967SKirill A. Shutemov */ 2093fe87967SKirill A. Shutemov entry = pte_to_swp_entry(*pvmw.pte); 2104dd845b5SAlistair Popple if (is_writable_migration_entry(entry)) 211d3cb8bf6SMel Gorman pte = maybe_mkwrite(pte, vma); 212f45ec5ffSPeter Xu else if (pte_swp_uffd_wp(*pvmw.pte)) 213f45ec5ffSPeter Xu pte = pte_mkuffd_wp(pte); 214d3cb8bf6SMel Gorman 2156c287605SDavid Hildenbrand if (folio_test_anon(folio) && !is_readable_migration_entry(entry)) 2166c287605SDavid Hildenbrand rmap_flags |= RMAP_EXCLUSIVE; 2176c287605SDavid Hildenbrand 2186128763fSRalph Campbell if (unlikely(is_device_private_page(new))) { 2194dd845b5SAlistair Popple if (pte_write(pte)) 2204dd845b5SAlistair Popple entry = make_writable_device_private_entry( 2214dd845b5SAlistair Popple page_to_pfn(new)); 2224dd845b5SAlistair Popple else 2234dd845b5SAlistair Popple entry = make_readable_device_private_entry( 2244dd845b5SAlistair Popple page_to_pfn(new)); 225a5430ddaSJérôme Glisse pte = swp_entry_to_pte(entry); 2263d321bf8SRalph Campbell if (pte_swp_soft_dirty(*pvmw.pte)) 2273d321bf8SRalph Campbell pte = pte_swp_mksoft_dirty(pte); 228f45ec5ffSPeter Xu if (pte_swp_uffd_wp(*pvmw.pte)) 229ebdf8321SAlistair Popple pte = pte_swp_mkuffd_wp(pte); 230df6ad698SJérôme Glisse } 231a5430ddaSJérôme Glisse 2323ef8fd7fSAndi Kleen #ifdef CONFIG_HUGETLB_PAGE 2334eecb8b9SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio)) { 23479c1c594SChristophe Leroy unsigned int shift = huge_page_shift(hstate_vma(vma)); 23579c1c594SChristophe Leroy 236290408d4SNaoya Horiguchi pte = pte_mkhuge(pte); 23779c1c594SChristophe Leroy pte = arch_make_huge_pte(pte, shift, vma->vm_flags); 2384eecb8b9SMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 23928c5209dSDavid Hildenbrand hugepage_add_anon_rmap(new, vma, pvmw.address, 2406c287605SDavid Hildenbrand rmap_flags); 241290408d4SNaoya Horiguchi else 242fb3d824dSDavid Hildenbrand page_dup_file_rmap(new, true); 2431eba86c0SPasha Tatashin 
set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); 244383321abSAneesh Kumar K.V } else 245383321abSAneesh Kumar K.V #endif 246383321abSAneesh Kumar K.V { 2474eecb8b9SMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 248f1e2db12SDavid Hildenbrand page_add_anon_rmap(new, vma, pvmw.address, 2496c287605SDavid Hildenbrand rmap_flags); 25004e62a29SChristoph Lameter else 251cea86fe2SHugh Dickins page_add_file_rmap(new, vma, false); 2521eba86c0SPasha Tatashin set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); 253383321abSAneesh Kumar K.V } 254b7435507SHugh Dickins if (vma->vm_flags & VM_LOCKED) 255adb11e78SSebastian Andrzej Siewior mlock_page_drain_local(); 256e125fe40SKirill A. Shutemov 2574cc79b33SAnshuman Khandual trace_remove_migration_pte(pvmw.address, pte_val(pte), 2584cc79b33SAnshuman Khandual compound_order(new)); 2594cc79b33SAnshuman Khandual 26004e62a29SChristoph Lameter /* No need to invalidate - it was non-present before */ 2613fe87967SKirill A. Shutemov update_mmu_cache(vma, pvmw.address, pvmw.pte); 2623fe87967SKirill A. Shutemov } 2633fe87967SKirill A. Shutemov 264e4b82222SMinchan Kim return true; 2650697212aSChristoph Lameter } 2660697212aSChristoph Lameter 2670697212aSChristoph Lameter /* 26804e62a29SChristoph Lameter * Get rid of all migration entries and replace them by 26904e62a29SChristoph Lameter * references to the indicated page. 27004e62a29SChristoph Lameter */ 2714eecb8b9SMatthew Wilcox (Oracle) void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked) 27204e62a29SChristoph Lameter { 273051ac83aSJoonsoo Kim struct rmap_walk_control rwc = { 274051ac83aSJoonsoo Kim .rmap_one = remove_migration_pte, 2754eecb8b9SMatthew Wilcox (Oracle) .arg = src, 276051ac83aSJoonsoo Kim }; 277051ac83aSJoonsoo Kim 278e388466dSKirill A. Shutemov if (locked) 2792f031c6fSMatthew Wilcox (Oracle) rmap_walk_locked(dst, &rwc); 280e388466dSKirill A. Shutemov else 2812f031c6fSMatthew Wilcox (Oracle) rmap_walk(dst, &rwc); 28204e62a29SChristoph Lameter } 28304e62a29SChristoph Lameter 28404e62a29SChristoph Lameter /* 2850697212aSChristoph Lameter * Something used the pte of a page under migration. We need to 2860697212aSChristoph Lameter * get to the page and wait until migration is finished. 2870697212aSChristoph Lameter * When we return from this function the fault will be retried. 
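 * (Illustrative sketch, not code from this file: the fault path typically
 * lands here when a non-present pte turns out to be a migration entry,
 * roughly as do_swap_page() does it:
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (is_migration_entry(entry))
 *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
 * )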
2880697212aSChristoph Lameter */ 289e66f17ffSNaoya Horiguchi void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, 29030dad309SNaoya Horiguchi spinlock_t *ptl) 2910697212aSChristoph Lameter { 29230dad309SNaoya Horiguchi pte_t pte; 2930697212aSChristoph Lameter swp_entry_t entry; 2940697212aSChristoph Lameter 29530dad309SNaoya Horiguchi spin_lock(ptl); 2960697212aSChristoph Lameter pte = *ptep; 2970697212aSChristoph Lameter if (!is_swap_pte(pte)) 2980697212aSChristoph Lameter goto out; 2990697212aSChristoph Lameter 3000697212aSChristoph Lameter entry = pte_to_swp_entry(pte); 3010697212aSChristoph Lameter if (!is_migration_entry(entry)) 3020697212aSChristoph Lameter goto out; 3030697212aSChristoph Lameter 304ffa65753SAlistair Popple migration_entry_wait_on_locked(entry, ptep, ptl); 3050697212aSChristoph Lameter return; 3060697212aSChristoph Lameter out: 3070697212aSChristoph Lameter pte_unmap_unlock(ptep, ptl); 3080697212aSChristoph Lameter } 3090697212aSChristoph Lameter 31030dad309SNaoya Horiguchi void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 31130dad309SNaoya Horiguchi unsigned long address) 31230dad309SNaoya Horiguchi { 31330dad309SNaoya Horiguchi spinlock_t *ptl = pte_lockptr(mm, pmd); 31430dad309SNaoya Horiguchi pte_t *ptep = pte_offset_map(pmd, address); 31530dad309SNaoya Horiguchi __migration_entry_wait(mm, ptep, ptl); 31630dad309SNaoya Horiguchi } 31730dad309SNaoya Horiguchi 318*ad1ac596SMiaohe Lin #ifdef CONFIG_HUGETLB_PAGE 319*ad1ac596SMiaohe Lin void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl) 32030dad309SNaoya Horiguchi { 321*ad1ac596SMiaohe Lin pte_t pte; 322*ad1ac596SMiaohe Lin 323*ad1ac596SMiaohe Lin spin_lock(ptl); 324*ad1ac596SMiaohe Lin pte = huge_ptep_get(ptep); 325*ad1ac596SMiaohe Lin 326*ad1ac596SMiaohe Lin if (unlikely(!is_hugetlb_entry_migration(pte))) 327*ad1ac596SMiaohe Lin spin_unlock(ptl); 328*ad1ac596SMiaohe Lin else 329*ad1ac596SMiaohe Lin migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl); 33030dad309SNaoya Horiguchi } 33130dad309SNaoya Horiguchi 332*ad1ac596SMiaohe Lin void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) 333*ad1ac596SMiaohe Lin { 334*ad1ac596SMiaohe Lin spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte); 335*ad1ac596SMiaohe Lin 336*ad1ac596SMiaohe Lin __migration_entry_wait_huge(pte, ptl); 337*ad1ac596SMiaohe Lin } 338*ad1ac596SMiaohe Lin #endif 339*ad1ac596SMiaohe Lin 340616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 341616b8371SZi Yan void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd) 342616b8371SZi Yan { 343616b8371SZi Yan spinlock_t *ptl; 344616b8371SZi Yan 345616b8371SZi Yan ptl = pmd_lock(mm, pmd); 346616b8371SZi Yan if (!is_pmd_migration_entry(*pmd)) 347616b8371SZi Yan goto unlock; 348ffa65753SAlistair Popple migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl); 349616b8371SZi Yan return; 350616b8371SZi Yan unlock: 351616b8371SZi Yan spin_unlock(ptl); 352616b8371SZi Yan } 353616b8371SZi Yan #endif 354616b8371SZi Yan 355f900482dSJan Kara static int expected_page_refs(struct address_space *mapping, struct page *page) 3560b3901b3SJan Kara { 3570b3901b3SJan Kara int expected_count = 1; 3580b3901b3SJan Kara 359f900482dSJan Kara if (mapping) 3603417013eSMatthew Wilcox (Oracle) expected_count += compound_nr(page) + page_has_private(page); 3610b3901b3SJan Kara return expected_count; 3620b3901b3SJan Kara } 3630b3901b3SJan Kara 364b20a3503SChristoph Lameter /* 365c3fcf8a5SChristoph Lameter * Replace the page in the mapping. 
3665b5c7120SChristoph Lameter * 3675b5c7120SChristoph Lameter * The number of remaining references must be: 3685b5c7120SChristoph Lameter * 1 for anonymous pages without a mapping 3695b5c7120SChristoph Lameter * 2 for pages with a mapping 370266cf658SDavid Howells * 3 for pages with a mapping and PagePrivate/PagePrivate2 set. 371b20a3503SChristoph Lameter */ 3723417013eSMatthew Wilcox (Oracle) int folio_migrate_mapping(struct address_space *mapping, 3733417013eSMatthew Wilcox (Oracle) struct folio *newfolio, struct folio *folio, int extra_count) 374b20a3503SChristoph Lameter { 3753417013eSMatthew Wilcox (Oracle) XA_STATE(xas, &mapping->i_pages, folio_index(folio)); 37642cb14b1SHugh Dickins struct zone *oldzone, *newzone; 37742cb14b1SHugh Dickins int dirty; 3783417013eSMatthew Wilcox (Oracle) int expected_count = expected_page_refs(mapping, &folio->page) + extra_count; 3793417013eSMatthew Wilcox (Oracle) long nr = folio_nr_pages(folio); 3808763cb45SJérôme Glisse 3816c5240aeSChristoph Lameter if (!mapping) { 3820e8c7d0fSChristoph Lameter /* Anonymous page without mapping */ 3833417013eSMatthew Wilcox (Oracle) if (folio_ref_count(folio) != expected_count) 3846c5240aeSChristoph Lameter return -EAGAIN; 385cf4b769aSHugh Dickins 386cf4b769aSHugh Dickins /* No turning back from here */ 3873417013eSMatthew Wilcox (Oracle) newfolio->index = folio->index; 3883417013eSMatthew Wilcox (Oracle) newfolio->mapping = folio->mapping; 3893417013eSMatthew Wilcox (Oracle) if (folio_test_swapbacked(folio)) 3903417013eSMatthew Wilcox (Oracle) __folio_set_swapbacked(newfolio); 391cf4b769aSHugh Dickins 39278bd5209SRafael Aquini return MIGRATEPAGE_SUCCESS; 3936c5240aeSChristoph Lameter } 3946c5240aeSChristoph Lameter 3953417013eSMatthew Wilcox (Oracle) oldzone = folio_zone(folio); 3963417013eSMatthew Wilcox (Oracle) newzone = folio_zone(newfolio); 39742cb14b1SHugh Dickins 39889eb946aSMatthew Wilcox xas_lock_irq(&xas); 3993417013eSMatthew Wilcox (Oracle) if (!folio_ref_freeze(folio, expected_count)) { 40089eb946aSMatthew Wilcox xas_unlock_irq(&xas); 401e286781dSNick Piggin return -EAGAIN; 402e286781dSNick Piggin } 403e286781dSNick Piggin 404b20a3503SChristoph Lameter /* 4053417013eSMatthew Wilcox (Oracle) * Now we know that no one else is looking at the folio: 406cf4b769aSHugh Dickins * no turning back from here. 
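 * (Worked example of the rule above: an order-0 pagecache folio with no
 * private data must freeze at expected_count == 2, i.e. one reference
 * held by the isolating caller plus one held by the page cache; any
 * extra speculative reference makes the freeze fail with -EAGAIN.)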
407b20a3503SChristoph Lameter */ 4083417013eSMatthew Wilcox (Oracle) newfolio->index = folio->index; 4093417013eSMatthew Wilcox (Oracle) newfolio->mapping = folio->mapping; 4103417013eSMatthew Wilcox (Oracle) folio_ref_add(newfolio, nr); /* add cache reference */ 4113417013eSMatthew Wilcox (Oracle) if (folio_test_swapbacked(folio)) { 4123417013eSMatthew Wilcox (Oracle) __folio_set_swapbacked(newfolio); 4133417013eSMatthew Wilcox (Oracle) if (folio_test_swapcache(folio)) { 4143417013eSMatthew Wilcox (Oracle) folio_set_swapcache(newfolio); 4153417013eSMatthew Wilcox (Oracle) newfolio->private = folio_get_private(folio); 416b20a3503SChristoph Lameter } 4176326fec1SNicholas Piggin } else { 4183417013eSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio); 4196326fec1SNicholas Piggin } 420b20a3503SChristoph Lameter 42142cb14b1SHugh Dickins /* Move dirty while page refs frozen and newpage not yet exposed */ 4223417013eSMatthew Wilcox (Oracle) dirty = folio_test_dirty(folio); 42342cb14b1SHugh Dickins if (dirty) { 4243417013eSMatthew Wilcox (Oracle) folio_clear_dirty(folio); 4253417013eSMatthew Wilcox (Oracle) folio_set_dirty(newfolio); 42642cb14b1SHugh Dickins } 42742cb14b1SHugh Dickins 4283417013eSMatthew Wilcox (Oracle) xas_store(&xas, newfolio); 4297cf9c2c7SNick Piggin 4307cf9c2c7SNick Piggin /* 431937a94c9SJacobo Giralt * Drop cache reference from old page by unfreezing 432937a94c9SJacobo Giralt * to one less reference. 4337cf9c2c7SNick Piggin * We know this isn't the last reference. 4347cf9c2c7SNick Piggin */ 4353417013eSMatthew Wilcox (Oracle) folio_ref_unfreeze(folio, expected_count - nr); 4367cf9c2c7SNick Piggin 43789eb946aSMatthew Wilcox xas_unlock(&xas); 43842cb14b1SHugh Dickins /* Leave irq disabled to prevent preemption while updating stats */ 43942cb14b1SHugh Dickins 4400e8c7d0fSChristoph Lameter /* 4410e8c7d0fSChristoph Lameter * If moved to a different zone then also account 4420e8c7d0fSChristoph Lameter * the page for that zone. Other VM counters will be 4430e8c7d0fSChristoph Lameter * taken care of when we establish references to the 4440e8c7d0fSChristoph Lameter * new page and drop references to the old page. 4450e8c7d0fSChristoph Lameter * 4460e8c7d0fSChristoph Lameter * Note that anonymous pages are accounted for 4474b9d0fabSMel Gorman * via NR_FILE_PAGES and NR_ANON_MAPPED if they 4480e8c7d0fSChristoph Lameter * are mapped to swap space. 
4490e8c7d0fSChristoph Lameter */ 45042cb14b1SHugh Dickins if (newzone != oldzone) { 4510d1c2072SJohannes Weiner struct lruvec *old_lruvec, *new_lruvec; 4520d1c2072SJohannes Weiner struct mem_cgroup *memcg; 4530d1c2072SJohannes Weiner 4543417013eSMatthew Wilcox (Oracle) memcg = folio_memcg(folio); 4550d1c2072SJohannes Weiner old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat); 4560d1c2072SJohannes Weiner new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat); 4570d1c2072SJohannes Weiner 4585c447d27SShakeel Butt __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr); 4595c447d27SShakeel Butt __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr); 4603417013eSMatthew Wilcox (Oracle) if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) { 4615c447d27SShakeel Butt __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr); 4625c447d27SShakeel Butt __mod_lruvec_state(new_lruvec, NR_SHMEM, nr); 4634b02108aSKOSAKI Motohiro } 464b6038942SShakeel Butt #ifdef CONFIG_SWAP 4653417013eSMatthew Wilcox (Oracle) if (folio_test_swapcache(folio)) { 466b6038942SShakeel Butt __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr); 467b6038942SShakeel Butt __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr); 468b6038942SShakeel Butt } 469b6038942SShakeel Butt #endif 470f56753acSChristoph Hellwig if (dirty && mapping_can_writeback(mapping)) { 4715c447d27SShakeel Butt __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr); 4725c447d27SShakeel Butt __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr); 4735c447d27SShakeel Butt __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr); 4745c447d27SShakeel Butt __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr); 47542cb14b1SHugh Dickins } 47642cb14b1SHugh Dickins } 47742cb14b1SHugh Dickins local_irq_enable(); 478b20a3503SChristoph Lameter 47978bd5209SRafael Aquini return MIGRATEPAGE_SUCCESS; 480b20a3503SChristoph Lameter } 4813417013eSMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_migrate_mapping); 482b20a3503SChristoph Lameter 483b20a3503SChristoph Lameter /* 484290408d4SNaoya Horiguchi * The expected number of remaining references is the same as that 4853417013eSMatthew Wilcox (Oracle) * of folio_migrate_mapping(). 
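 * (For a hugetlb page this works out to the "2 + page_has_private(page)"
 * computed below: one reference for the isolating caller and one for the
 * page cache.)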
486290408d4SNaoya Horiguchi */ 487290408d4SNaoya Horiguchi int migrate_huge_page_move_mapping(struct address_space *mapping, 488290408d4SNaoya Horiguchi struct page *newpage, struct page *page) 489290408d4SNaoya Horiguchi { 49089eb946aSMatthew Wilcox XA_STATE(xas, &mapping->i_pages, page_index(page)); 491290408d4SNaoya Horiguchi int expected_count; 492290408d4SNaoya Horiguchi 49389eb946aSMatthew Wilcox xas_lock_irq(&xas); 494290408d4SNaoya Horiguchi expected_count = 2 + page_has_private(page); 495fe896d18SJoonsoo Kim if (!page_ref_freeze(page, expected_count)) { 49689eb946aSMatthew Wilcox xas_unlock_irq(&xas); 497290408d4SNaoya Horiguchi return -EAGAIN; 498290408d4SNaoya Horiguchi } 499290408d4SNaoya Horiguchi 500cf4b769aSHugh Dickins newpage->index = page->index; 501cf4b769aSHugh Dickins newpage->mapping = page->mapping; 5026a93ca8fSJohannes Weiner 503290408d4SNaoya Horiguchi get_page(newpage); 504290408d4SNaoya Horiguchi 50589eb946aSMatthew Wilcox xas_store(&xas, newpage); 506290408d4SNaoya Horiguchi 507fe896d18SJoonsoo Kim page_ref_unfreeze(page, expected_count - 1); 508290408d4SNaoya Horiguchi 50989eb946aSMatthew Wilcox xas_unlock_irq(&xas); 5106a93ca8fSJohannes Weiner 51178bd5209SRafael Aquini return MIGRATEPAGE_SUCCESS; 512290408d4SNaoya Horiguchi } 513290408d4SNaoya Horiguchi 514290408d4SNaoya Horiguchi /* 51519138349SMatthew Wilcox (Oracle) * Copy the flags and some other ancillary information 516b20a3503SChristoph Lameter */ 51719138349SMatthew Wilcox (Oracle) void folio_migrate_flags(struct folio *newfolio, struct folio *folio) 518b20a3503SChristoph Lameter { 5197851a45cSRik van Riel int cpupid; 5207851a45cSRik van Riel 52119138349SMatthew Wilcox (Oracle) if (folio_test_error(folio)) 52219138349SMatthew Wilcox (Oracle) folio_set_error(newfolio); 52319138349SMatthew Wilcox (Oracle) if (folio_test_referenced(folio)) 52419138349SMatthew Wilcox (Oracle) folio_set_referenced(newfolio); 52519138349SMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) 52619138349SMatthew Wilcox (Oracle) folio_mark_uptodate(newfolio); 52719138349SMatthew Wilcox (Oracle) if (folio_test_clear_active(folio)) { 52819138349SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio); 52919138349SMatthew Wilcox (Oracle) folio_set_active(newfolio); 53019138349SMatthew Wilcox (Oracle) } else if (folio_test_clear_unevictable(folio)) 53119138349SMatthew Wilcox (Oracle) folio_set_unevictable(newfolio); 53219138349SMatthew Wilcox (Oracle) if (folio_test_workingset(folio)) 53319138349SMatthew Wilcox (Oracle) folio_set_workingset(newfolio); 53419138349SMatthew Wilcox (Oracle) if (folio_test_checked(folio)) 53519138349SMatthew Wilcox (Oracle) folio_set_checked(newfolio); 5366c287605SDavid Hildenbrand /* 5376c287605SDavid Hildenbrand * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via 5386c287605SDavid Hildenbrand * migration entries. We can still have PG_anon_exclusive set on an 5396c287605SDavid Hildenbrand * effectively unmapped and unreferenced first sub-pages of an 5406c287605SDavid Hildenbrand * anonymous THP: we can simply copy it here via PG_mappedtodisk. 
5416c287605SDavid Hildenbrand */ 54219138349SMatthew Wilcox (Oracle) if (folio_test_mappedtodisk(folio)) 54319138349SMatthew Wilcox (Oracle) folio_set_mappedtodisk(newfolio); 544b20a3503SChristoph Lameter 5453417013eSMatthew Wilcox (Oracle) /* Move dirty on pages not done by folio_migrate_mapping() */ 54619138349SMatthew Wilcox (Oracle) if (folio_test_dirty(folio)) 54719138349SMatthew Wilcox (Oracle) folio_set_dirty(newfolio); 548b20a3503SChristoph Lameter 54919138349SMatthew Wilcox (Oracle) if (folio_test_young(folio)) 55019138349SMatthew Wilcox (Oracle) folio_set_young(newfolio); 55119138349SMatthew Wilcox (Oracle) if (folio_test_idle(folio)) 55219138349SMatthew Wilcox (Oracle) folio_set_idle(newfolio); 55333c3fc71SVladimir Davydov 5547851a45cSRik van Riel /* 5557851a45cSRik van Riel * Copy NUMA information to the new page, to prevent over-eager 5567851a45cSRik van Riel * future migrations of this same page. 5577851a45cSRik van Riel */ 55819138349SMatthew Wilcox (Oracle) cpupid = page_cpupid_xchg_last(&folio->page, -1); 55919138349SMatthew Wilcox (Oracle) page_cpupid_xchg_last(&newfolio->page, cpupid); 5607851a45cSRik van Riel 56119138349SMatthew Wilcox (Oracle) folio_migrate_ksm(newfolio, folio); 562c8d6553bSHugh Dickins /* 563c8d6553bSHugh Dickins * Please do not reorder this without considering how mm/ksm.c's 564c8d6553bSHugh Dickins * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache(). 565c8d6553bSHugh Dickins */ 56619138349SMatthew Wilcox (Oracle) if (folio_test_swapcache(folio)) 56719138349SMatthew Wilcox (Oracle) folio_clear_swapcache(folio); 56819138349SMatthew Wilcox (Oracle) folio_clear_private(folio); 569ad2fa371SMuchun Song 570ad2fa371SMuchun Song /* page->private contains hugetlb specific flags */ 57119138349SMatthew Wilcox (Oracle) if (!folio_test_hugetlb(folio)) 57219138349SMatthew Wilcox (Oracle) folio->private = NULL; 573b20a3503SChristoph Lameter 574b20a3503SChristoph Lameter /* 575b20a3503SChristoph Lameter * If any waiters have accumulated on the new page then 576b20a3503SChristoph Lameter * wake them up. 577b20a3503SChristoph Lameter */ 57819138349SMatthew Wilcox (Oracle) if (folio_test_writeback(newfolio)) 57919138349SMatthew Wilcox (Oracle) folio_end_writeback(newfolio); 580d435edcaSVlastimil Babka 5816aeff241SYang Shi /* 5826aeff241SYang Shi * PG_readahead shares the same bit with PG_reclaim. The above 5836aeff241SYang Shi * end_page_writeback() may clear PG_readahead mistakenly, so set the 5846aeff241SYang Shi * bit after that. 
5856aeff241SYang Shi */ 58619138349SMatthew Wilcox (Oracle) if (folio_test_readahead(folio)) 58719138349SMatthew Wilcox (Oracle) folio_set_readahead(newfolio); 5886aeff241SYang Shi 58919138349SMatthew Wilcox (Oracle) folio_copy_owner(newfolio, folio); 59074485cf2SJohannes Weiner 59119138349SMatthew Wilcox (Oracle) if (!folio_test_hugetlb(folio)) 592d21bba2bSMatthew Wilcox (Oracle) mem_cgroup_migrate(folio, newfolio); 593b20a3503SChristoph Lameter } 59419138349SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_migrate_flags); 5952916ecc0SJérôme Glisse 596715cbfd6SMatthew Wilcox (Oracle) void folio_migrate_copy(struct folio *newfolio, struct folio *folio) 5972916ecc0SJérôme Glisse { 598715cbfd6SMatthew Wilcox (Oracle) folio_copy(newfolio, folio); 599715cbfd6SMatthew Wilcox (Oracle) folio_migrate_flags(newfolio, folio); 6002916ecc0SJérôme Glisse } 601715cbfd6SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_migrate_copy); 602b20a3503SChristoph Lameter 6031d8b85ccSChristoph Lameter /************************************************************ 6041d8b85ccSChristoph Lameter * Migration functions 6051d8b85ccSChristoph Lameter ***********************************************************/ 6061d8b85ccSChristoph Lameter 607b20a3503SChristoph Lameter /* 608bda807d4SMinchan Kim * Common logic to directly migrate a single LRU page suitable for 609266cf658SDavid Howells * pages that do not use PagePrivate/PagePrivate2. 610b20a3503SChristoph Lameter * 611b20a3503SChristoph Lameter * Pages are locked upon entry and exit. 612b20a3503SChristoph Lameter */ 6132d1db3b1SChristoph Lameter int migrate_page(struct address_space *mapping, 614a6bc32b8SMel Gorman struct page *newpage, struct page *page, 615a6bc32b8SMel Gorman enum migrate_mode mode) 616b20a3503SChristoph Lameter { 6173417013eSMatthew Wilcox (Oracle) struct folio *newfolio = page_folio(newpage); 6183417013eSMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 619b20a3503SChristoph Lameter int rc; 620b20a3503SChristoph Lameter 6213417013eSMatthew Wilcox (Oracle) BUG_ON(folio_test_writeback(folio)); /* Writeback must be complete */ 622b20a3503SChristoph Lameter 6233417013eSMatthew Wilcox (Oracle) rc = folio_migrate_mapping(mapping, newfolio, folio, 0); 624b20a3503SChristoph Lameter 62578bd5209SRafael Aquini if (rc != MIGRATEPAGE_SUCCESS) 626b20a3503SChristoph Lameter return rc; 627b20a3503SChristoph Lameter 6282916ecc0SJérôme Glisse if (mode != MIGRATE_SYNC_NO_COPY) 629715cbfd6SMatthew Wilcox (Oracle) folio_migrate_copy(newfolio, folio); 6302916ecc0SJérôme Glisse else 63119138349SMatthew Wilcox (Oracle) folio_migrate_flags(newfolio, folio); 63278bd5209SRafael Aquini return MIGRATEPAGE_SUCCESS; 633b20a3503SChristoph Lameter } 634b20a3503SChristoph Lameter EXPORT_SYMBOL(migrate_page); 635b20a3503SChristoph Lameter 6369361401eSDavid Howells #ifdef CONFIG_BLOCK 63784ade7c1SJan Kara /* Returns true if all buffers are successfully locked */ 63884ade7c1SJan Kara static bool buffer_migrate_lock_buffers(struct buffer_head *head, 63984ade7c1SJan Kara enum migrate_mode mode) 64084ade7c1SJan Kara { 64184ade7c1SJan Kara struct buffer_head *bh = head; 64284ade7c1SJan Kara 64384ade7c1SJan Kara /* Simple case, sync compaction */ 64484ade7c1SJan Kara if (mode != MIGRATE_ASYNC) { 64584ade7c1SJan Kara do { 64684ade7c1SJan Kara lock_buffer(bh); 64784ade7c1SJan Kara bh = bh->b_this_page; 64884ade7c1SJan Kara 64984ade7c1SJan Kara } while (bh != head); 65084ade7c1SJan Kara 65184ade7c1SJan Kara return true; 65284ade7c1SJan Kara } 65384ade7c1SJan Kara 65484ade7c1SJan Kara /* 
async case, we cannot block on lock_buffer so use trylock_buffer */ 65584ade7c1SJan Kara do { 65684ade7c1SJan Kara if (!trylock_buffer(bh)) { 65784ade7c1SJan Kara /* 65884ade7c1SJan Kara * We failed to lock the buffer and cannot stall in 65984ade7c1SJan Kara * async migration. Release the taken locks 66084ade7c1SJan Kara */ 66184ade7c1SJan Kara struct buffer_head *failed_bh = bh; 66284ade7c1SJan Kara bh = head; 66384ade7c1SJan Kara while (bh != failed_bh) { 66484ade7c1SJan Kara unlock_buffer(bh); 66584ade7c1SJan Kara bh = bh->b_this_page; 66684ade7c1SJan Kara } 66784ade7c1SJan Kara return false; 66884ade7c1SJan Kara } 66984ade7c1SJan Kara 67084ade7c1SJan Kara bh = bh->b_this_page; 67184ade7c1SJan Kara } while (bh != head); 67284ade7c1SJan Kara return true; 67384ade7c1SJan Kara } 67484ade7c1SJan Kara 67589cb0888SJan Kara static int __buffer_migrate_page(struct address_space *mapping, 67689cb0888SJan Kara struct page *newpage, struct page *page, enum migrate_mode mode, 67789cb0888SJan Kara bool check_refs) 6781d8b85ccSChristoph Lameter { 6791d8b85ccSChristoph Lameter struct buffer_head *bh, *head; 6801d8b85ccSChristoph Lameter int rc; 681cc4f11e6SJan Kara int expected_count; 6821d8b85ccSChristoph Lameter 6831d8b85ccSChristoph Lameter if (!page_has_buffers(page)) 684a6bc32b8SMel Gorman return migrate_page(mapping, newpage, page, mode); 6851d8b85ccSChristoph Lameter 686cc4f11e6SJan Kara /* Check whether page does not have extra refs before we do more work */ 687f900482dSJan Kara expected_count = expected_page_refs(mapping, page); 688cc4f11e6SJan Kara if (page_count(page) != expected_count) 689cc4f11e6SJan Kara return -EAGAIN; 690cc4f11e6SJan Kara 6911d8b85ccSChristoph Lameter head = page_buffers(page); 692cc4f11e6SJan Kara if (!buffer_migrate_lock_buffers(head, mode)) 693cc4f11e6SJan Kara return -EAGAIN; 6941d8b85ccSChristoph Lameter 69589cb0888SJan Kara if (check_refs) { 69689cb0888SJan Kara bool busy; 69789cb0888SJan Kara bool invalidated = false; 69889cb0888SJan Kara 69989cb0888SJan Kara recheck_buffers: 70089cb0888SJan Kara busy = false; 70189cb0888SJan Kara spin_lock(&mapping->private_lock); 70289cb0888SJan Kara bh = head; 70389cb0888SJan Kara do { 70489cb0888SJan Kara if (atomic_read(&bh->b_count)) { 70589cb0888SJan Kara busy = true; 70689cb0888SJan Kara break; 70789cb0888SJan Kara } 70889cb0888SJan Kara bh = bh->b_this_page; 70989cb0888SJan Kara } while (bh != head); 71089cb0888SJan Kara if (busy) { 71189cb0888SJan Kara if (invalidated) { 71289cb0888SJan Kara rc = -EAGAIN; 71389cb0888SJan Kara goto unlock_buffers; 71489cb0888SJan Kara } 715ebdf4de5SJan Kara spin_unlock(&mapping->private_lock); 71689cb0888SJan Kara invalidate_bh_lrus(); 71789cb0888SJan Kara invalidated = true; 71889cb0888SJan Kara goto recheck_buffers; 71989cb0888SJan Kara } 72089cb0888SJan Kara } 72189cb0888SJan Kara 72237109694SKeith Busch rc = migrate_page_move_mapping(mapping, newpage, page, 0); 72378bd5209SRafael Aquini if (rc != MIGRATEPAGE_SUCCESS) 724cc4f11e6SJan Kara goto unlock_buffers; 7251d8b85ccSChristoph Lameter 726cd0f3715SGuoqing Jiang attach_page_private(newpage, detach_page_private(page)); 7271d8b85ccSChristoph Lameter 7281d8b85ccSChristoph Lameter bh = head; 7291d8b85ccSChristoph Lameter do { 7301d8b85ccSChristoph Lameter set_bh_page(bh, newpage, bh_offset(bh)); 7311d8b85ccSChristoph Lameter bh = bh->b_this_page; 7321d8b85ccSChristoph Lameter 7331d8b85ccSChristoph Lameter } while (bh != head); 7341d8b85ccSChristoph Lameter 7352916ecc0SJérôme Glisse if (mode != MIGRATE_SYNC_NO_COPY) 
7361d8b85ccSChristoph Lameter migrate_page_copy(newpage, page); 7372916ecc0SJérôme Glisse else 7382916ecc0SJérôme Glisse migrate_page_states(newpage, page); 7391d8b85ccSChristoph Lameter 740cc4f11e6SJan Kara rc = MIGRATEPAGE_SUCCESS; 741cc4f11e6SJan Kara unlock_buffers: 742ebdf4de5SJan Kara if (check_refs) 743ebdf4de5SJan Kara spin_unlock(&mapping->private_lock); 7441d8b85ccSChristoph Lameter bh = head; 7451d8b85ccSChristoph Lameter do { 7461d8b85ccSChristoph Lameter unlock_buffer(bh); 7471d8b85ccSChristoph Lameter bh = bh->b_this_page; 7481d8b85ccSChristoph Lameter 7491d8b85ccSChristoph Lameter } while (bh != head); 7501d8b85ccSChristoph Lameter 751cc4f11e6SJan Kara return rc; 7521d8b85ccSChristoph Lameter } 75389cb0888SJan Kara 75489cb0888SJan Kara /* 75589cb0888SJan Kara * Migration function for pages with buffers. This function can only be used 75689cb0888SJan Kara * if the underlying filesystem guarantees that no other references to "page" 75789cb0888SJan Kara * exist. For example attached buffer heads are accessed only under page lock. 75889cb0888SJan Kara */ 75989cb0888SJan Kara int buffer_migrate_page(struct address_space *mapping, 76089cb0888SJan Kara struct page *newpage, struct page *page, enum migrate_mode mode) 76189cb0888SJan Kara { 76289cb0888SJan Kara return __buffer_migrate_page(mapping, newpage, page, mode, false); 76389cb0888SJan Kara } 7641d8b85ccSChristoph Lameter EXPORT_SYMBOL(buffer_migrate_page); 76589cb0888SJan Kara 76689cb0888SJan Kara /* 76789cb0888SJan Kara * Same as above except that this variant is more careful and checks that there 76889cb0888SJan Kara * are also no buffer head references. This function is the right one for 76989cb0888SJan Kara * mappings where buffer heads are directly looked up and referenced (such as 77089cb0888SJan Kara * block device mappings). 
77189cb0888SJan Kara */ 77289cb0888SJan Kara int buffer_migrate_page_norefs(struct address_space *mapping, 77389cb0888SJan Kara struct page *newpage, struct page *page, enum migrate_mode mode) 77489cb0888SJan Kara { 77589cb0888SJan Kara return __buffer_migrate_page(mapping, newpage, page, mode, true); 77689cb0888SJan Kara } 7779361401eSDavid Howells #endif 7781d8b85ccSChristoph Lameter 77904e62a29SChristoph Lameter /* 78004e62a29SChristoph Lameter * Writeback a page to clean the dirty state 78104e62a29SChristoph Lameter */ 78204e62a29SChristoph Lameter static int writeout(struct address_space *mapping, struct page *page) 78304e62a29SChristoph Lameter { 7844eecb8b9SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 78504e62a29SChristoph Lameter struct writeback_control wbc = { 78604e62a29SChristoph Lameter .sync_mode = WB_SYNC_NONE, 78704e62a29SChristoph Lameter .nr_to_write = 1, 78804e62a29SChristoph Lameter .range_start = 0, 78904e62a29SChristoph Lameter .range_end = LLONG_MAX, 79004e62a29SChristoph Lameter .for_reclaim = 1 79104e62a29SChristoph Lameter }; 79204e62a29SChristoph Lameter int rc; 79304e62a29SChristoph Lameter 79404e62a29SChristoph Lameter if (!mapping->a_ops->writepage) 79504e62a29SChristoph Lameter /* No write method for the address space */ 79604e62a29SChristoph Lameter return -EINVAL; 79704e62a29SChristoph Lameter 79804e62a29SChristoph Lameter if (!clear_page_dirty_for_io(page)) 79904e62a29SChristoph Lameter /* Someone else already triggered a write */ 80004e62a29SChristoph Lameter return -EAGAIN; 80104e62a29SChristoph Lameter 80204e62a29SChristoph Lameter /* 80304e62a29SChristoph Lameter * A dirty page may imply that the underlying filesystem has 80404e62a29SChristoph Lameter * the page on some queue. So the page must be clean for 80504e62a29SChristoph Lameter * migration. Writeout may mean we lose the lock and the 80604e62a29SChristoph Lameter * page state is no longer what we checked for earlier. 80704e62a29SChristoph Lameter * At this point we know that the migration attempt cannot 80804e62a29SChristoph Lameter * be successful. 80904e62a29SChristoph Lameter */ 8104eecb8b9SMatthew Wilcox (Oracle) remove_migration_ptes(folio, folio, false); 81104e62a29SChristoph Lameter 81204e62a29SChristoph Lameter rc = mapping->a_ops->writepage(page, &wbc); 81304e62a29SChristoph Lameter 81404e62a29SChristoph Lameter if (rc != AOP_WRITEPAGE_ACTIVATE) 81504e62a29SChristoph Lameter /* unlocked. Relock */ 81604e62a29SChristoph Lameter lock_page(page); 81704e62a29SChristoph Lameter 818bda8550dSHugh Dickins return (rc < 0) ? -EIO : -EAGAIN; 81904e62a29SChristoph Lameter } 82004e62a29SChristoph Lameter 82104e62a29SChristoph Lameter /* 82204e62a29SChristoph Lameter * Default handling if a filesystem does not provide a migration function.
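 * In outline: dirty pages are written out only for MIGRATE_SYNC and
 * MIGRATE_SYNC_NO_COPY, pages carrying fs-private buffers must give them
 * up via try_to_release_page(), and everything else is handed on to
 * migrate_page().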
82304e62a29SChristoph Lameter */ 8248351a6e4SChristoph Lameter static int fallback_migrate_page(struct address_space *mapping, 825a6bc32b8SMel Gorman struct page *newpage, struct page *page, enum migrate_mode mode) 8268351a6e4SChristoph Lameter { 827b969c4abSMel Gorman if (PageDirty(page)) { 828a6bc32b8SMel Gorman /* Only writeback pages in full synchronous migration */ 8292916ecc0SJérôme Glisse switch (mode) { 8302916ecc0SJérôme Glisse case MIGRATE_SYNC: 8312916ecc0SJérôme Glisse case MIGRATE_SYNC_NO_COPY: 8322916ecc0SJérôme Glisse break; 8332916ecc0SJérôme Glisse default: 834b969c4abSMel Gorman return -EBUSY; 8352916ecc0SJérôme Glisse } 83604e62a29SChristoph Lameter return writeout(mapping, page); 837b969c4abSMel Gorman } 8388351a6e4SChristoph Lameter 8398351a6e4SChristoph Lameter /* 8408351a6e4SChristoph Lameter * Buffers may be managed in a filesystem specific way. 8418351a6e4SChristoph Lameter * We must have no buffers or drop them. 8428351a6e4SChristoph Lameter */ 843266cf658SDavid Howells if (page_has_private(page) && 8448351a6e4SChristoph Lameter !try_to_release_page(page, GFP_KERNEL)) 845806031bbSMel Gorman return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY; 8468351a6e4SChristoph Lameter 847a6bc32b8SMel Gorman return migrate_page(mapping, newpage, page, mode); 8488351a6e4SChristoph Lameter } 8498351a6e4SChristoph Lameter 8501d8b85ccSChristoph Lameter /* 851e24f0b8fSChristoph Lameter * Move a page to a newly allocated page 852e24f0b8fSChristoph Lameter * The page is locked and all ptes have been successfully removed. 853b20a3503SChristoph Lameter * 854e24f0b8fSChristoph Lameter * The new page will have replaced the old page if this function 855e24f0b8fSChristoph Lameter * is successful. 856894bc310SLee Schermerhorn * 857894bc310SLee Schermerhorn * Return value: 858894bc310SLee Schermerhorn * < 0 - error code 85978bd5209SRafael Aquini * MIGRATEPAGE_SUCCESS - success 860b20a3503SChristoph Lameter */ 861e7e3ffebSMatthew Wilcox (Oracle) static int move_to_new_folio(struct folio *dst, struct folio *src, 8625c3f9a67SHugh Dickins enum migrate_mode mode) 863b20a3503SChristoph Lameter { 864e24f0b8fSChristoph Lameter struct address_space *mapping; 865bda807d4SMinchan Kim int rc = -EAGAIN; 866e7e3ffebSMatthew Wilcox (Oracle) bool is_lru = !__PageMovable(&src->page); 867b20a3503SChristoph Lameter 868e7e3ffebSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(src), src); 869e7e3ffebSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst); 870b20a3503SChristoph Lameter 871e7e3ffebSMatthew Wilcox (Oracle) mapping = folio_mapping(src); 872bda807d4SMinchan Kim 873bda807d4SMinchan Kim if (likely(is_lru)) { 874b20a3503SChristoph Lameter if (!mapping) 875e7e3ffebSMatthew Wilcox (Oracle) rc = migrate_page(mapping, &dst->page, &src->page, mode); 8766c5240aeSChristoph Lameter else if (mapping->a_ops->migratepage) 877b20a3503SChristoph Lameter /* 878bda807d4SMinchan Kim * Most pages have a mapping and most filesystems 879bda807d4SMinchan Kim * provide a migratepage callback. Anonymous pages 880bda807d4SMinchan Kim * are part of swap space which also has its own 881bda807d4SMinchan Kim * migratepage callback. This is the most common path 882bda807d4SMinchan Kim * for page migration. 
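 * (As one example, swap-backed anonymous pages come through swap_aops,
 * whose ->migratepage has been plain migrate_page(); mentioned only as
 * background, since the aops wiring lives outside this file.)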
883b20a3503SChristoph Lameter */ 884e7e3ffebSMatthew Wilcox (Oracle) rc = mapping->a_ops->migratepage(mapping, &dst->page, 885e7e3ffebSMatthew Wilcox (Oracle) &src->page, mode); 8868351a6e4SChristoph Lameter else 887e7e3ffebSMatthew Wilcox (Oracle) rc = fallback_migrate_page(mapping, &dst->page, 888e7e3ffebSMatthew Wilcox (Oracle) &src->page, mode); 889bda807d4SMinchan Kim } else { 890bda807d4SMinchan Kim /* 891bda807d4SMinchan Kim * In case of non-lru page, it could be released after 892bda807d4SMinchan Kim * isolation step. In that case, we shouldn't try migration. 893bda807d4SMinchan Kim */ 894e7e3ffebSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_isolated(src), src); 895e7e3ffebSMatthew Wilcox (Oracle) if (!folio_test_movable(src)) { 896bda807d4SMinchan Kim rc = MIGRATEPAGE_SUCCESS; 897e7e3ffebSMatthew Wilcox (Oracle) folio_clear_isolated(src); 898bda807d4SMinchan Kim goto out; 899bda807d4SMinchan Kim } 900bda807d4SMinchan Kim 901e7e3ffebSMatthew Wilcox (Oracle) rc = mapping->a_ops->migratepage(mapping, &dst->page, 902e7e3ffebSMatthew Wilcox (Oracle) &src->page, mode); 903bda807d4SMinchan Kim WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS && 904e7e3ffebSMatthew Wilcox (Oracle) !folio_test_isolated(src)); 905bda807d4SMinchan Kim } 906b20a3503SChristoph Lameter 9075c3f9a67SHugh Dickins /* 908e7e3ffebSMatthew Wilcox (Oracle) * When successful, old pagecache src->mapping must be cleared before 909e7e3ffebSMatthew Wilcox (Oracle) * src is freed; but stats require that PageAnon be left as PageAnon. 9105c3f9a67SHugh Dickins */ 9115c3f9a67SHugh Dickins if (rc == MIGRATEPAGE_SUCCESS) { 912e7e3ffebSMatthew Wilcox (Oracle) if (__PageMovable(&src->page)) { 913e7e3ffebSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_isolated(src), src); 914bda807d4SMinchan Kim 915bda807d4SMinchan Kim /* 916bda807d4SMinchan Kim * We clear PG_movable under page_lock so any compactor 917bda807d4SMinchan Kim * cannot try to migrate this page. 918bda807d4SMinchan Kim */ 919e7e3ffebSMatthew Wilcox (Oracle) folio_clear_isolated(src); 920bda807d4SMinchan Kim } 921bda807d4SMinchan Kim 922bda807d4SMinchan Kim /* 923e7e3ffebSMatthew Wilcox (Oracle) * Anonymous and movable src->mapping will be cleared by 924bda807d4SMinchan Kim * free_pages_prepare so don't reset it here for keeping 925bda807d4SMinchan Kim * the type to work PageAnon, for example. 
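 * folio_mapping_flags() tests the PAGE_MAPPING_ANON/PAGE_MAPPING_MOVABLE
 * bits in src->mapping, so only plain pagecache mappings get cleared here.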
926bda807d4SMinchan Kim */ 927e7e3ffebSMatthew Wilcox (Oracle) if (!folio_mapping_flags(src)) 928e7e3ffebSMatthew Wilcox (Oracle) src->mapping = NULL; 929d2b2c6ddSLars Persson 930e7e3ffebSMatthew Wilcox (Oracle) if (likely(!folio_is_zone_device(dst))) 931e7e3ffebSMatthew Wilcox (Oracle) flush_dcache_folio(dst); 9323fe2011fSMel Gorman } 933bda807d4SMinchan Kim out: 934e24f0b8fSChristoph Lameter return rc; 935e24f0b8fSChristoph Lameter } 936e24f0b8fSChristoph Lameter 9370dabec93SMinchan Kim static int __unmap_and_move(struct page *page, struct page *newpage, 9389c620e2bSHugh Dickins int force, enum migrate_mode mode) 939e24f0b8fSChristoph Lameter { 9404b8554c5SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 9414eecb8b9SMatthew Wilcox (Oracle) struct folio *dst = page_folio(newpage); 9420dabec93SMinchan Kim int rc = -EAGAIN; 943213ecb31SBaolin Wang bool page_was_mapped = false; 9443f6c8272SMel Gorman struct anon_vma *anon_vma = NULL; 945bda807d4SMinchan Kim bool is_lru = !__PageMovable(page); 94695a402c3SChristoph Lameter 947529ae9aaSNick Piggin if (!trylock_page(page)) { 948a6bc32b8SMel Gorman if (!force || mode == MIGRATE_ASYNC) 9490dabec93SMinchan Kim goto out; 9503e7d3449SMel Gorman 9513e7d3449SMel Gorman /* 9523e7d3449SMel Gorman * It's not safe for direct compaction to call lock_page. 9533e7d3449SMel Gorman * For example, during page readahead pages are added locked 9543e7d3449SMel Gorman * to the LRU. Later, when the IO completes the pages are 9553e7d3449SMel Gorman * marked uptodate and unlocked. However, the queueing 9563e7d3449SMel Gorman * could be merging multiple pages for one bio (e.g. 957d4388340SMatthew Wilcox (Oracle) * mpage_readahead). If an allocation happens for the 9583e7d3449SMel Gorman * second or third page, the process can end up locking 9593e7d3449SMel Gorman * the same page twice and deadlocking. Rather than 9603e7d3449SMel Gorman * trying to be clever about what pages can be locked, 9613e7d3449SMel Gorman * avoid the use of lock_page for direct compaction 9623e7d3449SMel Gorman * altogether. 9633e7d3449SMel Gorman */ 9643e7d3449SMel Gorman if (current->flags & PF_MEMALLOC) 9650dabec93SMinchan Kim goto out; 9663e7d3449SMel Gorman 967e24f0b8fSChristoph Lameter lock_page(page); 968e24f0b8fSChristoph Lameter } 969e24f0b8fSChristoph Lameter 970e24f0b8fSChristoph Lameter if (PageWriteback(page)) { 97111bc82d6SAndrea Arcangeli /* 972fed5b64aSJianguo Wu * Only in the case of a full synchronous migration is it 973a6bc32b8SMel Gorman * necessary to wait for PageWriteback. In the async case, 974a6bc32b8SMel Gorman * the retry loop is too short and in the sync-light case, 975a6bc32b8SMel Gorman * the overhead of stalling is too much 97611bc82d6SAndrea Arcangeli */ 9772916ecc0SJérôme Glisse switch (mode) { 9782916ecc0SJérôme Glisse case MIGRATE_SYNC: 9792916ecc0SJérôme Glisse case MIGRATE_SYNC_NO_COPY: 9802916ecc0SJérôme Glisse break; 9812916ecc0SJérôme Glisse default: 98211bc82d6SAndrea Arcangeli rc = -EBUSY; 9830a31bc97SJohannes Weiner goto out_unlock; 98411bc82d6SAndrea Arcangeli } 98511bc82d6SAndrea Arcangeli if (!force) 9860a31bc97SJohannes Weiner goto out_unlock; 987e24f0b8fSChristoph Lameter wait_on_page_writeback(page); 988e24f0b8fSChristoph Lameter } 98903f15c86SHugh Dickins 990e24f0b8fSChristoph Lameter /* 99168a9843fSBaolin Wang * By try_to_migrate(), page->mapcount goes down to 0 here. In this case, 992dc386d4dSKAMEZAWA Hiroyuki * we cannot notice that anon_vma is freed while we migrate a page.
9931ce82b69SHugh Dickins * This get_anon_vma() delays freeing anon_vma pointer until the end 994dc386d4dSKAMEZAWA Hiroyuki * of migration. File cache pages are no problem because of page_lock(): 995989f89c5SKAMEZAWA Hiroyuki * file cache migration takes write_page() or lock_page() itself, so 996989f89c5SKAMEZAWA Hiroyuki * we only need to care about anon pages here. 9973fe2011fSMel Gorman * 99803f15c86SHugh Dickins * Only page_get_anon_vma() understands the subtleties of 99903f15c86SHugh Dickins * getting a hold on an anon_vma from outside one of its mms. 100003f15c86SHugh Dickins * But if we cannot get anon_vma, then we won't need it anyway, 100103f15c86SHugh Dickins * because that implies that the anon page is no longer mapped 100203f15c86SHugh Dickins * (and cannot be remapped so long as we hold the page lock). 10033fe2011fSMel Gorman */ 100403f15c86SHugh Dickins if (PageAnon(page) && !PageKsm(page)) 100503f15c86SHugh Dickins anon_vma = page_get_anon_vma(page); 100662e1c553SShaohua Li 10077db7671fSHugh Dickins /* 10087db7671fSHugh Dickins * Block others from accessing the new page when we get around to 10097db7671fSHugh Dickins * establishing additional references. We are usually the only one 10107db7671fSHugh Dickins * holding a reference to newpage at this point. We used to have a BUG 10117db7671fSHugh Dickins * here if trylock_page(newpage) fails, but would like to allow for 10127db7671fSHugh Dickins * cases where there might be a race with the previous use of newpage. 10137db7671fSHugh Dickins * This is much like races on refcount of oldpage: just don't BUG(). 10147db7671fSHugh Dickins */ 10157db7671fSHugh Dickins if (unlikely(!trylock_page(newpage))) 10167db7671fSHugh Dickins goto out_unlock; 10177db7671fSHugh Dickins 1018bda807d4SMinchan Kim if (unlikely(!is_lru)) { 1019e7e3ffebSMatthew Wilcox (Oracle) rc = move_to_new_folio(dst, folio, mode); 1020bda807d4SMinchan Kim goto out_unlock_both; 1021bda807d4SMinchan Kim } 1022bda807d4SMinchan Kim 1023dc386d4dSKAMEZAWA Hiroyuki /* 102462e1c553SShaohua Li * Corner case handling: 102562e1c553SShaohua Li * 1. When a new swap-cache page is read in, it is added to the LRU 102662e1c553SShaohua Li * and treated as swapcache but it has no rmap yet. 102762e1c553SShaohua Li * Calling try_to_unmap() against a page->mapping==NULL page will 102862e1c553SShaohua Li * trigger a BUG. So handle it here. 1029d12b8951SYang Shi * 2. An orphaned page (see truncate_cleanup_page) might have 103062e1c553SShaohua Li * fs-private metadata. The page can be picked up due to memory 103162e1c553SShaohua Li * offlining. Everywhere else except page reclaim, the page is 103262e1c553SShaohua Li * invisible to the vm, so the page cannot be migrated. So try to 103362e1c553SShaohua Li * free the metadata, so the page can be freed.
1034dc386d4dSKAMEZAWA Hiroyuki */ 103562e1c553SShaohua Li if (!page->mapping) { 1036309381feSSasha Levin VM_BUG_ON_PAGE(PageAnon(page), page); 10371ce82b69SHugh Dickins if (page_has_private(page)) { 103868189fefSMatthew Wilcox (Oracle) try_to_free_buffers(folio); 10397db7671fSHugh Dickins goto out_unlock_both; 104062e1c553SShaohua Li } 10417db7671fSHugh Dickins } else if (page_mapped(page)) { 10427db7671fSHugh Dickins /* Establish migration ptes */ 104303f15c86SHugh Dickins VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma, 104403f15c86SHugh Dickins page); 10454b8554c5SMatthew Wilcox (Oracle) try_to_migrate(folio, 0); 1046213ecb31SBaolin Wang page_was_mapped = true; 10472ebba6b7SHugh Dickins } 1048dc386d4dSKAMEZAWA Hiroyuki 1049e24f0b8fSChristoph Lameter if (!page_mapped(page)) 1050e7e3ffebSMatthew Wilcox (Oracle) rc = move_to_new_folio(dst, folio, mode); 1051e24f0b8fSChristoph Lameter 1052c3096e67SHugh Dickins /* 1053c3096e67SHugh Dickins * When successful, push newpage to LRU immediately: so that if it 1054c3096e67SHugh Dickins * turns out to be an mlocked page, remove_migration_ptes() will 1055c3096e67SHugh Dickins * automatically build up the correct newpage->mlock_count for it. 1056c3096e67SHugh Dickins * 1057c3096e67SHugh Dickins * We would like to do something similar for the old page, when 1058c3096e67SHugh Dickins * unsuccessful, and other cases when a page has been temporarily 1059c3096e67SHugh Dickins * isolated from the unevictable LRU: but this case is the easiest. 1060c3096e67SHugh Dickins */ 1061c3096e67SHugh Dickins if (rc == MIGRATEPAGE_SUCCESS) { 1062c3096e67SHugh Dickins lru_cache_add(newpage); 10635c3f9a67SHugh Dickins if (page_was_mapped) 1064c3096e67SHugh Dickins lru_add_drain(); 1065c3096e67SHugh Dickins } 1066c3096e67SHugh Dickins 10675c3f9a67SHugh Dickins if (page_was_mapped) 10684eecb8b9SMatthew Wilcox (Oracle) remove_migration_ptes(folio, 10694eecb8b9SMatthew Wilcox (Oracle) rc == MIGRATEPAGE_SUCCESS ? dst : folio, false); 10703f6c8272SMel Gorman 10717db7671fSHugh Dickins out_unlock_both: 10727db7671fSHugh Dickins unlock_page(newpage); 10737db7671fSHugh Dickins out_unlock: 10743f6c8272SMel Gorman /* Drop an anon_vma reference if we took one */ 107576545066SRik van Riel if (anon_vma) 10769e60109fSPeter Zijlstra put_anon_vma(anon_vma); 1077b20a3503SChristoph Lameter unlock_page(page); 10780dabec93SMinchan Kim out: 1079c6c919ebSMinchan Kim /* 1080c3096e67SHugh Dickins * If migration is successful, decrease refcount of the newpage, 1081c6c919ebSMinchan Kim * which will not free the page because new page owner increased 1082c3096e67SHugh Dickins * refcounter. 1083c6c919ebSMinchan Kim */ 1084c3096e67SHugh Dickins if (rc == MIGRATEPAGE_SUCCESS) 1085c6c919ebSMinchan Kim put_page(newpage); 1086c6c919ebSMinchan Kim 10870dabec93SMinchan Kim return rc; 10880dabec93SMinchan Kim } 108995a402c3SChristoph Lameter 10900dabec93SMinchan Kim /* 10910dabec93SMinchan Kim * Obtain the lock on page, remove all ptes and migrate the page 10920dabec93SMinchan Kim * to the newly allocated page in newpage. 
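 * (A minimal get_new_page callback could look like the sketch below; an
 * illustration only, not a callback defined in this file, assuming that
 * "private" carries a target node id:
 *
 *	static struct page *alloc_dst_page(struct page *src, unsigned long private)
 *	{
 *		return alloc_pages_node((int)private, GFP_HIGHUSER_MOVABLE, 0);
 *	}
 * )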
10930dabec93SMinchan Kim */ 10946ec4476aSLinus Torvalds static int unmap_and_move(new_page_t get_new_page, 1095ef2a5153SGeert Uytterhoeven free_page_t put_new_page, 1096ef2a5153SGeert Uytterhoeven unsigned long private, struct page *page, 1097add05cecSNaoya Horiguchi int force, enum migrate_mode mode, 1098dd4ae78aSYang Shi enum migrate_reason reason, 1099dd4ae78aSYang Shi struct list_head *ret) 11000dabec93SMinchan Kim { 11012def7424SHugh Dickins int rc = MIGRATEPAGE_SUCCESS; 110274d4a579SYang Shi struct page *newpage = NULL; 11030dabec93SMinchan Kim 110494723aafSMichal Hocko if (!thp_migration_supported() && PageTransHuge(page)) 1105d532e2e5SYang Shi return -ENOSYS; 110694723aafSMichal Hocko 11070dabec93SMinchan Kim if (page_count(page) == 1) { 1108160088b3SMiaohe Lin /* Page was freed from under us. So we are done. */ 1109c6c919ebSMinchan Kim ClearPageActive(page); 1110c6c919ebSMinchan Kim ClearPageUnevictable(page); 1111160088b3SMiaohe Lin /* free_pages_prepare() will clear PG_isolated. */ 11120dabec93SMinchan Kim goto out; 11130dabec93SMinchan Kim } 11140dabec93SMinchan Kim 111574d4a579SYang Shi newpage = get_new_page(page, private); 111674d4a579SYang Shi if (!newpage) 111774d4a579SYang Shi return -ENOMEM; 111874d4a579SYang Shi 1119b653db77SMatthew Wilcox (Oracle) newpage->private = 0; 11209c620e2bSHugh Dickins rc = __unmap_and_move(page, newpage, force, mode); 1121c6c919ebSMinchan Kim if (rc == MIGRATEPAGE_SUCCESS) 11227cd12b4aSVlastimil Babka set_page_owner_migrate_reason(newpage, reason); 1123bf6bddf1SRafael Aquini 11240dabec93SMinchan Kim out: 1125e24f0b8fSChristoph Lameter if (rc != -EAGAIN) { 1126aaa994b3SChristoph Lameter /* 1127aaa994b3SChristoph Lameter * A page that has been migrated has all references 1128aaa994b3SChristoph Lameter * removed and will be freed. A page that has not been 1129c23a0c99SRalph Campbell * migrated will have kept its references and be restored. 1130aaa994b3SChristoph Lameter */ 1131aaa994b3SChristoph Lameter list_del(&page->lru); 1132e24f0b8fSChristoph Lameter } 113368711a74SDavid Rientjes 113495a402c3SChristoph Lameter /* 1135c6c919ebSMinchan Kim * If migration is successful, releases reference grabbed during 1136c6c919ebSMinchan Kim * isolation. Otherwise, restore the page to right list unless 1137c6c919ebSMinchan Kim * we want to retry. 113895a402c3SChristoph Lameter */ 1139c6c919ebSMinchan Kim if (rc == MIGRATEPAGE_SUCCESS) { 1140dd4ae78aSYang Shi /* 1141dd4ae78aSYang Shi * Compaction can migrate also non-LRU pages which are 1142dd4ae78aSYang Shi * not accounted to NR_ISOLATED_*. They can be recognized 1143dd4ae78aSYang Shi * as __PageMovable 1144dd4ae78aSYang Shi */ 1145dd4ae78aSYang Shi if (likely(!__PageMovable(page))) 1146dd4ae78aSYang Shi mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + 1147dd4ae78aSYang Shi page_is_file_lru(page), -thp_nr_pages(page)); 1148dd4ae78aSYang Shi 114979f5f8faSOscar Salvador if (reason != MR_MEMORY_FAILURE) 1150c6c919ebSMinchan Kim /* 115179f5f8faSOscar Salvador * We release the page in page_handle_poison. 
1152c6c919ebSMinchan Kim */
115379f5f8faSOscar Salvador put_page(page);
1154c6c919ebSMinchan Kim } else {
1155dd4ae78aSYang Shi if (rc != -EAGAIN)
1156dd4ae78aSYang Shi list_add_tail(&page->lru, ret);
1157bda807d4SMinchan Kim
1158cf4b769aSHugh Dickins if (put_new_page)
115968711a74SDavid Rientjes put_new_page(newpage, private);
1160c6c919ebSMinchan Kim else
1161d6d86c0aSKonstantin Khlebnikov put_page(newpage);
1162c6c919ebSMinchan Kim }
116368711a74SDavid Rientjes
1164e24f0b8fSChristoph Lameter return rc;
1165e24f0b8fSChristoph Lameter }
1166b20a3503SChristoph Lameter
1167e24f0b8fSChristoph Lameter /*
1168290408d4SNaoya Horiguchi * Counterpart of unmap_and_move() for hugepage migration.
1169290408d4SNaoya Horiguchi *
1170290408d4SNaoya Horiguchi * This function doesn't wait for the completion of hugepage I/O
1171290408d4SNaoya Horiguchi * because there is no race between I/O and migration for hugepage.
1172290408d4SNaoya Horiguchi * Note that currently hugepage I/O occurs only in direct I/O
1173290408d4SNaoya Horiguchi * where no lock is held and PG_writeback is irrelevant,
1174290408d4SNaoya Horiguchi * and the writeback status of all subpages is counted in the reference
1175290408d4SNaoya Horiguchi * count of the head page (i.e. if all subpages of a 2MB hugepage are
1176290408d4SNaoya Horiguchi * under direct I/O, the reference count of the head page is 512 and a bit more.)
1177290408d4SNaoya Horiguchi * This means that when we try to migrate a hugepage whose subpages are
1178290408d4SNaoya Horiguchi * doing direct I/O, some references remain after try_to_unmap() and
1179290408d4SNaoya Horiguchi * hugepage migration fails without data corruption.
1180290408d4SNaoya Horiguchi *
1181290408d4SNaoya Horiguchi * There is also no race when direct I/O is issued on a page under migration,
1182290408d4SNaoya Horiguchi * because then the pte is replaced with a migration swap entry and the direct
1183290408d4SNaoya Horiguchi * I/O code will wait in the page fault for migration to complete.
1184290408d4SNaoya Horiguchi */
1185290408d4SNaoya Horiguchi static int unmap_and_move_huge_page(new_page_t get_new_page,
118668711a74SDavid Rientjes free_page_t put_new_page, unsigned long private,
118768711a74SDavid Rientjes struct page *hpage, int force,
1188dd4ae78aSYang Shi enum migrate_mode mode, int reason,
1189dd4ae78aSYang Shi struct list_head *ret)
1190290408d4SNaoya Horiguchi {
11914eecb8b9SMatthew Wilcox (Oracle) struct folio *dst, *src = page_folio(hpage);
11922def7424SHugh Dickins int rc = -EAGAIN;
11932ebba6b7SHugh Dickins int page_was_mapped = 0;
119432665f2bSJoonsoo Kim struct page *new_hpage;
1195290408d4SNaoya Horiguchi struct anon_vma *anon_vma = NULL;
1196c0d0381aSMike Kravetz struct address_space *mapping = NULL;
1197290408d4SNaoya Horiguchi
119883467efbSNaoya Horiguchi /*
11997ed2c31dSAnshuman Khandual * Migratability of hugepages depends on the architecture and the hugepage size.
120083467efbSNaoya Horiguchi * This check is necessary because some callers of hugepage migration
120183467efbSNaoya Horiguchi * like soft offline and memory hotremove don't walk through page
120283467efbSNaoya Horiguchi * tables or check whether the hugepage is pmd-based or not before
120383467efbSNaoya Horiguchi * kicking migration.
120483467efbSNaoya Horiguchi */ 1205100873d7SNaoya Horiguchi if (!hugepage_migration_supported(page_hstate(hpage))) { 1206dd4ae78aSYang Shi list_move_tail(&hpage->lru, ret); 120783467efbSNaoya Horiguchi return -ENOSYS; 120832665f2bSJoonsoo Kim } 120983467efbSNaoya Horiguchi 121071a64f61SMuchun Song if (page_count(hpage) == 1) { 121171a64f61SMuchun Song /* page was freed from under us. So we are done. */ 121271a64f61SMuchun Song putback_active_hugepage(hpage); 121371a64f61SMuchun Song return MIGRATEPAGE_SUCCESS; 121471a64f61SMuchun Song } 121571a64f61SMuchun Song 1216666feb21SMichal Hocko new_hpage = get_new_page(hpage, private); 1217290408d4SNaoya Horiguchi if (!new_hpage) 1218290408d4SNaoya Horiguchi return -ENOMEM; 12194eecb8b9SMatthew Wilcox (Oracle) dst = page_folio(new_hpage); 1220290408d4SNaoya Horiguchi 1221290408d4SNaoya Horiguchi if (!trylock_page(hpage)) { 12222916ecc0SJérôme Glisse if (!force) 1223290408d4SNaoya Horiguchi goto out; 12242916ecc0SJérôme Glisse switch (mode) { 12252916ecc0SJérôme Glisse case MIGRATE_SYNC: 12262916ecc0SJérôme Glisse case MIGRATE_SYNC_NO_COPY: 12272916ecc0SJérôme Glisse break; 12282916ecc0SJérôme Glisse default: 12292916ecc0SJérôme Glisse goto out; 12302916ecc0SJérôme Glisse } 1231290408d4SNaoya Horiguchi lock_page(hpage); 1232290408d4SNaoya Horiguchi } 1233290408d4SNaoya Horiguchi 1234cb6acd01SMike Kravetz /* 1235cb6acd01SMike Kravetz * Check for pages which are in the process of being freed. Without 1236cb6acd01SMike Kravetz * page_mapping() set, hugetlbfs specific move page routine will not 1237cb6acd01SMike Kravetz * be called and we could leak usage counts for subpools. 1238cb6acd01SMike Kravetz */ 12396acfb5baSMuchun Song if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) { 1240cb6acd01SMike Kravetz rc = -EBUSY; 1241cb6acd01SMike Kravetz goto out_unlock; 1242cb6acd01SMike Kravetz } 1243cb6acd01SMike Kravetz 1244746b18d4SPeter Zijlstra if (PageAnon(hpage)) 1245746b18d4SPeter Zijlstra anon_vma = page_get_anon_vma(hpage); 1246290408d4SNaoya Horiguchi 12477db7671fSHugh Dickins if (unlikely(!trylock_page(new_hpage))) 12487db7671fSHugh Dickins goto put_anon; 12497db7671fSHugh Dickins 12502ebba6b7SHugh Dickins if (page_mapped(hpage)) { 1251a98a2f0cSAlistair Popple enum ttu_flags ttu = 0; 1252336bf30eSMike Kravetz 1253336bf30eSMike Kravetz if (!PageAnon(hpage)) { 1254c0d0381aSMike Kravetz /* 1255336bf30eSMike Kravetz * In shared mappings, try_to_unmap could potentially 1256336bf30eSMike Kravetz * call huge_pmd_unshare. Because of this, take 1257336bf30eSMike Kravetz * semaphore in write mode here and set TTU_RMAP_LOCKED 1258336bf30eSMike Kravetz * to let lower levels know we have taken the lock. 
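 *
 * (TTU_RMAP_LOCKED is that hint: it tells try_to_migrate() the
 * caller already holds the rmap lock, so the walk must not try
 * to take i_mmap_rwsem again.)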
1259c0d0381aSMike Kravetz */ 1260c0d0381aSMike Kravetz mapping = hugetlb_page_mapping_lock_write(hpage); 1261c0d0381aSMike Kravetz if (unlikely(!mapping)) 1262c0d0381aSMike Kravetz goto unlock_put_anon; 1263c0d0381aSMike Kravetz 12645202978bSMiaohe Lin ttu = TTU_RMAP_LOCKED; 1265336bf30eSMike Kravetz } 1266336bf30eSMike Kravetz 12674b8554c5SMatthew Wilcox (Oracle) try_to_migrate(src, ttu); 12682ebba6b7SHugh Dickins page_was_mapped = 1; 1269336bf30eSMike Kravetz 12705202978bSMiaohe Lin if (ttu & TTU_RMAP_LOCKED) 1271336bf30eSMike Kravetz i_mmap_unlock_write(mapping); 12722ebba6b7SHugh Dickins } 1273290408d4SNaoya Horiguchi 1274290408d4SNaoya Horiguchi if (!page_mapped(hpage)) 1275e7e3ffebSMatthew Wilcox (Oracle) rc = move_to_new_folio(dst, src, mode); 1276290408d4SNaoya Horiguchi 1277336bf30eSMike Kravetz if (page_was_mapped) 12784eecb8b9SMatthew Wilcox (Oracle) remove_migration_ptes(src, 12794eecb8b9SMatthew Wilcox (Oracle) rc == MIGRATEPAGE_SUCCESS ? dst : src, false); 1280290408d4SNaoya Horiguchi 1281c0d0381aSMike Kravetz unlock_put_anon: 12827db7671fSHugh Dickins unlock_page(new_hpage); 12837db7671fSHugh Dickins 12847db7671fSHugh Dickins put_anon: 1285fd4a4663SHugh Dickins if (anon_vma) 12869e60109fSPeter Zijlstra put_anon_vma(anon_vma); 12878e6ac7faSAneesh Kumar K.V 12882def7424SHugh Dickins if (rc == MIGRATEPAGE_SUCCESS) { 1289ab5ac90aSMichal Hocko move_hugetlb_state(hpage, new_hpage, reason); 12902def7424SHugh Dickins put_new_page = NULL; 12912def7424SHugh Dickins } 12928e6ac7faSAneesh Kumar K.V 1293cb6acd01SMike Kravetz out_unlock: 1294290408d4SNaoya Horiguchi unlock_page(hpage); 129509761333SHillf Danton out: 1296dd4ae78aSYang Shi if (rc == MIGRATEPAGE_SUCCESS) 1297b8ec1ceeSNaoya Horiguchi putback_active_hugepage(hpage); 1298a04840c6SMiaohe Lin else if (rc != -EAGAIN) 1299dd4ae78aSYang Shi list_move_tail(&hpage->lru, ret); 130068711a74SDavid Rientjes 130168711a74SDavid Rientjes /* 130268711a74SDavid Rientjes * If migration was not successful and there's a freeing callback, use 130368711a74SDavid Rientjes * it. Otherwise, put_page() will drop the reference grabbed during 130468711a74SDavid Rientjes * isolation. 130568711a74SDavid Rientjes */ 13062def7424SHugh Dickins if (put_new_page) 130768711a74SDavid Rientjes put_new_page(new_hpage, private); 130868711a74SDavid Rientjes else 13093aaa76e1SNaoya Horiguchi putback_active_hugepage(new_hpage); 131068711a74SDavid Rientjes 1311290408d4SNaoya Horiguchi return rc; 1312290408d4SNaoya Horiguchi } 1313290408d4SNaoya Horiguchi 1314d532e2e5SYang Shi static inline int try_split_thp(struct page *page, struct page **page2, 1315d532e2e5SYang Shi struct list_head *from) 1316d532e2e5SYang Shi { 1317d532e2e5SYang Shi int rc = 0; 1318d532e2e5SYang Shi 1319d532e2e5SYang Shi lock_page(page); 1320d532e2e5SYang Shi rc = split_huge_page_to_list(page, from); 1321d532e2e5SYang Shi unlock_page(page); 1322d532e2e5SYang Shi if (!rc) 1323d532e2e5SYang Shi list_safe_reset_next(page, *page2, lru); 1324d532e2e5SYang Shi 1325d532e2e5SYang Shi return rc; 1326d532e2e5SYang Shi } 1327d532e2e5SYang Shi 1328290408d4SNaoya Horiguchi /* 1329c73e5c9cSSrivatsa S. Bhat * migrate_pages - migrate the pages specified in a list, to the free pages 1330c73e5c9cSSrivatsa S. Bhat * supplied as the target for the page migration 1331e24f0b8fSChristoph Lameter * 1332c73e5c9cSSrivatsa S. Bhat * @from: The list of pages to be migrated. 1333c73e5c9cSSrivatsa S. Bhat * @get_new_page: The function used to allocate free pages to be used 1334c73e5c9cSSrivatsa S. 
Bhat * as the target of the page migration. 133568711a74SDavid Rientjes * @put_new_page: The function used to free target pages if migration 133668711a74SDavid Rientjes * fails, or NULL if no special handling is necessary. 1337c73e5c9cSSrivatsa S. Bhat * @private: Private data to be passed on to get_new_page() 1338c73e5c9cSSrivatsa S. Bhat * @mode: The migration mode that specifies the constraints for 1339c73e5c9cSSrivatsa S. Bhat * page migration, if any. 1340c73e5c9cSSrivatsa S. Bhat * @reason: The reason for page migration. 1341b5bade97SBaolin Wang * @ret_succeeded: Set to the number of normal pages migrated successfully if 13425ac95884SYang Shi * the caller passes a non-NULL pointer. 1343e24f0b8fSChristoph Lameter * 1344c73e5c9cSSrivatsa S. Bhat * The function returns after 10 attempts or if no pages are movable any more 1345c73e5c9cSSrivatsa S. Bhat * because the list has become empty or no retryable pages exist any more. 1346dd4ae78aSYang Shi * It is caller's responsibility to call putback_movable_pages() to return pages 1347dd4ae78aSYang Shi * to the LRU or free list only if ret != 0. 1348e24f0b8fSChristoph Lameter * 13495d39a7ebSBaolin Wang * Returns the number of {normal page, THP, hugetlb} that were not migrated, or 13505d39a7ebSBaolin Wang * an error code. The number of THP splits will be considered as the number of 13515d39a7ebSBaolin Wang * non-migrated THP, no matter how many subpages of the THP are migrated successfully. 1352e24f0b8fSChristoph Lameter */ 13539c620e2bSHugh Dickins int migrate_pages(struct list_head *from, new_page_t get_new_page, 135468711a74SDavid Rientjes free_page_t put_new_page, unsigned long private, 13555ac95884SYang Shi enum migrate_mode mode, int reason, unsigned int *ret_succeeded) 1356e24f0b8fSChristoph Lameter { 1357e24f0b8fSChristoph Lameter int retry = 1; 13581a5bae25SAnshuman Khandual int thp_retry = 1; 1359e24f0b8fSChristoph Lameter int nr_failed = 0; 1360b5bade97SBaolin Wang int nr_failed_pages = 0; 13615647bc29SMel Gorman int nr_succeeded = 0; 13621a5bae25SAnshuman Khandual int nr_thp_succeeded = 0; 13631a5bae25SAnshuman Khandual int nr_thp_failed = 0; 13641a5bae25SAnshuman Khandual int nr_thp_split = 0; 1365e24f0b8fSChristoph Lameter int pass = 0; 13661a5bae25SAnshuman Khandual bool is_thp = false; 1367e24f0b8fSChristoph Lameter struct page *page; 1368e24f0b8fSChristoph Lameter struct page *page2; 13691a5bae25SAnshuman Khandual int rc, nr_subpages; 1370dd4ae78aSYang Shi LIST_HEAD(ret_pages); 1371b5bade97SBaolin Wang LIST_HEAD(thp_split_pages); 1372b0b515bfSYang Shi bool nosplit = (reason == MR_NUMA_MISPLACED); 1373b5bade97SBaolin Wang bool no_subpage_counting = false; 13742d1db3b1SChristoph Lameter 13757bc1aec5SLiam Mark trace_mm_migrate_pages_start(mode, reason); 13767bc1aec5SLiam Mark 1377b5bade97SBaolin Wang thp_subpage_migration: 13781a5bae25SAnshuman Khandual for (pass = 0; pass < 10 && (retry || thp_retry); pass++) { 1379e24f0b8fSChristoph Lameter retry = 0; 13801a5bae25SAnshuman Khandual thp_retry = 0; 1381e24f0b8fSChristoph Lameter 1382e24f0b8fSChristoph Lameter list_for_each_entry_safe(page, page2, from, lru) { 138394723aafSMichal Hocko retry: 13841a5bae25SAnshuman Khandual /* 13851a5bae25SAnshuman Khandual * THP statistics is based on the source huge page. 13861a5bae25SAnshuman Khandual * Capture required information that might get lost 13871a5bae25SAnshuman Khandual * during migration. 
13881a5bae25SAnshuman Khandual */ 13896c5c7b9fSZi Yan is_thp = PageTransHuge(page) && !PageHuge(page); 13905d39a7ebSBaolin Wang nr_subpages = compound_nr(page); 1391e24f0b8fSChristoph Lameter cond_resched(); 1392e24f0b8fSChristoph Lameter 139331caf665SNaoya Horiguchi if (PageHuge(page)) 139431caf665SNaoya Horiguchi rc = unmap_and_move_huge_page(get_new_page, 139568711a74SDavid Rientjes put_new_page, private, page, 1396dd4ae78aSYang Shi pass > 2, mode, reason, 1397dd4ae78aSYang Shi &ret_pages); 139831caf665SNaoya Horiguchi else 139968711a74SDavid Rientjes rc = unmap_and_move(get_new_page, put_new_page, 1400add05cecSNaoya Horiguchi private, page, pass > 2, mode, 1401dd4ae78aSYang Shi reason, &ret_pages); 1402dd4ae78aSYang Shi /* 1403dd4ae78aSYang Shi * The rules are: 1404dd4ae78aSYang Shi * Success: non hugetlb page will be freed, hugetlb 1405dd4ae78aSYang Shi * page will be put back 1406dd4ae78aSYang Shi * -EAGAIN: stay on the from list 1407dd4ae78aSYang Shi * -ENOMEM: stay on the from list 1408dd4ae78aSYang Shi * Other errno: put on ret_pages list then splice to 1409dd4ae78aSYang Shi * from list 1410dd4ae78aSYang Shi */ 1411e24f0b8fSChristoph Lameter switch(rc) { 141294723aafSMichal Hocko /* 141394723aafSMichal Hocko * THP migration might be unsupported or the 141494723aafSMichal Hocko * allocation could've failed so we should 141594723aafSMichal Hocko * retry on the same page with the THP split 141694723aafSMichal Hocko * to base pages. 141794723aafSMichal Hocko * 141894723aafSMichal Hocko * Head page is retried immediately and tail 141994723aafSMichal Hocko * pages are added to the tail of the list so 142094723aafSMichal Hocko * we encounter them after the rest of the list 142194723aafSMichal Hocko * is processed. 142294723aafSMichal Hocko */ 1423d532e2e5SYang Shi case -ENOSYS: 1424d532e2e5SYang Shi /* THP migration is unsupported */ 14256c5c7b9fSZi Yan if (is_thp) { 1426b5bade97SBaolin Wang nr_thp_failed++; 1427b5bade97SBaolin Wang if (!try_split_thp(page, &page2, &thp_split_pages)) { 1428d532e2e5SYang Shi nr_thp_split++; 1429d532e2e5SYang Shi goto retry; 1430d532e2e5SYang Shi } 1431f430893bSMiaohe Lin /* Hugetlb migration is unsupported */ 1432f430893bSMiaohe Lin } else if (!no_subpage_counting) { 1433f430893bSMiaohe Lin nr_failed++; 1434d532e2e5SYang Shi } 1435d532e2e5SYang Shi 14365d39a7ebSBaolin Wang nr_failed_pages += nr_subpages; 1437d532e2e5SYang Shi break; 1438d532e2e5SYang Shi case -ENOMEM: 1439d532e2e5SYang Shi /* 1440d532e2e5SYang Shi * When memory is low, don't bother to try to migrate 1441d532e2e5SYang Shi * other pages, just exit. 1442b0b515bfSYang Shi * THP NUMA faulting doesn't split THP to retry. 1443d532e2e5SYang Shi */ 1444b0b515bfSYang Shi if (is_thp && !nosplit) { 1445b5bade97SBaolin Wang nr_thp_failed++; 1446b5bade97SBaolin Wang if (!try_split_thp(page, &page2, &thp_split_pages)) { 14471a5bae25SAnshuman Khandual nr_thp_split++; 144894723aafSMichal Hocko goto retry; 144994723aafSMichal Hocko } 1450f430893bSMiaohe Lin } else if (!no_subpage_counting) { 1451f430893bSMiaohe Lin nr_failed++; 14521a5bae25SAnshuman Khandual } 1453b5bade97SBaolin Wang 14545d39a7ebSBaolin Wang nr_failed_pages += nr_subpages; 145569a041ffSMiaohe Lin /* 145669a041ffSMiaohe Lin * There might be some subpages of fail-to-migrate THPs 145769a041ffSMiaohe Lin * left in thp_split_pages list. Move them back to migration 145869a041ffSMiaohe Lin * list so that they could be put back to the right list by 145969a041ffSMiaohe Lin * the caller otherwise the page refcnt will be leaked. 
146069a041ffSMiaohe Lin */
146169a041ffSMiaohe Lin list_splice_init(&thp_split_pages, from);
146269a041ffSMiaohe Lin nr_thp_failed += thp_retry;
146395a402c3SChristoph Lameter goto out;
1464e24f0b8fSChristoph Lameter case -EAGAIN:
1465f430893bSMiaohe Lin if (is_thp)
14661a5bae25SAnshuman Khandual thp_retry++;
1467f430893bSMiaohe Lin else
1468b20a3503SChristoph Lameter retry++;
1469e24f0b8fSChristoph Lameter break;
147078bd5209SRafael Aquini case MIGRATEPAGE_SUCCESS:
14715d39a7ebSBaolin Wang nr_succeeded += nr_subpages;
1472f430893bSMiaohe Lin if (is_thp)
14731a5bae25SAnshuman Khandual nr_thp_succeeded++;
14741a5bae25SAnshuman Khandual break;
1475e24f0b8fSChristoph Lameter default:
1476354a3363SNaoya Horiguchi /*
1477d532e2e5SYang Shi * Permanent failure (-EBUSY, etc.):
1478354a3363SNaoya Horiguchi * unlike the -EAGAIN case, the failed page is
1479354a3363SNaoya Horiguchi * removed from the migration page list and not
1480354a3363SNaoya Horiguchi * retried in the next outer loop.
1481354a3363SNaoya Horiguchi */
1482f430893bSMiaohe Lin if (is_thp)
14831a5bae25SAnshuman Khandual nr_thp_failed++;
1484f430893bSMiaohe Lin else if (!no_subpage_counting)
1485b20a3503SChristoph Lameter nr_failed++;
1486f430893bSMiaohe Lin
14875d39a7ebSBaolin Wang nr_failed_pages += nr_subpages;
1488e24f0b8fSChristoph Lameter break;
1489b20a3503SChristoph Lameter }
1490b20a3503SChristoph Lameter }
1491e24f0b8fSChristoph Lameter }
1492b5bade97SBaolin Wang nr_failed += retry;
14931a5bae25SAnshuman Khandual nr_thp_failed += thp_retry;
1494b5bade97SBaolin Wang /*
1495b5bade97SBaolin Wang * Try to migrate the subpages of fail-to-migrate THPs, with no nr_failed
1496b5bade97SBaolin Wang * counting in this round, since all subpages of a THP are counted
1497b5bade97SBaolin Wang * as one failure in the first round.
1498b5bade97SBaolin Wang */
1499b5bade97SBaolin Wang if (!list_empty(&thp_split_pages)) {
1500b5bade97SBaolin Wang /*
1501b5bade97SBaolin Wang * Move non-migrated pages (after 10 retries) to ret_pages
1502b5bade97SBaolin Wang * to avoid migrating them again.
1503b5bade97SBaolin Wang */
1504b5bade97SBaolin Wang list_splice_init(from, &ret_pages);
1505b5bade97SBaolin Wang list_splice_init(&thp_split_pages, from);
1506b5bade97SBaolin Wang no_subpage_counting = true;
1507b5bade97SBaolin Wang retry = 1;
1508b5bade97SBaolin Wang goto thp_subpage_migration;
1509b5bade97SBaolin Wang }
1510b5bade97SBaolin Wang
1511b5bade97SBaolin Wang rc = nr_failed + nr_thp_failed;
151295a402c3SChristoph Lameter out:
1513dd4ae78aSYang Shi /*
1514dd4ae78aSYang Shi * Put the pages that failed permanently back on the migration list;
1515dd4ae78aSYang Shi * they will be put back on the right list by the caller.
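 *
 * A typical caller therefore follows the pattern below (an
 * illustrative sketch; cf. do_move_pages_to_node() later in this
 * file):
 *
 *	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
 *	if (err)
 *		putback_movable_pages(&pagelist);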
1516dd4ae78aSYang Shi */ 1517dd4ae78aSYang Shi list_splice(&ret_pages, from); 1518dd4ae78aSYang Shi 15195647bc29SMel Gorman count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded); 1520b5bade97SBaolin Wang count_vm_events(PGMIGRATE_FAIL, nr_failed_pages); 15211a5bae25SAnshuman Khandual count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded); 15221a5bae25SAnshuman Khandual count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed); 15231a5bae25SAnshuman Khandual count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split); 1524b5bade97SBaolin Wang trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded, 15251a5bae25SAnshuman Khandual nr_thp_failed, nr_thp_split, mode, reason); 15267b2a2d4aSMel Gorman 15275ac95884SYang Shi if (ret_succeeded) 15285ac95884SYang Shi *ret_succeeded = nr_succeeded; 15295ac95884SYang Shi 153095a402c3SChristoph Lameter return rc; 1531b20a3503SChristoph Lameter } 1532b20a3503SChristoph Lameter 153319fc7bedSJoonsoo Kim struct page *alloc_migration_target(struct page *page, unsigned long private) 1534b4b38223SJoonsoo Kim { 1535ffe06786SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 153619fc7bedSJoonsoo Kim struct migration_target_control *mtc; 153719fc7bedSJoonsoo Kim gfp_t gfp_mask; 1538b4b38223SJoonsoo Kim unsigned int order = 0; 1539ffe06786SMatthew Wilcox (Oracle) struct folio *new_folio = NULL; 154019fc7bedSJoonsoo Kim int nid; 154119fc7bedSJoonsoo Kim int zidx; 154219fc7bedSJoonsoo Kim 154319fc7bedSJoonsoo Kim mtc = (struct migration_target_control *)private; 154419fc7bedSJoonsoo Kim gfp_mask = mtc->gfp_mask; 154519fc7bedSJoonsoo Kim nid = mtc->nid; 154619fc7bedSJoonsoo Kim if (nid == NUMA_NO_NODE) 1547ffe06786SMatthew Wilcox (Oracle) nid = folio_nid(folio); 1548b4b38223SJoonsoo Kim 1549ffe06786SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio)) { 1550ffe06786SMatthew Wilcox (Oracle) struct hstate *h = page_hstate(&folio->page); 1551d92bbc27SJoonsoo Kim 155219fc7bedSJoonsoo Kim gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); 155319fc7bedSJoonsoo Kim return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask); 1554d92bbc27SJoonsoo Kim } 1555b4b38223SJoonsoo Kim 1556ffe06786SMatthew Wilcox (Oracle) if (folio_test_large(folio)) { 15579933a0c8SJoonsoo Kim /* 15589933a0c8SJoonsoo Kim * clear __GFP_RECLAIM to make the migration callback 15599933a0c8SJoonsoo Kim * consistent with regular THP allocations. 
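 * (Net effect of the two statements below: both __GFP_DIRECT_RECLAIM
 * and __GFP_KSWAPD_RECLAIM are cleared first, then GFP_TRANSHUGE adds
 * __GFP_DIRECT_RECLAIM back, so the THP target allocation may reclaim
 * directly but will not wake kswapd.)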
15609933a0c8SJoonsoo Kim */ 15619933a0c8SJoonsoo Kim gfp_mask &= ~__GFP_RECLAIM; 1562b4b38223SJoonsoo Kim gfp_mask |= GFP_TRANSHUGE; 1563ffe06786SMatthew Wilcox (Oracle) order = folio_order(folio); 1564b4b38223SJoonsoo Kim } 1565ffe06786SMatthew Wilcox (Oracle) zidx = zone_idx(folio_zone(folio)); 156619fc7bedSJoonsoo Kim if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE) 1567b4b38223SJoonsoo Kim gfp_mask |= __GFP_HIGHMEM; 1568b4b38223SJoonsoo Kim 1569ffe06786SMatthew Wilcox (Oracle) new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask); 1570b4b38223SJoonsoo Kim 1571ffe06786SMatthew Wilcox (Oracle) return &new_folio->page; 1572b4b38223SJoonsoo Kim } 1573b4b38223SJoonsoo Kim 1574742755a1SChristoph Lameter #ifdef CONFIG_NUMA 1575742755a1SChristoph Lameter 1576a49bd4d7SMichal Hocko static int store_status(int __user *status, int start, int value, int nr) 1577742755a1SChristoph Lameter { 1578a49bd4d7SMichal Hocko while (nr-- > 0) { 1579a49bd4d7SMichal Hocko if (put_user(value, status + start)) 1580a49bd4d7SMichal Hocko return -EFAULT; 1581a49bd4d7SMichal Hocko start++; 1582a49bd4d7SMichal Hocko } 1583742755a1SChristoph Lameter 1584a49bd4d7SMichal Hocko return 0; 1585a49bd4d7SMichal Hocko } 1586742755a1SChristoph Lameter 1587a49bd4d7SMichal Hocko static int do_move_pages_to_node(struct mm_struct *mm, 1588a49bd4d7SMichal Hocko struct list_head *pagelist, int node) 1589a49bd4d7SMichal Hocko { 1590a49bd4d7SMichal Hocko int err; 1591a0976311SJoonsoo Kim struct migration_target_control mtc = { 1592a0976311SJoonsoo Kim .nid = node, 1593a0976311SJoonsoo Kim .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 1594a0976311SJoonsoo Kim }; 1595742755a1SChristoph Lameter 1596a0976311SJoonsoo Kim err = migrate_pages(pagelist, alloc_migration_target, NULL, 15975ac95884SYang Shi (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL); 1598a49bd4d7SMichal Hocko if (err) 1599a49bd4d7SMichal Hocko putback_movable_pages(pagelist); 1600a49bd4d7SMichal Hocko return err; 1601742755a1SChristoph Lameter } 1602742755a1SChristoph Lameter 1603742755a1SChristoph Lameter /* 1604a49bd4d7SMichal Hocko * Resolves the given address to a struct page, isolates it from the LRU and 1605a49bd4d7SMichal Hocko * puts it to the given pagelist. 1606e0153fc2SYang Shi * Returns: 1607e0153fc2SYang Shi * errno - if the page cannot be found/isolated 1608e0153fc2SYang Shi * 0 - when it doesn't have to be migrated because it is already on the 1609e0153fc2SYang Shi * target node 1610e0153fc2SYang Shi * 1 - when it has been queued 1611742755a1SChristoph Lameter */ 1612a49bd4d7SMichal Hocko static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, 1613a49bd4d7SMichal Hocko int node, struct list_head *pagelist, bool migrate_all) 1614742755a1SChristoph Lameter { 1615742755a1SChristoph Lameter struct vm_area_struct *vma; 1616742755a1SChristoph Lameter struct page *page; 1617a49bd4d7SMichal Hocko int err; 1618742755a1SChristoph Lameter 1619d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 1620742755a1SChristoph Lameter err = -EFAULT; 1621cb1c37b1SMiaohe Lin vma = vma_lookup(mm, addr); 1622cb1c37b1SMiaohe Lin if (!vma || !vma_migratable(vma)) 1623a49bd4d7SMichal Hocko goto out; 1624742755a1SChristoph Lameter 1625d899844eSKirill A. 
Shutemov /* FOLL_DUMP to ignore special (like zero) pages */
162687d2762eSMiaohe Lin page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
162789f5b7daSLinus Torvalds
162889f5b7daSLinus Torvalds err = PTR_ERR(page);
162989f5b7daSLinus Torvalds if (IS_ERR(page))
1630a49bd4d7SMichal Hocko goto out;
163189f5b7daSLinus Torvalds
1632742755a1SChristoph Lameter err = -ENOENT;
1633742755a1SChristoph Lameter if (!page)
1634a49bd4d7SMichal Hocko goto out;
1635742755a1SChristoph Lameter
1636a49bd4d7SMichal Hocko err = 0;
1637a49bd4d7SMichal Hocko if (page_to_nid(page) == node)
1638a49bd4d7SMichal Hocko goto out_putpage;
1639742755a1SChristoph Lameter
1640742755a1SChristoph Lameter err = -EACCES;
1641a49bd4d7SMichal Hocko if (page_mapcount(page) > 1 && !migrate_all)
1642a49bd4d7SMichal Hocko goto out_putpage;
1643742755a1SChristoph Lameter
1644e632a938SNaoya Horiguchi if (PageHuge(page)) {
1645e8db67ebSNaoya Horiguchi if (PageHead(page)) {
16467ce82f4cSMiaohe Lin err = isolate_hugetlb(page, pagelist);
16477ce82f4cSMiaohe Lin if (!err)
1648e0153fc2SYang Shi err = 1;
1649e8db67ebSNaoya Horiguchi }
1650a49bd4d7SMichal Hocko } else {
1651a49bd4d7SMichal Hocko struct page *head;
1652e632a938SNaoya Horiguchi
1653e8db67ebSNaoya Horiguchi head = compound_head(page);
1654e8db67ebSNaoya Horiguchi err = isolate_lru_page(head);
1655a49bd4d7SMichal Hocko if (err)
1656a49bd4d7SMichal Hocko goto out_putpage;
1657a49bd4d7SMichal Hocko
1658e0153fc2SYang Shi err = 1;
1659a49bd4d7SMichal Hocko list_add_tail(&head->lru, pagelist);
1660e8db67ebSNaoya Horiguchi mod_node_page_state(page_pgdat(head),
16619de4f22aSHuang Ying NR_ISOLATED_ANON + page_is_file_lru(head),
16626c357848SMatthew Wilcox (Oracle) thp_nr_pages(head));
16636d9c285aSKOSAKI Motohiro }
1664a49bd4d7SMichal Hocko out_putpage:
1665742755a1SChristoph Lameter /*
1666742755a1SChristoph Lameter * Either remove the duplicate refcount from
1667742755a1SChristoph Lameter * isolate_lru_page() or drop the page ref if it was
1668742755a1SChristoph Lameter * not isolated.
1669742755a1SChristoph Lameter */
1670742755a1SChristoph Lameter put_page(page);
1671a49bd4d7SMichal Hocko out:
1672d8ed45c5SMichel Lespinasse mmap_read_unlock(mm);
1673742755a1SChristoph Lameter return err;
1674742755a1SChristoph Lameter }
1675742755a1SChristoph Lameter
16767ca8783aSWei Yang static int move_pages_and_store_status(struct mm_struct *mm, int node,
16777ca8783aSWei Yang struct list_head *pagelist, int __user *status,
16787ca8783aSWei Yang int start, int i, unsigned long nr_pages)
16797ca8783aSWei Yang {
16807ca8783aSWei Yang int err;
16817ca8783aSWei Yang
16825d7ae891SWei Yang if (list_empty(pagelist))
16835d7ae891SWei Yang return 0;
16845d7ae891SWei Yang
16857ca8783aSWei Yang err = do_move_pages_to_node(mm, pagelist, node);
16867ca8783aSWei Yang if (err) {
16877ca8783aSWei Yang /*
16887ca8783aSWei Yang * A positive err means the number of pages that failed
16897ca8783aSWei Yang * to migrate. Since we are going to
16907ca8783aSWei Yang * abort and return the number of non-migrated
1691ab9dd4f8SLong Li * pages, we need to include the rest of the
16927ca8783aSWei Yang * nr_pages that have not been attempted as
16937ca8783aSWei Yang * well.
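 *
 * For example (illustrative numbers): with nr_pages == 8, a
 * failure at i == 3 with err == 2 returns 2 + (8 - 3 - 1) == 6
 * non-migrated pages.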
16947ca8783aSWei Yang */ 16957ca8783aSWei Yang if (err > 0) 16967ca8783aSWei Yang err += nr_pages - i - 1; 16977ca8783aSWei Yang return err; 16987ca8783aSWei Yang } 16997ca8783aSWei Yang return store_status(status, start, node, i - start); 17007ca8783aSWei Yang } 17017ca8783aSWei Yang 1702742755a1SChristoph Lameter /* 17035e9a0f02SBrice Goglin * Migrate an array of page address onto an array of nodes and fill 17045e9a0f02SBrice Goglin * the corresponding array of status. 17055e9a0f02SBrice Goglin */ 17063268c63eSChristoph Lameter static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, 17075e9a0f02SBrice Goglin unsigned long nr_pages, 17085e9a0f02SBrice Goglin const void __user * __user *pages, 17095e9a0f02SBrice Goglin const int __user *nodes, 17105e9a0f02SBrice Goglin int __user *status, int flags) 17115e9a0f02SBrice Goglin { 1712a49bd4d7SMichal Hocko int current_node = NUMA_NO_NODE; 1713a49bd4d7SMichal Hocko LIST_HEAD(pagelist); 1714a49bd4d7SMichal Hocko int start, i; 1715a49bd4d7SMichal Hocko int err = 0, err1; 171635282a2dSBrice Goglin 1717361a2a22SMinchan Kim lru_cache_disable(); 171835282a2dSBrice Goglin 1719a49bd4d7SMichal Hocko for (i = start = 0; i < nr_pages; i++) { 17205e9a0f02SBrice Goglin const void __user *p; 1721a49bd4d7SMichal Hocko unsigned long addr; 17225e9a0f02SBrice Goglin int node; 17235e9a0f02SBrice Goglin 17243140a227SBrice Goglin err = -EFAULT; 1725a49bd4d7SMichal Hocko if (get_user(p, pages + i)) 1726a49bd4d7SMichal Hocko goto out_flush; 1727a49bd4d7SMichal Hocko if (get_user(node, nodes + i)) 1728a49bd4d7SMichal Hocko goto out_flush; 1729057d3389SAndrey Konovalov addr = (unsigned long)untagged_addr(p); 17305e9a0f02SBrice Goglin 17315e9a0f02SBrice Goglin err = -ENODEV; 17326f5a55f1SLinus Torvalds if (node < 0 || node >= MAX_NUMNODES) 1733a49bd4d7SMichal Hocko goto out_flush; 1734389162c2SLai Jiangshan if (!node_state(node, N_MEMORY)) 1735a49bd4d7SMichal Hocko goto out_flush; 17365e9a0f02SBrice Goglin 17375e9a0f02SBrice Goglin err = -EACCES; 17385e9a0f02SBrice Goglin if (!node_isset(node, task_nodes)) 1739a49bd4d7SMichal Hocko goto out_flush; 17405e9a0f02SBrice Goglin 1741a49bd4d7SMichal Hocko if (current_node == NUMA_NO_NODE) { 1742a49bd4d7SMichal Hocko current_node = node; 1743a49bd4d7SMichal Hocko start = i; 1744a49bd4d7SMichal Hocko } else if (node != current_node) { 17457ca8783aSWei Yang err = move_pages_and_store_status(mm, current_node, 17467ca8783aSWei Yang &pagelist, status, start, i, nr_pages); 1747a49bd4d7SMichal Hocko if (err) 1748a49bd4d7SMichal Hocko goto out; 1749a49bd4d7SMichal Hocko start = i; 1750a49bd4d7SMichal Hocko current_node = node; 17515e9a0f02SBrice Goglin } 17525e9a0f02SBrice Goglin 1753a49bd4d7SMichal Hocko /* 1754a49bd4d7SMichal Hocko * Errors in the page lookup or isolation are not fatal and we simply 1755a49bd4d7SMichal Hocko * report them via status 1756a49bd4d7SMichal Hocko */ 1757a49bd4d7SMichal Hocko err = add_page_for_migration(mm, addr, current_node, 1758a49bd4d7SMichal Hocko &pagelist, flags & MPOL_MF_MOVE_ALL); 1759e0153fc2SYang Shi 1760d08221a0SWei Yang if (err > 0) { 1761e0153fc2SYang Shi /* The page is successfully queued for migration */ 1762e0153fc2SYang Shi continue; 1763e0153fc2SYang Shi } 17643140a227SBrice Goglin 1765d08221a0SWei Yang /* 176665462462SJohn Hubbard * The move_pages() man page does not have an -EEXIST choice, so 176765462462SJohn Hubbard * use -EFAULT instead. 
176865462462SJohn Hubbard */ 176965462462SJohn Hubbard if (err == -EEXIST) 177065462462SJohn Hubbard err = -EFAULT; 177165462462SJohn Hubbard 177265462462SJohn Hubbard /* 1773d08221a0SWei Yang * If the page is already on the target node (!err), store the 1774d08221a0SWei Yang * node, otherwise, store the err. 1775d08221a0SWei Yang */ 1776d08221a0SWei Yang err = store_status(status, i, err ? : current_node, 1); 1777a49bd4d7SMichal Hocko if (err) 1778a49bd4d7SMichal Hocko goto out_flush; 17793140a227SBrice Goglin 17807ca8783aSWei Yang err = move_pages_and_store_status(mm, current_node, &pagelist, 17817ca8783aSWei Yang status, start, i, nr_pages); 1782a49bd4d7SMichal Hocko if (err) 1783a49bd4d7SMichal Hocko goto out; 1784a49bd4d7SMichal Hocko current_node = NUMA_NO_NODE; 17853140a227SBrice Goglin } 1786a49bd4d7SMichal Hocko out_flush: 1787a49bd4d7SMichal Hocko /* Make sure we do not overwrite the existing error */ 17887ca8783aSWei Yang err1 = move_pages_and_store_status(mm, current_node, &pagelist, 17897ca8783aSWei Yang status, start, i, nr_pages); 1790dfe9aa23SWei Yang if (err >= 0) 1791a49bd4d7SMichal Hocko err = err1; 17925e9a0f02SBrice Goglin out: 1793361a2a22SMinchan Kim lru_cache_enable(); 17945e9a0f02SBrice Goglin return err; 17955e9a0f02SBrice Goglin } 17965e9a0f02SBrice Goglin 17975e9a0f02SBrice Goglin /* 17982f007e74SBrice Goglin * Determine the nodes of an array of pages and store it in an array of status. 1799742755a1SChristoph Lameter */ 180080bba129SBrice Goglin static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, 180180bba129SBrice Goglin const void __user **pages, int *status) 1802742755a1SChristoph Lameter { 18032f007e74SBrice Goglin unsigned long i; 1804742755a1SChristoph Lameter 1805d8ed45c5SMichel Lespinasse mmap_read_lock(mm); 18062f007e74SBrice Goglin 18072f007e74SBrice Goglin for (i = 0; i < nr_pages; i++) { 180880bba129SBrice Goglin unsigned long addr = (unsigned long)(*pages); 18092f007e74SBrice Goglin struct vm_area_struct *vma; 18102f007e74SBrice Goglin struct page *page; 1811c095adbcSKOSAKI Motohiro int err = -EFAULT; 18122f007e74SBrice Goglin 1813059b8b48SLiam Howlett vma = vma_lookup(mm, addr); 1814059b8b48SLiam Howlett if (!vma) 1815742755a1SChristoph Lameter goto set_status; 1816742755a1SChristoph Lameter 1817d899844eSKirill A. 
Shutemov /* FOLL_DUMP to ignore special (like zero) pages */ 18184cd61484SMiaohe Lin page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); 181989f5b7daSLinus Torvalds 182089f5b7daSLinus Torvalds err = PTR_ERR(page); 182189f5b7daSLinus Torvalds if (IS_ERR(page)) 182289f5b7daSLinus Torvalds goto set_status; 182389f5b7daSLinus Torvalds 18244cd61484SMiaohe Lin if (page) { 18254cd61484SMiaohe Lin err = page_to_nid(page); 18264cd61484SMiaohe Lin put_page(page); 18274cd61484SMiaohe Lin } else { 18284cd61484SMiaohe Lin err = -ENOENT; 18294cd61484SMiaohe Lin } 1830742755a1SChristoph Lameter set_status: 183180bba129SBrice Goglin *status = err; 183280bba129SBrice Goglin 183380bba129SBrice Goglin pages++; 183480bba129SBrice Goglin status++; 183580bba129SBrice Goglin } 183680bba129SBrice Goglin 1837d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 183880bba129SBrice Goglin } 183980bba129SBrice Goglin 18405b1b561bSArnd Bergmann static int get_compat_pages_array(const void __user *chunk_pages[], 18415b1b561bSArnd Bergmann const void __user * __user *pages, 18425b1b561bSArnd Bergmann unsigned long chunk_nr) 18435b1b561bSArnd Bergmann { 18445b1b561bSArnd Bergmann compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages; 18455b1b561bSArnd Bergmann compat_uptr_t p; 18465b1b561bSArnd Bergmann int i; 18475b1b561bSArnd Bergmann 18485b1b561bSArnd Bergmann for (i = 0; i < chunk_nr; i++) { 18495b1b561bSArnd Bergmann if (get_user(p, pages32 + i)) 18505b1b561bSArnd Bergmann return -EFAULT; 18515b1b561bSArnd Bergmann chunk_pages[i] = compat_ptr(p); 18525b1b561bSArnd Bergmann } 18535b1b561bSArnd Bergmann 18545b1b561bSArnd Bergmann return 0; 18555b1b561bSArnd Bergmann } 18565b1b561bSArnd Bergmann 185780bba129SBrice Goglin /* 185880bba129SBrice Goglin * Determine the nodes of a user array of pages and store it in 185980bba129SBrice Goglin * a user array of status. 186080bba129SBrice Goglin */ 186180bba129SBrice Goglin static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages, 186280bba129SBrice Goglin const void __user * __user *pages, 186380bba129SBrice Goglin int __user *status) 186480bba129SBrice Goglin { 18653eefb826SMiaohe Lin #define DO_PAGES_STAT_CHUNK_NR 16UL 186680bba129SBrice Goglin const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR]; 186780bba129SBrice Goglin int chunk_status[DO_PAGES_STAT_CHUNK_NR]; 186880bba129SBrice Goglin 186987b8d1adSH. Peter Anvin while (nr_pages) { 18703eefb826SMiaohe Lin unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR); 187187b8d1adSH. Peter Anvin 18725b1b561bSArnd Bergmann if (in_compat_syscall()) { 18735b1b561bSArnd Bergmann if (get_compat_pages_array(chunk_pages, pages, 18745b1b561bSArnd Bergmann chunk_nr)) 187587b8d1adSH. Peter Anvin break; 18765b1b561bSArnd Bergmann } else { 18775b1b561bSArnd Bergmann if (copy_from_user(chunk_pages, pages, 18785b1b561bSArnd Bergmann chunk_nr * sizeof(*chunk_pages))) 18795b1b561bSArnd Bergmann break; 18805b1b561bSArnd Bergmann } 188180bba129SBrice Goglin 188280bba129SBrice Goglin do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status); 188380bba129SBrice Goglin 188487b8d1adSH. Peter Anvin if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status))) 188587b8d1adSH. Peter Anvin break; 1886742755a1SChristoph Lameter 188787b8d1adSH. Peter Anvin pages += chunk_nr; 188887b8d1adSH. Peter Anvin status += chunk_nr; 188987b8d1adSH. Peter Anvin nr_pages -= chunk_nr; 189087b8d1adSH. Peter Anvin } 189187b8d1adSH. Peter Anvin return nr_pages ? 
-EFAULT : 0;
1892742755a1SChristoph Lameter }
1893742755a1SChristoph Lameter
18944dc200ceSMiaohe Lin static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
18954dc200ceSMiaohe Lin {
18964dc200ceSMiaohe Lin struct task_struct *task;
18974dc200ceSMiaohe Lin struct mm_struct *mm;
18984dc200ceSMiaohe Lin
18994dc200ceSMiaohe Lin /*
19004dc200ceSMiaohe Lin * There is no need to check whether the current process has the right
19014dc200ceSMiaohe Lin * to modify the specified process when the two are the same.
19024dc200ceSMiaohe Lin */
19034dc200ceSMiaohe Lin if (!pid) {
19044dc200ceSMiaohe Lin mmget(current->mm);
19054dc200ceSMiaohe Lin *mem_nodes = cpuset_mems_allowed(current);
19064dc200ceSMiaohe Lin return current->mm;
19074dc200ceSMiaohe Lin }
19084dc200ceSMiaohe Lin
19094dc200ceSMiaohe Lin /* Find the mm_struct */
19104dc200ceSMiaohe Lin rcu_read_lock();
19114dc200ceSMiaohe Lin task = find_task_by_vpid(pid);
19124dc200ceSMiaohe Lin if (!task) {
19134dc200ceSMiaohe Lin rcu_read_unlock();
19144dc200ceSMiaohe Lin return ERR_PTR(-ESRCH);
19154dc200ceSMiaohe Lin }
19164dc200ceSMiaohe Lin get_task_struct(task);
19174dc200ceSMiaohe Lin
19184dc200ceSMiaohe Lin /*
19194dc200ceSMiaohe Lin * Check if this process has the right to modify the specified
19204dc200ceSMiaohe Lin * process. Use the regular "ptrace_may_access()" checks.
19214dc200ceSMiaohe Lin */
19224dc200ceSMiaohe Lin if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
19234dc200ceSMiaohe Lin rcu_read_unlock();
19244dc200ceSMiaohe Lin mm = ERR_PTR(-EPERM);
19254dc200ceSMiaohe Lin goto out;
19264dc200ceSMiaohe Lin }
19274dc200ceSMiaohe Lin rcu_read_unlock();
19284dc200ceSMiaohe Lin
19294dc200ceSMiaohe Lin mm = ERR_PTR(security_task_movememory(task));
19304dc200ceSMiaohe Lin if (IS_ERR(mm))
19314dc200ceSMiaohe Lin goto out;
19324dc200ceSMiaohe Lin *mem_nodes = cpuset_mems_allowed(task);
19334dc200ceSMiaohe Lin mm = get_task_mm(task);
19344dc200ceSMiaohe Lin out:
19354dc200ceSMiaohe Lin put_task_struct(task);
19364dc200ceSMiaohe Lin if (!mm)
19374dc200ceSMiaohe Lin mm = ERR_PTR(-EINVAL);
19384dc200ceSMiaohe Lin return mm;
19394dc200ceSMiaohe Lin }
19404dc200ceSMiaohe Lin
1941742755a1SChristoph Lameter /*
1942742755a1SChristoph Lameter * Move a list of pages in the address space of the currently executing
1943742755a1SChristoph Lameter * process.
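 *
 * Userspace reaches this via the move_pages(2) syscall. An
 * illustrative call (error handling omitted) that asks for one page
 * of @pid to be moved to node 1:
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *	long rc = move_pages(pid, 1, pages, nodes, status, MPOL_MF_MOVE);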
1944742755a1SChristoph Lameter */ 19457addf443SDominik Brodowski static int kernel_move_pages(pid_t pid, unsigned long nr_pages, 19467addf443SDominik Brodowski const void __user * __user *pages, 19477addf443SDominik Brodowski const int __user *nodes, 19487addf443SDominik Brodowski int __user *status, int flags) 1949742755a1SChristoph Lameter { 1950742755a1SChristoph Lameter struct mm_struct *mm; 19515e9a0f02SBrice Goglin int err; 19523268c63eSChristoph Lameter nodemask_t task_nodes; 1953742755a1SChristoph Lameter 1954742755a1SChristoph Lameter /* Check flags */ 1955742755a1SChristoph Lameter if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL)) 1956742755a1SChristoph Lameter return -EINVAL; 1957742755a1SChristoph Lameter 1958742755a1SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 1959742755a1SChristoph Lameter return -EPERM; 1960742755a1SChristoph Lameter 19614dc200ceSMiaohe Lin mm = find_mm_struct(pid, &task_nodes); 19624dc200ceSMiaohe Lin if (IS_ERR(mm)) 19634dc200ceSMiaohe Lin return PTR_ERR(mm); 19646e8b09eaSSasha Levin 19653268c63eSChristoph Lameter if (nodes) 19663268c63eSChristoph Lameter err = do_pages_move(mm, task_nodes, nr_pages, pages, 19673268c63eSChristoph Lameter nodes, status, flags); 19683268c63eSChristoph Lameter else 19695e9a0f02SBrice Goglin err = do_pages_stat(mm, nr_pages, pages, status); 19703268c63eSChristoph Lameter 19713268c63eSChristoph Lameter mmput(mm); 19723268c63eSChristoph Lameter return err; 1973742755a1SChristoph Lameter } 1974742755a1SChristoph Lameter 19757addf443SDominik Brodowski SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, 19767addf443SDominik Brodowski const void __user * __user *, pages, 19777addf443SDominik Brodowski const int __user *, nodes, 19787addf443SDominik Brodowski int __user *, status, int, flags) 19797addf443SDominik Brodowski { 19807addf443SDominik Brodowski return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags); 19817addf443SDominik Brodowski } 19827addf443SDominik Brodowski 19837039e1dbSPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING 19847039e1dbSPeter Zijlstra /* 19857039e1dbSPeter Zijlstra * Returns true if this is a safe migration target node for misplaced NUMA 1986bc53008eSWei Yang * pages. Currently it only checks the watermarks which is crude. 19877039e1dbSPeter Zijlstra */ 19887039e1dbSPeter Zijlstra static bool migrate_balanced_pgdat(struct pglist_data *pgdat, 19893abef4e6SMel Gorman unsigned long nr_migrate_pages) 19907039e1dbSPeter Zijlstra { 19917039e1dbSPeter Zijlstra int z; 1992599d0c95SMel Gorman 19937039e1dbSPeter Zijlstra for (z = pgdat->nr_zones - 1; z >= 0; z--) { 19947039e1dbSPeter Zijlstra struct zone *zone = pgdat->node_zones + z; 19957039e1dbSPeter Zijlstra 1996bc53008eSWei Yang if (!managed_zone(zone)) 19977039e1dbSPeter Zijlstra continue; 19987039e1dbSPeter Zijlstra 19997039e1dbSPeter Zijlstra /* Avoid waking kswapd by allocating pages_to_migrate pages. 
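 * That is, a zone only qualifies if it could absorb all of the
 * nr_migrate_pages and still stay above its high watermark,
 * roughly: free pages >= high_wmark_pages(zone) + nr_migrate_pages.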
*/ 20007039e1dbSPeter Zijlstra if (!zone_watermark_ok(zone, 0, 20017039e1dbSPeter Zijlstra high_wmark_pages(zone) + 20027039e1dbSPeter Zijlstra nr_migrate_pages, 2003bfe9d006SHuang Ying ZONE_MOVABLE, 0)) 20047039e1dbSPeter Zijlstra continue; 20057039e1dbSPeter Zijlstra return true; 20067039e1dbSPeter Zijlstra } 20077039e1dbSPeter Zijlstra return false; 20087039e1dbSPeter Zijlstra } 20097039e1dbSPeter Zijlstra 20107039e1dbSPeter Zijlstra static struct page *alloc_misplaced_dst_page(struct page *page, 2011666feb21SMichal Hocko unsigned long data) 20127039e1dbSPeter Zijlstra { 20137039e1dbSPeter Zijlstra int nid = (int) data; 2014c185e494SMatthew Wilcox (Oracle) int order = compound_order(page); 2015c185e494SMatthew Wilcox (Oracle) gfp_t gfp = __GFP_THISNODE; 2016c185e494SMatthew Wilcox (Oracle) struct folio *new; 20177039e1dbSPeter Zijlstra 2018c185e494SMatthew Wilcox (Oracle) if (order > 0) 2019c185e494SMatthew Wilcox (Oracle) gfp |= GFP_TRANSHUGE_LIGHT; 2020c185e494SMatthew Wilcox (Oracle) else { 2021c185e494SMatthew Wilcox (Oracle) gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY | 2022c185e494SMatthew Wilcox (Oracle) __GFP_NOWARN; 2023c185e494SMatthew Wilcox (Oracle) gfp &= ~__GFP_RECLAIM; 20247039e1dbSPeter Zijlstra } 2025c185e494SMatthew Wilcox (Oracle) new = __folio_alloc_node(gfp, order, nid); 20267039e1dbSPeter Zijlstra 2027c185e494SMatthew Wilcox (Oracle) return &new->page; 2028c5b5a3ddSYang Shi } 2029c5b5a3ddSYang Shi 20301c30e017SMel Gorman static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) 2031b32967ffSMel Gorman { 20322b9b624fSBaolin Wang int nr_pages = thp_nr_pages(page); 2033c574bbe9SHuang Ying int order = compound_order(page); 2034b32967ffSMel Gorman 2035c574bbe9SHuang Ying VM_BUG_ON_PAGE(order && !PageTransHuge(page), page); 20363abef4e6SMel Gorman 2037662aeea7SYang Shi /* Do not migrate THP mapped by multiple processes */ 2038662aeea7SYang Shi if (PageTransHuge(page) && total_mapcount(page) > 1) 2039662aeea7SYang Shi return 0; 2040662aeea7SYang Shi 2041b32967ffSMel Gorman /* Avoid migrating to a node that is nearly full */ 2042c574bbe9SHuang Ying if (!migrate_balanced_pgdat(pgdat, nr_pages)) { 2043c574bbe9SHuang Ying int z; 2044c574bbe9SHuang Ying 2045c574bbe9SHuang Ying if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)) 2046340ef390SHugh Dickins return 0; 2047c574bbe9SHuang Ying for (z = pgdat->nr_zones - 1; z >= 0; z--) { 2048bc53008eSWei Yang if (managed_zone(pgdat->node_zones + z)) 2049c574bbe9SHuang Ying break; 2050c574bbe9SHuang Ying } 2051c574bbe9SHuang Ying wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE); 2052c574bbe9SHuang Ying return 0; 2053c574bbe9SHuang Ying } 2054b32967ffSMel Gorman 2055340ef390SHugh Dickins if (isolate_lru_page(page)) 2056340ef390SHugh Dickins return 0; 2057340ef390SHugh Dickins 2058b75454e1SMiaohe Lin mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page), 20592b9b624fSBaolin Wang nr_pages); 2060b32967ffSMel Gorman 2061b32967ffSMel Gorman /* 2062340ef390SHugh Dickins * Isolating the page has taken another reference, so the 2063340ef390SHugh Dickins * caller's reference can be safely dropped without the page 2064340ef390SHugh Dickins * disappearing underneath us during migration. 
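 * (Reference arithmetic, roughly: the caller holds one reference,
 * isolate_lru_page() takes another, and the put_page() below drops
 * the caller's, leaving the migration list owning the remaining
 * reference.)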
2065b32967ffSMel Gorman */ 2066b32967ffSMel Gorman put_page(page); 2067340ef390SHugh Dickins return 1; 2068b32967ffSMel Gorman } 2069b32967ffSMel Gorman 2070a8f60772SMel Gorman /* 20717039e1dbSPeter Zijlstra * Attempt to migrate a misplaced page to the specified destination 20727039e1dbSPeter Zijlstra * node. Caller is expected to have an elevated reference count on 20737039e1dbSPeter Zijlstra * the page that will be dropped by this function before returning. 20747039e1dbSPeter Zijlstra */ 20751bc115d8SMel Gorman int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, 20761bc115d8SMel Gorman int node) 20777039e1dbSPeter Zijlstra { 2078a8f60772SMel Gorman pg_data_t *pgdat = NODE_DATA(node); 2079340ef390SHugh Dickins int isolated; 2080b32967ffSMel Gorman int nr_remaining; 2081e39bb6beSHuang Ying unsigned int nr_succeeded; 20827039e1dbSPeter Zijlstra LIST_HEAD(migratepages); 2083b5916c02SAneesh Kumar K.V int nr_pages = thp_nr_pages(page); 2084c5b5a3ddSYang Shi 2085c5b5a3ddSYang Shi /* 20861bc115d8SMel Gorman * Don't migrate file pages that are mapped in multiple processes 20871bc115d8SMel Gorman * with execute permissions as they are probably shared libraries. 20887039e1dbSPeter Zijlstra */ 20897ee820eeSMiaohe Lin if (page_mapcount(page) != 1 && page_is_file_lru(page) && 20907ee820eeSMiaohe Lin (vma->vm_flags & VM_EXEC)) 20917039e1dbSPeter Zijlstra goto out; 20927039e1dbSPeter Zijlstra 2093a8f60772SMel Gorman /* 209409a913a7SMel Gorman * Also do not migrate dirty pages as not all filesystems can move 209509a913a7SMel Gorman * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles. 209609a913a7SMel Gorman */ 20979de4f22aSHuang Ying if (page_is_file_lru(page) && PageDirty(page)) 209809a913a7SMel Gorman goto out; 209909a913a7SMel Gorman 2100b32967ffSMel Gorman isolated = numamigrate_isolate_page(pgdat, page); 2101b32967ffSMel Gorman if (!isolated) 21027039e1dbSPeter Zijlstra goto out; 21037039e1dbSPeter Zijlstra 21047039e1dbSPeter Zijlstra list_add(&page->lru, &migratepages); 2105c185e494SMatthew Wilcox (Oracle) nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page, 2106c185e494SMatthew Wilcox (Oracle) NULL, node, MIGRATE_ASYNC, 2107c185e494SMatthew Wilcox (Oracle) MR_NUMA_MISPLACED, &nr_succeeded); 21087039e1dbSPeter Zijlstra if (nr_remaining) { 210959c82b70SJoonsoo Kim if (!list_empty(&migratepages)) { 211059c82b70SJoonsoo Kim list_del(&page->lru); 2111c5fc5c3aSYang Shi mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + 2112c5fc5c3aSYang Shi page_is_file_lru(page), -nr_pages); 211359c82b70SJoonsoo Kim putback_lru_page(page); 211459c82b70SJoonsoo Kim } 21157039e1dbSPeter Zijlstra isolated = 0; 2116e39bb6beSHuang Ying } 2117e39bb6beSHuang Ying if (nr_succeeded) { 2118e39bb6beSHuang Ying count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded); 2119e39bb6beSHuang Ying if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node)) 2120e39bb6beSHuang Ying mod_node_page_state(pgdat, PGPROMOTE_SUCCESS, 2121e39bb6beSHuang Ying nr_succeeded); 2122e39bb6beSHuang Ying } 21237039e1dbSPeter Zijlstra BUG_ON(!list_empty(&migratepages)); 21247039e1dbSPeter Zijlstra return isolated; 2125340ef390SHugh Dickins 2126340ef390SHugh Dickins out: 2127340ef390SHugh Dickins put_page(page); 2128340ef390SHugh Dickins return 0; 21297039e1dbSPeter Zijlstra } 2130220018d3SMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 21318763cb45SJérôme Glisse 2132dcee9bf5SHuang Ying /* 2133dcee9bf5SHuang Ying * node_demotion[] example: 2134dcee9bf5SHuang Ying * 2135dcee9bf5SHuang Ying * Consider 
a system with two sockets. Each socket has
2136dcee9bf5SHuang Ying * three classes of memory attached: fast, medium and slow.
2137dcee9bf5SHuang Ying * Each memory class is placed in its own NUMA node. The
2138dcee9bf5SHuang Ying * CPUs are placed in the node with the "fast" memory. The
2139dcee9bf5SHuang Ying * 6 NUMA nodes (0-5) might be split among the sockets like
2140dcee9bf5SHuang Ying * this:
2141dcee9bf5SHuang Ying *
2142dcee9bf5SHuang Ying * Socket A: 0, 1, 2
2143dcee9bf5SHuang Ying * Socket B: 3, 4, 5
2144dcee9bf5SHuang Ying *
2145dcee9bf5SHuang Ying * When Node 0 fills up, its memory should be migrated to
2146dcee9bf5SHuang Ying * Node 1. When Node 1 fills up, it should be migrated to
2147dcee9bf5SHuang Ying * Node 2. The migration path starts on the nodes with the
2148dcee9bf5SHuang Ying * processors (since allocations default to this node) and
2149dcee9bf5SHuang Ying * fast memory, progresses through medium and ends with the
2150dcee9bf5SHuang Ying * slow memory:
2151dcee9bf5SHuang Ying *
2152dcee9bf5SHuang Ying * 0 -> 1 -> 2 -> stop
2153dcee9bf5SHuang Ying * 3 -> 4 -> 5 -> stop
2154dcee9bf5SHuang Ying *
2155dcee9bf5SHuang Ying * This is represented in the node_demotion[] like this:
2156dcee9bf5SHuang Ying *
2157dcee9bf5SHuang Ying * { nr=1, nodes[0]=1 }, // Node 0 migrates to 1
2158dcee9bf5SHuang Ying * { nr=1, nodes[0]=2 }, // Node 1 migrates to 2
2159dcee9bf5SHuang Ying * { nr=0, nodes[0]=-1 }, // Node 2 does not migrate
2160dcee9bf5SHuang Ying * { nr=1, nodes[0]=4 }, // Node 3 migrates to 4
2161dcee9bf5SHuang Ying * { nr=1, nodes[0]=5 }, // Node 4 migrates to 5
2162dcee9bf5SHuang Ying * { nr=0, nodes[0]=-1 }, // Node 5 does not migrate
2163dcee9bf5SHuang Ying *
2164dcee9bf5SHuang Ying * Moreover, some systems may have multiple slow memory nodes.
2165dcee9bf5SHuang Ying * Suppose a system has one socket with 3 memory nodes: node 0
2166dcee9bf5SHuang Ying * is fast memory, nodes 1 and 2 are both slow memory of the
2167dcee9bf5SHuang Ying * same type, and each slow node is at the same distance from
2168dcee9bf5SHuang Ying * the fast memory node. Then the migration path should be:
2169dcee9bf5SHuang Ying *
2170dcee9bf5SHuang Ying * 0 -> 1/2 -> stop
2171dcee9bf5SHuang Ying *
2172dcee9bf5SHuang Ying * This is represented in the node_demotion[] like this:
2173dcee9bf5SHuang Ying * { nr=2, {nodes[0]=1, nodes[1]=2} }, // Node 0 migrates to node 1 and node 2
2174dcee9bf5SHuang Ying * { nr=0, nodes[0]=-1, }, // Node 1 does not migrate
2175dcee9bf5SHuang Ying * { nr=0, nodes[0]=-1, }, // Node 2 does not migrate
2176dcee9bf5SHuang Ying */
2177dcee9bf5SHuang Ying
2178dcee9bf5SHuang Ying /*
2179dcee9bf5SHuang Ying * Writes to this array occur without locking. Cycles are
2180dcee9bf5SHuang Ying * not allowed: Node X demotes to Y which demotes to X...
2181dcee9bf5SHuang Ying *
2182dcee9bf5SHuang Ying * If multiple reads are performed, a single rcu_read_lock()
2183dcee9bf5SHuang Ying * must be held over all reads to ensure that no cycles are
2184dcee9bf5SHuang Ying * observed.
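 *
 * An illustrative consistent-reader sketch, mirroring
 * next_demotion_node() below:
 *
 *	rcu_read_lock();
 *	nr = READ_ONCE(node_demotion[node].nr);
 *	for (i = 0; i < nr; i++)
 *		target = READ_ONCE(node_demotion[node].nodes[i]);
 *	rcu_read_unlock();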
2185dcee9bf5SHuang Ying */ 2186dcee9bf5SHuang Ying #define DEFAULT_DEMOTION_TARGET_NODES 15 2187dcee9bf5SHuang Ying 2188dcee9bf5SHuang Ying #if MAX_NUMNODES < DEFAULT_DEMOTION_TARGET_NODES 2189dcee9bf5SHuang Ying #define DEMOTION_TARGET_NODES (MAX_NUMNODES - 1) 2190dcee9bf5SHuang Ying #else 2191dcee9bf5SHuang Ying #define DEMOTION_TARGET_NODES DEFAULT_DEMOTION_TARGET_NODES 2192dcee9bf5SHuang Ying #endif 2193dcee9bf5SHuang Ying 2194dcee9bf5SHuang Ying struct demotion_nodes { 2195dcee9bf5SHuang Ying unsigned short nr; 2196dcee9bf5SHuang Ying short nodes[DEMOTION_TARGET_NODES]; 2197dcee9bf5SHuang Ying }; 2198dcee9bf5SHuang Ying 2199dcee9bf5SHuang Ying static struct demotion_nodes *node_demotion __read_mostly; 2200dcee9bf5SHuang Ying 2201dcee9bf5SHuang Ying /** 2202dcee9bf5SHuang Ying * next_demotion_node() - Get the next node in the demotion path 2203dcee9bf5SHuang Ying * @node: The starting node to lookup the next node 2204dcee9bf5SHuang Ying * 2205dcee9bf5SHuang Ying * Return: node id for next memory node in the demotion path hierarchy 2206dcee9bf5SHuang Ying * from @node; NUMA_NO_NODE if @node is terminal. This does not keep 2207dcee9bf5SHuang Ying * @node online or guarantee that it *continues* to be the next demotion 2208dcee9bf5SHuang Ying * target. 2209dcee9bf5SHuang Ying */ 2210dcee9bf5SHuang Ying int next_demotion_node(int node) 2211dcee9bf5SHuang Ying { 2212dcee9bf5SHuang Ying struct demotion_nodes *nd; 2213dcee9bf5SHuang Ying unsigned short target_nr, index; 2214dcee9bf5SHuang Ying int target; 2215dcee9bf5SHuang Ying 2216dcee9bf5SHuang Ying if (!node_demotion) 2217dcee9bf5SHuang Ying return NUMA_NO_NODE; 2218dcee9bf5SHuang Ying 2219dcee9bf5SHuang Ying nd = &node_demotion[node]; 2220dcee9bf5SHuang Ying 2221dcee9bf5SHuang Ying /* 2222dcee9bf5SHuang Ying * node_demotion[] is updated without excluding this 2223dcee9bf5SHuang Ying * function from running. RCU doesn't provide any 2224dcee9bf5SHuang Ying * compiler barriers, so the READ_ONCE() is required 2225dcee9bf5SHuang Ying * to avoid compiler reordering or read merging. 2226dcee9bf5SHuang Ying * 2227dcee9bf5SHuang Ying * Make sure to use RCU over entire code blocks if 2228dcee9bf5SHuang Ying * node_demotion[] reads need to be consistent. 2229dcee9bf5SHuang Ying */ 2230dcee9bf5SHuang Ying rcu_read_lock(); 2231dcee9bf5SHuang Ying target_nr = READ_ONCE(nd->nr); 2232dcee9bf5SHuang Ying 2233dcee9bf5SHuang Ying switch (target_nr) { 2234dcee9bf5SHuang Ying case 0: 2235dcee9bf5SHuang Ying target = NUMA_NO_NODE; 2236dcee9bf5SHuang Ying goto out; 2237dcee9bf5SHuang Ying case 1: 2238dcee9bf5SHuang Ying index = 0; 2239dcee9bf5SHuang Ying break; 2240dcee9bf5SHuang Ying default: 2241dcee9bf5SHuang Ying /* 2242dcee9bf5SHuang Ying * If there are multiple target nodes, just select one 2243dcee9bf5SHuang Ying * target node randomly. 2244dcee9bf5SHuang Ying * 2245dcee9bf5SHuang Ying * In addition, we can also use round-robin to select 2246dcee9bf5SHuang Ying * target node, but we should introduce another variable 2247dcee9bf5SHuang Ying * for node_demotion[] to record last selected target node, 2248dcee9bf5SHuang Ying * that may cause cache ping-pong due to the changing of 2249dcee9bf5SHuang Ying * last target node. Or introducing per-cpu data to avoid 2250dcee9bf5SHuang Ying * caching issue, which seems more complicated. So selecting 2251dcee9bf5SHuang Ying * target node randomly seems better until now. 
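 * (With target_nr == 2, for instance, this reduces to
 * get_random_int() % 2: a uniform pick between nodes[0] and nodes[1].)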
2252dcee9bf5SHuang Ying 		 */
2253dcee9bf5SHuang Ying 		index = get_random_int() % target_nr;
2254dcee9bf5SHuang Ying 		break;
2255dcee9bf5SHuang Ying 	}
2256dcee9bf5SHuang Ying 
2257dcee9bf5SHuang Ying 	target = READ_ONCE(nd->nodes[index]);
2258dcee9bf5SHuang Ying 
2259dcee9bf5SHuang Ying out:
2260dcee9bf5SHuang Ying 	rcu_read_unlock();
2261dcee9bf5SHuang Ying 	return target;
2262dcee9bf5SHuang Ying }
2263dcee9bf5SHuang Ying 
226479c28a41SDave Hansen /* Disable reclaim-based migration. */
226579c28a41SDave Hansen static void __disable_all_migrate_targets(void)
226679c28a41SDave Hansen {
2267ac16ec83SBaolin Wang 	int node, i;
226879c28a41SDave Hansen 
2269ac16ec83SBaolin Wang 	if (!node_demotion)
2270ac16ec83SBaolin Wang 		return;
2271ac16ec83SBaolin Wang 
2272ac16ec83SBaolin Wang 	for_each_online_node(node) {
2273ac16ec83SBaolin Wang 		node_demotion[node].nr = 0;
2274ac16ec83SBaolin Wang 		for (i = 0; i < DEMOTION_TARGET_NODES; i++)
2275ac16ec83SBaolin Wang 			node_demotion[node].nodes[i] = NUMA_NO_NODE;
2276ac16ec83SBaolin Wang 	}
227779c28a41SDave Hansen }
227879c28a41SDave Hansen 
227979c28a41SDave Hansen static void disable_all_migrate_targets(void)
228079c28a41SDave Hansen {
228179c28a41SDave Hansen 	__disable_all_migrate_targets();
228279c28a41SDave Hansen 
228379c28a41SDave Hansen 	/*
228479c28a41SDave Hansen 	 * Ensure that the "disable" is visible across the system.
228579c28a41SDave Hansen 	 * Readers will see either a combination of before+disable
228679c28a41SDave Hansen 	 * state or disable+after. They will never see before and
228779c28a41SDave Hansen 	 * after state together.
228879c28a41SDave Hansen 	 *
228979c28a41SDave Hansen 	 * The before+after state together might have cycles and
229079c28a41SDave Hansen 	 * could cause readers to do things like loop until this
229179c28a41SDave Hansen 	 * function finishes. This ensures they can only see a
229279c28a41SDave Hansen 	 * single "bad" read and would, for instance, only loop
229379c28a41SDave Hansen 	 * once.
229479c28a41SDave Hansen 	 */
229579c28a41SDave Hansen 	synchronize_rcu();
229679c28a41SDave Hansen }
229779c28a41SDave Hansen 
229879c28a41SDave Hansen /*
229979c28a41SDave Hansen  * Find an automatic demotion target for 'node'.
230079c28a41SDave Hansen  * Failing here is OK. It might just indicate
230179c28a41SDave Hansen  * being at the end of a chain.
230279c28a41SDave Hansen  */
2303ac16ec83SBaolin Wang static int establish_migrate_target(int node, nodemask_t *used,
2304ac16ec83SBaolin Wang 				    int best_distance)
230579c28a41SDave Hansen {
2306ac16ec83SBaolin Wang 	int migration_target, index, val;
2307ac16ec83SBaolin Wang 	struct demotion_nodes *nd;
230879c28a41SDave Hansen 
2309ac16ec83SBaolin Wang 	if (!node_demotion)
231079c28a41SDave Hansen 		return NUMA_NO_NODE;
231179c28a41SDave Hansen 
2312ac16ec83SBaolin Wang 	nd = &node_demotion[node];
2313ac16ec83SBaolin Wang 
231479c28a41SDave Hansen 	migration_target = find_next_best_node(node, used);
231579c28a41SDave Hansen 	if (migration_target == NUMA_NO_NODE)
231679c28a41SDave Hansen 		return NUMA_NO_NODE;
231779c28a41SDave Hansen 
2318ac16ec83SBaolin Wang 	/*
2319ac16ec83SBaolin Wang 	 * If this node was already given a migration target before,
2320ac16ec83SBaolin Wang 	 * that earlier target is at the best distance from it. Still
2321ac16ec83SBaolin Wang 	 * check whether this node can be demoted to other target
2322ac16ec83SBaolin Wang 	 * nodes that share the same best distance.
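	 *
	 * For example, with illustrative distances where
	 * node_distance(node, 1) == node_distance(node, 2) == 30,
	 * both node 1 and node 2 are accepted as demotion targets,
	 * while a candidate at distance 40 is rejected by the
	 * best_distance check below.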
2323ac16ec83SBaolin Wang 	 */
2324ac16ec83SBaolin Wang 	if (best_distance != -1) {
2325ac16ec83SBaolin Wang 		val = node_distance(node, migration_target);
2326ac16ec83SBaolin Wang 		if (val > best_distance)
2327fc89213aSHuang Ying 			goto out_clear;
2328ac16ec83SBaolin Wang 	}
2329ac16ec83SBaolin Wang 
2330ac16ec83SBaolin Wang 	index = nd->nr;
2331ac16ec83SBaolin Wang 	if (WARN_ONCE(index >= DEMOTION_TARGET_NODES,
2332ac16ec83SBaolin Wang 		      "Exceeds maximum demotion target nodes\n"))
2333fc89213aSHuang Ying 		goto out_clear;
2334ac16ec83SBaolin Wang 
2335ac16ec83SBaolin Wang 	nd->nodes[index] = migration_target;
2336ac16ec83SBaolin Wang 	nd->nr++;
233779c28a41SDave Hansen 
233879c28a41SDave Hansen 	return migration_target;
2339fc89213aSHuang Ying out_clear:
2340fc89213aSHuang Ying 	node_clear(migration_target, *used);
2341fc89213aSHuang Ying 	return NUMA_NO_NODE;
234279c28a41SDave Hansen }
234379c28a41SDave Hansen 
234479c28a41SDave Hansen /*
234579c28a41SDave Hansen  * When memory fills up on a node, memory contents can be
234679c28a41SDave Hansen  * automatically migrated to another node instead of
234779c28a41SDave Hansen  * discarded at reclaim.
234879c28a41SDave Hansen  *
234979c28a41SDave Hansen  * Establish a "migration path" which will start at nodes
235079c28a41SDave Hansen  * with CPUs and will follow the priorities used to build the
235179c28a41SDave Hansen  * page allocator zonelists.
235279c28a41SDave Hansen  *
235379c28a41SDave Hansen  * The difference here is that cycles must be avoided. If
235479c28a41SDave Hansen  * node0 migrates to node1, then neither node1, nor anything
2355ac16ec83SBaolin Wang  * node1 migrates to can migrate to node0. Also, one node can
2356ac16ec83SBaolin Wang  * be migrated to multiple nodes if the target nodes all have
2357ac16ec83SBaolin Wang  * the same best distance from the source node.
235879c28a41SDave Hansen  *
235979c28a41SDave Hansen  * This function can run simultaneously with readers of
236079c28a41SDave Hansen  * node_demotion[]. However, it can not run simultaneously
236179c28a41SDave Hansen  * with itself. Exclusion is provided by memory hotplug events
236279c28a41SDave Hansen  * being single-threaded.
236379c28a41SDave Hansen  */
236479c28a41SDave Hansen static void __set_migration_target_nodes(void)
236579c28a41SDave Hansen {
236691925ab8SMiaohe Lin 	nodemask_t next_pass;
236791925ab8SMiaohe Lin 	nodemask_t this_pass;
236879c28a41SDave Hansen 	nodemask_t used_targets = NODE_MASK_NONE;
2369ac16ec83SBaolin Wang 	int node, best_distance;
237079c28a41SDave Hansen 
237179c28a41SDave Hansen 	/*
237279c28a41SDave Hansen 	 * Avoid any oddities like cycles that could occur
237379c28a41SDave Hansen 	 * from changes in the topology. This will leave
237479c28a41SDave Hansen 	 * a momentary gap when migration is disabled.
237579c28a41SDave Hansen 	 */
237679c28a41SDave Hansen 	disable_all_migrate_targets();
237779c28a41SDave Hansen 
237879c28a41SDave Hansen 	/*
237979c28a41SDave Hansen 	 * Allocations go close to CPUs, first. Assume that
238079c28a41SDave Hansen 	 * the migration path starts at the nodes with CPUs.
238179c28a41SDave Hansen 	 */
238279c28a41SDave Hansen 	next_pass = node_states[N_CPU];
238379c28a41SDave Hansen again:
238479c28a41SDave Hansen 	this_pass = next_pass;
238579c28a41SDave Hansen 	next_pass = NODE_MASK_NONE;
238679c28a41SDave Hansen 	/*
238779c28a41SDave Hansen 	 * To avoid cycles in the migration "graph", ensure
238879c28a41SDave Hansen 	 * that migration sources are not future targets by
238979c28a41SDave Hansen 	 * setting them in 'used_targets'.
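	 * (With the two-socket example above, pass 1 sources are
	 * the CPU nodes {0, 3}; they demote into {1, 4}, which
	 * become the sources of pass 2, and so on until no new
	 * targets remain.)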
	 * Do this only
239079c28a41SDave Hansen 	 * once per pass so that multiple source nodes can
239179c28a41SDave Hansen 	 * share a target node.
239279c28a41SDave Hansen 	 *
239379c28a41SDave Hansen 	 * 'used_targets' will become unavailable in future
239479c28a41SDave Hansen 	 * passes. This limits some opportunities for
239579c28a41SDave Hansen 	 * multiple source nodes to share a destination.
239679c28a41SDave Hansen 	 */
239779c28a41SDave Hansen 	nodes_or(used_targets, used_targets, this_pass);
2398ac16ec83SBaolin Wang 
239979c28a41SDave Hansen 	for_each_node_mask(node, this_pass) {
2400ac16ec83SBaolin Wang 		best_distance = -1;
2401ac16ec83SBaolin Wang 
2402ac16ec83SBaolin Wang 		/*
2403ac16ec83SBaolin Wang 		 * Try to set up the migration path for the node. There
2404ac16ec83SBaolin Wang 		 * can be multiple target nodes, so keep looping until no
2405ac16ec83SBaolin Wang 		 * further target nodes share the best distance.
2406ac16ec83SBaolin Wang 		 */
2407ac16ec83SBaolin Wang 		do {
2408ac16ec83SBaolin Wang 			int target_node =
2409ac16ec83SBaolin Wang 				establish_migrate_target(node, &used_targets,
2410ac16ec83SBaolin Wang 							 best_distance);
241179c28a41SDave Hansen 
241279c28a41SDave Hansen 			if (target_node == NUMA_NO_NODE)
2413ac16ec83SBaolin Wang 				break;
2414ac16ec83SBaolin Wang 
2415ac16ec83SBaolin Wang 			if (best_distance == -1)
2416ac16ec83SBaolin Wang 				best_distance = node_distance(node, target_node);
241779c28a41SDave Hansen 
241879c28a41SDave Hansen 			/*
241979c28a41SDave Hansen 			 * Visit targets from this pass in the next pass.
242079c28a41SDave Hansen 			 * Eventually, every node will have been part of
242179c28a41SDave Hansen 			 * a pass, and will become set in 'used_targets'.
242279c28a41SDave Hansen 			 */
242379c28a41SDave Hansen 			node_set(target_node, next_pass);
2424ac16ec83SBaolin Wang 		} while (1);
242579c28a41SDave Hansen 	}
242679c28a41SDave Hansen 	/*
242779c28a41SDave Hansen 	 * 'next_pass' contains nodes which became migration
242879c28a41SDave Hansen 	 * targets in this pass. Make additional passes until
242979c28a41SDave Hansen 	 * no more migration targets are available.
243079c28a41SDave Hansen 	 */
243179c28a41SDave Hansen 	if (!nodes_empty(next_pass))
243279c28a41SDave Hansen 		goto again;
243379c28a41SDave Hansen }
243479c28a41SDave Hansen 
243579c28a41SDave Hansen /*
243679c28a41SDave Hansen  * For callers that do not hold get_online_mems() already.
243779c28a41SDave Hansen  */
2438734c1570SOscar Salvador void set_migration_target_nodes(void)
243979c28a41SDave Hansen {
244079c28a41SDave Hansen 	get_online_mems();
244179c28a41SDave Hansen 	__set_migration_target_nodes();
244279c28a41SDave Hansen 	put_online_mems();
244379c28a41SDave Hansen }
2444884a6e5dSDave Hansen 
2445884a6e5dSDave Hansen /*
2446884a6e5dSDave Hansen  * This leaves migrate-on-reclaim transiently disabled between
2447884a6e5dSDave Hansen  * the MEM_GOING_OFFLINE and MEM_OFFLINE events. This runs
2448884a6e5dSDave Hansen  * whether reclaim-based migration is enabled or not, which
2449884a6e5dSDave Hansen  * ensures that the user can turn reclaim-based migration on or
2450884a6e5dSDave Hansen  * off at any time without needing to recalculate migration targets.
2451884a6e5dSDave Hansen  *
2452884a6e5dSDave Hansen  * These callbacks already hold get_online_mems(). That is why
2453884a6e5dSDave Hansen  * __set_migration_target_nodes() can be used as opposed to
2454884a6e5dSDave Hansen  * set_migration_target_nodes().
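 *
 * (While the targets are disabled in that transient window,
 * next_demotion_node() returns NUMA_NO_NODE, so reclaim simply
 * discards pages as usual instead of demoting them.)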
2455884a6e5dSDave Hansen  */
24567d6e2d96SOscar Salvador #ifdef CONFIG_MEMORY_HOTPLUG
2457884a6e5dSDave Hansen static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
2458295be91fSDave Hansen 						 unsigned long action, void *_arg)
2459884a6e5dSDave Hansen {
2460295be91fSDave Hansen 	struct memory_notify *arg = _arg;
2461295be91fSDave Hansen 
2462295be91fSDave Hansen 	/*
2463295be91fSDave Hansen 	 * Only update the node migration order when a node is
2464295be91fSDave Hansen 	 * changing status, like online->offline. This avoids
2465295be91fSDave Hansen 	 * the overhead of synchronize_rcu() in most cases.
2466295be91fSDave Hansen 	 */
2467295be91fSDave Hansen 	if (arg->status_change_nid < 0)
2468295be91fSDave Hansen 		return notifier_from_errno(0);
2469295be91fSDave Hansen 
2470884a6e5dSDave Hansen 	switch (action) {
2471884a6e5dSDave Hansen 	case MEM_GOING_OFFLINE:
2472884a6e5dSDave Hansen 		/*
2473884a6e5dSDave Hansen 		 * Make sure there are not transient states where
2474884a6e5dSDave Hansen 		 * an offline node is a migration target. This
2475884a6e5dSDave Hansen 		 * will leave migration disabled until the offline
2476884a6e5dSDave Hansen 		 * completes and the MEM_OFFLINE case below runs.
2477884a6e5dSDave Hansen 		 */
2478884a6e5dSDave Hansen 		disable_all_migrate_targets();
2479884a6e5dSDave Hansen 		break;
2480884a6e5dSDave Hansen 	case MEM_OFFLINE:
2481884a6e5dSDave Hansen 	case MEM_ONLINE:
2482884a6e5dSDave Hansen 		/*
2483884a6e5dSDave Hansen 		 * Recalculate the target nodes once the node
2484884a6e5dSDave Hansen 		 * reaches its final state (online or offline).
2485884a6e5dSDave Hansen 		 */
2486884a6e5dSDave Hansen 		__set_migration_target_nodes();
2487884a6e5dSDave Hansen 		break;
2488884a6e5dSDave Hansen 	case MEM_CANCEL_OFFLINE:
2489884a6e5dSDave Hansen 		/*
2490884a6e5dSDave Hansen 		 * MEM_GOING_OFFLINE disabled all the migration
2491884a6e5dSDave Hansen 		 * targets. Reenable them.
2492884a6e5dSDave Hansen 		 */
2493884a6e5dSDave Hansen 		__set_migration_target_nodes();
2494884a6e5dSDave Hansen 		break;
2495884a6e5dSDave Hansen 	case MEM_GOING_ONLINE:
2496884a6e5dSDave Hansen 	case MEM_CANCEL_ONLINE:
2497884a6e5dSDave Hansen 		break;
2498884a6e5dSDave Hansen 	}
2499884a6e5dSDave Hansen 
2500884a6e5dSDave Hansen 	return notifier_from_errno(0);
2501884a6e5dSDave Hansen }
25027d6e2d96SOscar Salvador #endif
2503884a6e5dSDave Hansen 
2504734c1570SOscar Salvador void __init migrate_on_reclaim_init(void)
250576af6a05SDave Hansen {
25063f26c88bSMiaohe Lin 	node_demotion = kcalloc(nr_node_ids,
2507ac16ec83SBaolin Wang 				sizeof(struct demotion_nodes),
2508ac16ec83SBaolin Wang 				GFP_KERNEL);
2509ac16ec83SBaolin Wang 	WARN_ON(!node_demotion);
25107d6e2d96SOscar Salvador #ifdef CONFIG_MEMORY_HOTPLUG
2511884a6e5dSDave Hansen 	hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
25127d6e2d96SOscar Salvador #endif
2513734c1570SOscar Salvador 	/*
2514734c1570SOscar Salvador 	 * At this point, all numa nodes with memory/CPUs have their state
2515734c1570SOscar Salvador 	 * properly set, so we can build the demotion order now.
2516734c1570SOscar Salvador 	 * Let us hold the cpu_hotplug lock, as we could possibly have
2517734c1570SOscar Salvador 	 * CPU hotplug events during boot.
2518734c1570SOscar Salvador 	 */
2519734c1570SOscar Salvador 	cpus_read_lock();
2520734c1570SOscar Salvador 	set_migration_target_nodes();
2521734c1570SOscar Salvador 	cpus_read_unlock();
2522884a6e5dSDave Hansen }
252320f9ba4fSYang Shi 
252420f9ba4fSYang Shi bool numa_demotion_enabled = false;
252520f9ba4fSYang Shi 
252620f9ba4fSYang Shi #ifdef CONFIG_SYSFS
252720f9ba4fSYang Shi static ssize_t numa_demotion_enabled_show(struct kobject *kobj,
252820f9ba4fSYang Shi 					  struct kobj_attribute *attr, char *buf)
252920f9ba4fSYang Shi {
253020f9ba4fSYang Shi 	return sysfs_emit(buf, "%s\n",
253120f9ba4fSYang Shi 			  numa_demotion_enabled ? "true" : "false");
253220f9ba4fSYang Shi }
253320f9ba4fSYang Shi 
253420f9ba4fSYang Shi static ssize_t numa_demotion_enabled_store(struct kobject *kobj,
253520f9ba4fSYang Shi 					   struct kobj_attribute *attr,
253620f9ba4fSYang Shi 					   const char *buf, size_t count)
253720f9ba4fSYang Shi {
2538717aeab4SJagdish Gediya 	ssize_t ret;
2539717aeab4SJagdish Gediya 
2540717aeab4SJagdish Gediya 	ret = kstrtobool(buf, &numa_demotion_enabled);
2541717aeab4SJagdish Gediya 	if (ret)
2542717aeab4SJagdish Gediya 		return ret;
254320f9ba4fSYang Shi 
254420f9ba4fSYang Shi 	return count;
254520f9ba4fSYang Shi }
254620f9ba4fSYang Shi 
254720f9ba4fSYang Shi static struct kobj_attribute numa_demotion_enabled_attr =
254820f9ba4fSYang Shi 	__ATTR(demotion_enabled, 0644, numa_demotion_enabled_show,
254920f9ba4fSYang Shi 	       numa_demotion_enabled_store);
255020f9ba4fSYang Shi 
255120f9ba4fSYang Shi static struct attribute *numa_attrs[] = {
255220f9ba4fSYang Shi 	&numa_demotion_enabled_attr.attr,
255320f9ba4fSYang Shi 	NULL,
255420f9ba4fSYang Shi };
255520f9ba4fSYang Shi 
255620f9ba4fSYang Shi static const struct attribute_group numa_attr_group = {
255720f9ba4fSYang Shi 	.attrs = numa_attrs,
255820f9ba4fSYang Shi };
255920f9ba4fSYang Shi 
256020f9ba4fSYang Shi static int __init numa_init_sysfs(void)
256120f9ba4fSYang Shi {
256220f9ba4fSYang Shi 	int err;
256320f9ba4fSYang Shi 	struct kobject *numa_kobj;
256420f9ba4fSYang Shi 
256520f9ba4fSYang Shi 	numa_kobj = kobject_create_and_add("numa", mm_kobj);
256620f9ba4fSYang Shi 	if (!numa_kobj) {
256720f9ba4fSYang Shi 		pr_err("failed to create numa kobject\n");
256820f9ba4fSYang Shi 		return -ENOMEM;
256920f9ba4fSYang Shi 	}
257020f9ba4fSYang Shi 	err = sysfs_create_group(numa_kobj, &numa_attr_group);
257120f9ba4fSYang Shi 	if (err) {
257220f9ba4fSYang Shi 		pr_err("failed to register numa group\n");
257320f9ba4fSYang Shi 		goto delete_obj;
257420f9ba4fSYang Shi 	}
257520f9ba4fSYang Shi 	return 0;
257620f9ba4fSYang Shi 
257720f9ba4fSYang Shi delete_obj:
257820f9ba4fSYang Shi 	kobject_put(numa_kobj);
257920f9ba4fSYang Shi 	return err;
258020f9ba4fSYang Shi }
258120f9ba4fSYang Shi subsys_initcall(numa_init_sysfs);
25827d6e2d96SOscar Salvador #endif /* CONFIG_SYSFS */
25837d6e2d96SOscar Salvador #endif /* CONFIG_NUMA */
2584
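
/*
 * Illustrative usage of the sysfs knob registered above; the path
 * follows from the "numa" kobject being created under mm_kobj
 * (/sys/kernel/mm):
 *
 *	# cat /sys/kernel/mm/numa/demotion_enabled
 *	false
 *	# echo true > /sys/kernel/mm/numa/demotion_enabled
 *	# cat /sys/kernel/mm/numa/demotion_enabled
 *	true
 */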