1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0 2b20a3503SChristoph Lameter /* 314e0f9bcSHugh Dickins * Memory Migration functionality - linux/mm/migrate.c 4b20a3503SChristoph Lameter * 5b20a3503SChristoph Lameter * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter 6b20a3503SChristoph Lameter * 7b20a3503SChristoph Lameter * Page migration was first developed in the context of the memory hotplug 8b20a3503SChristoph Lameter * project. The main authors of the migration code are: 9b20a3503SChristoph Lameter * 10b20a3503SChristoph Lameter * IWAMOTO Toshihiro <iwamoto@valinux.co.jp> 11b20a3503SChristoph Lameter * Hirokazu Takahashi <taka@valinux.co.jp> 12b20a3503SChristoph Lameter * Dave Hansen <haveblue@us.ibm.com> 13cde53535SChristoph Lameter * Christoph Lameter 14b20a3503SChristoph Lameter */ 15b20a3503SChristoph Lameter 16b20a3503SChristoph Lameter #include <linux/migrate.h> 17b95f1b31SPaul Gortmaker #include <linux/export.h> 18b20a3503SChristoph Lameter #include <linux/swap.h> 190697212aSChristoph Lameter #include <linux/swapops.h> 20b20a3503SChristoph Lameter #include <linux/pagemap.h> 21e23ca00bSChristoph Lameter #include <linux/buffer_head.h> 22b20a3503SChristoph Lameter #include <linux/mm_inline.h> 23b488893aSPavel Emelyanov #include <linux/nsproxy.h> 24b20a3503SChristoph Lameter #include <linux/pagevec.h> 25e9995ef9SHugh Dickins #include <linux/ksm.h> 26b20a3503SChristoph Lameter #include <linux/rmap.h> 27b20a3503SChristoph Lameter #include <linux/topology.h> 28b20a3503SChristoph Lameter #include <linux/cpu.h> 29b20a3503SChristoph Lameter #include <linux/cpuset.h> 3004e62a29SChristoph Lameter #include <linux/writeback.h> 31742755a1SChristoph Lameter #include <linux/mempolicy.h> 32742755a1SChristoph Lameter #include <linux/vmalloc.h> 3386c3a764SDavid Quigley #include <linux/security.h> 3442cb14b1SHugh Dickins #include <linux/backing-dev.h> 35bda807d4SMinchan Kim #include <linux/compaction.h> 364f5ca265SAdrian Bunk #include <linux/syscalls.h> 377addf443SDominik Brodowski #include <linux/compat.h> 38290408d4SNaoya Horiguchi #include <linux/hugetlb.h> 398e6ac7faSAneesh Kumar K.V #include <linux/hugetlb_cgroup.h> 405a0e3ad6STejun Heo #include <linux/gfp.h> 41df6ad698SJérôme Glisse #include <linux/pfn_t.h> 42a5430ddaSJérôme Glisse #include <linux/memremap.h> 438315ada7SJérôme Glisse #include <linux/userfaultfd_k.h> 44bf6bddf1SRafael Aquini #include <linux/balloon_compaction.h> 45f714f4f2SMel Gorman #include <linux/mmu_notifier.h> 4633c3fc71SVladimir Davydov #include <linux/page_idle.h> 47d435edcaSVlastimil Babka #include <linux/page_owner.h> 486e84f315SIngo Molnar #include <linux/sched/mm.h> 49197e7e52SLinus Torvalds #include <linux/ptrace.h> 50b20a3503SChristoph Lameter 510d1836c3SMichal Nazarewicz #include <asm/tlbflush.h> 520d1836c3SMichal Nazarewicz 537b2a2d4aSMel Gorman #define CREATE_TRACE_POINTS 547b2a2d4aSMel Gorman #include <trace/events/migrate.h> 557b2a2d4aSMel Gorman 56b20a3503SChristoph Lameter #include "internal.h" 57b20a3503SChristoph Lameter 58b20a3503SChristoph Lameter /* 59742755a1SChristoph Lameter * migrate_prep() needs to be called before we start compiling a list of pages 60748446bbSMel Gorman * to be migrated using isolate_lru_page(). 
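 *
 * A minimal caller sketch (illustrative only: the allocation callback,
 * list head and MR_SYSCALL reason below are assumptions modelled on the
 * move_pages()-style callers, not code in this file):
 *
 *	migrate_prep();
 *	if (!isolate_lru_page(page))
 *		list_add_tail(&page->lru, &pagelist);
 *	migrate_pages(&pagelist, alloc_callback, NULL, private,
 *		      MIGRATE_SYNC, MR_SYSCALL);
 *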
If scheduling work on other CPUs is
61748446bbSMel Gorman * undesirable, use migrate_prep_local()
62b20a3503SChristoph Lameter */
63b20a3503SChristoph Lameter int migrate_prep(void)
64b20a3503SChristoph Lameter {
65b20a3503SChristoph Lameter /*
66b20a3503SChristoph Lameter * Clear the LRU lists so pages can be isolated.
67b20a3503SChristoph Lameter * Note that pages may be moved off the LRU after we have
68b20a3503SChristoph Lameter * drained them. Those pages will fail to migrate like other
69b20a3503SChristoph Lameter * pages that may be busy.
70b20a3503SChristoph Lameter */
71b20a3503SChristoph Lameter lru_add_drain_all();
72b20a3503SChristoph Lameter 
73b20a3503SChristoph Lameter return 0;
74b20a3503SChristoph Lameter }
75b20a3503SChristoph Lameter 
76748446bbSMel Gorman /* Do the necessary work of migrate_prep but not if it involves other CPUs */
77748446bbSMel Gorman int migrate_prep_local(void)
78748446bbSMel Gorman {
79748446bbSMel Gorman lru_add_drain();
80748446bbSMel Gorman 
81748446bbSMel Gorman return 0;
82748446bbSMel Gorman }
83748446bbSMel Gorman 
849e5bcd61SYisheng Xie int isolate_movable_page(struct page *page, isolate_mode_t mode)
85bda807d4SMinchan Kim {
86bda807d4SMinchan Kim struct address_space *mapping;
87bda807d4SMinchan Kim 
88bda807d4SMinchan Kim /*
89bda807d4SMinchan Kim * Avoid burning cycles with pages that are yet under __free_pages(),
90bda807d4SMinchan Kim * or just got freed under us.
91bda807d4SMinchan Kim *
92bda807d4SMinchan Kim * In case we 'win' a race for a movable page being freed under us and
93bda807d4SMinchan Kim * raise its refcount preventing __free_pages() from doing its job,
94bda807d4SMinchan Kim * the put_page() at the end of this block will take care of
95bda807d4SMinchan Kim * releasing this page, thus avoiding a nasty leak.
96bda807d4SMinchan Kim */
97bda807d4SMinchan Kim if (unlikely(!get_page_unless_zero(page)))
98bda807d4SMinchan Kim goto out;
99bda807d4SMinchan Kim 
100bda807d4SMinchan Kim /*
101bda807d4SMinchan Kim * Check PageMovable before taking PG_lock because the page's owner
102bda807d4SMinchan Kim * assumes that nobody touches the PG_lock of a newly allocated page,
103bda807d4SMinchan Kim * so unconditionally grabbing the lock would ruin the owner's side.
104bda807d4SMinchan Kim */
105bda807d4SMinchan Kim if (unlikely(!__PageMovable(page)))
106bda807d4SMinchan Kim goto out_putpage;
107bda807d4SMinchan Kim /*
108bda807d4SMinchan Kim * As movable pages are not isolated from LRU lists, concurrent
109bda807d4SMinchan Kim * compaction threads can race against page migration functions
110bda807d4SMinchan Kim * as well as against a page being released.
111bda807d4SMinchan Kim *
112bda807d4SMinchan Kim * In order to avoid having an already isolated movable page
113bda807d4SMinchan Kim * being (wrongly) re-isolated while it is under migration,
114bda807d4SMinchan Kim * or to avoid attempting to isolate pages being released,
115bda807d4SMinchan Kim * let's be sure we have the page lock
116bda807d4SMinchan Kim * before proceeding with the movable page isolation steps.
117bda807d4SMinchan Kim */ 118bda807d4SMinchan Kim if (unlikely(!trylock_page(page))) 119bda807d4SMinchan Kim goto out_putpage; 120bda807d4SMinchan Kim 121bda807d4SMinchan Kim if (!PageMovable(page) || PageIsolated(page)) 122bda807d4SMinchan Kim goto out_no_isolated; 123bda807d4SMinchan Kim 124bda807d4SMinchan Kim mapping = page_mapping(page); 125bda807d4SMinchan Kim VM_BUG_ON_PAGE(!mapping, page); 126bda807d4SMinchan Kim 127bda807d4SMinchan Kim if (!mapping->a_ops->isolate_page(page, mode)) 128bda807d4SMinchan Kim goto out_no_isolated; 129bda807d4SMinchan Kim 130bda807d4SMinchan Kim /* Driver shouldn't use PG_isolated bit of page->flags */ 131bda807d4SMinchan Kim WARN_ON_ONCE(PageIsolated(page)); 132bda807d4SMinchan Kim __SetPageIsolated(page); 133bda807d4SMinchan Kim unlock_page(page); 134bda807d4SMinchan Kim 1359e5bcd61SYisheng Xie return 0; 136bda807d4SMinchan Kim 137bda807d4SMinchan Kim out_no_isolated: 138bda807d4SMinchan Kim unlock_page(page); 139bda807d4SMinchan Kim out_putpage: 140bda807d4SMinchan Kim put_page(page); 141bda807d4SMinchan Kim out: 1429e5bcd61SYisheng Xie return -EBUSY; 143bda807d4SMinchan Kim } 144bda807d4SMinchan Kim 145bda807d4SMinchan Kim /* It should be called on page which is PG_movable */ 146bda807d4SMinchan Kim void putback_movable_page(struct page *page) 147bda807d4SMinchan Kim { 148bda807d4SMinchan Kim struct address_space *mapping; 149bda807d4SMinchan Kim 150bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageLocked(page), page); 151bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageMovable(page), page); 152bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageIsolated(page), page); 153bda807d4SMinchan Kim 154bda807d4SMinchan Kim mapping = page_mapping(page); 155bda807d4SMinchan Kim mapping->a_ops->putback_page(page); 156bda807d4SMinchan Kim __ClearPageIsolated(page); 157bda807d4SMinchan Kim } 158bda807d4SMinchan Kim 159b20a3503SChristoph Lameter /* 1605733c7d1SRafael Aquini * Put previously isolated pages back onto the appropriate lists 1615733c7d1SRafael Aquini * from where they were once taken off for compaction/migration. 1625733c7d1SRafael Aquini * 16359c82b70SJoonsoo Kim * This function shall be used whenever the isolated pageset has been 16459c82b70SJoonsoo Kim * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range() 16559c82b70SJoonsoo Kim * and isolate_huge_page(). 1665733c7d1SRafael Aquini */ 1675733c7d1SRafael Aquini void putback_movable_pages(struct list_head *l) 1685733c7d1SRafael Aquini { 1695733c7d1SRafael Aquini struct page *page; 1705733c7d1SRafael Aquini struct page *page2; 1715733c7d1SRafael Aquini 1725733c7d1SRafael Aquini list_for_each_entry_safe(page, page2, l, lru) { 17331caf665SNaoya Horiguchi if (unlikely(PageHuge(page))) { 17431caf665SNaoya Horiguchi putback_active_hugepage(page); 17531caf665SNaoya Horiguchi continue; 17631caf665SNaoya Horiguchi } 1775733c7d1SRafael Aquini list_del(&page->lru); 178bda807d4SMinchan Kim /* 179bda807d4SMinchan Kim * We isolated non-lru movable page so here we can use 180bda807d4SMinchan Kim * __PageMovable because LRU page's mapping cannot have 181bda807d4SMinchan Kim * PAGE_MAPPING_MOVABLE. 
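 *
 * (A non-LRU driver opts its pages into this path roughly as follows;
 * this is a hedged sketch, with page and mapping supplied by the driver:
 *
 *	__SetPageMovable(page, mapping);
 *
 * where mapping->a_ops provides ->isolate_page(), ->migratepage() and
 * ->putback_page().  zsmalloc and the virtio balloon driver are the
 * in-tree users of this scheme.)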
182bda807d4SMinchan Kim */ 183b1123ea6SMinchan Kim if (unlikely(__PageMovable(page))) { 184bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageIsolated(page), page); 185bda807d4SMinchan Kim lock_page(page); 186bda807d4SMinchan Kim if (PageMovable(page)) 187bda807d4SMinchan Kim putback_movable_page(page); 188bf6bddf1SRafael Aquini else 189bda807d4SMinchan Kim __ClearPageIsolated(page); 190bda807d4SMinchan Kim unlock_page(page); 191bda807d4SMinchan Kim put_page(page); 192bda807d4SMinchan Kim } else { 193e8db67ebSNaoya Horiguchi mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + 194e8db67ebSNaoya Horiguchi page_is_file_cache(page), -hpage_nr_pages(page)); 195fc280fe8SRabin Vincent putback_lru_page(page); 196b20a3503SChristoph Lameter } 197b20a3503SChristoph Lameter } 198bda807d4SMinchan Kim } 199b20a3503SChristoph Lameter 2000697212aSChristoph Lameter /* 2010697212aSChristoph Lameter * Restore a potential migration pte to a working pte entry 2020697212aSChristoph Lameter */ 203e4b82222SMinchan Kim static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, 204e9995ef9SHugh Dickins unsigned long addr, void *old) 2050697212aSChristoph Lameter { 2063fe87967SKirill A. Shutemov struct page_vma_mapped_walk pvmw = { 2073fe87967SKirill A. Shutemov .page = old, 2083fe87967SKirill A. Shutemov .vma = vma, 2093fe87967SKirill A. Shutemov .address = addr, 2103fe87967SKirill A. Shutemov .flags = PVMW_SYNC | PVMW_MIGRATION, 2113fe87967SKirill A. Shutemov }; 2123fe87967SKirill A. Shutemov struct page *new; 2133fe87967SKirill A. Shutemov pte_t pte; 2140697212aSChristoph Lameter swp_entry_t entry; 2150697212aSChristoph Lameter 2163fe87967SKirill A. Shutemov VM_BUG_ON_PAGE(PageTail(page), page); 2173fe87967SKirill A. Shutemov while (page_vma_mapped_walk(&pvmw)) { 2184b0ece6fSNaoya Horiguchi if (PageKsm(page)) 2194b0ece6fSNaoya Horiguchi new = page; 2204b0ece6fSNaoya Horiguchi else 2213fe87967SKirill A. Shutemov new = page - pvmw.page->index + 2223fe87967SKirill A. Shutemov linear_page_index(vma, pvmw.address); 2230697212aSChristoph Lameter 224616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 225616b8371SZi Yan /* PMD-mapped THP migration entry */ 226616b8371SZi Yan if (!pvmw.pte) { 227616b8371SZi Yan VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); 228616b8371SZi Yan remove_migration_pmd(&pvmw, new); 229616b8371SZi Yan continue; 230616b8371SZi Yan } 231616b8371SZi Yan #endif 232616b8371SZi Yan 2330697212aSChristoph Lameter get_page(new); 2346d2329f8SAndrea Arcangeli pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot))); 2353fe87967SKirill A. Shutemov if (pte_swp_soft_dirty(*pvmw.pte)) 236c3d16e16SCyrill Gorcunov pte = pte_mksoft_dirty(pte); 237d3cb8bf6SMel Gorman 2383fe87967SKirill A. Shutemov /* 2393fe87967SKirill A. Shutemov * Recheck VMA as permissions can change since migration started 2403fe87967SKirill A. Shutemov */ 2413fe87967SKirill A. 
Shutemov entry = pte_to_swp_entry(*pvmw.pte); 2420697212aSChristoph Lameter if (is_write_migration_entry(entry)) 243d3cb8bf6SMel Gorman pte = maybe_mkwrite(pte, vma); 244d3cb8bf6SMel Gorman 245df6ad698SJérôme Glisse if (unlikely(is_zone_device_page(new))) { 246df6ad698SJérôme Glisse if (is_device_private_page(new)) { 247a5430ddaSJérôme Glisse entry = make_device_private_entry(new, pte_write(pte)); 248a5430ddaSJérôme Glisse pte = swp_entry_to_pte(entry); 249df6ad698SJérôme Glisse } else if (is_device_public_page(new)) { 250df6ad698SJérôme Glisse pte = pte_mkdevmap(pte); 251df6ad698SJérôme Glisse flush_dcache_page(new); 252df6ad698SJérôme Glisse } 253a5430ddaSJérôme Glisse } else 254383321abSAneesh Kumar K.V flush_dcache_page(new); 255a5430ddaSJérôme Glisse 2563ef8fd7fSAndi Kleen #ifdef CONFIG_HUGETLB_PAGE 257be7517d6STony Lu if (PageHuge(new)) { 258290408d4SNaoya Horiguchi pte = pte_mkhuge(pte); 259be7517d6STony Lu pte = arch_make_huge_pte(pte, vma, new, 0); 260383321abSAneesh Kumar K.V set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); 26104e62a29SChristoph Lameter if (PageAnon(new)) 2623fe87967SKirill A. Shutemov hugepage_add_anon_rmap(new, vma, pvmw.address); 263290408d4SNaoya Horiguchi else 26453f9263bSKirill A. Shutemov page_dup_rmap(new, true); 265383321abSAneesh Kumar K.V } else 266383321abSAneesh Kumar K.V #endif 267383321abSAneesh Kumar K.V { 268383321abSAneesh Kumar K.V set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); 269383321abSAneesh Kumar K.V 270383321abSAneesh Kumar K.V if (PageAnon(new)) 2713fe87967SKirill A. Shutemov page_add_anon_rmap(new, vma, pvmw.address, false); 27204e62a29SChristoph Lameter else 273dd78feddSKirill A. Shutemov page_add_file_rmap(new, false); 274383321abSAneesh Kumar K.V } 275e388466dSKirill A. Shutemov if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new)) 27651afb12bSHugh Dickins mlock_vma_page(new); 27751afb12bSHugh Dickins 27804e62a29SChristoph Lameter /* No need to invalidate - it was non-present before */ 2793fe87967SKirill A. Shutemov update_mmu_cache(vma, pvmw.address, pvmw.pte); 2803fe87967SKirill A. Shutemov } 2813fe87967SKirill A. Shutemov 282e4b82222SMinchan Kim return true; 2830697212aSChristoph Lameter } 2840697212aSChristoph Lameter 2850697212aSChristoph Lameter /* 28604e62a29SChristoph Lameter * Get rid of all migration entries and replace them by 28704e62a29SChristoph Lameter * references to the indicated page. 28804e62a29SChristoph Lameter */ 289e388466dSKirill A. Shutemov void remove_migration_ptes(struct page *old, struct page *new, bool locked) 29004e62a29SChristoph Lameter { 291051ac83aSJoonsoo Kim struct rmap_walk_control rwc = { 292051ac83aSJoonsoo Kim .rmap_one = remove_migration_pte, 293051ac83aSJoonsoo Kim .arg = old, 294051ac83aSJoonsoo Kim }; 295051ac83aSJoonsoo Kim 296e388466dSKirill A. Shutemov if (locked) 297e388466dSKirill A. Shutemov rmap_walk_locked(new, &rwc); 298e388466dSKirill A. Shutemov else 299051ac83aSJoonsoo Kim rmap_walk(new, &rwc); 30004e62a29SChristoph Lameter } 30104e62a29SChristoph Lameter 30204e62a29SChristoph Lameter /* 3030697212aSChristoph Lameter * Something used the pte of a page under migration. We need to 3040697212aSChristoph Lameter * get to the page and wait until migration is finished. 3050697212aSChristoph Lameter * When we return from this function the fault will be retried. 
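 *
 * For example, the fault path does roughly the following (a simplified
 * sketch of do_swap_page(), not code from this file):
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (is_migration_entry(entry)) {
 *		migration_entry_wait(vmf->vma->vm_mm, vmf->pmd, vmf->address);
 *		return 0;
 *	}
 *
 * and the fault is simply retried once the migration entry is gone.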
3060697212aSChristoph Lameter */ 307e66f17ffSNaoya Horiguchi void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, 30830dad309SNaoya Horiguchi spinlock_t *ptl) 3090697212aSChristoph Lameter { 31030dad309SNaoya Horiguchi pte_t pte; 3110697212aSChristoph Lameter swp_entry_t entry; 3120697212aSChristoph Lameter struct page *page; 3130697212aSChristoph Lameter 31430dad309SNaoya Horiguchi spin_lock(ptl); 3150697212aSChristoph Lameter pte = *ptep; 3160697212aSChristoph Lameter if (!is_swap_pte(pte)) 3170697212aSChristoph Lameter goto out; 3180697212aSChristoph Lameter 3190697212aSChristoph Lameter entry = pte_to_swp_entry(pte); 3200697212aSChristoph Lameter if (!is_migration_entry(entry)) 3210697212aSChristoph Lameter goto out; 3220697212aSChristoph Lameter 3230697212aSChristoph Lameter page = migration_entry_to_page(entry); 3240697212aSChristoph Lameter 325e286781dSNick Piggin /* 326e286781dSNick Piggin * Once radix-tree replacement of page migration started, page_count 327e286781dSNick Piggin * *must* be zero. And, we don't want to call wait_on_page_locked() 328e286781dSNick Piggin * against a page without get_page(). 329e286781dSNick Piggin * So, we use get_page_unless_zero(), here. Even failed, page fault 330e286781dSNick Piggin * will occur again. 331e286781dSNick Piggin */ 332e286781dSNick Piggin if (!get_page_unless_zero(page)) 333e286781dSNick Piggin goto out; 3340697212aSChristoph Lameter pte_unmap_unlock(ptep, ptl); 3350697212aSChristoph Lameter wait_on_page_locked(page); 3360697212aSChristoph Lameter put_page(page); 3370697212aSChristoph Lameter return; 3380697212aSChristoph Lameter out: 3390697212aSChristoph Lameter pte_unmap_unlock(ptep, ptl); 3400697212aSChristoph Lameter } 3410697212aSChristoph Lameter 34230dad309SNaoya Horiguchi void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 34330dad309SNaoya Horiguchi unsigned long address) 34430dad309SNaoya Horiguchi { 34530dad309SNaoya Horiguchi spinlock_t *ptl = pte_lockptr(mm, pmd); 34630dad309SNaoya Horiguchi pte_t *ptep = pte_offset_map(pmd, address); 34730dad309SNaoya Horiguchi __migration_entry_wait(mm, ptep, ptl); 34830dad309SNaoya Horiguchi } 34930dad309SNaoya Horiguchi 350cb900f41SKirill A. Shutemov void migration_entry_wait_huge(struct vm_area_struct *vma, 351cb900f41SKirill A. Shutemov struct mm_struct *mm, pte_t *pte) 35230dad309SNaoya Horiguchi { 353cb900f41SKirill A. 
Shutemov spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte); 35430dad309SNaoya Horiguchi __migration_entry_wait(mm, pte, ptl); 35530dad309SNaoya Horiguchi } 35630dad309SNaoya Horiguchi 357616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 358616b8371SZi Yan void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd) 359616b8371SZi Yan { 360616b8371SZi Yan spinlock_t *ptl; 361616b8371SZi Yan struct page *page; 362616b8371SZi Yan 363616b8371SZi Yan ptl = pmd_lock(mm, pmd); 364616b8371SZi Yan if (!is_pmd_migration_entry(*pmd)) 365616b8371SZi Yan goto unlock; 366616b8371SZi Yan page = migration_entry_to_page(pmd_to_swp_entry(*pmd)); 367616b8371SZi Yan if (!get_page_unless_zero(page)) 368616b8371SZi Yan goto unlock; 369616b8371SZi Yan spin_unlock(ptl); 370616b8371SZi Yan wait_on_page_locked(page); 371616b8371SZi Yan put_page(page); 372616b8371SZi Yan return; 373616b8371SZi Yan unlock: 374616b8371SZi Yan spin_unlock(ptl); 375616b8371SZi Yan } 376616b8371SZi Yan #endif 377616b8371SZi Yan 378b969c4abSMel Gorman #ifdef CONFIG_BLOCK 379b969c4abSMel Gorman /* Returns true if all buffers are successfully locked */ 380a6bc32b8SMel Gorman static bool buffer_migrate_lock_buffers(struct buffer_head *head, 381a6bc32b8SMel Gorman enum migrate_mode mode) 382b969c4abSMel Gorman { 383b969c4abSMel Gorman struct buffer_head *bh = head; 384b969c4abSMel Gorman 385b969c4abSMel Gorman /* Simple case, sync compaction */ 386a6bc32b8SMel Gorman if (mode != MIGRATE_ASYNC) { 387b969c4abSMel Gorman do { 388b969c4abSMel Gorman get_bh(bh); 389b969c4abSMel Gorman lock_buffer(bh); 390b969c4abSMel Gorman bh = bh->b_this_page; 391b969c4abSMel Gorman 392b969c4abSMel Gorman } while (bh != head); 393b969c4abSMel Gorman 394b969c4abSMel Gorman return true; 395b969c4abSMel Gorman } 396b969c4abSMel Gorman 397b969c4abSMel Gorman /* async case, we cannot block on lock_buffer so use trylock_buffer */ 398b969c4abSMel Gorman do { 399b969c4abSMel Gorman get_bh(bh); 400b969c4abSMel Gorman if (!trylock_buffer(bh)) { 401b969c4abSMel Gorman /* 402b969c4abSMel Gorman * We failed to lock the buffer and cannot stall in 403b969c4abSMel Gorman * async migration. Release the taken locks 404b969c4abSMel Gorman */ 405b969c4abSMel Gorman struct buffer_head *failed_bh = bh; 406b969c4abSMel Gorman put_bh(failed_bh); 407b969c4abSMel Gorman bh = head; 408b969c4abSMel Gorman while (bh != failed_bh) { 409b969c4abSMel Gorman unlock_buffer(bh); 410b969c4abSMel Gorman put_bh(bh); 411b969c4abSMel Gorman bh = bh->b_this_page; 412b969c4abSMel Gorman } 413b969c4abSMel Gorman return false; 414b969c4abSMel Gorman } 415b969c4abSMel Gorman 416b969c4abSMel Gorman bh = bh->b_this_page; 417b969c4abSMel Gorman } while (bh != head); 418b969c4abSMel Gorman return true; 419b969c4abSMel Gorman } 420b969c4abSMel Gorman #else 421b969c4abSMel Gorman static inline bool buffer_migrate_lock_buffers(struct buffer_head *head, 422a6bc32b8SMel Gorman enum migrate_mode mode) 423b969c4abSMel Gorman { 424b969c4abSMel Gorman return true; 425b969c4abSMel Gorman } 426b969c4abSMel Gorman #endif /* CONFIG_BLOCK */ 427b969c4abSMel Gorman 428b20a3503SChristoph Lameter /* 429c3fcf8a5SChristoph Lameter * Replace the page in the mapping. 4305b5c7120SChristoph Lameter * 4315b5c7120SChristoph Lameter * The number of remaining references must be: 4325b5c7120SChristoph Lameter * 1 for anonymous pages without a mapping 4335b5c7120SChristoph Lameter * 2 for pages with a mapping 434266cf658SDavid Howells * 3 for pages with a mapping and PagePrivate/PagePrivate2 set. 
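 *
 * For instance, an isolated page-cache page with buffer_heads is
 * expected to have page_count() == 3: the reference taken when the page
 * was isolated, the page cache's radix-tree reference, and the reference
 * pinned by the attached buffers (PagePrivate).  Any additional user
 * makes the move fail with -EAGAIN so that it can be retried later.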
435b20a3503SChristoph Lameter */ 43636bc08ccSGu Zheng int migrate_page_move_mapping(struct address_space *mapping, 437b969c4abSMel Gorman struct page *newpage, struct page *page, 4388e321fefSBenjamin LaHaise struct buffer_head *head, enum migrate_mode mode, 4398e321fefSBenjamin LaHaise int extra_count) 440b20a3503SChristoph Lameter { 44142cb14b1SHugh Dickins struct zone *oldzone, *newzone; 44242cb14b1SHugh Dickins int dirty; 4438e321fefSBenjamin LaHaise int expected_count = 1 + extra_count; 4447cf9c2c7SNick Piggin void **pslot; 445b20a3503SChristoph Lameter 4468763cb45SJérôme Glisse /* 447df6ad698SJérôme Glisse * Device public or private pages have an extra refcount as they are 448df6ad698SJérôme Glisse * ZONE_DEVICE pages. 4498763cb45SJérôme Glisse */ 450df6ad698SJérôme Glisse expected_count += is_device_private_page(page); 451df6ad698SJérôme Glisse expected_count += is_device_public_page(page); 4528763cb45SJérôme Glisse 4536c5240aeSChristoph Lameter if (!mapping) { 4540e8c7d0fSChristoph Lameter /* Anonymous page without mapping */ 4558e321fefSBenjamin LaHaise if (page_count(page) != expected_count) 4566c5240aeSChristoph Lameter return -EAGAIN; 457cf4b769aSHugh Dickins 458cf4b769aSHugh Dickins /* No turning back from here */ 459cf4b769aSHugh Dickins newpage->index = page->index; 460cf4b769aSHugh Dickins newpage->mapping = page->mapping; 461cf4b769aSHugh Dickins if (PageSwapBacked(page)) 462fa9949daSHugh Dickins __SetPageSwapBacked(newpage); 463cf4b769aSHugh Dickins 46478bd5209SRafael Aquini return MIGRATEPAGE_SUCCESS; 4656c5240aeSChristoph Lameter } 4666c5240aeSChristoph Lameter 46742cb14b1SHugh Dickins oldzone = page_zone(page); 46842cb14b1SHugh Dickins newzone = page_zone(newpage); 46942cb14b1SHugh Dickins 47019fd6231SNick Piggin spin_lock_irq(&mapping->tree_lock); 471b20a3503SChristoph Lameter 4727cf9c2c7SNick Piggin pslot = radix_tree_lookup_slot(&mapping->page_tree, 473b20a3503SChristoph Lameter page_index(page)); 474b20a3503SChristoph Lameter 4758e321fefSBenjamin LaHaise expected_count += 1 + page_has_private(page); 476e286781dSNick Piggin if (page_count(page) != expected_count || 47729c1f677SMel Gorman radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { 47819fd6231SNick Piggin spin_unlock_irq(&mapping->tree_lock); 479e23ca00bSChristoph Lameter return -EAGAIN; 480b20a3503SChristoph Lameter } 481b20a3503SChristoph Lameter 482fe896d18SJoonsoo Kim if (!page_ref_freeze(page, expected_count)) { 48319fd6231SNick Piggin spin_unlock_irq(&mapping->tree_lock); 484e286781dSNick Piggin return -EAGAIN; 485e286781dSNick Piggin } 486e286781dSNick Piggin 487b20a3503SChristoph Lameter /* 488b969c4abSMel Gorman * In the async migration case of moving a page with buffers, lock the 489b969c4abSMel Gorman * buffers using trylock before the mapping is moved. If the mapping 490b969c4abSMel Gorman * was moved, we later failed to lock the buffers and could not move 491b969c4abSMel Gorman * the mapping back due to an elevated page count, we would have to 492b969c4abSMel Gorman * block waiting on other references to be dropped. 
493b969c4abSMel Gorman */ 494a6bc32b8SMel Gorman if (mode == MIGRATE_ASYNC && head && 495a6bc32b8SMel Gorman !buffer_migrate_lock_buffers(head, mode)) { 496fe896d18SJoonsoo Kim page_ref_unfreeze(page, expected_count); 497b969c4abSMel Gorman spin_unlock_irq(&mapping->tree_lock); 498b969c4abSMel Gorman return -EAGAIN; 499b969c4abSMel Gorman } 500b969c4abSMel Gorman 501b969c4abSMel Gorman /* 502cf4b769aSHugh Dickins * Now we know that no one else is looking at the page: 503cf4b769aSHugh Dickins * no turning back from here. 504b20a3503SChristoph Lameter */ 505cf4b769aSHugh Dickins newpage->index = page->index; 506cf4b769aSHugh Dickins newpage->mapping = page->mapping; 5077cf9c2c7SNick Piggin get_page(newpage); /* add cache reference */ 5086326fec1SNicholas Piggin if (PageSwapBacked(page)) { 5096326fec1SNicholas Piggin __SetPageSwapBacked(newpage); 510b20a3503SChristoph Lameter if (PageSwapCache(page)) { 511b20a3503SChristoph Lameter SetPageSwapCache(newpage); 512b20a3503SChristoph Lameter set_page_private(newpage, page_private(page)); 513b20a3503SChristoph Lameter } 5146326fec1SNicholas Piggin } else { 5156326fec1SNicholas Piggin VM_BUG_ON_PAGE(PageSwapCache(page), page); 5166326fec1SNicholas Piggin } 517b20a3503SChristoph Lameter 51842cb14b1SHugh Dickins /* Move dirty while page refs frozen and newpage not yet exposed */ 51942cb14b1SHugh Dickins dirty = PageDirty(page); 52042cb14b1SHugh Dickins if (dirty) { 52142cb14b1SHugh Dickins ClearPageDirty(page); 52242cb14b1SHugh Dickins SetPageDirty(newpage); 52342cb14b1SHugh Dickins } 52442cb14b1SHugh Dickins 5256d75f366SJohannes Weiner radix_tree_replace_slot(&mapping->page_tree, pslot, newpage); 5267cf9c2c7SNick Piggin 5277cf9c2c7SNick Piggin /* 528937a94c9SJacobo Giralt * Drop cache reference from old page by unfreezing 529937a94c9SJacobo Giralt * to one less reference. 5307cf9c2c7SNick Piggin * We know this isn't the last reference. 5317cf9c2c7SNick Piggin */ 532fe896d18SJoonsoo Kim page_ref_unfreeze(page, expected_count - 1); 5337cf9c2c7SNick Piggin 53442cb14b1SHugh Dickins spin_unlock(&mapping->tree_lock); 53542cb14b1SHugh Dickins /* Leave irq disabled to prevent preemption while updating stats */ 53642cb14b1SHugh Dickins 5370e8c7d0fSChristoph Lameter /* 5380e8c7d0fSChristoph Lameter * If moved to a different zone then also account 5390e8c7d0fSChristoph Lameter * the page for that zone. Other VM counters will be 5400e8c7d0fSChristoph Lameter * taken care of when we establish references to the 5410e8c7d0fSChristoph Lameter * new page and drop references to the old page. 5420e8c7d0fSChristoph Lameter * 5430e8c7d0fSChristoph Lameter * Note that anonymous pages are accounted for 5444b9d0fabSMel Gorman * via NR_FILE_PAGES and NR_ANON_MAPPED if they 5450e8c7d0fSChristoph Lameter * are mapped to swap space. 
5460e8c7d0fSChristoph Lameter */ 54742cb14b1SHugh Dickins if (newzone != oldzone) { 54811fb9989SMel Gorman __dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES); 54911fb9989SMel Gorman __inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES); 55042cb14b1SHugh Dickins if (PageSwapBacked(page) && !PageSwapCache(page)) { 55111fb9989SMel Gorman __dec_node_state(oldzone->zone_pgdat, NR_SHMEM); 55211fb9989SMel Gorman __inc_node_state(newzone->zone_pgdat, NR_SHMEM); 5534b02108aSKOSAKI Motohiro } 55442cb14b1SHugh Dickins if (dirty && mapping_cap_account_dirty(mapping)) { 55511fb9989SMel Gorman __dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY); 5565a1c84b4SMel Gorman __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING); 55711fb9989SMel Gorman __inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY); 5585a1c84b4SMel Gorman __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING); 55942cb14b1SHugh Dickins } 56042cb14b1SHugh Dickins } 56142cb14b1SHugh Dickins local_irq_enable(); 562b20a3503SChristoph Lameter 56378bd5209SRafael Aquini return MIGRATEPAGE_SUCCESS; 564b20a3503SChristoph Lameter } 5651118dce7SRichard Weinberger EXPORT_SYMBOL(migrate_page_move_mapping); 566b20a3503SChristoph Lameter 567b20a3503SChristoph Lameter /* 568290408d4SNaoya Horiguchi * The expected number of remaining references is the same as that 569290408d4SNaoya Horiguchi * of migrate_page_move_mapping(). 570290408d4SNaoya Horiguchi */ 571290408d4SNaoya Horiguchi int migrate_huge_page_move_mapping(struct address_space *mapping, 572290408d4SNaoya Horiguchi struct page *newpage, struct page *page) 573290408d4SNaoya Horiguchi { 574290408d4SNaoya Horiguchi int expected_count; 575290408d4SNaoya Horiguchi void **pslot; 576290408d4SNaoya Horiguchi 577290408d4SNaoya Horiguchi spin_lock_irq(&mapping->tree_lock); 578290408d4SNaoya Horiguchi 579290408d4SNaoya Horiguchi pslot = radix_tree_lookup_slot(&mapping->page_tree, 580290408d4SNaoya Horiguchi page_index(page)); 581290408d4SNaoya Horiguchi 582290408d4SNaoya Horiguchi expected_count = 2 + page_has_private(page); 583290408d4SNaoya Horiguchi if (page_count(page) != expected_count || 58429c1f677SMel Gorman radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { 585290408d4SNaoya Horiguchi spin_unlock_irq(&mapping->tree_lock); 586290408d4SNaoya Horiguchi return -EAGAIN; 587290408d4SNaoya Horiguchi } 588290408d4SNaoya Horiguchi 589fe896d18SJoonsoo Kim if (!page_ref_freeze(page, expected_count)) { 590290408d4SNaoya Horiguchi spin_unlock_irq(&mapping->tree_lock); 591290408d4SNaoya Horiguchi return -EAGAIN; 592290408d4SNaoya Horiguchi } 593290408d4SNaoya Horiguchi 594cf4b769aSHugh Dickins newpage->index = page->index; 595cf4b769aSHugh Dickins newpage->mapping = page->mapping; 5966a93ca8fSJohannes Weiner 597290408d4SNaoya Horiguchi get_page(newpage); 598290408d4SNaoya Horiguchi 5996d75f366SJohannes Weiner radix_tree_replace_slot(&mapping->page_tree, pslot, newpage); 600290408d4SNaoya Horiguchi 601fe896d18SJoonsoo Kim page_ref_unfreeze(page, expected_count - 1); 602290408d4SNaoya Horiguchi 603290408d4SNaoya Horiguchi spin_unlock_irq(&mapping->tree_lock); 6046a93ca8fSJohannes Weiner 60578bd5209SRafael Aquini return MIGRATEPAGE_SUCCESS; 606290408d4SNaoya Horiguchi } 607290408d4SNaoya Horiguchi 608290408d4SNaoya Horiguchi /* 60930b0a105SDave Hansen * Gigantic pages are so large that we do not guarantee that page++ pointer 61030b0a105SDave Hansen * arithmetic will work across the entire page. We need something more 61130b0a105SDave Hansen * specialized. 
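 *
 * (With SPARSEMEM and no vmemmap, the struct page array is only
 * guaranteed to be virtually contiguous within a section, so dst++
 * across a MAX_ORDER boundary could walk off one section's mem_map;
 * mem_map_next() re-derives the struct page from the pfn at those
 * boundaries instead.)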
61230b0a105SDave Hansen */ 61330b0a105SDave Hansen static void __copy_gigantic_page(struct page *dst, struct page *src, 61430b0a105SDave Hansen int nr_pages) 61530b0a105SDave Hansen { 61630b0a105SDave Hansen int i; 61730b0a105SDave Hansen struct page *dst_base = dst; 61830b0a105SDave Hansen struct page *src_base = src; 61930b0a105SDave Hansen 62030b0a105SDave Hansen for (i = 0; i < nr_pages; ) { 62130b0a105SDave Hansen cond_resched(); 62230b0a105SDave Hansen copy_highpage(dst, src); 62330b0a105SDave Hansen 62430b0a105SDave Hansen i++; 62530b0a105SDave Hansen dst = mem_map_next(dst, dst_base, i); 62630b0a105SDave Hansen src = mem_map_next(src, src_base, i); 62730b0a105SDave Hansen } 62830b0a105SDave Hansen } 62930b0a105SDave Hansen 63030b0a105SDave Hansen static void copy_huge_page(struct page *dst, struct page *src) 63130b0a105SDave Hansen { 63230b0a105SDave Hansen int i; 63330b0a105SDave Hansen int nr_pages; 63430b0a105SDave Hansen 63530b0a105SDave Hansen if (PageHuge(src)) { 63630b0a105SDave Hansen /* hugetlbfs page */ 63730b0a105SDave Hansen struct hstate *h = page_hstate(src); 63830b0a105SDave Hansen nr_pages = pages_per_huge_page(h); 63930b0a105SDave Hansen 64030b0a105SDave Hansen if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) { 64130b0a105SDave Hansen __copy_gigantic_page(dst, src, nr_pages); 64230b0a105SDave Hansen return; 64330b0a105SDave Hansen } 64430b0a105SDave Hansen } else { 64530b0a105SDave Hansen /* thp page */ 64630b0a105SDave Hansen BUG_ON(!PageTransHuge(src)); 64730b0a105SDave Hansen nr_pages = hpage_nr_pages(src); 64830b0a105SDave Hansen } 64930b0a105SDave Hansen 65030b0a105SDave Hansen for (i = 0; i < nr_pages; i++) { 65130b0a105SDave Hansen cond_resched(); 65230b0a105SDave Hansen copy_highpage(dst + i, src + i); 65330b0a105SDave Hansen } 65430b0a105SDave Hansen } 65530b0a105SDave Hansen 65630b0a105SDave Hansen /* 657b20a3503SChristoph Lameter * Copy the page to its new location 658b20a3503SChristoph Lameter */ 6592916ecc0SJérôme Glisse void migrate_page_states(struct page *newpage, struct page *page) 660b20a3503SChristoph Lameter { 6617851a45cSRik van Riel int cpupid; 6627851a45cSRik van Riel 663b20a3503SChristoph Lameter if (PageError(page)) 664b20a3503SChristoph Lameter SetPageError(newpage); 665b20a3503SChristoph Lameter if (PageReferenced(page)) 666b20a3503SChristoph Lameter SetPageReferenced(newpage); 667b20a3503SChristoph Lameter if (PageUptodate(page)) 668b20a3503SChristoph Lameter SetPageUptodate(newpage); 669894bc310SLee Schermerhorn if (TestClearPageActive(page)) { 670309381feSSasha Levin VM_BUG_ON_PAGE(PageUnevictable(page), page); 671b20a3503SChristoph Lameter SetPageActive(newpage); 672418b27efSLee Schermerhorn } else if (TestClearPageUnevictable(page)) 673418b27efSLee Schermerhorn SetPageUnevictable(newpage); 674b20a3503SChristoph Lameter if (PageChecked(page)) 675b20a3503SChristoph Lameter SetPageChecked(newpage); 676b20a3503SChristoph Lameter if (PageMappedToDisk(page)) 677b20a3503SChristoph Lameter SetPageMappedToDisk(newpage); 678b20a3503SChristoph Lameter 67942cb14b1SHugh Dickins /* Move dirty on pages not done by migrate_page_move_mapping() */ 68042cb14b1SHugh Dickins if (PageDirty(page)) 681752dc185SHugh Dickins SetPageDirty(newpage); 682b20a3503SChristoph Lameter 68333c3fc71SVladimir Davydov if (page_is_young(page)) 68433c3fc71SVladimir Davydov set_page_young(newpage); 68533c3fc71SVladimir Davydov if (page_is_idle(page)) 68633c3fc71SVladimir Davydov set_page_idle(newpage); 68733c3fc71SVladimir Davydov 6887851a45cSRik van Riel /* 6897851a45cSRik van 
Riel * Copy NUMA information to the new page, to prevent over-eager 6907851a45cSRik van Riel * future migrations of this same page. 6917851a45cSRik van Riel */ 6927851a45cSRik van Riel cpupid = page_cpupid_xchg_last(page, -1); 6937851a45cSRik van Riel page_cpupid_xchg_last(newpage, cpupid); 6947851a45cSRik van Riel 695e9995ef9SHugh Dickins ksm_migrate_page(newpage, page); 696c8d6553bSHugh Dickins /* 697c8d6553bSHugh Dickins * Please do not reorder this without considering how mm/ksm.c's 698c8d6553bSHugh Dickins * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache(). 699c8d6553bSHugh Dickins */ 700b3b3a99cSNaoya Horiguchi if (PageSwapCache(page)) 701b20a3503SChristoph Lameter ClearPageSwapCache(page); 702b20a3503SChristoph Lameter ClearPagePrivate(page); 703b20a3503SChristoph Lameter set_page_private(page, 0); 704b20a3503SChristoph Lameter 705b20a3503SChristoph Lameter /* 706b20a3503SChristoph Lameter * If any waiters have accumulated on the new page then 707b20a3503SChristoph Lameter * wake them up. 708b20a3503SChristoph Lameter */ 709b20a3503SChristoph Lameter if (PageWriteback(newpage)) 710b20a3503SChristoph Lameter end_page_writeback(newpage); 711d435edcaSVlastimil Babka 712d435edcaSVlastimil Babka copy_page_owner(page, newpage); 71374485cf2SJohannes Weiner 71474485cf2SJohannes Weiner mem_cgroup_migrate(page, newpage); 715b20a3503SChristoph Lameter } 7162916ecc0SJérôme Glisse EXPORT_SYMBOL(migrate_page_states); 7172916ecc0SJérôme Glisse 7182916ecc0SJérôme Glisse void migrate_page_copy(struct page *newpage, struct page *page) 7192916ecc0SJérôme Glisse { 7202916ecc0SJérôme Glisse if (PageHuge(page) || PageTransHuge(page)) 7212916ecc0SJérôme Glisse copy_huge_page(newpage, page); 7222916ecc0SJérôme Glisse else 7232916ecc0SJérôme Glisse copy_highpage(newpage, page); 7242916ecc0SJérôme Glisse 7252916ecc0SJérôme Glisse migrate_page_states(newpage, page); 7262916ecc0SJérôme Glisse } 7271118dce7SRichard Weinberger EXPORT_SYMBOL(migrate_page_copy); 728b20a3503SChristoph Lameter 7291d8b85ccSChristoph Lameter /************************************************************ 7301d8b85ccSChristoph Lameter * Migration functions 7311d8b85ccSChristoph Lameter ***********************************************************/ 7321d8b85ccSChristoph Lameter 733b20a3503SChristoph Lameter /* 734bda807d4SMinchan Kim * Common logic to directly migrate a single LRU page suitable for 735266cf658SDavid Howells * pages that do not use PagePrivate/PagePrivate2. 736b20a3503SChristoph Lameter * 737b20a3503SChristoph Lameter * Pages are locked upon entry and exit. 
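 *
 * Address spaces whose pages need no extra work can point their
 * migratepage method straight at this helper; the swap cache's
 * address_space_operations do exactly that (abridged sketch):
 *
 *	static const struct address_space_operations swap_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};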
738b20a3503SChristoph Lameter */ 7392d1db3b1SChristoph Lameter int migrate_page(struct address_space *mapping, 740a6bc32b8SMel Gorman struct page *newpage, struct page *page, 741a6bc32b8SMel Gorman enum migrate_mode mode) 742b20a3503SChristoph Lameter { 743b20a3503SChristoph Lameter int rc; 744b20a3503SChristoph Lameter 745b20a3503SChristoph Lameter BUG_ON(PageWriteback(page)); /* Writeback must be complete */ 746b20a3503SChristoph Lameter 7478e321fefSBenjamin LaHaise rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); 748b20a3503SChristoph Lameter 74978bd5209SRafael Aquini if (rc != MIGRATEPAGE_SUCCESS) 750b20a3503SChristoph Lameter return rc; 751b20a3503SChristoph Lameter 7522916ecc0SJérôme Glisse if (mode != MIGRATE_SYNC_NO_COPY) 753b20a3503SChristoph Lameter migrate_page_copy(newpage, page); 7542916ecc0SJérôme Glisse else 7552916ecc0SJérôme Glisse migrate_page_states(newpage, page); 75678bd5209SRafael Aquini return MIGRATEPAGE_SUCCESS; 757b20a3503SChristoph Lameter } 758b20a3503SChristoph Lameter EXPORT_SYMBOL(migrate_page); 759b20a3503SChristoph Lameter 7609361401eSDavid Howells #ifdef CONFIG_BLOCK 761b20a3503SChristoph Lameter /* 7621d8b85ccSChristoph Lameter * Migration function for pages with buffers. This function can only be used 7631d8b85ccSChristoph Lameter * if the underlying filesystem guarantees that no other references to "page" 7641d8b85ccSChristoph Lameter * exist. 7651d8b85ccSChristoph Lameter */ 7662d1db3b1SChristoph Lameter int buffer_migrate_page(struct address_space *mapping, 767a6bc32b8SMel Gorman struct page *newpage, struct page *page, enum migrate_mode mode) 7681d8b85ccSChristoph Lameter { 7691d8b85ccSChristoph Lameter struct buffer_head *bh, *head; 7701d8b85ccSChristoph Lameter int rc; 7711d8b85ccSChristoph Lameter 7721d8b85ccSChristoph Lameter if (!page_has_buffers(page)) 773a6bc32b8SMel Gorman return migrate_page(mapping, newpage, page, mode); 7741d8b85ccSChristoph Lameter 7751d8b85ccSChristoph Lameter head = page_buffers(page); 7761d8b85ccSChristoph Lameter 7778e321fefSBenjamin LaHaise rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0); 7781d8b85ccSChristoph Lameter 77978bd5209SRafael Aquini if (rc != MIGRATEPAGE_SUCCESS) 7801d8b85ccSChristoph Lameter return rc; 7811d8b85ccSChristoph Lameter 782b969c4abSMel Gorman /* 783b969c4abSMel Gorman * In the async case, migrate_page_move_mapping locked the buffers 784b969c4abSMel Gorman * with an IRQ-safe spinlock held. 
In the sync case, the buffers 785b969c4abSMel Gorman * need to be locked now 786b969c4abSMel Gorman */ 787a6bc32b8SMel Gorman if (mode != MIGRATE_ASYNC) 788a6bc32b8SMel Gorman BUG_ON(!buffer_migrate_lock_buffers(head, mode)); 7891d8b85ccSChristoph Lameter 7901d8b85ccSChristoph Lameter ClearPagePrivate(page); 7911d8b85ccSChristoph Lameter set_page_private(newpage, page_private(page)); 7921d8b85ccSChristoph Lameter set_page_private(page, 0); 7931d8b85ccSChristoph Lameter put_page(page); 7941d8b85ccSChristoph Lameter get_page(newpage); 7951d8b85ccSChristoph Lameter 7961d8b85ccSChristoph Lameter bh = head; 7971d8b85ccSChristoph Lameter do { 7981d8b85ccSChristoph Lameter set_bh_page(bh, newpage, bh_offset(bh)); 7991d8b85ccSChristoph Lameter bh = bh->b_this_page; 8001d8b85ccSChristoph Lameter 8011d8b85ccSChristoph Lameter } while (bh != head); 8021d8b85ccSChristoph Lameter 8031d8b85ccSChristoph Lameter SetPagePrivate(newpage); 8041d8b85ccSChristoph Lameter 8052916ecc0SJérôme Glisse if (mode != MIGRATE_SYNC_NO_COPY) 8061d8b85ccSChristoph Lameter migrate_page_copy(newpage, page); 8072916ecc0SJérôme Glisse else 8082916ecc0SJérôme Glisse migrate_page_states(newpage, page); 8091d8b85ccSChristoph Lameter 8101d8b85ccSChristoph Lameter bh = head; 8111d8b85ccSChristoph Lameter do { 8121d8b85ccSChristoph Lameter unlock_buffer(bh); 8131d8b85ccSChristoph Lameter put_bh(bh); 8141d8b85ccSChristoph Lameter bh = bh->b_this_page; 8151d8b85ccSChristoph Lameter 8161d8b85ccSChristoph Lameter } while (bh != head); 8171d8b85ccSChristoph Lameter 81878bd5209SRafael Aquini return MIGRATEPAGE_SUCCESS; 8191d8b85ccSChristoph Lameter } 8201d8b85ccSChristoph Lameter EXPORT_SYMBOL(buffer_migrate_page); 8219361401eSDavid Howells #endif 8221d8b85ccSChristoph Lameter 82304e62a29SChristoph Lameter /* 82404e62a29SChristoph Lameter * Writeback a page to clean the dirty state 82504e62a29SChristoph Lameter */ 82604e62a29SChristoph Lameter static int writeout(struct address_space *mapping, struct page *page) 82704e62a29SChristoph Lameter { 82804e62a29SChristoph Lameter struct writeback_control wbc = { 82904e62a29SChristoph Lameter .sync_mode = WB_SYNC_NONE, 83004e62a29SChristoph Lameter .nr_to_write = 1, 83104e62a29SChristoph Lameter .range_start = 0, 83204e62a29SChristoph Lameter .range_end = LLONG_MAX, 83304e62a29SChristoph Lameter .for_reclaim = 1 83404e62a29SChristoph Lameter }; 83504e62a29SChristoph Lameter int rc; 83604e62a29SChristoph Lameter 83704e62a29SChristoph Lameter if (!mapping->a_ops->writepage) 83804e62a29SChristoph Lameter /* No write method for the address space */ 83904e62a29SChristoph Lameter return -EINVAL; 84004e62a29SChristoph Lameter 84104e62a29SChristoph Lameter if (!clear_page_dirty_for_io(page)) 84204e62a29SChristoph Lameter /* Someone else already triggered a write */ 84304e62a29SChristoph Lameter return -EAGAIN; 84404e62a29SChristoph Lameter 84504e62a29SChristoph Lameter /* 84604e62a29SChristoph Lameter * A dirty page may imply that the underlying filesystem has 84704e62a29SChristoph Lameter * the page on some queue. So the page must be clean for 84804e62a29SChristoph Lameter * migration. Writeout may mean we loose the lock and the 84904e62a29SChristoph Lameter * page state is no longer what we checked for earlier. 85004e62a29SChristoph Lameter * At this point we know that the migration attempt cannot 85104e62a29SChristoph Lameter * be successful. 85204e62a29SChristoph Lameter */ 853e388466dSKirill A. 
Shutemov remove_migration_ptes(page, page, false); 85404e62a29SChristoph Lameter 85504e62a29SChristoph Lameter rc = mapping->a_ops->writepage(page, &wbc); 85604e62a29SChristoph Lameter 85704e62a29SChristoph Lameter if (rc != AOP_WRITEPAGE_ACTIVATE) 85804e62a29SChristoph Lameter /* unlocked. Relock */ 85904e62a29SChristoph Lameter lock_page(page); 86004e62a29SChristoph Lameter 861bda8550dSHugh Dickins return (rc < 0) ? -EIO : -EAGAIN; 86204e62a29SChristoph Lameter } 86304e62a29SChristoph Lameter 86404e62a29SChristoph Lameter /* 86504e62a29SChristoph Lameter * Default handling if a filesystem does not provide a migration function. 86604e62a29SChristoph Lameter */ 8678351a6e4SChristoph Lameter static int fallback_migrate_page(struct address_space *mapping, 868a6bc32b8SMel Gorman struct page *newpage, struct page *page, enum migrate_mode mode) 8698351a6e4SChristoph Lameter { 870b969c4abSMel Gorman if (PageDirty(page)) { 871a6bc32b8SMel Gorman /* Only writeback pages in full synchronous migration */ 8722916ecc0SJérôme Glisse switch (mode) { 8732916ecc0SJérôme Glisse case MIGRATE_SYNC: 8742916ecc0SJérôme Glisse case MIGRATE_SYNC_NO_COPY: 8752916ecc0SJérôme Glisse break; 8762916ecc0SJérôme Glisse default: 877b969c4abSMel Gorman return -EBUSY; 8782916ecc0SJérôme Glisse } 87904e62a29SChristoph Lameter return writeout(mapping, page); 880b969c4abSMel Gorman } 8818351a6e4SChristoph Lameter 8828351a6e4SChristoph Lameter /* 8838351a6e4SChristoph Lameter * Buffers may be managed in a filesystem specific way. 8848351a6e4SChristoph Lameter * We must have no buffers or drop them. 8858351a6e4SChristoph Lameter */ 886266cf658SDavid Howells if (page_has_private(page) && 8878351a6e4SChristoph Lameter !try_to_release_page(page, GFP_KERNEL)) 8888351a6e4SChristoph Lameter return -EAGAIN; 8898351a6e4SChristoph Lameter 890a6bc32b8SMel Gorman return migrate_page(mapping, newpage, page, mode); 8918351a6e4SChristoph Lameter } 8928351a6e4SChristoph Lameter 8931d8b85ccSChristoph Lameter /* 894e24f0b8fSChristoph Lameter * Move a page to a newly allocated page 895e24f0b8fSChristoph Lameter * The page is locked and all ptes have been successfully removed. 896b20a3503SChristoph Lameter * 897e24f0b8fSChristoph Lameter * The new page will have replaced the old page if this function 898e24f0b8fSChristoph Lameter * is successful. 
899894bc310SLee Schermerhorn * 900894bc310SLee Schermerhorn * Return value: 901894bc310SLee Schermerhorn * < 0 - error code 90278bd5209SRafael Aquini * MIGRATEPAGE_SUCCESS - success 903b20a3503SChristoph Lameter */ 9043fe2011fSMel Gorman static int move_to_new_page(struct page *newpage, struct page *page, 9055c3f9a67SHugh Dickins enum migrate_mode mode) 906b20a3503SChristoph Lameter { 907e24f0b8fSChristoph Lameter struct address_space *mapping; 908bda807d4SMinchan Kim int rc = -EAGAIN; 909bda807d4SMinchan Kim bool is_lru = !__PageMovable(page); 910b20a3503SChristoph Lameter 9117db7671fSHugh Dickins VM_BUG_ON_PAGE(!PageLocked(page), page); 9127db7671fSHugh Dickins VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 913b20a3503SChristoph Lameter 914b20a3503SChristoph Lameter mapping = page_mapping(page); 915bda807d4SMinchan Kim 916bda807d4SMinchan Kim if (likely(is_lru)) { 917b20a3503SChristoph Lameter if (!mapping) 918a6bc32b8SMel Gorman rc = migrate_page(mapping, newpage, page, mode); 9196c5240aeSChristoph Lameter else if (mapping->a_ops->migratepage) 920b20a3503SChristoph Lameter /* 921bda807d4SMinchan Kim * Most pages have a mapping and most filesystems 922bda807d4SMinchan Kim * provide a migratepage callback. Anonymous pages 923bda807d4SMinchan Kim * are part of swap space which also has its own 924bda807d4SMinchan Kim * migratepage callback. This is the most common path 925bda807d4SMinchan Kim * for page migration. 926b20a3503SChristoph Lameter */ 927bda807d4SMinchan Kim rc = mapping->a_ops->migratepage(mapping, newpage, 928bda807d4SMinchan Kim page, mode); 9298351a6e4SChristoph Lameter else 930bda807d4SMinchan Kim rc = fallback_migrate_page(mapping, newpage, 931bda807d4SMinchan Kim page, mode); 932bda807d4SMinchan Kim } else { 933bda807d4SMinchan Kim /* 934bda807d4SMinchan Kim * In case of non-lru page, it could be released after 935bda807d4SMinchan Kim * isolation step. In that case, we shouldn't try migration. 936bda807d4SMinchan Kim */ 937bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageIsolated(page), page); 938bda807d4SMinchan Kim if (!PageMovable(page)) { 939bda807d4SMinchan Kim rc = MIGRATEPAGE_SUCCESS; 940bda807d4SMinchan Kim __ClearPageIsolated(page); 941bda807d4SMinchan Kim goto out; 942bda807d4SMinchan Kim } 943bda807d4SMinchan Kim 944bda807d4SMinchan Kim rc = mapping->a_ops->migratepage(mapping, newpage, 945bda807d4SMinchan Kim page, mode); 946bda807d4SMinchan Kim WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS && 947bda807d4SMinchan Kim !PageIsolated(page)); 948bda807d4SMinchan Kim } 949b20a3503SChristoph Lameter 9505c3f9a67SHugh Dickins /* 9515c3f9a67SHugh Dickins * When successful, old pagecache page->mapping must be cleared before 9525c3f9a67SHugh Dickins * page is freed; but stats require that PageAnon be left as PageAnon. 9535c3f9a67SHugh Dickins */ 9545c3f9a67SHugh Dickins if (rc == MIGRATEPAGE_SUCCESS) { 955bda807d4SMinchan Kim if (__PageMovable(page)) { 956bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageIsolated(page), page); 957bda807d4SMinchan Kim 958bda807d4SMinchan Kim /* 959bda807d4SMinchan Kim * We clear PG_movable under page_lock so any compactor 960bda807d4SMinchan Kim * cannot try to migrate this page. 961bda807d4SMinchan Kim */ 962bda807d4SMinchan Kim __ClearPageIsolated(page); 963bda807d4SMinchan Kim } 964bda807d4SMinchan Kim 965bda807d4SMinchan Kim /* 966bda807d4SMinchan Kim * Anonymous and movable page->mapping will be cleard by 967bda807d4SMinchan Kim * free_pages_prepare so don't reset it here for keeping 968bda807d4SMinchan Kim * the type to work PageAnon, for example. 
969bda807d4SMinchan Kim */
970bda807d4SMinchan Kim if (!PageMappingFlags(page))
9715c3f9a67SHugh Dickins page->mapping = NULL;
9723fe2011fSMel Gorman }
973bda807d4SMinchan Kim out:
974e24f0b8fSChristoph Lameter return rc;
975e24f0b8fSChristoph Lameter }
976e24f0b8fSChristoph Lameter 
9770dabec93SMinchan Kim static int __unmap_and_move(struct page *page, struct page *newpage,
9789c620e2bSHugh Dickins int force, enum migrate_mode mode)
979e24f0b8fSChristoph Lameter {
9800dabec93SMinchan Kim int rc = -EAGAIN;
9812ebba6b7SHugh Dickins int page_was_mapped = 0;
9823f6c8272SMel Gorman struct anon_vma *anon_vma = NULL;
983bda807d4SMinchan Kim bool is_lru = !__PageMovable(page);
98495a402c3SChristoph Lameter 
985529ae9aaSNick Piggin if (!trylock_page(page)) {
986a6bc32b8SMel Gorman if (!force || mode == MIGRATE_ASYNC)
9870dabec93SMinchan Kim goto out;
9883e7d3449SMel Gorman 
9893e7d3449SMel Gorman /*
9903e7d3449SMel Gorman * It's not safe for direct compaction to call lock_page.
9913e7d3449SMel Gorman * For example, during page readahead pages are added locked
9923e7d3449SMel Gorman * to the LRU. Later, when the IO completes the pages are
9933e7d3449SMel Gorman * marked uptodate and unlocked. However, the queueing
9943e7d3449SMel Gorman * could be merging multiple pages for one bio (e.g.
9953e7d3449SMel Gorman * mpage_readpages). If an allocation happens for the
9963e7d3449SMel Gorman * second or third page, the process can end up locking
9973e7d3449SMel Gorman * the same page twice and deadlocking. Rather than
9983e7d3449SMel Gorman * trying to be clever about what pages can be locked,
9993e7d3449SMel Gorman * avoid the use of lock_page for direct compaction
10003e7d3449SMel Gorman * altogether.
10013e7d3449SMel Gorman */
10023e7d3449SMel Gorman if (current->flags & PF_MEMALLOC)
10030dabec93SMinchan Kim goto out;
10043e7d3449SMel Gorman 
1005e24f0b8fSChristoph Lameter lock_page(page);
1006e24f0b8fSChristoph Lameter }
1007e24f0b8fSChristoph Lameter 
1008e24f0b8fSChristoph Lameter if (PageWriteback(page)) {
100911bc82d6SAndrea Arcangeli /*
1010fed5b64aSJianguo Wu * Only in the case of a full synchronous migration is it
1011a6bc32b8SMel Gorman * necessary to wait for PageWriteback. In the async case,
1012a6bc32b8SMel Gorman * the retry loop is too short and in the sync-light case,
1013a6bc32b8SMel Gorman * the overhead of stalling is too much.
101411bc82d6SAndrea Arcangeli */
10152916ecc0SJérôme Glisse switch (mode) {
10162916ecc0SJérôme Glisse case MIGRATE_SYNC:
10172916ecc0SJérôme Glisse case MIGRATE_SYNC_NO_COPY:
10182916ecc0SJérôme Glisse break;
10192916ecc0SJérôme Glisse default:
102011bc82d6SAndrea Arcangeli rc = -EBUSY;
10210a31bc97SJohannes Weiner goto out_unlock;
102211bc82d6SAndrea Arcangeli }
102311bc82d6SAndrea Arcangeli if (!force)
10240a31bc97SJohannes Weiner goto out_unlock;
1025e24f0b8fSChristoph Lameter wait_on_page_writeback(page);
1026e24f0b8fSChristoph Lameter }
102703f15c86SHugh Dickins 
1028e24f0b8fSChristoph Lameter /*
1029dc386d4dSKAMEZAWA Hiroyuki * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
1030dc386d4dSKAMEZAWA Hiroyuki * we cannot notice that the anon_vma is freed while we migrate a page.
10311ce82b69SHugh Dickins * This get_anon_vma() delays freeing the anon_vma pointer until the end
1032dc386d4dSKAMEZAWA Hiroyuki * of migration. File cache pages are no problem because of page_lock();
1033989f89c5SKAMEZAWA Hiroyuki * file caches may use write_page() or lock_page() during migration, so
1034989f89c5SKAMEZAWA Hiroyuki * only anon pages need special care here.
10353fe2011fSMel Gorman * 103603f15c86SHugh Dickins * Only page_get_anon_vma() understands the subtleties of 103703f15c86SHugh Dickins * getting a hold on an anon_vma from outside one of its mms. 103803f15c86SHugh Dickins * But if we cannot get anon_vma, then we won't need it anyway, 103903f15c86SHugh Dickins * because that implies that the anon page is no longer mapped 104003f15c86SHugh Dickins * (and cannot be remapped so long as we hold the page lock). 10413fe2011fSMel Gorman */ 104203f15c86SHugh Dickins if (PageAnon(page) && !PageKsm(page)) 104303f15c86SHugh Dickins anon_vma = page_get_anon_vma(page); 104462e1c553SShaohua Li 10457db7671fSHugh Dickins /* 10467db7671fSHugh Dickins * Block others from accessing the new page when we get around to 10477db7671fSHugh Dickins * establishing additional references. We are usually the only one 10487db7671fSHugh Dickins * holding a reference to newpage at this point. We used to have a BUG 10497db7671fSHugh Dickins * here if trylock_page(newpage) fails, but would like to allow for 10507db7671fSHugh Dickins * cases where there might be a race with the previous use of newpage. 10517db7671fSHugh Dickins * This is much like races on refcount of oldpage: just don't BUG(). 10527db7671fSHugh Dickins */ 10537db7671fSHugh Dickins if (unlikely(!trylock_page(newpage))) 10547db7671fSHugh Dickins goto out_unlock; 10557db7671fSHugh Dickins 1056bda807d4SMinchan Kim if (unlikely(!is_lru)) { 1057bda807d4SMinchan Kim rc = move_to_new_page(newpage, page, mode); 1058bda807d4SMinchan Kim goto out_unlock_both; 1059bda807d4SMinchan Kim } 1060bda807d4SMinchan Kim 1061dc386d4dSKAMEZAWA Hiroyuki /* 106262e1c553SShaohua Li * Corner case handling: 106362e1c553SShaohua Li * 1. When a new swap-cache page is read into, it is added to the LRU 106462e1c553SShaohua Li * and treated as swapcache but it has no rmap yet. 106562e1c553SShaohua Li * Calling try_to_unmap() against a page->mapping==NULL page will 106662e1c553SShaohua Li * trigger a BUG. So handle it here. 106762e1c553SShaohua Li * 2. An orphaned page (see truncate_complete_page) might have 106862e1c553SShaohua Li * fs-private metadata. The page can be picked up due to memory 106962e1c553SShaohua Li * offlining. Everywhere else except page reclaim, the page is 107062e1c553SShaohua Li * invisible to the vm, so the page can not be migrated. So try to 107162e1c553SShaohua Li * free the metadata, so the page can be freed. 1072dc386d4dSKAMEZAWA Hiroyuki */ 107362e1c553SShaohua Li if (!page->mapping) { 1074309381feSSasha Levin VM_BUG_ON_PAGE(PageAnon(page), page); 10751ce82b69SHugh Dickins if (page_has_private(page)) { 107662e1c553SShaohua Li try_to_free_buffers(page); 10777db7671fSHugh Dickins goto out_unlock_both; 107862e1c553SShaohua Li } 10797db7671fSHugh Dickins } else if (page_mapped(page)) { 10807db7671fSHugh Dickins /* Establish migration ptes */ 108103f15c86SHugh Dickins VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma, 108203f15c86SHugh Dickins page); 10832ebba6b7SHugh Dickins try_to_unmap(page, 1084da1b13ccSWanpeng Li TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); 10852ebba6b7SHugh Dickins page_was_mapped = 1; 10862ebba6b7SHugh Dickins } 1087dc386d4dSKAMEZAWA Hiroyuki 1088e24f0b8fSChristoph Lameter if (!page_mapped(page)) 10895c3f9a67SHugh Dickins rc = move_to_new_page(newpage, page, mode); 1090e24f0b8fSChristoph Lameter 10915c3f9a67SHugh Dickins if (page_was_mapped) 10925c3f9a67SHugh Dickins remove_migration_ptes(page, 1093e388466dSKirill A. Shutemov rc == MIGRATEPAGE_SUCCESS ? 
newpage : page, false); 10943f6c8272SMel Gorman 10957db7671fSHugh Dickins out_unlock_both: 10967db7671fSHugh Dickins unlock_page(newpage); 10977db7671fSHugh Dickins out_unlock: 10983f6c8272SMel Gorman /* Drop an anon_vma reference if we took one */ 109976545066SRik van Riel if (anon_vma) 11009e60109fSPeter Zijlstra put_anon_vma(anon_vma); 1101b20a3503SChristoph Lameter unlock_page(page); 11020dabec93SMinchan Kim out: 1103c6c919ebSMinchan Kim /* 1104c6c919ebSMinchan Kim * If migration is successful, decrease refcount of the newpage 1105c6c919ebSMinchan Kim * which will not free the page because new page owner increased 1106c6c919ebSMinchan Kim * refcounter. As well, if it is LRU page, add the page to LRU 1107c6c919ebSMinchan Kim * list in here. 1108c6c919ebSMinchan Kim */ 1109c6c919ebSMinchan Kim if (rc == MIGRATEPAGE_SUCCESS) { 1110b1123ea6SMinchan Kim if (unlikely(__PageMovable(newpage))) 1111c6c919ebSMinchan Kim put_page(newpage); 1112c6c919ebSMinchan Kim else 1113c6c919ebSMinchan Kim putback_lru_page(newpage); 1114c6c919ebSMinchan Kim } 1115c6c919ebSMinchan Kim 11160dabec93SMinchan Kim return rc; 11170dabec93SMinchan Kim } 111895a402c3SChristoph Lameter 11190dabec93SMinchan Kim /* 1120ef2a5153SGeert Uytterhoeven * gcc 4.7 and 4.8 on arm get an ICEs when inlining unmap_and_move(). Work 1121ef2a5153SGeert Uytterhoeven * around it. 1122ef2a5153SGeert Uytterhoeven */ 1123ef2a5153SGeert Uytterhoeven #if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM) 1124ef2a5153SGeert Uytterhoeven #define ICE_noinline noinline 1125ef2a5153SGeert Uytterhoeven #else 1126ef2a5153SGeert Uytterhoeven #define ICE_noinline 1127ef2a5153SGeert Uytterhoeven #endif 1128ef2a5153SGeert Uytterhoeven 1129ef2a5153SGeert Uytterhoeven /* 11300dabec93SMinchan Kim * Obtain the lock on page, remove all ptes and migrate the page 11310dabec93SMinchan Kim * to the newly allocated page in newpage. 11320dabec93SMinchan Kim */ 1133ef2a5153SGeert Uytterhoeven static ICE_noinline int unmap_and_move(new_page_t get_new_page, 1134ef2a5153SGeert Uytterhoeven free_page_t put_new_page, 1135ef2a5153SGeert Uytterhoeven unsigned long private, struct page *page, 1136add05cecSNaoya Horiguchi int force, enum migrate_mode mode, 1137add05cecSNaoya Horiguchi enum migrate_reason reason) 11380dabec93SMinchan Kim { 11392def7424SHugh Dickins int rc = MIGRATEPAGE_SUCCESS; 11400dabec93SMinchan Kim int *result = NULL; 11412def7424SHugh Dickins struct page *newpage; 11420dabec93SMinchan Kim 11432def7424SHugh Dickins newpage = get_new_page(page, private, &result); 11440dabec93SMinchan Kim if (!newpage) 11450dabec93SMinchan Kim return -ENOMEM; 11460dabec93SMinchan Kim 11470dabec93SMinchan Kim if (page_count(page) == 1) { 11480dabec93SMinchan Kim /* page was freed from under us. So we are done. */ 1149c6c919ebSMinchan Kim ClearPageActive(page); 1150c6c919ebSMinchan Kim ClearPageUnevictable(page); 1151bda807d4SMinchan Kim if (unlikely(__PageMovable(page))) { 1152bda807d4SMinchan Kim lock_page(page); 1153bda807d4SMinchan Kim if (!PageMovable(page)) 1154bda807d4SMinchan Kim __ClearPageIsolated(page); 1155bda807d4SMinchan Kim unlock_page(page); 1156bda807d4SMinchan Kim } 1157c6c919ebSMinchan Kim if (put_new_page) 1158c6c919ebSMinchan Kim put_new_page(newpage, private); 1159c6c919ebSMinchan Kim else 1160c6c919ebSMinchan Kim put_page(newpage); 11610dabec93SMinchan Kim goto out; 11620dabec93SMinchan Kim } 11630dabec93SMinchan Kim 1164616b8371SZi Yan if (unlikely(PageTransHuge(page) && !PageTransHuge(newpage))) { 11654d2fa965SKirill A. 
Shutemov lock_page(page); 11664d2fa965SKirill A. Shutemov rc = split_huge_page(page); 11674d2fa965SKirill A. Shutemov unlock_page(page); 11684d2fa965SKirill A. Shutemov if (rc) 11690dabec93SMinchan Kim goto out; 11704d2fa965SKirill A. Shutemov } 11710dabec93SMinchan Kim 11729c620e2bSHugh Dickins rc = __unmap_and_move(page, newpage, force, mode); 1173c6c919ebSMinchan Kim if (rc == MIGRATEPAGE_SUCCESS) 11747cd12b4aSVlastimil Babka set_page_owner_migrate_reason(newpage, reason); 1175bf6bddf1SRafael Aquini 11760dabec93SMinchan Kim out: 1177e24f0b8fSChristoph Lameter if (rc != -EAGAIN) { 1178aaa994b3SChristoph Lameter /* 1179aaa994b3SChristoph Lameter * A page that has been migrated has all references 1180aaa994b3SChristoph Lameter * removed and will be freed. A page that has not been 1181aaa994b3SChristoph Lameter * migrated will have kept its references and be 1182aaa994b3SChristoph Lameter * restored. 1183aaa994b3SChristoph Lameter */ 1184aaa994b3SChristoph Lameter list_del(&page->lru); 11856afcf8efSMing Ling 11866afcf8efSMing Ling /* 11876afcf8efSMing Ling * Compaction can also migrate non-LRU pages which are 11886afcf8efSMing Ling * not accounted to NR_ISOLATED_*. They can be recognized 11896afcf8efSMing Ling * as __PageMovable 11906afcf8efSMing Ling */ 11916afcf8efSMing Ling if (likely(!__PageMovable(page))) 1192e8db67ebSNaoya Horiguchi mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + 1193e8db67ebSNaoya Horiguchi page_is_file_cache(page), -hpage_nr_pages(page)); 1194e24f0b8fSChristoph Lameter } 119568711a74SDavid Rientjes 119695a402c3SChristoph Lameter /* 1197c6c919ebSMinchan Kim * If migration is successful, releases the reference grabbed during 1198c6c919ebSMinchan Kim * isolation. Otherwise, restore the page to the right list unless 1199c6c919ebSMinchan Kim * we want to retry. 120095a402c3SChristoph Lameter */ 1201c6c919ebSMinchan Kim if (rc == MIGRATEPAGE_SUCCESS) { 1202c6c919ebSMinchan Kim put_page(page); 1203c6c919ebSMinchan Kim if (reason == MR_MEMORY_FAILURE) { 1204c6c919ebSMinchan Kim /* 1205c6c919ebSMinchan Kim * Set PG_HWPoison on the just freed page 1206c6c919ebSMinchan Kim * intentionally. Although it's rather weird, 1207c6c919ebSMinchan Kim * it's how HWPoison flag works at the moment.
1208c6c919ebSMinchan Kim */ 1209c6c919ebSMinchan Kim if (!test_set_page_hwpoison(page)) 1210c6c919ebSMinchan Kim num_poisoned_pages_inc(); 1211c6c919ebSMinchan Kim } 1212c6c919ebSMinchan Kim } else { 1213bda807d4SMinchan Kim if (rc != -EAGAIN) { 1214bda807d4SMinchan Kim if (likely(!__PageMovable(page))) { 1215c6c919ebSMinchan Kim putback_lru_page(page); 1216bda807d4SMinchan Kim goto put_new; 1217bda807d4SMinchan Kim } 1218bda807d4SMinchan Kim 1219bda807d4SMinchan Kim lock_page(page); 1220bda807d4SMinchan Kim if (PageMovable(page)) 1221bda807d4SMinchan Kim putback_movable_page(page); 1222bda807d4SMinchan Kim else 1223bda807d4SMinchan Kim __ClearPageIsolated(page); 1224bda807d4SMinchan Kim unlock_page(page); 1225bda807d4SMinchan Kim put_page(page); 1226bda807d4SMinchan Kim } 1227bda807d4SMinchan Kim put_new: 1228cf4b769aSHugh Dickins if (put_new_page) 122968711a74SDavid Rientjes put_new_page(newpage, private); 1230c6c919ebSMinchan Kim else 1231d6d86c0aSKonstantin Khlebnikov put_page(newpage); 1232c6c919ebSMinchan Kim } 123368711a74SDavid Rientjes 1234742755a1SChristoph Lameter if (result) { 1235742755a1SChristoph Lameter if (rc) 1236742755a1SChristoph Lameter *result = rc; 1237742755a1SChristoph Lameter else 1238742755a1SChristoph Lameter *result = page_to_nid(newpage); 1239742755a1SChristoph Lameter } 1240e24f0b8fSChristoph Lameter return rc; 1241e24f0b8fSChristoph Lameter } 1242b20a3503SChristoph Lameter 1243e24f0b8fSChristoph Lameter /* 1244290408d4SNaoya Horiguchi * Counterpart of unmap_and_move_page() for hugepage migration. 1245290408d4SNaoya Horiguchi * 1246290408d4SNaoya Horiguchi * This function doesn't wait the completion of hugepage I/O 1247290408d4SNaoya Horiguchi * because there is no race between I/O and migration for hugepage. 1248290408d4SNaoya Horiguchi * Note that currently hugepage I/O occurs only in direct I/O 1249290408d4SNaoya Horiguchi * where no lock is held and PG_writeback is irrelevant, 1250290408d4SNaoya Horiguchi * and writeback status of all subpages are counted in the reference 1251290408d4SNaoya Horiguchi * count of the head page (i.e. if all subpages of a 2MB hugepage are 1252290408d4SNaoya Horiguchi * under direct I/O, the reference of the head page is 512 and a bit more.) 1253290408d4SNaoya Horiguchi * This means that when we try to migrate hugepage whose subpages are 1254290408d4SNaoya Horiguchi * doing direct I/O, some references remain after try_to_unmap() and 1255290408d4SNaoya Horiguchi * hugepage migration fails without data corruption. 1256290408d4SNaoya Horiguchi * 1257290408d4SNaoya Horiguchi * There is also no race when direct I/O is issued on the page under migration, 1258290408d4SNaoya Horiguchi * because then pte is replaced with migration swap entry and direct I/O code 1259290408d4SNaoya Horiguchi * will wait in the page fault for migration to complete. 
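 *
 * (Worked example, assuming a 4KB base page size: a 2MB hugepage has
 * 2MB / 4KB = 512 subpages, so direct I/O can hold up to 512 extra
 * references on the head page; try_to_unmap() then cannot bring the
 * count down to the expected value and the migration attempt simply
 * fails, without disturbing the in-flight I/O.)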
1260290408d4SNaoya Horiguchi */ 1261290408d4SNaoya Horiguchi static int unmap_and_move_huge_page(new_page_t get_new_page, 126268711a74SDavid Rientjes free_page_t put_new_page, unsigned long private, 126368711a74SDavid Rientjes struct page *hpage, int force, 12647cd12b4aSVlastimil Babka enum migrate_mode mode, int reason) 1265290408d4SNaoya Horiguchi { 12662def7424SHugh Dickins int rc = -EAGAIN; 1267290408d4SNaoya Horiguchi int *result = NULL; 12682ebba6b7SHugh Dickins int page_was_mapped = 0; 126932665f2bSJoonsoo Kim struct page *new_hpage; 1270290408d4SNaoya Horiguchi struct anon_vma *anon_vma = NULL; 1271290408d4SNaoya Horiguchi 127283467efbSNaoya Horiguchi /* 127383467efbSNaoya Horiguchi * Movability of hugepages depends on architectures and hugepage size. 127483467efbSNaoya Horiguchi * This check is necessary because some callers of hugepage migration 127583467efbSNaoya Horiguchi * like soft offline and memory hotremove don't walk through page 127683467efbSNaoya Horiguchi * tables or check whether the hugepage is pmd-based or not before 127783467efbSNaoya Horiguchi * kicking migration. 127883467efbSNaoya Horiguchi */ 1279100873d7SNaoya Horiguchi if (!hugepage_migration_supported(page_hstate(hpage))) { 128032665f2bSJoonsoo Kim putback_active_hugepage(hpage); 128183467efbSNaoya Horiguchi return -ENOSYS; 128232665f2bSJoonsoo Kim } 128383467efbSNaoya Horiguchi 128432665f2bSJoonsoo Kim new_hpage = get_new_page(hpage, private, &result); 1285290408d4SNaoya Horiguchi if (!new_hpage) 1286290408d4SNaoya Horiguchi return -ENOMEM; 1287290408d4SNaoya Horiguchi 1288290408d4SNaoya Horiguchi if (!trylock_page(hpage)) { 12892916ecc0SJérôme Glisse if (!force) 1290290408d4SNaoya Horiguchi goto out; 12912916ecc0SJérôme Glisse switch (mode) { 12922916ecc0SJérôme Glisse case MIGRATE_SYNC: 12932916ecc0SJérôme Glisse case MIGRATE_SYNC_NO_COPY: 12942916ecc0SJérôme Glisse break; 12952916ecc0SJérôme Glisse default: 12962916ecc0SJérôme Glisse goto out; 12972916ecc0SJérôme Glisse } 1298290408d4SNaoya Horiguchi lock_page(hpage); 1299290408d4SNaoya Horiguchi } 1300290408d4SNaoya Horiguchi 1301746b18d4SPeter Zijlstra if (PageAnon(hpage)) 1302746b18d4SPeter Zijlstra anon_vma = page_get_anon_vma(hpage); 1303290408d4SNaoya Horiguchi 13047db7671fSHugh Dickins if (unlikely(!trylock_page(new_hpage))) 13057db7671fSHugh Dickins goto put_anon; 13067db7671fSHugh Dickins 13072ebba6b7SHugh Dickins if (page_mapped(hpage)) { 13082ebba6b7SHugh Dickins try_to_unmap(hpage, 13092ebba6b7SHugh Dickins TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); 13102ebba6b7SHugh Dickins page_was_mapped = 1; 13112ebba6b7SHugh Dickins } 1312290408d4SNaoya Horiguchi 1313290408d4SNaoya Horiguchi if (!page_mapped(hpage)) 13145c3f9a67SHugh Dickins rc = move_to_new_page(new_hpage, hpage, mode); 1315290408d4SNaoya Horiguchi 13165c3f9a67SHugh Dickins if (page_was_mapped) 13175c3f9a67SHugh Dickins remove_migration_ptes(hpage, 1318e388466dSKirill A. Shutemov rc == MIGRATEPAGE_SUCCESS ? 
new_hpage : hpage, false); 1319290408d4SNaoya Horiguchi 13207db7671fSHugh Dickins unlock_page(new_hpage); 13217db7671fSHugh Dickins 13227db7671fSHugh Dickins put_anon: 1323fd4a4663SHugh Dickins if (anon_vma) 13249e60109fSPeter Zijlstra put_anon_vma(anon_vma); 13258e6ac7faSAneesh Kumar K.V 13262def7424SHugh Dickins if (rc == MIGRATEPAGE_SUCCESS) { 1327ab5ac90aSMichal Hocko move_hugetlb_state(hpage, new_hpage, reason); 13282def7424SHugh Dickins put_new_page = NULL; 13292def7424SHugh Dickins } 13308e6ac7faSAneesh Kumar K.V 1331290408d4SNaoya Horiguchi unlock_page(hpage); 133209761333SHillf Danton out: 1333b8ec1ceeSNaoya Horiguchi if (rc != -EAGAIN) 1334b8ec1ceeSNaoya Horiguchi putback_active_hugepage(hpage); 1335c3114a84SAnshuman Khandual if (reason == MR_MEMORY_FAILURE && !test_set_page_hwpoison(hpage)) 1336c3114a84SAnshuman Khandual num_poisoned_pages_inc(); 133768711a74SDavid Rientjes 133868711a74SDavid Rientjes /* 133968711a74SDavid Rientjes * If migration was not successful and there's a freeing callback, use 134068711a74SDavid Rientjes * it. Otherwise, put_page() will drop the reference grabbed during 134168711a74SDavid Rientjes * isolation. 134268711a74SDavid Rientjes */ 13432def7424SHugh Dickins if (put_new_page) 134468711a74SDavid Rientjes put_new_page(new_hpage, private); 134568711a74SDavid Rientjes else 13463aaa76e1SNaoya Horiguchi putback_active_hugepage(new_hpage); 134768711a74SDavid Rientjes 1348290408d4SNaoya Horiguchi if (result) { 1349290408d4SNaoya Horiguchi if (rc) 1350290408d4SNaoya Horiguchi *result = rc; 1351290408d4SNaoya Horiguchi else 1352290408d4SNaoya Horiguchi *result = page_to_nid(new_hpage); 1353290408d4SNaoya Horiguchi } 1354290408d4SNaoya Horiguchi return rc; 1355290408d4SNaoya Horiguchi } 1356290408d4SNaoya Horiguchi 1357290408d4SNaoya Horiguchi /* 1358c73e5c9cSSrivatsa S. Bhat * migrate_pages - migrate the pages specified in a list, to the free pages 1359c73e5c9cSSrivatsa S. Bhat * supplied as the target for the page migration 1360e24f0b8fSChristoph Lameter * 1361c73e5c9cSSrivatsa S. Bhat * @from: The list of pages to be migrated. 1362c73e5c9cSSrivatsa S. Bhat * @get_new_page: The function used to allocate free pages to be used 1363c73e5c9cSSrivatsa S. Bhat * as the target of the page migration. 136468711a74SDavid Rientjes * @put_new_page: The function used to free target pages if migration 136568711a74SDavid Rientjes * fails, or NULL if no special handling is necessary. 1366c73e5c9cSSrivatsa S. Bhat * @private: Private data to be passed on to get_new_page() 1367c73e5c9cSSrivatsa S. Bhat * @mode: The migration mode that specifies the constraints for 1368c73e5c9cSSrivatsa S. Bhat * page migration, if any. 1369c73e5c9cSSrivatsa S. Bhat * @reason: The reason for page migration. 1370e24f0b8fSChristoph Lameter * 1371c73e5c9cSSrivatsa S. Bhat * The function returns after 10 attempts or if no pages are movable any more 1372c73e5c9cSSrivatsa S. Bhat * because the list has become empty or no retryable pages exist any more. 137314e0f9bcSHugh Dickins * The caller should call putback_movable_pages() to return pages to the LRU 137428bd6578SMinchan Kim * or free list only if ret != 0. 1375e24f0b8fSChristoph Lameter * 1376c73e5c9cSSrivatsa S. Bhat * Returns the number of pages that were not migrated, or an error code. 
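 *
 * Illustrative caller sketch (hypothetical, but modelled on the
 * do_move_pages_to_node() helper further down in this file), run after
 * the candidate pages have been isolated onto a private list:
 *
 *	LIST_HEAD(pagelist);
 *	...
 *	err = migrate_pages(&pagelist, alloc_new_node_page, NULL, nid,
 *			MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);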
1377e24f0b8fSChristoph Lameter */ 13789c620e2bSHugh Dickins int migrate_pages(struct list_head *from, new_page_t get_new_page, 137968711a74SDavid Rientjes free_page_t put_new_page, unsigned long private, 138068711a74SDavid Rientjes enum migrate_mode mode, int reason) 1381e24f0b8fSChristoph Lameter { 1382e24f0b8fSChristoph Lameter int retry = 1; 1383e24f0b8fSChristoph Lameter int nr_failed = 0; 13845647bc29SMel Gorman int nr_succeeded = 0; 1385e24f0b8fSChristoph Lameter int pass = 0; 1386e24f0b8fSChristoph Lameter struct page *page; 1387e24f0b8fSChristoph Lameter struct page *page2; 1388e24f0b8fSChristoph Lameter int swapwrite = current->flags & PF_SWAPWRITE; 1389e24f0b8fSChristoph Lameter int rc; 13902d1db3b1SChristoph Lameter 1391e24f0b8fSChristoph Lameter if (!swapwrite) 1392e24f0b8fSChristoph Lameter current->flags |= PF_SWAPWRITE; 1393e24f0b8fSChristoph Lameter 1394e24f0b8fSChristoph Lameter for(pass = 0; pass < 10 && retry; pass++) { 1395e24f0b8fSChristoph Lameter retry = 0; 1396e24f0b8fSChristoph Lameter 1397e24f0b8fSChristoph Lameter list_for_each_entry_safe(page, page2, from, lru) { 1398e24f0b8fSChristoph Lameter cond_resched(); 1399e24f0b8fSChristoph Lameter 140031caf665SNaoya Horiguchi if (PageHuge(page)) 140131caf665SNaoya Horiguchi rc = unmap_and_move_huge_page(get_new_page, 140268711a74SDavid Rientjes put_new_page, private, page, 14037cd12b4aSVlastimil Babka pass > 2, mode, reason); 140431caf665SNaoya Horiguchi else 140568711a74SDavid Rientjes rc = unmap_and_move(get_new_page, put_new_page, 1406add05cecSNaoya Horiguchi private, page, pass > 2, mode, 1407add05cecSNaoya Horiguchi reason); 1408e24f0b8fSChristoph Lameter 1409e24f0b8fSChristoph Lameter switch(rc) { 141095a402c3SChristoph Lameter case -ENOMEM: 1411dfef2ef4SDavid Rientjes nr_failed++; 141295a402c3SChristoph Lameter goto out; 1413e24f0b8fSChristoph Lameter case -EAGAIN: 1414b20a3503SChristoph Lameter retry++; 1415e24f0b8fSChristoph Lameter break; 141678bd5209SRafael Aquini case MIGRATEPAGE_SUCCESS: 14175647bc29SMel Gorman nr_succeeded++; 1418e24f0b8fSChristoph Lameter break; 1419e24f0b8fSChristoph Lameter default: 1420354a3363SNaoya Horiguchi /* 1421354a3363SNaoya Horiguchi * Permanent failure (-EBUSY, -ENOSYS, etc.): 1422354a3363SNaoya Horiguchi * unlike -EAGAIN case, the failed page is 1423354a3363SNaoya Horiguchi * removed from migration page list and not 1424354a3363SNaoya Horiguchi * retried in the next outer loop. 
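 * Pages that returned -EAGAIN, by contrast, are kept on the list
 * and retried on the next of the up-to-ten passes; whatever is
 * still returning -EAGAIN when the passes run out is added to
 * nr_failed via the final "nr_failed += retry".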
1425354a3363SNaoya Horiguchi */ 1426b20a3503SChristoph Lameter nr_failed++; 1427e24f0b8fSChristoph Lameter break; 1428b20a3503SChristoph Lameter } 1429b20a3503SChristoph Lameter } 1430e24f0b8fSChristoph Lameter } 1431f2f81fb2SVlastimil Babka nr_failed += retry; 1432f2f81fb2SVlastimil Babka rc = nr_failed; 143395a402c3SChristoph Lameter out: 14345647bc29SMel Gorman if (nr_succeeded) 14355647bc29SMel Gorman count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded); 14365647bc29SMel Gorman if (nr_failed) 14375647bc29SMel Gorman count_vm_events(PGMIGRATE_FAIL, nr_failed); 14387b2a2d4aSMel Gorman trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason); 14397b2a2d4aSMel Gorman 1440b20a3503SChristoph Lameter if (!swapwrite) 1441b20a3503SChristoph Lameter current->flags &= ~PF_SWAPWRITE; 1442b20a3503SChristoph Lameter 144395a402c3SChristoph Lameter return rc; 1444b20a3503SChristoph Lameter } 1445b20a3503SChristoph Lameter 1446742755a1SChristoph Lameter #ifdef CONFIG_NUMA 1447742755a1SChristoph Lameter 1448*a49bd4d7SMichal Hocko static int store_status(int __user *status, int start, int value, int nr) 1449742755a1SChristoph Lameter { 1450*a49bd4d7SMichal Hocko while (nr-- > 0) { 1451*a49bd4d7SMichal Hocko if (put_user(value, status + start)) 1452*a49bd4d7SMichal Hocko return -EFAULT; 1453*a49bd4d7SMichal Hocko start++; 1454*a49bd4d7SMichal Hocko } 1455742755a1SChristoph Lameter 1456*a49bd4d7SMichal Hocko return 0; 1457*a49bd4d7SMichal Hocko } 1458742755a1SChristoph Lameter 1459*a49bd4d7SMichal Hocko static int do_move_pages_to_node(struct mm_struct *mm, 1460*a49bd4d7SMichal Hocko struct list_head *pagelist, int node) 1461*a49bd4d7SMichal Hocko { 1462*a49bd4d7SMichal Hocko int err; 1463742755a1SChristoph Lameter 1464*a49bd4d7SMichal Hocko if (list_empty(pagelist)) 1465*a49bd4d7SMichal Hocko return 0; 1466742755a1SChristoph Lameter 1467*a49bd4d7SMichal Hocko err = migrate_pages(pagelist, alloc_new_node_page, NULL, node, 1468*a49bd4d7SMichal Hocko MIGRATE_SYNC, MR_SYSCALL); 1469*a49bd4d7SMichal Hocko if (err) 1470*a49bd4d7SMichal Hocko putback_movable_pages(pagelist); 1471*a49bd4d7SMichal Hocko return err; 1472742755a1SChristoph Lameter } 1473742755a1SChristoph Lameter 1474742755a1SChristoph Lameter /* 1475*a49bd4d7SMichal Hocko * Resolves the given address to a struct page, isolates it from the LRU and 1476*a49bd4d7SMichal Hocko * puts it to the given pagelist. 1477*a49bd4d7SMichal Hocko * Returns -errno if the page cannot be found/isolated or 0 when it has been 1478*a49bd4d7SMichal Hocko * queued or the page doesn't need to be migrated because it is already on 1479*a49bd4d7SMichal Hocko * the target node 1480742755a1SChristoph Lameter */ 1481*a49bd4d7SMichal Hocko static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, 1482*a49bd4d7SMichal Hocko int node, struct list_head *pagelist, bool migrate_all) 1483742755a1SChristoph Lameter { 1484742755a1SChristoph Lameter struct vm_area_struct *vma; 1485742755a1SChristoph Lameter struct page *page; 1486e8db67ebSNaoya Horiguchi unsigned int follflags; 1487*a49bd4d7SMichal Hocko int err; 1488742755a1SChristoph Lameter 1489*a49bd4d7SMichal Hocko down_read(&mm->mmap_sem); 1490742755a1SChristoph Lameter err = -EFAULT; 1491*a49bd4d7SMichal Hocko vma = find_vma(mm, addr); 1492*a49bd4d7SMichal Hocko if (!vma || addr < vma->vm_start || !vma_migratable(vma)) 1493*a49bd4d7SMichal Hocko goto out; 1494742755a1SChristoph Lameter 1495d899844eSKirill A. 
Shutemov /* FOLL_DUMP to ignore special (like zero) pages */ 1496e8db67ebSNaoya Horiguchi follflags = FOLL_GET | FOLL_DUMP; 1497e8db67ebSNaoya Horiguchi if (!thp_migration_supported()) 1498e8db67ebSNaoya Horiguchi follflags |= FOLL_SPLIT; 1499*a49bd4d7SMichal Hocko page = follow_page(vma, addr, follflags); 150089f5b7daSLinus Torvalds 150189f5b7daSLinus Torvalds err = PTR_ERR(page); 150289f5b7daSLinus Torvalds if (IS_ERR(page)) 1503*a49bd4d7SMichal Hocko goto out; 150489f5b7daSLinus Torvalds 1505742755a1SChristoph Lameter err = -ENOENT; 1506742755a1SChristoph Lameter if (!page) 1507*a49bd4d7SMichal Hocko goto out; 1508742755a1SChristoph Lameter 1509*a49bd4d7SMichal Hocko err = 0; 1510*a49bd4d7SMichal Hocko if (page_to_nid(page) == node) 1511*a49bd4d7SMichal Hocko goto out_putpage; 1512742755a1SChristoph Lameter 1513742755a1SChristoph Lameter err = -EACCES; 1514*a49bd4d7SMichal Hocko if (page_mapcount(page) > 1 && !migrate_all) 1515*a49bd4d7SMichal Hocko goto out_putpage; 1516742755a1SChristoph Lameter 1517e632a938SNaoya Horiguchi if (PageHuge(page)) { 1518e8db67ebSNaoya Horiguchi if (PageHead(page)) { 1519*a49bd4d7SMichal Hocko isolate_huge_page(page, pagelist); 1520e8db67ebSNaoya Horiguchi err = 0; 1521e8db67ebSNaoya Horiguchi } 1522*a49bd4d7SMichal Hocko } else { 1523*a49bd4d7SMichal Hocko struct page *head; 1524e632a938SNaoya Horiguchi 1525e8db67ebSNaoya Horiguchi head = compound_head(page); 1526e8db67ebSNaoya Horiguchi err = isolate_lru_page(head); 1527*a49bd4d7SMichal Hocko if (err) 1528*a49bd4d7SMichal Hocko goto out_putpage; 1529*a49bd4d7SMichal Hocko 1530*a49bd4d7SMichal Hocko err = 0; 1531*a49bd4d7SMichal Hocko list_add_tail(&head->lru, pagelist); 1532e8db67ebSNaoya Horiguchi mod_node_page_state(page_pgdat(head), 1533e8db67ebSNaoya Horiguchi NR_ISOLATED_ANON + page_is_file_cache(head), 1534e8db67ebSNaoya Horiguchi hpage_nr_pages(head)); 15356d9c285aSKOSAKI Motohiro } 1536*a49bd4d7SMichal Hocko out_putpage: 1537742755a1SChristoph Lameter /* 1538742755a1SChristoph Lameter * Either remove the duplicate refcount from 1539742755a1SChristoph Lameter * isolate_lru_page() or drop the page ref if it was 1540742755a1SChristoph Lameter * not isolated. 1541742755a1SChristoph Lameter */ 1542742755a1SChristoph Lameter put_page(page); 1543*a49bd4d7SMichal Hocko out: 1544742755a1SChristoph Lameter up_read(&mm->mmap_sem); 1545742755a1SChristoph Lameter return err; 1546742755a1SChristoph Lameter } 1547742755a1SChristoph Lameter 1548742755a1SChristoph Lameter /* 15495e9a0f02SBrice Goglin * Migrate an array of page address onto an array of nodes and fill 15505e9a0f02SBrice Goglin * the corresponding array of status. 
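 *
 * (Illustrative example of the batching done below: if the pages resolve
 * to nodes[] = {1, 1, 2}, the two node-1 requests are queued on a single
 * pagelist and migrated together as soon as the node-2 request is seen;
 * the node-2 page then starts a new batch that is flushed after the loop.
 * On return, each status[] slot is expected to hold either the node the
 * page was queued for or a negative errno explaining why it was skipped.)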
15515e9a0f02SBrice Goglin */ 15523268c63eSChristoph Lameter static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, 15535e9a0f02SBrice Goglin unsigned long nr_pages, 15545e9a0f02SBrice Goglin const void __user * __user *pages, 15555e9a0f02SBrice Goglin const int __user *nodes, 15565e9a0f02SBrice Goglin int __user *status, int flags) 15575e9a0f02SBrice Goglin { 1558*a49bd4d7SMichal Hocko int current_node = NUMA_NO_NODE; 1559*a49bd4d7SMichal Hocko LIST_HEAD(pagelist); 1560*a49bd4d7SMichal Hocko int start, i; 1561*a49bd4d7SMichal Hocko int err = 0, err1; 156235282a2dSBrice Goglin 156335282a2dSBrice Goglin migrate_prep(); 156435282a2dSBrice Goglin 1565*a49bd4d7SMichal Hocko for (i = start = 0; i < nr_pages; i++) { 15665e9a0f02SBrice Goglin const void __user *p; 1567*a49bd4d7SMichal Hocko unsigned long addr; 15685e9a0f02SBrice Goglin int node; 15695e9a0f02SBrice Goglin 15703140a227SBrice Goglin err = -EFAULT; 1571*a49bd4d7SMichal Hocko if (get_user(p, pages + i)) 1572*a49bd4d7SMichal Hocko goto out_flush; 1573*a49bd4d7SMichal Hocko if (get_user(node, nodes + i)) 1574*a49bd4d7SMichal Hocko goto out_flush; 1575*a49bd4d7SMichal Hocko addr = (unsigned long)p; 15765e9a0f02SBrice Goglin 15775e9a0f02SBrice Goglin err = -ENODEV; 15786f5a55f1SLinus Torvalds if (node < 0 || node >= MAX_NUMNODES) 1579*a49bd4d7SMichal Hocko goto out_flush; 1580389162c2SLai Jiangshan if (!node_state(node, N_MEMORY)) 1581*a49bd4d7SMichal Hocko goto out_flush; 15825e9a0f02SBrice Goglin 15835e9a0f02SBrice Goglin err = -EACCES; 15845e9a0f02SBrice Goglin if (!node_isset(node, task_nodes)) 1585*a49bd4d7SMichal Hocko goto out_flush; 15865e9a0f02SBrice Goglin 1587*a49bd4d7SMichal Hocko if (current_node == NUMA_NO_NODE) { 1588*a49bd4d7SMichal Hocko current_node = node; 1589*a49bd4d7SMichal Hocko start = i; 1590*a49bd4d7SMichal Hocko } else if (node != current_node) { 1591*a49bd4d7SMichal Hocko err = do_move_pages_to_node(mm, &pagelist, current_node); 1592*a49bd4d7SMichal Hocko if (err) 1593*a49bd4d7SMichal Hocko goto out; 1594*a49bd4d7SMichal Hocko err = store_status(status, start, current_node, i - start); 1595*a49bd4d7SMichal Hocko if (err) 1596*a49bd4d7SMichal Hocko goto out; 1597*a49bd4d7SMichal Hocko start = i; 1598*a49bd4d7SMichal Hocko current_node = node; 15995e9a0f02SBrice Goglin } 16005e9a0f02SBrice Goglin 1601*a49bd4d7SMichal Hocko /* 1602*a49bd4d7SMichal Hocko * Errors in the page lookup or isolation are not fatal and we simply 1603*a49bd4d7SMichal Hocko * report them via status 1604*a49bd4d7SMichal Hocko */ 1605*a49bd4d7SMichal Hocko err = add_page_for_migration(mm, addr, current_node, 1606*a49bd4d7SMichal Hocko &pagelist, flags & MPOL_MF_MOVE_ALL); 1607*a49bd4d7SMichal Hocko if (!err) 1608*a49bd4d7SMichal Hocko continue; 16093140a227SBrice Goglin 1610*a49bd4d7SMichal Hocko err = store_status(status, i, err, 1); 1611*a49bd4d7SMichal Hocko if (err) 1612*a49bd4d7SMichal Hocko goto out_flush; 16133140a227SBrice Goglin 1614*a49bd4d7SMichal Hocko err = do_move_pages_to_node(mm, &pagelist, current_node); 1615*a49bd4d7SMichal Hocko if (err) 1616*a49bd4d7SMichal Hocko goto out; 1617*a49bd4d7SMichal Hocko if (i > start) { 1618*a49bd4d7SMichal Hocko err = store_status(status, start, current_node, i - start); 1619*a49bd4d7SMichal Hocko if (err) 1620*a49bd4d7SMichal Hocko goto out; 16213140a227SBrice Goglin } 1622*a49bd4d7SMichal Hocko current_node = NUMA_NO_NODE; 16233140a227SBrice Goglin } 1624*a49bd4d7SMichal Hocko out_flush: 1625*a49bd4d7SMichal Hocko /* Make sure we do not overwrite the existing error */ 
1626*a49bd4d7SMichal Hocko err1 = do_move_pages_to_node(mm, &pagelist, current_node); 1627*a49bd4d7SMichal Hocko if (!err1) 1628*a49bd4d7SMichal Hocko err1 = store_status(status, start, current_node, i - start); 1629*a49bd4d7SMichal Hocko if (!err) 1630*a49bd4d7SMichal Hocko err = err1; 16315e9a0f02SBrice Goglin out: 16325e9a0f02SBrice Goglin return err; 16335e9a0f02SBrice Goglin } 16345e9a0f02SBrice Goglin 16355e9a0f02SBrice Goglin /* 16362f007e74SBrice Goglin * Determine the nodes of an array of pages and store it in an array of status. 1637742755a1SChristoph Lameter */ 163880bba129SBrice Goglin static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, 163980bba129SBrice Goglin const void __user **pages, int *status) 1640742755a1SChristoph Lameter { 16412f007e74SBrice Goglin unsigned long i; 1642742755a1SChristoph Lameter 16432f007e74SBrice Goglin down_read(&mm->mmap_sem); 16442f007e74SBrice Goglin 16452f007e74SBrice Goglin for (i = 0; i < nr_pages; i++) { 164680bba129SBrice Goglin unsigned long addr = (unsigned long)(*pages); 16472f007e74SBrice Goglin struct vm_area_struct *vma; 16482f007e74SBrice Goglin struct page *page; 1649c095adbcSKOSAKI Motohiro int err = -EFAULT; 16502f007e74SBrice Goglin 16512f007e74SBrice Goglin vma = find_vma(mm, addr); 165270384dc6SGleb Natapov if (!vma || addr < vma->vm_start) 1653742755a1SChristoph Lameter goto set_status; 1654742755a1SChristoph Lameter 1655d899844eSKirill A. Shutemov /* FOLL_DUMP to ignore special (like zero) pages */ 1656d899844eSKirill A. Shutemov page = follow_page(vma, addr, FOLL_DUMP); 165789f5b7daSLinus Torvalds 165889f5b7daSLinus Torvalds err = PTR_ERR(page); 165989f5b7daSLinus Torvalds if (IS_ERR(page)) 166089f5b7daSLinus Torvalds goto set_status; 166189f5b7daSLinus Torvalds 1662d899844eSKirill A. Shutemov err = page ? page_to_nid(page) : -ENOENT; 1663742755a1SChristoph Lameter set_status: 166480bba129SBrice Goglin *status = err; 166580bba129SBrice Goglin 166680bba129SBrice Goglin pages++; 166780bba129SBrice Goglin status++; 166880bba129SBrice Goglin } 166980bba129SBrice Goglin 167080bba129SBrice Goglin up_read(&mm->mmap_sem); 167180bba129SBrice Goglin } 167280bba129SBrice Goglin 167380bba129SBrice Goglin /* 167480bba129SBrice Goglin * Determine the nodes of a user array of pages and store it in 167580bba129SBrice Goglin * a user array of status. 167680bba129SBrice Goglin */ 167780bba129SBrice Goglin static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages, 167880bba129SBrice Goglin const void __user * __user *pages, 167980bba129SBrice Goglin int __user *status) 168080bba129SBrice Goglin { 168180bba129SBrice Goglin #define DO_PAGES_STAT_CHUNK_NR 16 168280bba129SBrice Goglin const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR]; 168380bba129SBrice Goglin int chunk_status[DO_PAGES_STAT_CHUNK_NR]; 168480bba129SBrice Goglin 168587b8d1adSH. Peter Anvin while (nr_pages) { 168687b8d1adSH. Peter Anvin unsigned long chunk_nr; 168780bba129SBrice Goglin 168887b8d1adSH. Peter Anvin chunk_nr = nr_pages; 168987b8d1adSH. Peter Anvin if (chunk_nr > DO_PAGES_STAT_CHUNK_NR) 169087b8d1adSH. Peter Anvin chunk_nr = DO_PAGES_STAT_CHUNK_NR; 169187b8d1adSH. Peter Anvin 169287b8d1adSH. Peter Anvin if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages))) 169387b8d1adSH. Peter Anvin break; 169480bba129SBrice Goglin 169580bba129SBrice Goglin do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status); 169680bba129SBrice Goglin 169787b8d1adSH. 
Peter Anvin if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status))) 169887b8d1adSH. Peter Anvin break; 1699742755a1SChristoph Lameter 170087b8d1adSH. Peter Anvin pages += chunk_nr; 170187b8d1adSH. Peter Anvin status += chunk_nr; 170287b8d1adSH. Peter Anvin nr_pages -= chunk_nr; 170387b8d1adSH. Peter Anvin } 170487b8d1adSH. Peter Anvin return nr_pages ? -EFAULT : 0; 1705742755a1SChristoph Lameter } 1706742755a1SChristoph Lameter 1707742755a1SChristoph Lameter /* 1708742755a1SChristoph Lameter * Move a list of pages in the address space of the currently executing 1709742755a1SChristoph Lameter * process. 1710742755a1SChristoph Lameter */ 17117addf443SDominik Brodowski static int kernel_move_pages(pid_t pid, unsigned long nr_pages, 17127addf443SDominik Brodowski const void __user * __user *pages, 17137addf443SDominik Brodowski const int __user *nodes, 17147addf443SDominik Brodowski int __user *status, int flags) 1715742755a1SChristoph Lameter { 1716742755a1SChristoph Lameter struct task_struct *task; 1717742755a1SChristoph Lameter struct mm_struct *mm; 17185e9a0f02SBrice Goglin int err; 17193268c63eSChristoph Lameter nodemask_t task_nodes; 1720742755a1SChristoph Lameter 1721742755a1SChristoph Lameter /* Check flags */ 1722742755a1SChristoph Lameter if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL)) 1723742755a1SChristoph Lameter return -EINVAL; 1724742755a1SChristoph Lameter 1725742755a1SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 1726742755a1SChristoph Lameter return -EPERM; 1727742755a1SChristoph Lameter 1728742755a1SChristoph Lameter /* Find the mm_struct */ 1729a879bf58SGreg Thelen rcu_read_lock(); 1730228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 1731742755a1SChristoph Lameter if (!task) { 1732a879bf58SGreg Thelen rcu_read_unlock(); 1733742755a1SChristoph Lameter return -ESRCH; 1734742755a1SChristoph Lameter } 17353268c63eSChristoph Lameter get_task_struct(task); 1736742755a1SChristoph Lameter 1737742755a1SChristoph Lameter /* 1738742755a1SChristoph Lameter * Check if this process has the right to modify the specified 1739197e7e52SLinus Torvalds * process. Use the regular "ptrace_may_access()" checks. 
1740742755a1SChristoph Lameter */ 1741197e7e52SLinus Torvalds if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1742c69e8d9cSDavid Howells rcu_read_unlock(); 1743742755a1SChristoph Lameter err = -EPERM; 17445e9a0f02SBrice Goglin goto out; 1745742755a1SChristoph Lameter } 1746c69e8d9cSDavid Howells rcu_read_unlock(); 1747742755a1SChristoph Lameter 174886c3a764SDavid Quigley err = security_task_movememory(task); 174986c3a764SDavid Quigley if (err) 1750742755a1SChristoph Lameter goto out; 1751742755a1SChristoph Lameter 17523268c63eSChristoph Lameter task_nodes = cpuset_mems_allowed(task); 17533268c63eSChristoph Lameter mm = get_task_mm(task); 17543268c63eSChristoph Lameter put_task_struct(task); 17553268c63eSChristoph Lameter 17566e8b09eaSSasha Levin if (!mm) 17576e8b09eaSSasha Levin return -EINVAL; 17586e8b09eaSSasha Levin 17593268c63eSChristoph Lameter if (nodes) 17603268c63eSChristoph Lameter err = do_pages_move(mm, task_nodes, nr_pages, pages, 17613268c63eSChristoph Lameter nodes, status, flags); 17623268c63eSChristoph Lameter else 17635e9a0f02SBrice Goglin err = do_pages_stat(mm, nr_pages, pages, status); 17643268c63eSChristoph Lameter 17653268c63eSChristoph Lameter mmput(mm); 17663268c63eSChristoph Lameter return err; 1767742755a1SChristoph Lameter 1768742755a1SChristoph Lameter out: 17693268c63eSChristoph Lameter put_task_struct(task); 1770742755a1SChristoph Lameter return err; 1771742755a1SChristoph Lameter } 1772742755a1SChristoph Lameter 17737addf443SDominik Brodowski SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, 17747addf443SDominik Brodowski const void __user * __user *, pages, 17757addf443SDominik Brodowski const int __user *, nodes, 17767addf443SDominik Brodowski int __user *, status, int, flags) 17777addf443SDominik Brodowski { 17787addf443SDominik Brodowski return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags); 17797addf443SDominik Brodowski } 17807addf443SDominik Brodowski 17817addf443SDominik Brodowski #ifdef CONFIG_COMPAT 17827addf443SDominik Brodowski COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages, 17837addf443SDominik Brodowski compat_uptr_t __user *, pages32, 17847addf443SDominik Brodowski const int __user *, nodes, 17857addf443SDominik Brodowski int __user *, status, 17867addf443SDominik Brodowski int, flags) 17877addf443SDominik Brodowski { 17887addf443SDominik Brodowski const void __user * __user *pages; 17897addf443SDominik Brodowski int i; 17907addf443SDominik Brodowski 17917addf443SDominik Brodowski pages = compat_alloc_user_space(nr_pages * sizeof(void *)); 17927addf443SDominik Brodowski for (i = 0; i < nr_pages; i++) { 17937addf443SDominik Brodowski compat_uptr_t p; 17947addf443SDominik Brodowski 17957addf443SDominik Brodowski if (get_user(p, pages32 + i) || 17967addf443SDominik Brodowski put_user(compat_ptr(p), pages + i)) 17977addf443SDominik Brodowski return -EFAULT; 17987addf443SDominik Brodowski } 17997addf443SDominik Brodowski return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags); 18007addf443SDominik Brodowski } 18017addf443SDominik Brodowski #endif /* CONFIG_COMPAT */ 18027addf443SDominik Brodowski 18037039e1dbSPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING 18047039e1dbSPeter Zijlstra /* 18057039e1dbSPeter Zijlstra * Returns true if this is a safe migration target node for misplaced NUMA 18067039e1dbSPeter Zijlstra * pages. 
Currently it only checks the watermarks which crude 18077039e1dbSPeter Zijlstra */ 18087039e1dbSPeter Zijlstra static bool migrate_balanced_pgdat(struct pglist_data *pgdat, 18093abef4e6SMel Gorman unsigned long nr_migrate_pages) 18107039e1dbSPeter Zijlstra { 18117039e1dbSPeter Zijlstra int z; 1812599d0c95SMel Gorman 18137039e1dbSPeter Zijlstra for (z = pgdat->nr_zones - 1; z >= 0; z--) { 18147039e1dbSPeter Zijlstra struct zone *zone = pgdat->node_zones + z; 18157039e1dbSPeter Zijlstra 18167039e1dbSPeter Zijlstra if (!populated_zone(zone)) 18177039e1dbSPeter Zijlstra continue; 18187039e1dbSPeter Zijlstra 18197039e1dbSPeter Zijlstra /* Avoid waking kswapd by allocating pages_to_migrate pages. */ 18207039e1dbSPeter Zijlstra if (!zone_watermark_ok(zone, 0, 18217039e1dbSPeter Zijlstra high_wmark_pages(zone) + 18227039e1dbSPeter Zijlstra nr_migrate_pages, 18237039e1dbSPeter Zijlstra 0, 0)) 18247039e1dbSPeter Zijlstra continue; 18257039e1dbSPeter Zijlstra return true; 18267039e1dbSPeter Zijlstra } 18277039e1dbSPeter Zijlstra return false; 18287039e1dbSPeter Zijlstra } 18297039e1dbSPeter Zijlstra 18307039e1dbSPeter Zijlstra static struct page *alloc_misplaced_dst_page(struct page *page, 18317039e1dbSPeter Zijlstra unsigned long data, 18327039e1dbSPeter Zijlstra int **result) 18337039e1dbSPeter Zijlstra { 18347039e1dbSPeter Zijlstra int nid = (int) data; 18357039e1dbSPeter Zijlstra struct page *newpage; 18367039e1dbSPeter Zijlstra 183796db800fSVlastimil Babka newpage = __alloc_pages_node(nid, 1838e97ca8e5SJohannes Weiner (GFP_HIGHUSER_MOVABLE | 1839e97ca8e5SJohannes Weiner __GFP_THISNODE | __GFP_NOMEMALLOC | 1840e97ca8e5SJohannes Weiner __GFP_NORETRY | __GFP_NOWARN) & 18418479eba7SMel Gorman ~__GFP_RECLAIM, 0); 1842bac0382cSHillf Danton 18437039e1dbSPeter Zijlstra return newpage; 18447039e1dbSPeter Zijlstra } 18457039e1dbSPeter Zijlstra 18467039e1dbSPeter Zijlstra /* 1847a8f60772SMel Gorman * page migration rate limiting control. 1848a8f60772SMel Gorman * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs 1849a8f60772SMel Gorman * window of time. Default here says do not migrate more than 1280M per second. 1850a8f60772SMel Gorman */ 1851a8f60772SMel Gorman static unsigned int migrate_interval_millisecs __read_mostly = 100; 1852a8f60772SMel Gorman static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT); 1853a8f60772SMel Gorman 1854b32967ffSMel Gorman /* Returns true if the node is migrate rate-limited after the update */ 18551c30e017SMel Gorman static bool numamigrate_update_ratelimit(pg_data_t *pgdat, 18561c30e017SMel Gorman unsigned long nr_pages) 1857b32967ffSMel Gorman { 1858b32967ffSMel Gorman /* 1859b32967ffSMel Gorman * Rate-limit the amount of data that is being migrated to a node. 1860b32967ffSMel Gorman * Optimal placement is no good if the memory bus is saturated and 1861b32967ffSMel Gorman * all the time is being spent migrating! 
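 * Worked numbers, assuming 4KB pages: ratelimit_pages is
 * 128 << (20 - 12) = 32768 pages, i.e. 128MB per 100ms window,
 * which is the "1280M per second" default mentioned above.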
1862b32967ffSMel Gorman */ 1863b32967ffSMel Gorman if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) { 18641c5e9c27SMel Gorman spin_lock(&pgdat->numabalancing_migrate_lock); 1865b32967ffSMel Gorman pgdat->numabalancing_migrate_nr_pages = 0; 1866b32967ffSMel Gorman pgdat->numabalancing_migrate_next_window = jiffies + 1867b32967ffSMel Gorman msecs_to_jiffies(migrate_interval_millisecs); 18681c5e9c27SMel Gorman spin_unlock(&pgdat->numabalancing_migrate_lock); 1869b32967ffSMel Gorman } 1870af1839d7SMel Gorman if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) { 1871af1839d7SMel Gorman trace_mm_numa_migrate_ratelimit(current, pgdat->node_id, 1872af1839d7SMel Gorman nr_pages); 18731c5e9c27SMel Gorman return true; 1874af1839d7SMel Gorman } 1875b32967ffSMel Gorman 18761c5e9c27SMel Gorman /* 18771c5e9c27SMel Gorman * This is an unlocked non-atomic update so errors are possible. 18781c5e9c27SMel Gorman * The consequences are failing to migrate when we potentiall should 18791c5e9c27SMel Gorman * have which is not severe enough to warrant locking. If it is ever 18801c5e9c27SMel Gorman * a problem, it can be converted to a per-cpu counter. 18811c5e9c27SMel Gorman */ 18821c5e9c27SMel Gorman pgdat->numabalancing_migrate_nr_pages += nr_pages; 18831c5e9c27SMel Gorman return false; 1884b32967ffSMel Gorman } 1885b32967ffSMel Gorman 18861c30e017SMel Gorman static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) 1887b32967ffSMel Gorman { 1888340ef390SHugh Dickins int page_lru; 1889b32967ffSMel Gorman 1890309381feSSasha Levin VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); 18913abef4e6SMel Gorman 1892b32967ffSMel Gorman /* Avoid migrating to a node that is nearly full */ 1893340ef390SHugh Dickins if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page))) 1894340ef390SHugh Dickins return 0; 1895b32967ffSMel Gorman 1896340ef390SHugh Dickins if (isolate_lru_page(page)) 1897340ef390SHugh Dickins return 0; 1898340ef390SHugh Dickins 1899340ef390SHugh Dickins /* 1900340ef390SHugh Dickins * migrate_misplaced_transhuge_page() skips page migration's usual 1901340ef390SHugh Dickins * check on page_count(), so we must do it here, now that the page 1902340ef390SHugh Dickins * has been isolated: a GUP pin, or any other pin, prevents migration. 1903340ef390SHugh Dickins * The expected page count is 3: 1 for page's mapcount and 1 for the 1904340ef390SHugh Dickins * caller's pin and 1 for the reference taken by isolate_lru_page(). 1905340ef390SHugh Dickins */ 1906340ef390SHugh Dickins if (PageTransHuge(page) && page_count(page) != 3) { 1907340ef390SHugh Dickins putback_lru_page(page); 1908b32967ffSMel Gorman return 0; 1909b32967ffSMel Gorman } 1910b32967ffSMel Gorman 1911b32967ffSMel Gorman page_lru = page_is_file_cache(page); 1912599d0c95SMel Gorman mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru, 1913340ef390SHugh Dickins hpage_nr_pages(page)); 1914b32967ffSMel Gorman 1915b32967ffSMel Gorman /* 1916340ef390SHugh Dickins * Isolating the page has taken another reference, so the 1917340ef390SHugh Dickins * caller's reference can be safely dropped without the page 1918340ef390SHugh Dickins * disappearing underneath us during migration. 
1919b32967ffSMel Gorman */ 1920b32967ffSMel Gorman put_page(page); 1921340ef390SHugh Dickins return 1; 1922b32967ffSMel Gorman } 1923b32967ffSMel Gorman 1924de466bd6SMel Gorman bool pmd_trans_migrating(pmd_t pmd) 1925de466bd6SMel Gorman { 1926de466bd6SMel Gorman struct page *page = pmd_page(pmd); 1927de466bd6SMel Gorman return PageLocked(page); 1928de466bd6SMel Gorman } 1929de466bd6SMel Gorman 1930a8f60772SMel Gorman /* 19317039e1dbSPeter Zijlstra * Attempt to migrate a misplaced page to the specified destination 19327039e1dbSPeter Zijlstra * node. Caller is expected to have an elevated reference count on 19337039e1dbSPeter Zijlstra * the page that will be dropped by this function before returning. 19347039e1dbSPeter Zijlstra */ 19351bc115d8SMel Gorman int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, 19361bc115d8SMel Gorman int node) 19377039e1dbSPeter Zijlstra { 1938a8f60772SMel Gorman pg_data_t *pgdat = NODE_DATA(node); 1939340ef390SHugh Dickins int isolated; 1940b32967ffSMel Gorman int nr_remaining; 19417039e1dbSPeter Zijlstra LIST_HEAD(migratepages); 19427039e1dbSPeter Zijlstra 19437039e1dbSPeter Zijlstra /* 19441bc115d8SMel Gorman * Don't migrate file pages that are mapped in multiple processes 19451bc115d8SMel Gorman * with execute permissions as they are probably shared libraries. 19467039e1dbSPeter Zijlstra */ 19471bc115d8SMel Gorman if (page_mapcount(page) != 1 && page_is_file_cache(page) && 19481bc115d8SMel Gorman (vma->vm_flags & VM_EXEC)) 19497039e1dbSPeter Zijlstra goto out; 19507039e1dbSPeter Zijlstra 1951a8f60772SMel Gorman /* 195209a913a7SMel Gorman * Also do not migrate dirty pages as not all filesystems can move 195309a913a7SMel Gorman * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles. 195409a913a7SMel Gorman */ 195509a913a7SMel Gorman if (page_is_file_cache(page) && PageDirty(page)) 195609a913a7SMel Gorman goto out; 195709a913a7SMel Gorman 195809a913a7SMel Gorman /* 1959a8f60772SMel Gorman * Rate-limit the amount of data that is being migrated to a node. 1960a8f60772SMel Gorman * Optimal placement is no good if the memory bus is saturated and 1961a8f60772SMel Gorman * all the time is being spent migrating! 
1962a8f60772SMel Gorman */ 1963340ef390SHugh Dickins if (numamigrate_update_ratelimit(pgdat, 1)) 1964a8f60772SMel Gorman goto out; 1965a8f60772SMel Gorman 1966b32967ffSMel Gorman isolated = numamigrate_isolate_page(pgdat, page); 1967b32967ffSMel Gorman if (!isolated) 19687039e1dbSPeter Zijlstra goto out; 19697039e1dbSPeter Zijlstra 19707039e1dbSPeter Zijlstra list_add(&page->lru, &migratepages); 19719c620e2bSHugh Dickins nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page, 197268711a74SDavid Rientjes NULL, node, MIGRATE_ASYNC, 197368711a74SDavid Rientjes MR_NUMA_MISPLACED); 19747039e1dbSPeter Zijlstra if (nr_remaining) { 197559c82b70SJoonsoo Kim if (!list_empty(&migratepages)) { 197659c82b70SJoonsoo Kim list_del(&page->lru); 1977599d0c95SMel Gorman dec_node_page_state(page, NR_ISOLATED_ANON + 197859c82b70SJoonsoo Kim page_is_file_cache(page)); 197959c82b70SJoonsoo Kim putback_lru_page(page); 198059c82b70SJoonsoo Kim } 19817039e1dbSPeter Zijlstra isolated = 0; 198203c5a6e1SMel Gorman } else 198303c5a6e1SMel Gorman count_vm_numa_event(NUMA_PAGE_MIGRATE); 19847039e1dbSPeter Zijlstra BUG_ON(!list_empty(&migratepages)); 19857039e1dbSPeter Zijlstra return isolated; 1986340ef390SHugh Dickins 1987340ef390SHugh Dickins out: 1988340ef390SHugh Dickins put_page(page); 1989340ef390SHugh Dickins return 0; 19907039e1dbSPeter Zijlstra } 1991220018d3SMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 1992b32967ffSMel Gorman 1993220018d3SMel Gorman #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE) 1994340ef390SHugh Dickins /* 1995340ef390SHugh Dickins * Migrates a THP to a given target node. page must be locked and is unlocked 1996340ef390SHugh Dickins * before returning. 1997340ef390SHugh Dickins */ 1998b32967ffSMel Gorman int migrate_misplaced_transhuge_page(struct mm_struct *mm, 1999b32967ffSMel Gorman struct vm_area_struct *vma, 2000b32967ffSMel Gorman pmd_t *pmd, pmd_t entry, 2001b32967ffSMel Gorman unsigned long address, 2002b32967ffSMel Gorman struct page *page, int node) 2003b32967ffSMel Gorman { 2004c4088ebdSKirill A. Shutemov spinlock_t *ptl; 2005b32967ffSMel Gorman pg_data_t *pgdat = NODE_DATA(node); 2006b32967ffSMel Gorman int isolated = 0; 2007b32967ffSMel Gorman struct page *new_page = NULL; 2008b32967ffSMel Gorman int page_lru = page_is_file_cache(page); 2009f714f4f2SMel Gorman unsigned long mmun_start = address & HPAGE_PMD_MASK; 2010f714f4f2SMel Gorman unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE; 2011b32967ffSMel Gorman 2012b32967ffSMel Gorman /* 2013b32967ffSMel Gorman * Rate-limit the amount of data that is being migrated to a node. 2014b32967ffSMel Gorman * Optimal placement is no good if the memory bus is saturated and 2015b32967ffSMel Gorman * all the time is being spent migrating! 2016b32967ffSMel Gorman */ 2017d28d4335SMel Gorman if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR)) 2018b32967ffSMel Gorman goto out_dropref; 2019b32967ffSMel Gorman 2020b32967ffSMel Gorman new_page = alloc_pages_node(node, 202125160354SVlastimil Babka (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE), 2022e97ca8e5SJohannes Weiner HPAGE_PMD_ORDER); 2023340ef390SHugh Dickins if (!new_page) 2024340ef390SHugh Dickins goto out_fail; 20259a982250SKirill A. 
Shutemov prep_transhuge_page(new_page); 2026340ef390SHugh Dickins 2027b32967ffSMel Gorman isolated = numamigrate_isolate_page(pgdat, page); 2028340ef390SHugh Dickins if (!isolated) { 2029b32967ffSMel Gorman put_page(new_page); 2030340ef390SHugh Dickins goto out_fail; 2031b32967ffSMel Gorman } 2032b0943d61SMel Gorman 2033b32967ffSMel Gorman /* Prepare a page as a migration target */ 203448c935adSKirill A. Shutemov __SetPageLocked(new_page); 2035d44d363fSShaohua Li if (PageSwapBacked(page)) 2036fa9949daSHugh Dickins __SetPageSwapBacked(new_page); 2037b32967ffSMel Gorman 2038b32967ffSMel Gorman /* anon mapping, we can simply copy page->mapping to the new page: */ 2039b32967ffSMel Gorman new_page->mapping = page->mapping; 2040b32967ffSMel Gorman new_page->index = page->index; 2041b32967ffSMel Gorman migrate_page_copy(new_page, page); 2042b32967ffSMel Gorman WARN_ON(PageLRU(new_page)); 2043b32967ffSMel Gorman 2044b32967ffSMel Gorman /* Recheck the target PMD */ 2045f714f4f2SMel Gorman mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2046c4088ebdSKirill A. Shutemov ptl = pmd_lock(mm, pmd); 2047f4e177d1SWill Deacon if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) { 2048c4088ebdSKirill A. Shutemov spin_unlock(ptl); 2049f714f4f2SMel Gorman mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2050b32967ffSMel Gorman 2051b32967ffSMel Gorman /* Reverse changes made by migrate_page_copy() */ 2052b32967ffSMel Gorman if (TestClearPageActive(new_page)) 2053b32967ffSMel Gorman SetPageActive(page); 2054b32967ffSMel Gorman if (TestClearPageUnevictable(new_page)) 2055b32967ffSMel Gorman SetPageUnevictable(page); 2056b32967ffSMel Gorman 2057b32967ffSMel Gorman unlock_page(new_page); 2058b32967ffSMel Gorman put_page(new_page); /* Free it */ 2059b32967ffSMel Gorman 2060a54a407fSMel Gorman /* Retake the callers reference and putback on LRU */ 2061a54a407fSMel Gorman get_page(page); 2062b32967ffSMel Gorman putback_lru_page(page); 2063599d0c95SMel Gorman mod_node_page_state(page_pgdat(page), 2064a54a407fSMel Gorman NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR); 2065eb4489f6SMel Gorman 2066eb4489f6SMel Gorman goto out_unlock; 2067b32967ffSMel Gorman } 2068b32967ffSMel Gorman 206910102459SKirill A. Shutemov entry = mk_huge_pmd(new_page, vma->vm_page_prot); 2070f55e1014SLinus Torvalds entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 2071b32967ffSMel Gorman 20722b4847e7SMel Gorman /* 20732b4847e7SMel Gorman * Clear the old entry under pagetable lock and establish the new PTE. 20742b4847e7SMel Gorman * Any parallel GUP will either observe the old page blocking on the 20752b4847e7SMel Gorman * page lock, block on the page table lock or observe the new page. 20762b4847e7SMel Gorman * The SetPageUptodate on the new page and page_add_new_anon_rmap 20772b4847e7SMel Gorman * guarantee the copy is visible before the pagetable update. 20782b4847e7SMel Gorman */ 2079f714f4f2SMel Gorman flush_cache_range(vma, mmun_start, mmun_end); 2080d281ee61SKirill A. Shutemov page_add_anon_rmap(new_page, vma, mmun_start, true); 20818809aa2dSAneesh Kumar K.V pmdp_huge_clear_flush_notify(vma, mmun_start, pmd); 2082f714f4f2SMel Gorman set_pmd_at(mm, mmun_start, pmd, entry); 2083ce4a9cc5SStephen Rothwell update_mmu_cache_pmd(vma, address, &entry); 20842b4847e7SMel Gorman 2085f4e177d1SWill Deacon page_ref_unfreeze(page, 2); 208651afb12bSHugh Dickins mlock_migrate_page(new_page, page); 2087d281ee61SKirill A. 
Shutemov page_remove_rmap(page, true); 20887cd12b4aSVlastimil Babka set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED); 20892b4847e7SMel Gorman 2090c4088ebdSKirill A. Shutemov spin_unlock(ptl); 20914645b9feSJérôme Glisse /* 20924645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback as 20934645b9feSJérôme Glisse * the above pmdp_huge_clear_flush_notify() did already call it. 20944645b9feSJérôme Glisse */ 20954645b9feSJérôme Glisse mmu_notifier_invalidate_range_only_end(mm, mmun_start, mmun_end); 2096b32967ffSMel Gorman 209711de9927SMel Gorman /* Take an "isolate" reference and put new page on the LRU. */ 209811de9927SMel Gorman get_page(new_page); 209911de9927SMel Gorman putback_lru_page(new_page); 210011de9927SMel Gorman 2101b32967ffSMel Gorman unlock_page(new_page); 2102b32967ffSMel Gorman unlock_page(page); 2103b32967ffSMel Gorman put_page(page); /* Drop the rmap reference */ 2104b32967ffSMel Gorman put_page(page); /* Drop the LRU isolation reference */ 2105b32967ffSMel Gorman 2106b32967ffSMel Gorman count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR); 2107b32967ffSMel Gorman count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR); 2108b32967ffSMel Gorman 2109599d0c95SMel Gorman mod_node_page_state(page_pgdat(page), 2110b32967ffSMel Gorman NR_ISOLATED_ANON + page_lru, 2111b32967ffSMel Gorman -HPAGE_PMD_NR); 2112b32967ffSMel Gorman return isolated; 2113b32967ffSMel Gorman 2114340ef390SHugh Dickins out_fail: 2115340ef390SHugh Dickins count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); 2116b32967ffSMel Gorman out_dropref: 21172b4847e7SMel Gorman ptl = pmd_lock(mm, pmd); 21182b4847e7SMel Gorman if (pmd_same(*pmd, entry)) { 21194d942466SMel Gorman entry = pmd_modify(entry, vma->vm_page_prot); 2120f714f4f2SMel Gorman set_pmd_at(mm, mmun_start, pmd, entry); 2121a54a407fSMel Gorman update_mmu_cache_pmd(vma, address, &entry); 21222b4847e7SMel Gorman } 21232b4847e7SMel Gorman spin_unlock(ptl); 2124a54a407fSMel Gorman 2125eb4489f6SMel Gorman out_unlock: 2126340ef390SHugh Dickins unlock_page(page); 2127b32967ffSMel Gorman put_page(page); 2128b32967ffSMel Gorman return 0; 2129b32967ffSMel Gorman } 21307039e1dbSPeter Zijlstra #endif /* CONFIG_NUMA_BALANCING */ 21317039e1dbSPeter Zijlstra 21327039e1dbSPeter Zijlstra #endif /* CONFIG_NUMA */ 21338763cb45SJérôme Glisse 21346b368cd4SJérôme Glisse #if defined(CONFIG_MIGRATE_VMA_HELPER) 21358763cb45SJérôme Glisse struct migrate_vma { 21368763cb45SJérôme Glisse struct vm_area_struct *vma; 21378763cb45SJérôme Glisse unsigned long *dst; 21388763cb45SJérôme Glisse unsigned long *src; 21398763cb45SJérôme Glisse unsigned long cpages; 21408763cb45SJérôme Glisse unsigned long npages; 21418763cb45SJérôme Glisse unsigned long start; 21428763cb45SJérôme Glisse unsigned long end; 21438763cb45SJérôme Glisse }; 21448763cb45SJérôme Glisse 21458763cb45SJérôme Glisse static int migrate_vma_collect_hole(unsigned long start, 21468763cb45SJérôme Glisse unsigned long end, 21478763cb45SJérôme Glisse struct mm_walk *walk) 21488763cb45SJérôme Glisse { 21498763cb45SJérôme Glisse struct migrate_vma *migrate = walk->private; 21508763cb45SJérôme Glisse unsigned long addr; 21518763cb45SJérôme Glisse 21528763cb45SJérôme Glisse for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { 2153e20d103bSMark Hairgrove migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; 21548315ada7SJérôme Glisse migrate->dst[migrate->npages] = 0; 2155e20d103bSMark Hairgrove migrate->npages++; 21568315ada7SJérôme Glisse migrate->cpages++; 21578315ada7SJérôme Glisse } 
21588315ada7SJérôme Glisse 21598315ada7SJérôme Glisse return 0; 21608315ada7SJérôme Glisse } 21618315ada7SJérôme Glisse 21628315ada7SJérôme Glisse static int migrate_vma_collect_skip(unsigned long start, 21638315ada7SJérôme Glisse unsigned long end, 21648315ada7SJérôme Glisse struct mm_walk *walk) 21658315ada7SJérôme Glisse { 21668315ada7SJérôme Glisse struct migrate_vma *migrate = walk->private; 21678315ada7SJérôme Glisse unsigned long addr; 21688315ada7SJérôme Glisse 21698315ada7SJérôme Glisse for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { 21708763cb45SJérôme Glisse migrate->dst[migrate->npages] = 0; 21718763cb45SJérôme Glisse migrate->src[migrate->npages++] = 0; 21728763cb45SJérôme Glisse } 21738763cb45SJérôme Glisse 21748763cb45SJérôme Glisse return 0; 21758763cb45SJérôme Glisse } 21768763cb45SJérôme Glisse 21778763cb45SJérôme Glisse static int migrate_vma_collect_pmd(pmd_t *pmdp, 21788763cb45SJérôme Glisse unsigned long start, 21798763cb45SJérôme Glisse unsigned long end, 21808763cb45SJérôme Glisse struct mm_walk *walk) 21818763cb45SJérôme Glisse { 21828763cb45SJérôme Glisse struct migrate_vma *migrate = walk->private; 21838763cb45SJérôme Glisse struct vm_area_struct *vma = walk->vma; 21848763cb45SJérôme Glisse struct mm_struct *mm = vma->vm_mm; 21858c3328f1SJérôme Glisse unsigned long addr = start, unmapped = 0; 21868763cb45SJérôme Glisse spinlock_t *ptl; 21878763cb45SJérôme Glisse pte_t *ptep; 21888763cb45SJérôme Glisse 21898763cb45SJérôme Glisse again: 21908763cb45SJérôme Glisse if (pmd_none(*pmdp)) 21918763cb45SJérôme Glisse return migrate_vma_collect_hole(start, end, walk); 21928763cb45SJérôme Glisse 21938763cb45SJérôme Glisse if (pmd_trans_huge(*pmdp)) { 21948763cb45SJérôme Glisse struct page *page; 21958763cb45SJérôme Glisse 21968763cb45SJérôme Glisse ptl = pmd_lock(mm, pmdp); 21978763cb45SJérôme Glisse if (unlikely(!pmd_trans_huge(*pmdp))) { 21988763cb45SJérôme Glisse spin_unlock(ptl); 21998763cb45SJérôme Glisse goto again; 22008763cb45SJérôme Glisse } 22018763cb45SJérôme Glisse 22028763cb45SJérôme Glisse page = pmd_page(*pmdp); 22038763cb45SJérôme Glisse if (is_huge_zero_page(page)) { 22048763cb45SJérôme Glisse spin_unlock(ptl); 22058763cb45SJérôme Glisse split_huge_pmd(vma, pmdp, addr); 22068763cb45SJérôme Glisse if (pmd_trans_unstable(pmdp)) 22078315ada7SJérôme Glisse return migrate_vma_collect_skip(start, end, 22088763cb45SJérôme Glisse walk); 22098763cb45SJérôme Glisse } else { 22108763cb45SJérôme Glisse int ret; 22118763cb45SJérôme Glisse 22128763cb45SJérôme Glisse get_page(page); 22138763cb45SJérôme Glisse spin_unlock(ptl); 22148763cb45SJérôme Glisse if (unlikely(!trylock_page(page))) 22158315ada7SJérôme Glisse return migrate_vma_collect_skip(start, end, 22168763cb45SJérôme Glisse walk); 22178763cb45SJérôme Glisse ret = split_huge_page(page); 22188763cb45SJérôme Glisse unlock_page(page); 22198763cb45SJérôme Glisse put_page(page); 22208315ada7SJérôme Glisse if (ret) 22218315ada7SJérôme Glisse return migrate_vma_collect_skip(start, end, 22228315ada7SJérôme Glisse walk); 22238315ada7SJérôme Glisse if (pmd_none(*pmdp)) 22248763cb45SJérôme Glisse return migrate_vma_collect_hole(start, end, 22258763cb45SJérôme Glisse walk); 22268763cb45SJérôme Glisse } 22278763cb45SJérôme Glisse } 22288763cb45SJérôme Glisse 22298763cb45SJérôme Glisse if (unlikely(pmd_bad(*pmdp))) 22308315ada7SJérôme Glisse return migrate_vma_collect_skip(start, end, walk); 22318763cb45SJérôme Glisse 22328763cb45SJérôme Glisse ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); 
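	/*
	 * The loop below inspects one pte at a time and records an encoded
	 * migrate_pfn() value in migrate->src[] for each migratable entry:
	 * holes and zero pfns become bare MIGRATE_PFN_MIGRATE slots with no
	 * backing page, device private entries keep their pfn, and any
	 * backing page is pinned with get_page(). When the page can be
	 * trylocked straight away, the pte is also replaced by a migration
	 * entry so that later CPU faults wait for the migration to finish.
	 */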
22338c3328f1SJérôme Glisse arch_enter_lazy_mmu_mode(); 22348c3328f1SJérôme Glisse 22358763cb45SJérôme Glisse for (; addr < end; addr += PAGE_SIZE, ptep++) { 22368763cb45SJérôme Glisse unsigned long mpfn, pfn; 22378763cb45SJérôme Glisse struct page *page; 22388c3328f1SJérôme Glisse swp_entry_t entry; 22398763cb45SJérôme Glisse pte_t pte; 22408763cb45SJérôme Glisse 22418763cb45SJérôme Glisse pte = *ptep; 22428763cb45SJérôme Glisse pfn = pte_pfn(pte); 22438763cb45SJérôme Glisse 2244a5430ddaSJérôme Glisse if (pte_none(pte)) { 22458315ada7SJérôme Glisse mpfn = MIGRATE_PFN_MIGRATE; 22468315ada7SJérôme Glisse migrate->cpages++; 22478315ada7SJérôme Glisse pfn = 0; 22488763cb45SJérôme Glisse goto next; 22498763cb45SJérôme Glisse } 22508763cb45SJérôme Glisse 2251a5430ddaSJérôme Glisse if (!pte_present(pte)) { 2252a5430ddaSJérôme Glisse mpfn = pfn = 0; 2253a5430ddaSJérôme Glisse 2254a5430ddaSJérôme Glisse /* 2255a5430ddaSJérôme Glisse * Only care about unaddressable device page special 2256a5430ddaSJérôme Glisse * page table entry. Other special swap entries are not 2257a5430ddaSJérôme Glisse * migratable, and we ignore regular swapped page. 2258a5430ddaSJérôme Glisse */ 2259a5430ddaSJérôme Glisse entry = pte_to_swp_entry(pte); 2260a5430ddaSJérôme Glisse if (!is_device_private_entry(entry)) 2261a5430ddaSJérôme Glisse goto next; 2262a5430ddaSJérôme Glisse 2263a5430ddaSJérôme Glisse page = device_private_entry_to_page(entry); 2264a5430ddaSJérôme Glisse mpfn = migrate_pfn(page_to_pfn(page))| 2265a5430ddaSJérôme Glisse MIGRATE_PFN_DEVICE | MIGRATE_PFN_MIGRATE; 2266a5430ddaSJérôme Glisse if (is_write_device_private_entry(entry)) 2267a5430ddaSJérôme Glisse mpfn |= MIGRATE_PFN_WRITE; 2268a5430ddaSJérôme Glisse } else { 22698315ada7SJérôme Glisse if (is_zero_pfn(pfn)) { 22708315ada7SJérôme Glisse mpfn = MIGRATE_PFN_MIGRATE; 22718315ada7SJérôme Glisse migrate->cpages++; 22728315ada7SJérôme Glisse pfn = 0; 22738315ada7SJérôme Glisse goto next; 22748315ada7SJérôme Glisse } 2275df6ad698SJérôme Glisse page = _vm_normal_page(migrate->vma, addr, pte, true); 2276a5430ddaSJérôme Glisse mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE; 2277a5430ddaSJérôme Glisse mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0; 2278a5430ddaSJérôme Glisse } 2279a5430ddaSJérôme Glisse 2280a5430ddaSJérôme Glisse /* FIXME support THP */ 22818763cb45SJérôme Glisse if (!page || !page->mapping || PageTransCompound(page)) { 22828763cb45SJérôme Glisse mpfn = pfn = 0; 22838763cb45SJérôme Glisse goto next; 22848763cb45SJérôme Glisse } 2285a5430ddaSJérôme Glisse pfn = page_to_pfn(page); 22868763cb45SJérôme Glisse 22878763cb45SJérôme Glisse /* 22888763cb45SJérôme Glisse * By getting a reference on the page we pin it and that blocks 22898763cb45SJérôme Glisse * any kind of migration. Side effect is that it "freezes" the 22908763cb45SJérôme Glisse * pte. 22918763cb45SJérôme Glisse * 22928763cb45SJérôme Glisse * We drop this reference after isolating the page from the lru 22938763cb45SJérôme Glisse * for non device page (device page are not on the lru and thus 22948763cb45SJérôme Glisse * can't be dropped from it). 22958763cb45SJérôme Glisse */ 22968763cb45SJérôme Glisse get_page(page); 22978763cb45SJérôme Glisse migrate->cpages++; 22988763cb45SJérôme Glisse 22998c3328f1SJérôme Glisse /* 23008c3328f1SJérôme Glisse * Optimize for the common case where page is only mapped once 23018c3328f1SJérôme Glisse * in one process. If we can lock the page, then we can safely 23028c3328f1SJérôme Glisse * set up a special migration page table entry now. 
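 * If the trylock fails here, the entry is still collected; the page is
 * then locked and unmapped the slow way in the later steps of the
 * migrate_vma() sequence.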
23038c3328f1SJérôme Glisse */ 23048c3328f1SJérôme Glisse if (trylock_page(page)) { 23058c3328f1SJérôme Glisse pte_t swp_pte; 23068c3328f1SJérôme Glisse 23078c3328f1SJérôme Glisse mpfn |= MIGRATE_PFN_LOCKED; 23088c3328f1SJérôme Glisse ptep_get_and_clear(mm, addr, ptep); 23098c3328f1SJérôme Glisse 23108c3328f1SJérôme Glisse /* Setup special migration page table entry */ 231107707125SRalph Campbell entry = make_migration_entry(page, mpfn & 231207707125SRalph Campbell MIGRATE_PFN_WRITE); 23138c3328f1SJérôme Glisse swp_pte = swp_entry_to_pte(entry); 23148c3328f1SJérôme Glisse if (pte_soft_dirty(pte)) 23158c3328f1SJérôme Glisse swp_pte = pte_swp_mksoft_dirty(swp_pte); 23168c3328f1SJérôme Glisse set_pte_at(mm, addr, ptep, swp_pte); 23178c3328f1SJérôme Glisse 23188c3328f1SJérôme Glisse /* 23198c3328f1SJérôme Glisse * This is like regular unmap: we remove the rmap and 23208c3328f1SJérôme Glisse * drop page refcount. Page won't be freed, as we took 23218c3328f1SJérôme Glisse * a reference just above. 23228c3328f1SJérôme Glisse */ 23238c3328f1SJérôme Glisse page_remove_rmap(page, false); 23248c3328f1SJérôme Glisse put_page(page); 2325a5430ddaSJérôme Glisse 2326a5430ddaSJérôme Glisse if (pte_present(pte)) 23278c3328f1SJérôme Glisse unmapped++; 23288c3328f1SJérôme Glisse } 23298c3328f1SJérôme Glisse 23308763cb45SJérôme Glisse next: 2331a5430ddaSJérôme Glisse migrate->dst[migrate->npages] = 0; 23328763cb45SJérôme Glisse migrate->src[migrate->npages++] = mpfn; 23338763cb45SJérôme Glisse } 23348c3328f1SJérôme Glisse arch_leave_lazy_mmu_mode(); 23358763cb45SJérôme Glisse pte_unmap_unlock(ptep - 1, ptl); 23368763cb45SJérôme Glisse 23378c3328f1SJérôme Glisse /* Only flush the TLB if we actually modified any entries */ 23388c3328f1SJérôme Glisse if (unmapped) 23398c3328f1SJérôme Glisse flush_tlb_range(walk->vma, start, end); 23408c3328f1SJérôme Glisse 23418763cb45SJérôme Glisse return 0; 23428763cb45SJérôme Glisse } 23438763cb45SJérôme Glisse 23448763cb45SJérôme Glisse /* 23458763cb45SJérôme Glisse * migrate_vma_collect() - collect pages over a range of virtual addresses 23468763cb45SJérôme Glisse * @migrate: migrate struct containing all migration information 23478763cb45SJérôme Glisse * 23488763cb45SJérôme Glisse * This will walk the CPU page table. For each virtual address backed by a 23498763cb45SJérôme Glisse * valid page, it updates the src array and takes a reference on the page, in 23508763cb45SJérôme Glisse * order to pin the page until we lock it and unmap it. 
23518763cb45SJérôme Glisse */
23528763cb45SJérôme Glisse static void migrate_vma_collect(struct migrate_vma *migrate)
23538763cb45SJérôme Glisse {
23548763cb45SJérôme Glisse struct mm_walk mm_walk;
23558763cb45SJérôme Glisse 
23568763cb45SJérôme Glisse mm_walk.pmd_entry = migrate_vma_collect_pmd;
23578763cb45SJérôme Glisse mm_walk.pte_entry = NULL;
23588763cb45SJérôme Glisse mm_walk.pte_hole = migrate_vma_collect_hole;
23598763cb45SJérôme Glisse mm_walk.hugetlb_entry = NULL;
23608763cb45SJérôme Glisse mm_walk.test_walk = NULL;
23618763cb45SJérôme Glisse mm_walk.vma = migrate->vma;
23628763cb45SJérôme Glisse mm_walk.mm = migrate->vma->vm_mm;
23638763cb45SJérôme Glisse mm_walk.private = migrate;
23648763cb45SJérôme Glisse 
23658c3328f1SJérôme Glisse mmu_notifier_invalidate_range_start(mm_walk.mm,
23668c3328f1SJérôme Glisse migrate->start,
23678c3328f1SJérôme Glisse migrate->end);
23688763cb45SJérôme Glisse walk_page_range(migrate->start, migrate->end, &mm_walk);
23698c3328f1SJérôme Glisse mmu_notifier_invalidate_range_end(mm_walk.mm,
23708c3328f1SJérôme Glisse migrate->start,
23718c3328f1SJérôme Glisse migrate->end);
23728763cb45SJérôme Glisse 
23738763cb45SJérôme Glisse migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
23748763cb45SJérôme Glisse }
23758763cb45SJérôme Glisse 
23768763cb45SJérôme Glisse /*
23778763cb45SJérôme Glisse * migrate_vma_check_page() - check if page is pinned or not
23788763cb45SJérôme Glisse * @page: struct page to check
23798763cb45SJérôme Glisse *
23808763cb45SJérôme Glisse * Pinned pages cannot be migrated. This is the same test as in
23818763cb45SJérôme Glisse * migrate_page_move_mapping(), except that here we allow migration of a
23828763cb45SJérôme Glisse * ZONE_DEVICE page.
23838763cb45SJérôme Glisse */
23848763cb45SJérôme Glisse static bool migrate_vma_check_page(struct page *page)
23858763cb45SJérôme Glisse {
23868763cb45SJérôme Glisse /*
23878763cb45SJérôme Glisse * One extra ref because caller holds an extra reference, either from
23888763cb45SJérôme Glisse * isolate_lru_page() for a regular page, or migrate_vma_collect() for
23898763cb45SJérôme Glisse * a device page.
23908763cb45SJérôme Glisse */
23918763cb45SJérôme Glisse int extra = 1;
23928763cb45SJérôme Glisse 
23938763cb45SJérôme Glisse /*
23948763cb45SJérôme Glisse * FIXME support THP (transparent huge page), it is a bit more complex to
23958763cb45SJérôme Glisse * check them than regular pages, because they can be mapped with a pmd
23968763cb45SJérôme Glisse * or with a pte (split pte mapping).
23978763cb45SJérôme Glisse */
23988763cb45SJérôme Glisse if (PageCompound(page))
23998763cb45SJérôme Glisse return false;
24008763cb45SJérôme Glisse 
2401a5430ddaSJérôme Glisse /* Pages from ZONE_DEVICE have one extra reference */
2402a5430ddaSJérôme Glisse if (is_zone_device_page(page)) {
2403a5430ddaSJérôme Glisse /*
2404a5430ddaSJérôme Glisse * Private pages can never be pinned as they have no valid pte and
2405a5430ddaSJérôme Glisse * GUP will fail for those. Yet if there is a pending migration
2406a5430ddaSJérôme Glisse * a thread might try to wait on the pte migration entry and
2407a5430ddaSJérôme Glisse * will bump the page reference count. Sadly there is no way to
2408a5430ddaSJérôme Glisse * differentiate a regular pin from migration wait. Hence, to
2409a5430ddaSJérôme Glisse * avoid two racing threads trying to migrate back to the CPU and entering an
2410a5430ddaSJérôme Glisse * infinite loop (one stopping the migration because the other is
2411a5430ddaSJérôme Glisse * waiting on the pte migration entry),
we always return true here.
2412a5430ddaSJérôme Glisse *
2413a5430ddaSJérôme Glisse * FIXME proper solution is to rework migration_entry_wait() so
2414a5430ddaSJérôme Glisse * it does not need to take a reference on the page.
2415a5430ddaSJérôme Glisse */
2416a5430ddaSJérôme Glisse if (is_device_private_page(page))
2417a5430ddaSJérôme Glisse return true;
2418a5430ddaSJérôme Glisse 
2419df6ad698SJérôme Glisse /*
2420df6ad698SJérôme Glisse * Only allow device public pages to be migrated, and account for
2421df6ad698SJérôme Glisse * the extra reference count implied by ZONE_DEVICE pages.
2422df6ad698SJérôme Glisse */
2423df6ad698SJérôme Glisse if (!is_device_public_page(page))
2424a5430ddaSJérôme Glisse return false;
2425df6ad698SJérôme Glisse extra++;
2426a5430ddaSJérôme Glisse }
2427a5430ddaSJérôme Glisse 
2428df6ad698SJérôme Glisse /* For file-backed pages */
2429df6ad698SJérôme Glisse if (page_mapping(page))
2430df6ad698SJérôme Glisse extra += 1 + page_has_private(page);
2431df6ad698SJérôme Glisse 
24328763cb45SJérôme Glisse if ((page_count(page) - extra) > page_mapcount(page))
24338763cb45SJérôme Glisse return false;
24348763cb45SJérôme Glisse 
24358763cb45SJérôme Glisse return true;
24368763cb45SJérôme Glisse }
24378763cb45SJérôme Glisse 
24388763cb45SJérôme Glisse /*
24398763cb45SJérôme Glisse * migrate_vma_prepare() - lock pages and isolate them from the lru
24408763cb45SJérôme Glisse * @migrate: migrate struct containing all migration information
24418763cb45SJérôme Glisse *
24428763cb45SJérôme Glisse * This locks pages that have been collected by migrate_vma_collect(). Once each
24438763cb45SJérôme Glisse * page is locked it is isolated from the lru (for non-device pages). Finally,
24448763cb45SJérôme Glisse * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
24458763cb45SJérôme Glisse * migrated by concurrent kernel threads.
24468763cb45SJérôme Glisse */
24478763cb45SJérôme Glisse static void migrate_vma_prepare(struct migrate_vma *migrate)
24488763cb45SJérôme Glisse {
24498763cb45SJérôme Glisse const unsigned long npages = migrate->npages;
24508c3328f1SJérôme Glisse const unsigned long start = migrate->start;
24518c3328f1SJérôme Glisse unsigned long addr, i, restore = 0;
24528763cb45SJérôme Glisse bool allow_drain = true;
24538763cb45SJérôme Glisse 
24548763cb45SJérôme Glisse lru_add_drain();
24558763cb45SJérôme Glisse 
24568763cb45SJérôme Glisse for (i = 0; (i < npages) && migrate->cpages; i++) {
24578763cb45SJérôme Glisse struct page *page = migrate_pfn_to_page(migrate->src[i]);
24588c3328f1SJérôme Glisse bool remap = true;
24598763cb45SJérôme Glisse 
24608763cb45SJérôme Glisse if (!page)
24618763cb45SJérôme Glisse continue;
24628763cb45SJérôme Glisse 
24638c3328f1SJérôme Glisse if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
24648763cb45SJérôme Glisse /*
24658763cb45SJérôme Glisse * Because we are migrating several pages there can be
24668763cb45SJérôme Glisse * a deadlock between two concurrent migrations where each
24678763cb45SJérôme Glisse * is waiting on the other's page lock.
24688763cb45SJérôme Glisse *
24698763cb45SJérôme Glisse * Make migrate_vma() a best-effort thing and back off
24708763cb45SJérôme Glisse * for any page we cannot lock right away.
24718763cb45SJérôme Glisse */ 24728763cb45SJérôme Glisse if (!trylock_page(page)) { 24738763cb45SJérôme Glisse migrate->src[i] = 0; 24748763cb45SJérôme Glisse migrate->cpages--; 24758763cb45SJérôme Glisse put_page(page); 24768763cb45SJérôme Glisse continue; 24778763cb45SJérôme Glisse } 24788c3328f1SJérôme Glisse remap = false; 24798763cb45SJérôme Glisse migrate->src[i] |= MIGRATE_PFN_LOCKED; 24808c3328f1SJérôme Glisse } 24818763cb45SJérôme Glisse 2482a5430ddaSJérôme Glisse /* ZONE_DEVICE pages are not on LRU */ 2483a5430ddaSJérôme Glisse if (!is_zone_device_page(page)) { 24848763cb45SJérôme Glisse if (!PageLRU(page) && allow_drain) { 24858763cb45SJérôme Glisse /* Drain CPU's pagevec */ 24868763cb45SJérôme Glisse lru_add_drain_all(); 24878763cb45SJérôme Glisse allow_drain = false; 24888763cb45SJérôme Glisse } 24898763cb45SJérôme Glisse 24908763cb45SJérôme Glisse if (isolate_lru_page(page)) { 24918c3328f1SJérôme Glisse if (remap) { 24928c3328f1SJérôme Glisse migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 24938c3328f1SJérôme Glisse migrate->cpages--; 24948c3328f1SJérôme Glisse restore++; 24958c3328f1SJérôme Glisse } else { 24968763cb45SJérôme Glisse migrate->src[i] = 0; 24978763cb45SJérôme Glisse unlock_page(page); 24988763cb45SJérôme Glisse migrate->cpages--; 24998763cb45SJérôme Glisse put_page(page); 25008c3328f1SJérôme Glisse } 25018763cb45SJérôme Glisse continue; 25028763cb45SJérôme Glisse } 25038763cb45SJérôme Glisse 2504a5430ddaSJérôme Glisse /* Drop the reference we took in collect */ 2505a5430ddaSJérôme Glisse put_page(page); 2506a5430ddaSJérôme Glisse } 2507a5430ddaSJérôme Glisse 25088763cb45SJérôme Glisse if (!migrate_vma_check_page(page)) { 25098c3328f1SJérôme Glisse if (remap) { 25108c3328f1SJérôme Glisse migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 25118c3328f1SJérôme Glisse migrate->cpages--; 25128c3328f1SJérôme Glisse restore++; 25138c3328f1SJérôme Glisse 2514a5430ddaSJérôme Glisse if (!is_zone_device_page(page)) { 25158c3328f1SJérôme Glisse get_page(page); 25168c3328f1SJérôme Glisse putback_lru_page(page); 2517a5430ddaSJérôme Glisse } 25188c3328f1SJérôme Glisse } else { 25198763cb45SJérôme Glisse migrate->src[i] = 0; 25208763cb45SJérôme Glisse unlock_page(page); 25218763cb45SJérôme Glisse migrate->cpages--; 25228763cb45SJérôme Glisse 2523a5430ddaSJérôme Glisse if (!is_zone_device_page(page)) 25248763cb45SJérôme Glisse putback_lru_page(page); 2525a5430ddaSJérôme Glisse else 2526a5430ddaSJérôme Glisse put_page(page); 25278763cb45SJérôme Glisse } 25288763cb45SJérôme Glisse } 25298763cb45SJérôme Glisse } 25308763cb45SJérôme Glisse 25318c3328f1SJérôme Glisse for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) { 25328c3328f1SJérôme Glisse struct page *page = migrate_pfn_to_page(migrate->src[i]); 25338c3328f1SJérôme Glisse 25348c3328f1SJérôme Glisse if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) 25358c3328f1SJérôme Glisse continue; 25368c3328f1SJérôme Glisse 25378c3328f1SJérôme Glisse remove_migration_pte(page, migrate->vma, addr, page); 25388c3328f1SJérôme Glisse 25398c3328f1SJérôme Glisse migrate->src[i] = 0; 25408c3328f1SJérôme Glisse unlock_page(page); 25418c3328f1SJérôme Glisse put_page(page); 25428c3328f1SJérôme Glisse restore--; 25438c3328f1SJérôme Glisse } 25448c3328f1SJérôme Glisse } 25458c3328f1SJérôme Glisse 25468763cb45SJérôme Glisse /* 25478763cb45SJérôme Glisse * migrate_vma_unmap() - replace page mapping with special migration pte entry 25488763cb45SJérôme Glisse * @migrate: migrate struct containing all migration information 25498763cb45SJérôme 
Glisse * 25508763cb45SJérôme Glisse * Replace page mapping (CPU page table pte) with a special migration pte entry 25518763cb45SJérôme Glisse * and check again if it has been pinned. Pinned pages are restored because we 25528763cb45SJérôme Glisse * cannot migrate them. 25538763cb45SJérôme Glisse * 25548763cb45SJérôme Glisse * This is the last step before we call the device driver callback to allocate 25558763cb45SJérôme Glisse * destination memory and copy contents of original page over to new page. 25568763cb45SJérôme Glisse */ 25578763cb45SJérôme Glisse static void migrate_vma_unmap(struct migrate_vma *migrate) 25588763cb45SJérôme Glisse { 25598763cb45SJérôme Glisse int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; 25608763cb45SJérôme Glisse const unsigned long npages = migrate->npages; 25618763cb45SJérôme Glisse const unsigned long start = migrate->start; 25628763cb45SJérôme Glisse unsigned long addr, i, restore = 0; 25638763cb45SJérôme Glisse 25648763cb45SJérôme Glisse for (i = 0; i < npages; i++) { 25658763cb45SJérôme Glisse struct page *page = migrate_pfn_to_page(migrate->src[i]); 25668763cb45SJérôme Glisse 25678763cb45SJérôme Glisse if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE)) 25688763cb45SJérôme Glisse continue; 25698763cb45SJérôme Glisse 25708c3328f1SJérôme Glisse if (page_mapped(page)) { 25718763cb45SJérôme Glisse try_to_unmap(page, flags); 25728c3328f1SJérôme Glisse if (page_mapped(page)) 25738c3328f1SJérôme Glisse goto restore; 25748c3328f1SJérôme Glisse } 25758c3328f1SJérôme Glisse 25768c3328f1SJérôme Glisse if (migrate_vma_check_page(page)) 25778c3328f1SJérôme Glisse continue; 25788c3328f1SJérôme Glisse 25798c3328f1SJérôme Glisse restore: 25808763cb45SJérôme Glisse migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 25818763cb45SJérôme Glisse migrate->cpages--; 25828763cb45SJérôme Glisse restore++; 25838763cb45SJérôme Glisse } 25848763cb45SJérôme Glisse 25858763cb45SJérôme Glisse for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) { 25868763cb45SJérôme Glisse struct page *page = migrate_pfn_to_page(migrate->src[i]); 25878763cb45SJérôme Glisse 25888763cb45SJérôme Glisse if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) 25898763cb45SJérôme Glisse continue; 25908763cb45SJérôme Glisse 25918763cb45SJérôme Glisse remove_migration_ptes(page, page, false); 25928763cb45SJérôme Glisse 25938763cb45SJérôme Glisse migrate->src[i] = 0; 25948763cb45SJérôme Glisse unlock_page(page); 25958763cb45SJérôme Glisse restore--; 25968763cb45SJérôme Glisse 2597a5430ddaSJérôme Glisse if (is_zone_device_page(page)) 2598a5430ddaSJérôme Glisse put_page(page); 2599a5430ddaSJérôme Glisse else 26008763cb45SJérôme Glisse putback_lru_page(page); 26018763cb45SJérôme Glisse } 26028763cb45SJérôme Glisse } 26038763cb45SJérôme Glisse 26048315ada7SJérôme Glisse static void migrate_vma_insert_page(struct migrate_vma *migrate, 26058315ada7SJérôme Glisse unsigned long addr, 26068315ada7SJérôme Glisse struct page *page, 26078315ada7SJérôme Glisse unsigned long *src, 26088315ada7SJérôme Glisse unsigned long *dst) 26098315ada7SJérôme Glisse { 26108315ada7SJérôme Glisse struct vm_area_struct *vma = migrate->vma; 26118315ada7SJérôme Glisse struct mm_struct *mm = vma->vm_mm; 26128315ada7SJérôme Glisse struct mem_cgroup *memcg; 26138315ada7SJérôme Glisse bool flush = false; 26148315ada7SJérôme Glisse spinlock_t *ptl; 26158315ada7SJérôme Glisse pte_t entry; 26168315ada7SJérôme Glisse pgd_t *pgdp; 26178315ada7SJérôme Glisse p4d_t *p4dp; 26188315ada7SJérôme Glisse pud_t *pudp; 
26198315ada7SJérôme Glisse pmd_t *pmdp; 26208315ada7SJérôme Glisse pte_t *ptep; 26218315ada7SJérôme Glisse 26228315ada7SJérôme Glisse /* Only allow populating anonymous memory */ 26238315ada7SJérôme Glisse if (!vma_is_anonymous(vma)) 26248315ada7SJérôme Glisse goto abort; 26258315ada7SJérôme Glisse 26268315ada7SJérôme Glisse pgdp = pgd_offset(mm, addr); 26278315ada7SJérôme Glisse p4dp = p4d_alloc(mm, pgdp, addr); 26288315ada7SJérôme Glisse if (!p4dp) 26298315ada7SJérôme Glisse goto abort; 26308315ada7SJérôme Glisse pudp = pud_alloc(mm, p4dp, addr); 26318315ada7SJérôme Glisse if (!pudp) 26328315ada7SJérôme Glisse goto abort; 26338315ada7SJérôme Glisse pmdp = pmd_alloc(mm, pudp, addr); 26348315ada7SJérôme Glisse if (!pmdp) 26358315ada7SJérôme Glisse goto abort; 26368315ada7SJérôme Glisse 26378315ada7SJérôme Glisse if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp)) 26388315ada7SJérôme Glisse goto abort; 26398315ada7SJérôme Glisse 26408315ada7SJérôme Glisse /* 26418315ada7SJérôme Glisse * Use pte_alloc() instead of pte_alloc_map(). We can't run 26428315ada7SJérôme Glisse * pte_offset_map() on pmds where a huge pmd might be created 26438315ada7SJérôme Glisse * from a different thread. 26448315ada7SJérôme Glisse * 26458315ada7SJérôme Glisse * pte_alloc_map() is safe to use under down_write(mmap_sem) or when 26468315ada7SJérôme Glisse * parallel threads are excluded by other means. 26478315ada7SJérôme Glisse * 26488315ada7SJérôme Glisse * Here we only have down_read(mmap_sem). 26498315ada7SJérôme Glisse */ 26508315ada7SJérôme Glisse if (pte_alloc(mm, pmdp, addr)) 26518315ada7SJérôme Glisse goto abort; 26528315ada7SJérôme Glisse 26538315ada7SJérôme Glisse /* See the comment in pte_alloc_one_map() */ 26548315ada7SJérôme Glisse if (unlikely(pmd_trans_unstable(pmdp))) 26558315ada7SJérôme Glisse goto abort; 26568315ada7SJérôme Glisse 26578315ada7SJérôme Glisse if (unlikely(anon_vma_prepare(vma))) 26588315ada7SJérôme Glisse goto abort; 26598315ada7SJérôme Glisse if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false)) 26608315ada7SJérôme Glisse goto abort; 26618315ada7SJérôme Glisse 26628315ada7SJérôme Glisse /* 26638315ada7SJérôme Glisse * The memory barrier inside __SetPageUptodate makes sure that 26648315ada7SJérôme Glisse * preceding stores to the page contents become visible before 26658315ada7SJérôme Glisse * the set_pte_at() write. 
26668315ada7SJérôme Glisse */
26678315ada7SJérôme Glisse __SetPageUptodate(page);
26688315ada7SJérôme Glisse 
2669df6ad698SJérôme Glisse if (is_zone_device_page(page)) {
2670df6ad698SJérôme Glisse if (is_device_private_page(page)) {
26718315ada7SJérôme Glisse swp_entry_t swp_entry;
26728315ada7SJérôme Glisse 
26738315ada7SJérôme Glisse swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
26748315ada7SJérôme Glisse entry = swp_entry_to_pte(swp_entry);
2675df6ad698SJérôme Glisse } else if (is_device_public_page(page)) {
2676df6ad698SJérôme Glisse entry = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
2677df6ad698SJérôme Glisse if (vma->vm_flags & VM_WRITE)
2678df6ad698SJérôme Glisse entry = pte_mkwrite(pte_mkdirty(entry));
2679df6ad698SJérôme Glisse entry = pte_mkdevmap(entry);
2680df6ad698SJérôme Glisse }
26818315ada7SJérôme Glisse } else {
26828315ada7SJérôme Glisse entry = mk_pte(page, vma->vm_page_prot);
26838315ada7SJérôme Glisse if (vma->vm_flags & VM_WRITE)
26848315ada7SJérôme Glisse entry = pte_mkwrite(pte_mkdirty(entry));
26858315ada7SJérôme Glisse }
26868315ada7SJérôme Glisse 
26878315ada7SJérôme Glisse ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
26888315ada7SJérôme Glisse 
26898315ada7SJérôme Glisse if (pte_present(*ptep)) {
26908315ada7SJérôme Glisse unsigned long pfn = pte_pfn(*ptep);
26918315ada7SJérôme Glisse 
26928315ada7SJérôme Glisse if (!is_zero_pfn(pfn)) {
26938315ada7SJérôme Glisse pte_unmap_unlock(ptep, ptl);
26948315ada7SJérôme Glisse mem_cgroup_cancel_charge(page, memcg, false);
26958315ada7SJérôme Glisse goto abort;
26968315ada7SJérôme Glisse }
26978315ada7SJérôme Glisse flush = true;
26988315ada7SJérôme Glisse } else if (!pte_none(*ptep)) {
26998315ada7SJérôme Glisse pte_unmap_unlock(ptep, ptl);
27008315ada7SJérôme Glisse mem_cgroup_cancel_charge(page, memcg, false);
27018315ada7SJérôme Glisse goto abort;
27028315ada7SJérôme Glisse }
27038315ada7SJérôme Glisse 
27048315ada7SJérôme Glisse /*
27058315ada7SJérôme Glisse * Check for userfaultfd but do not deliver the fault. Instead,
27068315ada7SJérôme Glisse * just back off.
27078315ada7SJérôme Glisse */ 27088315ada7SJérôme Glisse if (userfaultfd_missing(vma)) { 27098315ada7SJérôme Glisse pte_unmap_unlock(ptep, ptl); 27108315ada7SJérôme Glisse mem_cgroup_cancel_charge(page, memcg, false); 27118315ada7SJérôme Glisse goto abort; 27128315ada7SJérôme Glisse } 27138315ada7SJérôme Glisse 27148315ada7SJérôme Glisse inc_mm_counter(mm, MM_ANONPAGES); 27158315ada7SJérôme Glisse page_add_new_anon_rmap(page, vma, addr, false); 27168315ada7SJérôme Glisse mem_cgroup_commit_charge(page, memcg, false, false); 27178315ada7SJérôme Glisse if (!is_zone_device_page(page)) 27188315ada7SJérôme Glisse lru_cache_add_active_or_unevictable(page, vma); 27198315ada7SJérôme Glisse get_page(page); 27208315ada7SJérôme Glisse 27218315ada7SJérôme Glisse if (flush) { 27228315ada7SJérôme Glisse flush_cache_page(vma, addr, pte_pfn(*ptep)); 27238315ada7SJérôme Glisse ptep_clear_flush_notify(vma, addr, ptep); 27248315ada7SJérôme Glisse set_pte_at_notify(mm, addr, ptep, entry); 27258315ada7SJérôme Glisse update_mmu_cache(vma, addr, ptep); 27268315ada7SJérôme Glisse } else { 27278315ada7SJérôme Glisse /* No need to invalidate - it was non-present before */ 27288315ada7SJérôme Glisse set_pte_at(mm, addr, ptep, entry); 27298315ada7SJérôme Glisse update_mmu_cache(vma, addr, ptep); 27308315ada7SJérôme Glisse } 27318315ada7SJérôme Glisse 27328315ada7SJérôme Glisse pte_unmap_unlock(ptep, ptl); 27338315ada7SJérôme Glisse *src = MIGRATE_PFN_MIGRATE; 27348315ada7SJérôme Glisse return; 27358315ada7SJérôme Glisse 27368315ada7SJérôme Glisse abort: 27378315ada7SJérôme Glisse *src &= ~MIGRATE_PFN_MIGRATE; 27388315ada7SJérôme Glisse } 27398315ada7SJérôme Glisse 27408763cb45SJérôme Glisse /* 27418763cb45SJérôme Glisse * migrate_vma_pages() - migrate meta-data from src page to dst page 27428763cb45SJérôme Glisse * @migrate: migrate struct containing all migration information 27438763cb45SJérôme Glisse * 27448763cb45SJérôme Glisse * This migrates struct page meta-data from source struct page to destination 27458763cb45SJérôme Glisse * struct page. This effectively finishes the migration from source page to the 27468763cb45SJérôme Glisse * destination page. 
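* For addresses where no source page was collected (the pte was none or pointed at the zero page), migrate_vma_insert_page() inserts a brand new page instead of migrating an existing one.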
27478763cb45SJérôme Glisse */ 27488763cb45SJérôme Glisse static void migrate_vma_pages(struct migrate_vma *migrate) 27498763cb45SJérôme Glisse { 27508763cb45SJérôme Glisse const unsigned long npages = migrate->npages; 27518763cb45SJérôme Glisse const unsigned long start = migrate->start; 27528315ada7SJérôme Glisse struct vm_area_struct *vma = migrate->vma; 27538315ada7SJérôme Glisse struct mm_struct *mm = vma->vm_mm; 27548315ada7SJérôme Glisse unsigned long addr, i, mmu_start; 27558315ada7SJérôme Glisse bool notified = false; 27568763cb45SJérôme Glisse 27578763cb45SJérôme Glisse for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) { 27588763cb45SJérôme Glisse struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); 27598763cb45SJérôme Glisse struct page *page = migrate_pfn_to_page(migrate->src[i]); 27608763cb45SJérôme Glisse struct address_space *mapping; 27618763cb45SJérôme Glisse int r; 27628763cb45SJérôme Glisse 27638315ada7SJérôme Glisse if (!newpage) { 27648315ada7SJérôme Glisse migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 27658763cb45SJérôme Glisse continue; 27668315ada7SJérôme Glisse } 27678315ada7SJérôme Glisse 27688315ada7SJérôme Glisse if (!page) { 27698315ada7SJérôme Glisse if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) { 27708763cb45SJérôme Glisse continue; 27718315ada7SJérôme Glisse } 27728315ada7SJérôme Glisse if (!notified) { 27738315ada7SJérôme Glisse mmu_start = addr; 27748315ada7SJérôme Glisse notified = true; 27758315ada7SJérôme Glisse mmu_notifier_invalidate_range_start(mm, 27768315ada7SJérôme Glisse mmu_start, 27778315ada7SJérôme Glisse migrate->end); 27788315ada7SJérôme Glisse } 27798315ada7SJérôme Glisse migrate_vma_insert_page(migrate, addr, newpage, 27808315ada7SJérôme Glisse &migrate->src[i], 27818315ada7SJérôme Glisse &migrate->dst[i]); 27828315ada7SJérôme Glisse continue; 27838315ada7SJérôme Glisse } 27848763cb45SJérôme Glisse 27858763cb45SJérôme Glisse mapping = page_mapping(page); 27868763cb45SJérôme Glisse 2787a5430ddaSJérôme Glisse if (is_zone_device_page(newpage)) { 2788a5430ddaSJérôme Glisse if (is_device_private_page(newpage)) { 2789a5430ddaSJérôme Glisse /* 2790a5430ddaSJérôme Glisse * For now only support private anonymous when 2791a5430ddaSJérôme Glisse * migrating to un-addressable device memory. 2792a5430ddaSJérôme Glisse */ 2793a5430ddaSJérôme Glisse if (mapping) { 2794a5430ddaSJérôme Glisse migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2795a5430ddaSJérôme Glisse continue; 2796a5430ddaSJérôme Glisse } 2797df6ad698SJérôme Glisse } else if (!is_device_public_page(newpage)) { 2798a5430ddaSJérôme Glisse /* 2799a5430ddaSJérôme Glisse * Other types of ZONE_DEVICE page are not 2800a5430ddaSJérôme Glisse * supported. 2801a5430ddaSJérôme Glisse */ 2802a5430ddaSJérôme Glisse migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 2803a5430ddaSJérôme Glisse continue; 2804a5430ddaSJérôme Glisse } 2805a5430ddaSJérôme Glisse } 2806a5430ddaSJérôme Glisse 28078763cb45SJérôme Glisse r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY); 28088763cb45SJérôme Glisse if (r != MIGRATEPAGE_SUCCESS) 28098763cb45SJérôme Glisse migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 28108763cb45SJérôme Glisse } 28118315ada7SJérôme Glisse 28124645b9feSJérôme Glisse /* 28134645b9feSJérôme Glisse * No need to double call mmu_notifier->invalidate_range() callback as 28144645b9feSJérôme Glisse * the above ptep_clear_flush_notify() inside migrate_vma_insert_page() 28154645b9feSJérôme Glisse * did already call it. 
28164645b9feSJérôme Glisse */ 28178315ada7SJérôme Glisse if (notified) 28184645b9feSJérôme Glisse mmu_notifier_invalidate_range_only_end(mm, mmu_start, 28198315ada7SJérôme Glisse migrate->end); 28208763cb45SJérôme Glisse } 28218763cb45SJérôme Glisse 28228763cb45SJérôme Glisse /* 28238763cb45SJérôme Glisse * migrate_vma_finalize() - restore CPU page table entry 28248763cb45SJérôme Glisse * @migrate: migrate struct containing all migration information 28258763cb45SJérôme Glisse * 28268763cb45SJérôme Glisse * This replaces the special migration pte entry with either a mapping to the 28278763cb45SJérôme Glisse * new page if migration was successful for that page, or to the original page 28288763cb45SJérôme Glisse * otherwise. 28298763cb45SJérôme Glisse * 28308763cb45SJérôme Glisse * This also unlocks the pages and puts them back on the lru, or drops the extra 28318763cb45SJérôme Glisse * refcount, for device pages. 28328763cb45SJérôme Glisse */ 28338763cb45SJérôme Glisse static void migrate_vma_finalize(struct migrate_vma *migrate) 28348763cb45SJérôme Glisse { 28358763cb45SJérôme Glisse const unsigned long npages = migrate->npages; 28368763cb45SJérôme Glisse unsigned long i; 28378763cb45SJérôme Glisse 28388763cb45SJérôme Glisse for (i = 0; i < npages; i++) { 28398763cb45SJérôme Glisse struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); 28408763cb45SJérôme Glisse struct page *page = migrate_pfn_to_page(migrate->src[i]); 28418763cb45SJérôme Glisse 28428315ada7SJérôme Glisse if (!page) { 28438315ada7SJérôme Glisse if (newpage) { 28448315ada7SJérôme Glisse unlock_page(newpage); 28458315ada7SJérôme Glisse put_page(newpage); 28468315ada7SJérôme Glisse } 28478763cb45SJérôme Glisse continue; 28488315ada7SJérôme Glisse } 28498315ada7SJérôme Glisse 28508763cb45SJérôme Glisse if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) { 28518763cb45SJérôme Glisse if (newpage) { 28528763cb45SJérôme Glisse unlock_page(newpage); 28538763cb45SJérôme Glisse put_page(newpage); 28548763cb45SJérôme Glisse } 28558763cb45SJérôme Glisse newpage = page; 28568763cb45SJérôme Glisse } 28578763cb45SJérôme Glisse 28588763cb45SJérôme Glisse remove_migration_ptes(page, newpage, false); 28598763cb45SJérôme Glisse unlock_page(page); 28608763cb45SJérôme Glisse migrate->cpages--; 28618763cb45SJérôme Glisse 2862a5430ddaSJérôme Glisse if (is_zone_device_page(page)) 2863a5430ddaSJérôme Glisse put_page(page); 2864a5430ddaSJérôme Glisse else 28658763cb45SJérôme Glisse putback_lru_page(page); 28668763cb45SJérôme Glisse 28678763cb45SJérôme Glisse if (newpage != page) { 28688763cb45SJérôme Glisse unlock_page(newpage); 2869a5430ddaSJérôme Glisse if (is_zone_device_page(newpage)) 2870a5430ddaSJérôme Glisse put_page(newpage); 2871a5430ddaSJérôme Glisse else 28728763cb45SJérôme Glisse putback_lru_page(newpage); 28738763cb45SJérôme Glisse } 28748763cb45SJérôme Glisse } 28758763cb45SJérôme Glisse } 28768763cb45SJérôme Glisse 28778763cb45SJérôme Glisse /* 28788763cb45SJérôme Glisse * migrate_vma() - migrate a range of memory inside vma 28798763cb45SJérôme Glisse * 28808763cb45SJérôme Glisse * @ops: migration callback for allocating destination memory and copying 28818763cb45SJérôme Glisse * @vma: virtual memory area containing the range to be migrated 28828763cb45SJérôme Glisse * @start: start address of the range to migrate (inclusive) 28838763cb45SJérôme Glisse * @end: end address of the range to migrate (exclusive) 28848763cb45SJérôme Glisse * @src: array of hmm_pfn_t containing source pfns 28858763cb45SJérôme Glisse * @dst: array 
of hmm_pfn_t containing destination pfns
28868763cb45SJérôme Glisse * @private: pointer passed back to each of the callbacks
28878763cb45SJérôme Glisse * Returns: 0 on success, error code otherwise
28888763cb45SJérôme Glisse *
28898763cb45SJérôme Glisse * This function tries to migrate a range of virtual addresses, using
28908763cb45SJérôme Glisse * callbacks to allocate and copy memory from source to destination. First it
28918763cb45SJérôme Glisse * collects all the pages backing each virtual address in the range, saving this
28928763cb45SJérôme Glisse * inside the src array. Then it locks those pages and unmaps them. Once the pages
28938763cb45SJérôme Glisse * are locked and unmapped, it checks whether each page is pinned or not. Pages
28948763cb45SJérôme Glisse * that aren't pinned have the MIGRATE_PFN_MIGRATE flag set (by this function)
28958763cb45SJérôme Glisse * in the corresponding src array entry. It then restores any pages that are
28968763cb45SJérôme Glisse * pinned, by remapping and unlocking those pages.
28978763cb45SJérôme Glisse *
28988763cb45SJérôme Glisse * At this point it calls the alloc_and_copy() callback. For documentation on
28998763cb45SJérôme Glisse * what is expected from that callback, see struct migrate_vma_ops comments in
29008763cb45SJérôme Glisse * include/linux/migrate.h
29018763cb45SJérôme Glisse *
29028763cb45SJérôme Glisse * After the alloc_and_copy() callback, this function goes over each entry in
29038763cb45SJérôme Glisse * the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags
29048763cb45SJérôme Glisse * set. If the corresponding entry in the dst array has the MIGRATE_PFN_VALID flag set,
29058763cb45SJérôme Glisse * then the function tries to migrate struct page information from the source
29068763cb45SJérôme Glisse * struct page to the destination struct page. If it fails to migrate the struct
29078763cb45SJérôme Glisse * page information, then it clears the MIGRATE_PFN_MIGRATE flag in the src
29088763cb45SJérôme Glisse * array.
29098763cb45SJérôme Glisse *
29108763cb45SJérôme Glisse * At this point all successfully migrated pages have an entry in the src
29118763cb45SJérôme Glisse * array with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags set and the dst
29128763cb45SJérôme Glisse * array entry with the MIGRATE_PFN_VALID flag set.
29138763cb45SJérôme Glisse *
29148763cb45SJérôme Glisse * It then calls the finalize_and_map() callback. See comments for "struct
29158763cb45SJérôme Glisse * migrate_vma_ops", in include/linux/migrate.h for details about
29168763cb45SJérôme Glisse * finalize_and_map() behavior.
29178763cb45SJérôme Glisse *
29188763cb45SJérôme Glisse * After the finalize_and_map() callback, for successfully migrated pages, this
29198763cb45SJérôme Glisse * function updates the CPU page table to point to new pages, otherwise it
29208763cb45SJérôme Glisse * restores the CPU page table to point to the original source pages.
29218763cb45SJérôme Glisse *
29228763cb45SJérôme Glisse * This function returns 0 after the above steps, even if no pages were migrated
29238763cb45SJérôme Glisse * (it only returns an error if any of the arguments are invalid).
29248763cb45SJérôme Glisse *
29258763cb45SJérôme Glisse * Both the src and dst arrays must be big enough for (end - start) >> PAGE_SHIFT
29268763cb45SJérôme Glisse * unsigned long entries.
29278763cb45SJérôme Glisse */
29288763cb45SJérôme Glisse int migrate_vma(const struct migrate_vma_ops *ops,
29298763cb45SJérôme Glisse struct vm_area_struct *vma,
29308763cb45SJérôme Glisse unsigned long start,
29318763cb45SJérôme Glisse unsigned long end,
29328763cb45SJérôme Glisse unsigned long *src,
29338763cb45SJérôme Glisse unsigned long *dst,
29348763cb45SJérôme Glisse void *private)
29358763cb45SJérôme Glisse {
29368763cb45SJérôme Glisse struct migrate_vma migrate;
29378763cb45SJérôme Glisse 
29388763cb45SJérôme Glisse /* Sanity check the arguments */
29398763cb45SJérôme Glisse start &= PAGE_MASK;
29408763cb45SJérôme Glisse end &= PAGE_MASK;
29418763cb45SJérôme Glisse if (!vma || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL))
29428763cb45SJérôme Glisse return -EINVAL;
29438763cb45SJérôme Glisse if (start < vma->vm_start || start >= vma->vm_end)
29448763cb45SJérôme Glisse return -EINVAL;
29458763cb45SJérôme Glisse if (end <= vma->vm_start || end > vma->vm_end)
29468763cb45SJérôme Glisse return -EINVAL;
29478763cb45SJérôme Glisse if (!ops || !src || !dst || start >= end)
29488763cb45SJérôme Glisse return -EINVAL;
29498763cb45SJérôme Glisse 
29508763cb45SJérôme Glisse memset(src, 0, sizeof(*src) * ((end - start) >> PAGE_SHIFT));
29518763cb45SJérôme Glisse migrate.src = src;
29528763cb45SJérôme Glisse migrate.dst = dst;
29538763cb45SJérôme Glisse migrate.start = start;
29548763cb45SJérôme Glisse migrate.npages = 0;
29558763cb45SJérôme Glisse migrate.cpages = 0;
29568763cb45SJérôme Glisse migrate.end = end;
29578763cb45SJérôme Glisse migrate.vma = vma;
29588763cb45SJérôme Glisse 
29598763cb45SJérôme Glisse /* Collect, and try to unmap source pages */
29608763cb45SJérôme Glisse migrate_vma_collect(&migrate);
29618763cb45SJérôme Glisse if (!migrate.cpages)
29628763cb45SJérôme Glisse return 0;
29638763cb45SJérôme Glisse 
29648763cb45SJérôme Glisse /* Lock and isolate page */
29658763cb45SJérôme Glisse migrate_vma_prepare(&migrate);
29668763cb45SJérôme Glisse if (!migrate.cpages)
29678763cb45SJérôme Glisse return 0;
29688763cb45SJérôme Glisse 
29698763cb45SJérôme Glisse /* Unmap pages */
29708763cb45SJérôme Glisse migrate_vma_unmap(&migrate);
29718763cb45SJérôme Glisse if (!migrate.cpages)
29728763cb45SJérôme Glisse return 0;
29738763cb45SJérôme Glisse 
29748763cb45SJérôme Glisse /*
29758763cb45SJérôme Glisse * At this point pages are locked and unmapped, and thus they have
29768763cb45SJérôme Glisse * stable content and can safely be copied to destination memory that
29778763cb45SJérôme Glisse * is allocated by the callback.
29788763cb45SJérôme Glisse *
29798763cb45SJérôme Glisse * Note that migration can fail in migrate_vma_pages() for each
29808763cb45SJérôme Glisse * individual page.
29818763cb45SJérôme Glisse */
29828763cb45SJérôme Glisse ops->alloc_and_copy(vma, src, dst, start, end, private);
29838763cb45SJérôme Glisse 
29848763cb45SJérôme Glisse /* This does the real migration of struct page */
29858763cb45SJérôme Glisse migrate_vma_pages(&migrate);
29868763cb45SJérôme Glisse 
29878763cb45SJérôme Glisse ops->finalize_and_map(vma, src, dst, start, end, private);
29888763cb45SJérôme Glisse 
29898763cb45SJérôme Glisse /* Unlock and remap pages */
29908763cb45SJérôme Glisse migrate_vma_finalize(&migrate);
29918763cb45SJérôme Glisse 
29928763cb45SJérôme Glisse return 0;
29938763cb45SJérôme Glisse }
29948763cb45SJérôme Glisse EXPORT_SYMBOL(migrate_vma);
29956b368cd4SJérôme Glisse #endif /* defined(MIGRATE_VMA_HELPER) */
2996
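/*
 * Illustrative usage sketch (not part of the original file): a minimal
 * driver-side pairing of alloc_and_copy()/finalize_and_map() callbacks with a
 * call to migrate_vma(), assuming the struct migrate_vma_ops callback
 * signatures declared in include/linux/migrate.h and the helpers from
 * linux/mm.h and linux/highmem.h. All example_* names are hypothetical, the
 * "device" destination is faked with ordinary pages from alloc_page_vma(),
 * and a failure is handled simply by leaving the dst entry at zero so that
 * page is skipped.
 */
static void example_alloc_and_copy(struct vm_area_struct *vma,
				   const unsigned long *src,
				   unsigned long *dst,
				   unsigned long start,
				   unsigned long end,
				   void *private)
{
	unsigned long addr, i;

	for (i = 0, addr = start; addr < end; addr += PAGE_SIZE, i++) {
		struct page *spage = migrate_pfn_to_page(src[i]);
		struct page *dpage;

		/* Only act on entries the collect/unmap steps left migratable */
		if (!(src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = alloc_page_vma(GFP_HIGHUSER, vma, addr);
		if (!dpage)
			continue;

		/* A NULL source page means a hole (pte_none or zero page) */
		if (spage)
			copy_highpage(dpage, spage);
		else
			clear_highpage(dpage);

		/* The destination page must be handed back locked */
		lock_page(dpage);
		dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	}
}

static void example_finalize_and_map(struct vm_area_struct *vma,
				     const unsigned long *src,
				     const unsigned long *dst,
				     unsigned long start,
				     unsigned long end,
				     void *private)
{
	/*
	 * Entries whose src slot still has MIGRATE_PFN_MIGRATE set were
	 * migrated; a real driver would update its own tracking here.
	 */
}

static const struct migrate_vma_ops example_migrate_ops = {
	.alloc_and_copy		= example_alloc_and_copy,
	.finalize_and_map	= example_finalize_and_map,
};

/* Caller holds down_read(mmap_sem); addr is page aligned and inside vma */
static int example_migrate_one_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	unsigned long src = 0, dst = 0;

	return migrate_vma(&example_migrate_ops, vma, addr, addr + PAGE_SIZE,
			   &src, &dst, NULL);
}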