/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/memremap.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}
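/*
 * Illustrative caller flow (a condensed sketch, not code from this file;
 * error handling is elided and alloc_target_page is a hypothetical
 * new_page_t callback):
 *
 *	LIST_HEAD(pagelist);
 *	int err;
 *
 *	migrate_prep();
 *	if (!isolate_lru_page(page))
 *		list_add_tail(&page->lru, &pagelist);
 *	err = migrate_pages(&pagelist, alloc_target_page, NULL, 0,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */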
int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct address_space *mapping;

	/*
	 * Avoid burning cycles with pages that are still under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount, preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	/*
	 * Check PageMovable before holding a PG_lock because the page's owner
	 * assumes that nobody touches the PG_lock of a newly allocated page,
	 * so unconditionally grabbing the lock ruins the page owner's side.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;
	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as race against the release of a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mapping = page_mapping(page);
	VM_BUG_ON_PAGE(!mapping, page);

	if (!mapping->a_ops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use the PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	__SetPageIsolated(page);
	unlock_page(page);

	return 0;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return -EBUSY;
}

/* This should be called on a page which is PG_movable */
void putback_movable_page(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	mapping = page_mapping(page);
	mapping->a_ops->putback_page(page);
	__ClearPageIsolated(page);
}
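/*
 * For reference, a non-LRU movable page is wired up by its driver roughly
 * like this (a sketch following Documentation/vm/page_migration; the foo_*
 * names are illustrative, not real drivers):
 *
 *	static const struct address_space_operations foo_aops = {
 *		.isolate_page	= foo_isolate_page,
 *		.migratepage	= foo_migratepage,
 *		.putback_page	= foo_putback_page,
 *	};
 *
 *	...
 *	__SetPageMovable(page, mapping);	(mapping->a_ops == &foo_aops)
 *
 * isolate_movable_page() and putback_movable_page() above call back into
 * these ops.
 */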
/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		/*
		 * We isolated a non-LRU movable page, so here we can use
		 * __PageMovable because an LRU page's mapping cannot have
		 * PAGE_MAPPING_MOVABLE set.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_cache(page), -hpage_nr_pages(page));
			putback_lru_page(page);
		}
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
		unsigned long addr, void *old)
{
	struct page_vma_mapped_walk pvmw = {
		.page = old,
		.vma = vma,
		.address = addr,
		.flags = PVMW_SYNC | PVMW_MIGRATION,
	};
	struct page *new;
	pte_t pte;
	swp_entry_t entry;

	VM_BUG_ON_PAGE(PageTail(page), page);
	while (page_vma_mapped_walk(&pvmw)) {
		if (PageKsm(page))
			new = page;
		else
			new = page - pvmw.page->index +
				linear_page_index(vma, pvmw.address);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		get_page(new);
		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (is_write_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);

		if (unlikely(is_zone_device_page(new)) &&
		    is_device_private_page(new)) {
			entry = make_device_private_entry(new, pte_write(pte));
			pte = swp_entry_to_pte(entry);
		} else
			flush_dcache_page(new);

#ifdef CONFIG_HUGETLB_PAGE
		if (PageHuge(new)) {
			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, vma, new, 0);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
			if (PageAnon(new))
				hugepage_add_anon_rmap(new, vma, pvmw.address);
			else
				page_dup_rmap(new, true);
		} else
#endif
		{
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);

			if (PageAnon(new))
				page_add_anon_rmap(new, vma, pvmw.address, false);
			else
				page_add_file_rmap(new, false);
		}
		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
			mlock_vma_page(new);

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = old,
	};

	if (locked)
		rmap_walk_locked(new, &rwc);
	else
		rmap_walk(new, &rwc);
}
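/*
 * For context: the migration entries removed above are installed by
 * try_to_unmap(..., TTU_MIGRATION), which replaces each mapping pte with
 * a swap-style entry encoding the old page and its writability, roughly
 * (a condensed sketch of the rmap side, not code from this file):
 *
 *	swp_entry_t entry = make_migration_entry(page, pte_write(pteval));
 *	pte_t swp_pte = swp_entry_to_pte(entry);
 *	set_pte_at(mm, address, pvmw.pte, swp_pte);
 *
 * remove_migration_pte() performs the inverse transformation once the
 * migration target is known.
 */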
/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;
	struct page *page;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once radix-tree replacement of page migration started, page_count
	 * *must* be zero. And, we don't want to call wait_on_page_locked()
	 * against a page without get_page().
	 * So, we use get_page_unless_zero() here. Even if it fails, the page
	 * fault will occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}
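/*
 * Typical consumer: the fault path. do_swap_page() detects a migration
 * entry and parks the faulting task here, roughly (condensed sketch, not
 * code from this file):
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (unlikely(non_swap_entry(entry))) {
 *		if (is_migration_entry(entry))
 *			migration_entry_wait(vma->vm_mm, vmf->pmd,
 *					     vmf->address);
 *		...
 *	}
 */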
void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;
	struct page *page;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
	if (!get_page_unless_zero(page))
		goto unlock;
	spin_unlock(ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			get_bh(bh);
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks.
			 */
			struct buffer_head *failed_bh = bh;
			put_bh(failed_bh);
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				put_bh(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	return true;
}
#endif /* CONFIG_BLOCK */

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 *   1 for anonymous pages without a mapping
 *   2 for pages with a mapping
 *   3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count)
{
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = 1 + extra_count;
	void **pslot;

	/*
	 * ZONE_DEVICE pages have 1 refcount always held by their device.
	 *
	 * Note that DAX memory will never reach that point as it does not have
	 * the MEMORY_DEVICE_ALLOW_MIGRATE flag set (see memory_hotplug.h).
	 */
	expected_count += is_zone_device_page(page);

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newpage->index = page->index;
		newpage->mapping = page->mapping;
		if (PageSwapBacked(page))
			__SetPageSwapBacked(newpage);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = page_zone(page);
	newzone = page_zone(newpage);

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count += 1 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * In the async migration case of moving a page with buffers, lock the
	 * buffers using trylock before the mapping is moved. If the mapping
	 * were moved first and we then failed to lock the buffers, we could
	 * not move the mapping back due to the elevated page count, and would
	 * have to block waiting on other references to be dropped.
	 */
	if (mode == MIGRATE_ASYNC && head &&
			!buffer_migrate_lock_buffers(head, mode)) {
		page_ref_unfreeze(page, expected_count);
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page:
	 * no turning back from here.
	 */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	get_page(newpage);	/* add cache reference */
	if (PageSwapBacked(page)) {
		__SetPageSwapBacked(newpage);
		if (PageSwapCache(page)) {
			SetPageSwapCache(newpage);
			set_page_private(newpage, page_private(page));
		}
	} else {
		VM_BUG_ON_PAGE(PageSwapCache(page), page);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = PageDirty(page);
	if (dirty) {
		ClearPageDirty(page);
		SetPageDirty(newpage);
	}

	radix_tree_replace_slot(&mapping->page_tree, pslot, newpage);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_ref_unfreeze(page, expected_count - 1);

	spin_unlock(&mapping->tree_lock);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
		__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
		if (PageSwapBacked(page) && !PageSwapCache(page)) {
			__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
			__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
		}
		if (dirty && mapping_cap_account_dirty(mapping)) {
			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page_move_mapping);

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	newpage->index = page->index;
	newpage->mapping = page->mapping;

	get_page(newpage);

	radix_tree_replace_slot(&mapping->page_tree, pslot, newpage);

	page_ref_unfreeze(page, expected_count - 1);

	spin_unlock_irq(&mapping->tree_lock);

	return MIGRATEPAGE_SUCCESS;
}
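/*
 * Worked example of the reference math used above: a pagecache page with
 * buffer heads reaches migrate_page_move_mapping() with page_count() ==
 * 1 (the caller's isolation reference) + 1 (page cache) + 1 (PagePrivate
 * for the buffers), so expected_count ends up as 3; an anonymous page
 * with no mapping must sit at exactly the caller's single reference.
 * Any extra reference, e.g. from a concurrent get_user_pages(), makes
 * the count check or the freeze fail and migration retries with -EAGAIN.
 */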
/*
 * Gigantic pages are so large that we do not guarantee that page++ pointer
 * arithmetic will work across the entire page. We need something more
 * specialized.
 */
static void __copy_gigantic_page(struct page *dst, struct page *src,
				int nr_pages)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < nr_pages; ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}
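/*
 * mem_map_next() is what makes the gigantic case above safe: with classic
 * CONFIG_SPARSEMEM (no vmemmap) the struct page array is only guaranteed
 * to be virtually contiguous within a MAX_ORDER block, so every step that
 * crosses a MAX_ORDER_NR_PAGES boundary has to go back through
 * pfn_to_page() instead of plain pointer arithmetic.
 */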
static void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	int nr_pages;

	if (PageHuge(src)) {
		/* hugetlbfs page */
		struct hstate *h = page_hstate(src);
		nr_pages = pages_per_huge_page(h);

		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
			__copy_gigantic_page(dst, src, nr_pages);
			return;
		}
	} else {
		/* thp page */
		BUG_ON(!PageTransHuge(src));
		nr_pages = hpage_nr_pages(src);
	}

	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

/*
 * Copy the page to its new location
 */
void migrate_page_states(struct page *newpage, struct page *page)
{
	int cpupid;

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON_PAGE(PageUnevictable(page), page);
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	/* Move dirty on pages not done by migrate_page_move_mapping() */
	if (PageDirty(page))
		SetPageDirty(newpage);

	if (page_is_young(page))
		set_page_young(newpage);
	if (page_is_idle(page))
		set_page_idle(newpage);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(page, -1);
	page_cpupid_xchg_last(newpage, cpupid);

	ksm_migrate_page(newpage, page);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (PageSwapCache(page))
		ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);

	copy_page_owner(page, newpage);

	mem_cgroup_migrate(page, newpage);
}
EXPORT_SYMBOL(migrate_page_states);

void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page) || PageTransHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	migrate_page_states(newpage, page);
}
EXPORT_SYMBOL(migrate_page_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single LRU page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);
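/*
 * Filesystems whose pages carry no fs-private state can use migrate_page()
 * directly as their migratepage callback, e.g. (illustrative a_ops, not
 * from this file):
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 */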
In the sync case, the buffers 778b969c4abSMel Gorman * need to be locked now 779b969c4abSMel Gorman */ 780a6bc32b8SMel Gorman if (mode != MIGRATE_ASYNC) 781a6bc32b8SMel Gorman BUG_ON(!buffer_migrate_lock_buffers(head, mode)); 7821d8b85ccSChristoph Lameter 7831d8b85ccSChristoph Lameter ClearPagePrivate(page); 7841d8b85ccSChristoph Lameter set_page_private(newpage, page_private(page)); 7851d8b85ccSChristoph Lameter set_page_private(page, 0); 7861d8b85ccSChristoph Lameter put_page(page); 7871d8b85ccSChristoph Lameter get_page(newpage); 7881d8b85ccSChristoph Lameter 7891d8b85ccSChristoph Lameter bh = head; 7901d8b85ccSChristoph Lameter do { 7911d8b85ccSChristoph Lameter set_bh_page(bh, newpage, bh_offset(bh)); 7921d8b85ccSChristoph Lameter bh = bh->b_this_page; 7931d8b85ccSChristoph Lameter 7941d8b85ccSChristoph Lameter } while (bh != head); 7951d8b85ccSChristoph Lameter 7961d8b85ccSChristoph Lameter SetPagePrivate(newpage); 7971d8b85ccSChristoph Lameter 7982916ecc0SJérôme Glisse if (mode != MIGRATE_SYNC_NO_COPY) 7991d8b85ccSChristoph Lameter migrate_page_copy(newpage, page); 8002916ecc0SJérôme Glisse else 8012916ecc0SJérôme Glisse migrate_page_states(newpage, page); 8021d8b85ccSChristoph Lameter 8031d8b85ccSChristoph Lameter bh = head; 8041d8b85ccSChristoph Lameter do { 8051d8b85ccSChristoph Lameter unlock_buffer(bh); 8061d8b85ccSChristoph Lameter put_bh(bh); 8071d8b85ccSChristoph Lameter bh = bh->b_this_page; 8081d8b85ccSChristoph Lameter 8091d8b85ccSChristoph Lameter } while (bh != head); 8101d8b85ccSChristoph Lameter 81178bd5209SRafael Aquini return MIGRATEPAGE_SUCCESS; 8121d8b85ccSChristoph Lameter } 8131d8b85ccSChristoph Lameter EXPORT_SYMBOL(buffer_migrate_page); 8149361401eSDavid Howells #endif 8151d8b85ccSChristoph Lameter 81604e62a29SChristoph Lameter /* 81704e62a29SChristoph Lameter * Writeback a page to clean the dirty state 81804e62a29SChristoph Lameter */ 81904e62a29SChristoph Lameter static int writeout(struct address_space *mapping, struct page *page) 82004e62a29SChristoph Lameter { 82104e62a29SChristoph Lameter struct writeback_control wbc = { 82204e62a29SChristoph Lameter .sync_mode = WB_SYNC_NONE, 82304e62a29SChristoph Lameter .nr_to_write = 1, 82404e62a29SChristoph Lameter .range_start = 0, 82504e62a29SChristoph Lameter .range_end = LLONG_MAX, 82604e62a29SChristoph Lameter .for_reclaim = 1 82704e62a29SChristoph Lameter }; 82804e62a29SChristoph Lameter int rc; 82904e62a29SChristoph Lameter 83004e62a29SChristoph Lameter if (!mapping->a_ops->writepage) 83104e62a29SChristoph Lameter /* No write method for the address space */ 83204e62a29SChristoph Lameter return -EINVAL; 83304e62a29SChristoph Lameter 83404e62a29SChristoph Lameter if (!clear_page_dirty_for_io(page)) 83504e62a29SChristoph Lameter /* Someone else already triggered a write */ 83604e62a29SChristoph Lameter return -EAGAIN; 83704e62a29SChristoph Lameter 83804e62a29SChristoph Lameter /* 83904e62a29SChristoph Lameter * A dirty page may imply that the underlying filesystem has 84004e62a29SChristoph Lameter * the page on some queue. So the page must be clean for 84104e62a29SChristoph Lameter * migration. Writeout may mean we loose the lock and the 84204e62a29SChristoph Lameter * page state is no longer what we checked for earlier. 84304e62a29SChristoph Lameter * At this point we know that the migration attempt cannot 84404e62a29SChristoph Lameter * be successful. 84504e62a29SChristoph Lameter */ 846e388466dSKirill A. 
	remove_migration_ptes(page, page, false);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page.
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	mapping = page_mapping(page);

	if (likely(is_lru)) {
		if (!mapping)
			rc = migrate_page(mapping, newpage, page, mode);
		else if (mapping->a_ops->migratepage)
			/*
			 * Most pages have a mapping and most filesystems
			 * provide a migratepage callback. Anonymous pages
			 * are part of swap space which also has its own
			 * migratepage callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migratepage(mapping, newpage,
							page, mode);
		else
			rc = fallback_migrate_page(mapping, newpage,
							page, mode);
	} else {
		/*
		 * In the case of a non-LRU page, it could have been released
		 * after the isolation step. In that case, we shouldn't try
		 * migration.
		 */
		VM_BUG_ON_PAGE(!PageIsolated(page), page);
		if (!PageMovable(page)) {
			rc = MIGRATEPAGE_SUCCESS;
			__ClearPageIsolated(page);
			goto out;
		}

		rc = mapping->a_ops->migratepage(mapping, newpage,
						page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
			!PageIsolated(page));
	}

	/*
	 * When successful, old pagecache page->mapping must be cleared before
	 * page is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(page)) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);

			/*
			 * We clear PG_movable under page_lock so that no
			 * compactor can try to migrate this page.
			 */
			__ClearPageIsolated(page);
		}

		/*
		 * Anonymous and movable page->mapping will be cleared by
		 * free_pages_prepare, so don't reset it here; keeping the
		 * mapping type lets checks such as PageAnon keep working.
		 */
		if (!PageMappingFlags(page))
			page->mapping = NULL;
	}
out:
	return rc;
}
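/*
 * Road map for __unmap_and_move() below: lock the old page (subject to
 * the async/sync rules), wait for writeback when the mode allows
 * sleeping, pin the anon_vma for anonymous pages, lock the new page,
 * replace the old page's ptes with migration entries via try_to_unmap(),
 * move the contents with move_to_new_page(), then turn the migration
 * entries back into ptes pointing at whichever page survived.
 */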
static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(page);

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out_unlock;
		}
		if (!force)
			goto out_unlock;
		wait_on_page_writeback(page);
	}

	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the
	 * end of migration. File cache pages are no problem because of
	 * page_lock(); file caches may use write_page() or lock_page() in
	 * migration, so we only need to care about anon pages here.
	 *
	 * Only page_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (PageAnon(page) && !PageKsm(page))
		anon_vma = page_get_anon_vma(page);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to newpage at this point. We used to have a BUG
	 * here if trylock_page(newpage) fails, but would like to allow for
	 * cases where there might be a race with the previous use of newpage.
	 * This is much like races on the refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!trylock_page(newpage)))
		goto out_unlock;

	if (unlikely(!is_lru)) {
		rc = move_to_new_page(newpage, page, mode);
		goto out_unlock_both;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read in, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON_PAGE(PageAnon(page), page);
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto out_unlock_both;
		}
	} else if (page_mapped(page)) {
		/* Establish migration ptes */
		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
				page);
		try_to_unmap(page,
			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
		page_was_mapped = 1;
	}

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, mode);

	if (page_was_mapped)
		remove_migration_ptes(page,
			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);

out_unlock_both:
	unlock_page(newpage);
out_unlock:
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	unlock_page(page);
out:
	/*
	 * If migration is successful, decrease the refcount of the newpage,
	 * which will not free the page because the new page owner increased
	 * the refcounter. As well, if it is an LRU page, add the page to the
	 * LRU list here.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (unlikely(__PageMovable(newpage)))
			put_page(newpage);
		else
			putback_lru_page(newpage);
	}

	return rc;
}

/*
 * gcc 4.7 and 4.8 on arm get an ICE when inlining unmap_and_move().
 * Work around it.
 */
#if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM)
#define ICE_noinline noinline
#else
#define ICE_noinline
#endif

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static ICE_noinline int unmap_and_move(new_page_t get_new_page,
				   free_page_t put_new_page,
				   unsigned long private, struct page *page,
				   int force, enum migrate_mode mode,
				   enum migrate_reason reason)
{
	int rc = MIGRATEPAGE_SUCCESS;
	int *result = NULL;
	struct page *newpage;

	newpage = get_new_page(page, private, &result);
	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		ClearPageActive(page);
		ClearPageUnevictable(page);
		if (unlikely(__PageMovable(page))) {
			lock_page(page);
			if (!PageMovable(page))
				__ClearPageIsolated(page);
			unlock_page(page);
		}
		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
		goto out;
	}

	if (unlikely(PageTransHuge(page) && !PageTransHuge(newpage))) {
11584d2fa965SKirill A. Shutemov 		lock_page(page);
11594d2fa965SKirill A. Shutemov 		rc = split_huge_page(page);
11604d2fa965SKirill A. Shutemov 		unlock_page(page);
11614d2fa965SKirill A. Shutemov 		if (rc)
11620dabec93SMinchan Kim 			goto out;
11634d2fa965SKirill A. Shutemov 	}
11640dabec93SMinchan Kim 
11659c620e2bSHugh Dickins 	rc = __unmap_and_move(page, newpage, force, mode);
1166c6c919ebSMinchan Kim 	if (rc == MIGRATEPAGE_SUCCESS)
11677cd12b4aSVlastimil Babka 		set_page_owner_migrate_reason(newpage, reason);
1168bf6bddf1SRafael Aquini 
11690dabec93SMinchan Kim out:
1170e24f0b8fSChristoph Lameter 	if (rc != -EAGAIN) {
1171aaa994b3SChristoph Lameter 		/*
1172aaa994b3SChristoph Lameter 		 * A page that has been migrated has all references
1173aaa994b3SChristoph Lameter 		 * removed and will be freed. A page that has not been
1174aaa994b3SChristoph Lameter 		 * migrated will have kept its references and be
1175aaa994b3SChristoph Lameter 		 * restored.
1176aaa994b3SChristoph Lameter 		 */
1177aaa994b3SChristoph Lameter 		list_del(&page->lru);
11786afcf8efSMing Ling 
11796afcf8efSMing Ling 		/*
11806afcf8efSMing Ling 		 * Compaction can also migrate non-LRU pages which are
11816afcf8efSMing Ling 		 * not accounted to NR_ISOLATED_*. They can be recognized
11826afcf8efSMing Ling 		 * as __PageMovable.
11836afcf8efSMing Ling 		 */
11846afcf8efSMing Ling 		if (likely(!__PageMovable(page)))
1185e8db67ebSNaoya Horiguchi 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
1186e8db67ebSNaoya Horiguchi 					page_is_file_cache(page), -hpage_nr_pages(page));
1187e24f0b8fSChristoph Lameter 	}
118868711a74SDavid Rientjes 
118995a402c3SChristoph Lameter 	/*
1190c6c919ebSMinchan Kim 	 * If migration is successful, release the reference grabbed during
1191c6c919ebSMinchan Kim 	 * isolation. Otherwise, restore the page to the right list unless
1192c6c919ebSMinchan Kim 	 * we want to retry.
119395a402c3SChristoph Lameter 	 */
1194c6c919ebSMinchan Kim 	if (rc == MIGRATEPAGE_SUCCESS) {
1195c6c919ebSMinchan Kim 		put_page(page);
1196c6c919ebSMinchan Kim 		if (reason == MR_MEMORY_FAILURE) {
1197c6c919ebSMinchan Kim 			/*
1198c6c919ebSMinchan Kim 			 * Set PG_HWPoison on the just freed page
1199c6c919ebSMinchan Kim 			 * intentionally. Although it's rather weird,
1200c6c919ebSMinchan Kim 			 * it's how the HWPoison flag works at the moment.
1201c6c919ebSMinchan Kim 			 */
1202c6c919ebSMinchan Kim 			if (!test_set_page_hwpoison(page))
1203c6c919ebSMinchan Kim 				num_poisoned_pages_inc();
1204c6c919ebSMinchan Kim 		}
1205c6c919ebSMinchan Kim 	} else {
1206bda807d4SMinchan Kim 		if (rc != -EAGAIN) {
1207bda807d4SMinchan Kim 			if (likely(!__PageMovable(page))) {
1208c6c919ebSMinchan Kim 				putback_lru_page(page);
1209bda807d4SMinchan Kim 				goto put_new;
1210bda807d4SMinchan Kim 			}
1211bda807d4SMinchan Kim 
1212bda807d4SMinchan Kim 			lock_page(page);
1213bda807d4SMinchan Kim 			if (PageMovable(page))
1214bda807d4SMinchan Kim 				putback_movable_page(page);
1215bda807d4SMinchan Kim 			else
1216bda807d4SMinchan Kim 				__ClearPageIsolated(page);
1217bda807d4SMinchan Kim 			unlock_page(page);
1218bda807d4SMinchan Kim 			put_page(page);
1219bda807d4SMinchan Kim 		}
1220bda807d4SMinchan Kim put_new:
1221cf4b769aSHugh Dickins 		if (put_new_page)
122268711a74SDavid Rientjes 			put_new_page(newpage, private);
1223c6c919ebSMinchan Kim 		else
1224d6d86c0aSKonstantin Khlebnikov 			put_page(newpage);
1225c6c919ebSMinchan Kim 	}
122668711a74SDavid Rientjes 
1227742755a1SChristoph Lameter 	if (result) {
1228742755a1SChristoph Lameter 		if (rc)
1229742755a1SChristoph Lameter 			*result = rc;
1230742755a1SChristoph Lameter 		else
1231742755a1SChristoph Lameter 			*result = page_to_nid(newpage);
1232742755a1SChristoph Lameter 	}
1233e24f0b8fSChristoph Lameter 	return rc;
1234e24f0b8fSChristoph Lameter }
1235b20a3503SChristoph Lameter 
1236e24f0b8fSChristoph Lameter /*
1237290408d4SNaoya Horiguchi  * Counterpart of unmap_and_move() for hugepage migration.
1238290408d4SNaoya Horiguchi  *
1239290408d4SNaoya Horiguchi  * This function doesn't wait for the completion of hugepage I/O
1240290408d4SNaoya Horiguchi  * because there is no race between I/O and migration for hugepage.
1241290408d4SNaoya Horiguchi  * Note that currently hugepage I/O occurs only in direct I/O
1242290408d4SNaoya Horiguchi  * where no lock is held and PG_writeback is irrelevant,
1243290408d4SNaoya Horiguchi  * and the writeback status of all subpages is counted in the reference
1244290408d4SNaoya Horiguchi  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1245290408d4SNaoya Horiguchi  * under direct I/O, the reference count of the head page is 512 and a bit more.)
1246290408d4SNaoya Horiguchi  * This means that when we try to migrate a hugepage whose subpages are
1247290408d4SNaoya Horiguchi  * doing direct I/O, some references remain after try_to_unmap() and
1248290408d4SNaoya Horiguchi  * hugepage migration fails without data corruption.
1249290408d4SNaoya Horiguchi  *
1250290408d4SNaoya Horiguchi  * There is also no race when direct I/O is issued on a page under migration,
1251290408d4SNaoya Horiguchi  * because then the pte is replaced with a migration swap entry and direct I/O
1252290408d4SNaoya Horiguchi  * code will wait in the page fault for migration to complete.
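 *
 * To make the arithmetic above concrete (a sketch, not measured data):
 * with all 512 subpages of a 2MB hugepage under direct I/O, the head
 * page still carries those ~512 I/O references after try_to_unmap(),
 * so the expected-count check in the move step can never pass; the
 * attempt fails cleanly (and may be retried) instead of migrating a
 * page with I/O still in flight.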
1253290408d4SNaoya Horiguchi */ 1254290408d4SNaoya Horiguchi static int unmap_and_move_huge_page(new_page_t get_new_page, 125568711a74SDavid Rientjes free_page_t put_new_page, unsigned long private, 125668711a74SDavid Rientjes struct page *hpage, int force, 12577cd12b4aSVlastimil Babka enum migrate_mode mode, int reason) 1258290408d4SNaoya Horiguchi { 12592def7424SHugh Dickins int rc = -EAGAIN; 1260290408d4SNaoya Horiguchi int *result = NULL; 12612ebba6b7SHugh Dickins int page_was_mapped = 0; 126232665f2bSJoonsoo Kim struct page *new_hpage; 1263290408d4SNaoya Horiguchi struct anon_vma *anon_vma = NULL; 1264290408d4SNaoya Horiguchi 126583467efbSNaoya Horiguchi /* 126683467efbSNaoya Horiguchi * Movability of hugepages depends on architectures and hugepage size. 126783467efbSNaoya Horiguchi * This check is necessary because some callers of hugepage migration 126883467efbSNaoya Horiguchi * like soft offline and memory hotremove don't walk through page 126983467efbSNaoya Horiguchi * tables or check whether the hugepage is pmd-based or not before 127083467efbSNaoya Horiguchi * kicking migration. 127183467efbSNaoya Horiguchi */ 1272100873d7SNaoya Horiguchi if (!hugepage_migration_supported(page_hstate(hpage))) { 127332665f2bSJoonsoo Kim putback_active_hugepage(hpage); 127483467efbSNaoya Horiguchi return -ENOSYS; 127532665f2bSJoonsoo Kim } 127683467efbSNaoya Horiguchi 127732665f2bSJoonsoo Kim new_hpage = get_new_page(hpage, private, &result); 1278290408d4SNaoya Horiguchi if (!new_hpage) 1279290408d4SNaoya Horiguchi return -ENOMEM; 1280290408d4SNaoya Horiguchi 1281290408d4SNaoya Horiguchi if (!trylock_page(hpage)) { 12822916ecc0SJérôme Glisse if (!force) 1283290408d4SNaoya Horiguchi goto out; 12842916ecc0SJérôme Glisse switch (mode) { 12852916ecc0SJérôme Glisse case MIGRATE_SYNC: 12862916ecc0SJérôme Glisse case MIGRATE_SYNC_NO_COPY: 12872916ecc0SJérôme Glisse break; 12882916ecc0SJérôme Glisse default: 12892916ecc0SJérôme Glisse goto out; 12902916ecc0SJérôme Glisse } 1291290408d4SNaoya Horiguchi lock_page(hpage); 1292290408d4SNaoya Horiguchi } 1293290408d4SNaoya Horiguchi 1294746b18d4SPeter Zijlstra if (PageAnon(hpage)) 1295746b18d4SPeter Zijlstra anon_vma = page_get_anon_vma(hpage); 1296290408d4SNaoya Horiguchi 12977db7671fSHugh Dickins if (unlikely(!trylock_page(new_hpage))) 12987db7671fSHugh Dickins goto put_anon; 12997db7671fSHugh Dickins 13002ebba6b7SHugh Dickins if (page_mapped(hpage)) { 13012ebba6b7SHugh Dickins try_to_unmap(hpage, 13022ebba6b7SHugh Dickins TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); 13032ebba6b7SHugh Dickins page_was_mapped = 1; 13042ebba6b7SHugh Dickins } 1305290408d4SNaoya Horiguchi 1306290408d4SNaoya Horiguchi if (!page_mapped(hpage)) 13075c3f9a67SHugh Dickins rc = move_to_new_page(new_hpage, hpage, mode); 1308290408d4SNaoya Horiguchi 13095c3f9a67SHugh Dickins if (page_was_mapped) 13105c3f9a67SHugh Dickins remove_migration_ptes(hpage, 1311e388466dSKirill A. Shutemov rc == MIGRATEPAGE_SUCCESS ? 
new_hpage : hpage, false); 1312290408d4SNaoya Horiguchi 13137db7671fSHugh Dickins unlock_page(new_hpage); 13147db7671fSHugh Dickins 13157db7671fSHugh Dickins put_anon: 1316fd4a4663SHugh Dickins if (anon_vma) 13179e60109fSPeter Zijlstra put_anon_vma(anon_vma); 13188e6ac7faSAneesh Kumar K.V 13192def7424SHugh Dickins if (rc == MIGRATEPAGE_SUCCESS) { 13208e6ac7faSAneesh Kumar K.V hugetlb_cgroup_migrate(hpage, new_hpage); 13212def7424SHugh Dickins put_new_page = NULL; 13227cd12b4aSVlastimil Babka set_page_owner_migrate_reason(new_hpage, reason); 13232def7424SHugh Dickins } 13248e6ac7faSAneesh Kumar K.V 1325290408d4SNaoya Horiguchi unlock_page(hpage); 132609761333SHillf Danton out: 1327b8ec1ceeSNaoya Horiguchi if (rc != -EAGAIN) 1328b8ec1ceeSNaoya Horiguchi putback_active_hugepage(hpage); 1329c3114a84SAnshuman Khandual if (reason == MR_MEMORY_FAILURE && !test_set_page_hwpoison(hpage)) 1330c3114a84SAnshuman Khandual num_poisoned_pages_inc(); 133168711a74SDavid Rientjes 133268711a74SDavid Rientjes /* 133368711a74SDavid Rientjes * If migration was not successful and there's a freeing callback, use 133468711a74SDavid Rientjes * it. Otherwise, put_page() will drop the reference grabbed during 133568711a74SDavid Rientjes * isolation. 133668711a74SDavid Rientjes */ 13372def7424SHugh Dickins if (put_new_page) 133868711a74SDavid Rientjes put_new_page(new_hpage, private); 133968711a74SDavid Rientjes else 13403aaa76e1SNaoya Horiguchi putback_active_hugepage(new_hpage); 134168711a74SDavid Rientjes 1342290408d4SNaoya Horiguchi if (result) { 1343290408d4SNaoya Horiguchi if (rc) 1344290408d4SNaoya Horiguchi *result = rc; 1345290408d4SNaoya Horiguchi else 1346290408d4SNaoya Horiguchi *result = page_to_nid(new_hpage); 1347290408d4SNaoya Horiguchi } 1348290408d4SNaoya Horiguchi return rc; 1349290408d4SNaoya Horiguchi } 1350290408d4SNaoya Horiguchi 1351290408d4SNaoya Horiguchi /* 1352c73e5c9cSSrivatsa S. Bhat * migrate_pages - migrate the pages specified in a list, to the free pages 1353c73e5c9cSSrivatsa S. Bhat * supplied as the target for the page migration 1354e24f0b8fSChristoph Lameter * 1355c73e5c9cSSrivatsa S. Bhat * @from: The list of pages to be migrated. 1356c73e5c9cSSrivatsa S. Bhat * @get_new_page: The function used to allocate free pages to be used 1357c73e5c9cSSrivatsa S. Bhat * as the target of the page migration. 135868711a74SDavid Rientjes * @put_new_page: The function used to free target pages if migration 135968711a74SDavid Rientjes * fails, or NULL if no special handling is necessary. 1360c73e5c9cSSrivatsa S. Bhat * @private: Private data to be passed on to get_new_page() 1361c73e5c9cSSrivatsa S. Bhat * @mode: The migration mode that specifies the constraints for 1362c73e5c9cSSrivatsa S. Bhat * page migration, if any. 1363c73e5c9cSSrivatsa S. Bhat * @reason: The reason for page migration. 1364e24f0b8fSChristoph Lameter * 1365c73e5c9cSSrivatsa S. Bhat * The function returns after 10 attempts or if no pages are movable any more 1366c73e5c9cSSrivatsa S. Bhat * because the list has become empty or no retryable pages exist any more. 136714e0f9bcSHugh Dickins * The caller should call putback_movable_pages() to return pages to the LRU 136828bd6578SMinchan Kim * or free list only if ret != 0. 1369e24f0b8fSChristoph Lameter * 1370c73e5c9cSSrivatsa S. Bhat * Returns the number of pages that were not migrated, or an error code. 
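 *
 * A typical call sequence (a sketch mirroring do_move_page_to_node_array()
 * further down this file; isolation failures and error handling elided):
 *
 *	LIST_HEAD(pagelist);
 *
 *	if (!isolate_lru_page(page))
 *		list_add_tail(&page->lru, &pagelist);
 *	err = migrate_pages(&pagelist, new_page_node, NULL,
 *			(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);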
1371e24f0b8fSChristoph Lameter */ 13729c620e2bSHugh Dickins int migrate_pages(struct list_head *from, new_page_t get_new_page, 137368711a74SDavid Rientjes free_page_t put_new_page, unsigned long private, 137468711a74SDavid Rientjes enum migrate_mode mode, int reason) 1375e24f0b8fSChristoph Lameter { 1376e24f0b8fSChristoph Lameter int retry = 1; 1377e24f0b8fSChristoph Lameter int nr_failed = 0; 13785647bc29SMel Gorman int nr_succeeded = 0; 1379e24f0b8fSChristoph Lameter int pass = 0; 1380e24f0b8fSChristoph Lameter struct page *page; 1381e24f0b8fSChristoph Lameter struct page *page2; 1382e24f0b8fSChristoph Lameter int swapwrite = current->flags & PF_SWAPWRITE; 1383e24f0b8fSChristoph Lameter int rc; 13842d1db3b1SChristoph Lameter 1385e24f0b8fSChristoph Lameter if (!swapwrite) 1386e24f0b8fSChristoph Lameter current->flags |= PF_SWAPWRITE; 1387e24f0b8fSChristoph Lameter 1388e24f0b8fSChristoph Lameter for(pass = 0; pass < 10 && retry; pass++) { 1389e24f0b8fSChristoph Lameter retry = 0; 1390e24f0b8fSChristoph Lameter 1391e24f0b8fSChristoph Lameter list_for_each_entry_safe(page, page2, from, lru) { 1392e24f0b8fSChristoph Lameter cond_resched(); 1393e24f0b8fSChristoph Lameter 139431caf665SNaoya Horiguchi if (PageHuge(page)) 139531caf665SNaoya Horiguchi rc = unmap_and_move_huge_page(get_new_page, 139668711a74SDavid Rientjes put_new_page, private, page, 13977cd12b4aSVlastimil Babka pass > 2, mode, reason); 139831caf665SNaoya Horiguchi else 139968711a74SDavid Rientjes rc = unmap_and_move(get_new_page, put_new_page, 1400add05cecSNaoya Horiguchi private, page, pass > 2, mode, 1401add05cecSNaoya Horiguchi reason); 1402e24f0b8fSChristoph Lameter 1403e24f0b8fSChristoph Lameter switch(rc) { 140495a402c3SChristoph Lameter case -ENOMEM: 1405dfef2ef4SDavid Rientjes nr_failed++; 140695a402c3SChristoph Lameter goto out; 1407e24f0b8fSChristoph Lameter case -EAGAIN: 1408b20a3503SChristoph Lameter retry++; 1409e24f0b8fSChristoph Lameter break; 141078bd5209SRafael Aquini case MIGRATEPAGE_SUCCESS: 14115647bc29SMel Gorman nr_succeeded++; 1412e24f0b8fSChristoph Lameter break; 1413e24f0b8fSChristoph Lameter default: 1414354a3363SNaoya Horiguchi /* 1415354a3363SNaoya Horiguchi * Permanent failure (-EBUSY, -ENOSYS, etc.): 1416354a3363SNaoya Horiguchi * unlike -EAGAIN case, the failed page is 1417354a3363SNaoya Horiguchi * removed from migration page list and not 1418354a3363SNaoya Horiguchi * retried in the next outer loop. 
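 *
 * Put differently: when the passes finish, rc is 0 if everything
 * migrated, otherwise the number of pages left behind; a -ENOMEM from
 * a target allocation is returned as-is, since that error path jumps
 * straight to the out: label before rc is overwritten with nr_failed.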
1419354a3363SNaoya Horiguchi */ 1420b20a3503SChristoph Lameter nr_failed++; 1421e24f0b8fSChristoph Lameter break; 1422b20a3503SChristoph Lameter } 1423b20a3503SChristoph Lameter } 1424e24f0b8fSChristoph Lameter } 1425f2f81fb2SVlastimil Babka nr_failed += retry; 1426f2f81fb2SVlastimil Babka rc = nr_failed; 142795a402c3SChristoph Lameter out: 14285647bc29SMel Gorman if (nr_succeeded) 14295647bc29SMel Gorman count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded); 14305647bc29SMel Gorman if (nr_failed) 14315647bc29SMel Gorman count_vm_events(PGMIGRATE_FAIL, nr_failed); 14327b2a2d4aSMel Gorman trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason); 14337b2a2d4aSMel Gorman 1434b20a3503SChristoph Lameter if (!swapwrite) 1435b20a3503SChristoph Lameter current->flags &= ~PF_SWAPWRITE; 1436b20a3503SChristoph Lameter 143795a402c3SChristoph Lameter return rc; 1438b20a3503SChristoph Lameter } 1439b20a3503SChristoph Lameter 1440742755a1SChristoph Lameter #ifdef CONFIG_NUMA 1441742755a1SChristoph Lameter /* 1442742755a1SChristoph Lameter * Move a list of individual pages 1443742755a1SChristoph Lameter */ 1444742755a1SChristoph Lameter struct page_to_node { 1445742755a1SChristoph Lameter unsigned long addr; 1446742755a1SChristoph Lameter struct page *page; 1447742755a1SChristoph Lameter int node; 1448742755a1SChristoph Lameter int status; 1449742755a1SChristoph Lameter }; 1450742755a1SChristoph Lameter 1451742755a1SChristoph Lameter static struct page *new_page_node(struct page *p, unsigned long private, 1452742755a1SChristoph Lameter int **result) 1453742755a1SChristoph Lameter { 1454742755a1SChristoph Lameter struct page_to_node *pm = (struct page_to_node *)private; 1455742755a1SChristoph Lameter 1456742755a1SChristoph Lameter while (pm->node != MAX_NUMNODES && pm->page != p) 1457742755a1SChristoph Lameter pm++; 1458742755a1SChristoph Lameter 1459742755a1SChristoph Lameter if (pm->node == MAX_NUMNODES) 1460742755a1SChristoph Lameter return NULL; 1461742755a1SChristoph Lameter 1462742755a1SChristoph Lameter *result = &pm->status; 1463742755a1SChristoph Lameter 1464e632a938SNaoya Horiguchi if (PageHuge(p)) 1465e632a938SNaoya Horiguchi return alloc_huge_page_node(page_hstate(compound_head(p)), 1466e632a938SNaoya Horiguchi pm->node); 1467e8db67ebSNaoya Horiguchi else if (thp_migration_supported() && PageTransHuge(p)) { 1468e8db67ebSNaoya Horiguchi struct page *thp; 1469e8db67ebSNaoya Horiguchi 1470e8db67ebSNaoya Horiguchi thp = alloc_pages_node(pm->node, 1471e8db67ebSNaoya Horiguchi (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM, 1472e8db67ebSNaoya Horiguchi HPAGE_PMD_ORDER); 1473e8db67ebSNaoya Horiguchi if (!thp) 1474e8db67ebSNaoya Horiguchi return NULL; 1475e8db67ebSNaoya Horiguchi prep_transhuge_page(thp); 1476e8db67ebSNaoya Horiguchi return thp; 1477e8db67ebSNaoya Horiguchi } else 147896db800fSVlastimil Babka return __alloc_pages_node(pm->node, 1479e97ca8e5SJohannes Weiner GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0); 1480742755a1SChristoph Lameter } 1481742755a1SChristoph Lameter 1482742755a1SChristoph Lameter /* 1483742755a1SChristoph Lameter * Move a set of pages as indicated in the pm array. The addr 1484742755a1SChristoph Lameter * field must be set to the virtual address of the page to be moved 1485742755a1SChristoph Lameter * and the node number must contain a valid target node. 14865e9a0f02SBrice Goglin * The pm array ends with node = MAX_NUMNODES. 
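 *
 * As a concrete sketch (the addresses are made up for illustration), a
 * two-page request is laid out as:
 *
 *	pm[0] = { .addr = 0x7f0000001000, .node = 1 }
 *	pm[1] = { .addr = 0x7f0000002000, .node = 0 }
 *	pm[2] = { .node = MAX_NUMNODES }	(end marker)
 *
 * and pm[i].status is filled with the per-page outcome: the node the
 * page ended up on, or a negative errno.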
1487742755a1SChristoph Lameter */ 14885e9a0f02SBrice Goglin static int do_move_page_to_node_array(struct mm_struct *mm, 14895e9a0f02SBrice Goglin struct page_to_node *pm, 1490742755a1SChristoph Lameter int migrate_all) 1491742755a1SChristoph Lameter { 1492742755a1SChristoph Lameter int err; 1493742755a1SChristoph Lameter struct page_to_node *pp; 1494742755a1SChristoph Lameter LIST_HEAD(pagelist); 1495742755a1SChristoph Lameter 1496742755a1SChristoph Lameter down_read(&mm->mmap_sem); 1497742755a1SChristoph Lameter 1498742755a1SChristoph Lameter /* 1499742755a1SChristoph Lameter * Build a list of pages to migrate 1500742755a1SChristoph Lameter */ 1501742755a1SChristoph Lameter for (pp = pm; pp->node != MAX_NUMNODES; pp++) { 1502742755a1SChristoph Lameter struct vm_area_struct *vma; 1503742755a1SChristoph Lameter struct page *page; 1504e8db67ebSNaoya Horiguchi struct page *head; 1505e8db67ebSNaoya Horiguchi unsigned int follflags; 1506742755a1SChristoph Lameter 1507742755a1SChristoph Lameter err = -EFAULT; 1508742755a1SChristoph Lameter vma = find_vma(mm, pp->addr); 150970384dc6SGleb Natapov if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma)) 1510742755a1SChristoph Lameter goto set_status; 1511742755a1SChristoph Lameter 1512d899844eSKirill A. Shutemov /* FOLL_DUMP to ignore special (like zero) pages */ 1513e8db67ebSNaoya Horiguchi follflags = FOLL_GET | FOLL_DUMP; 1514e8db67ebSNaoya Horiguchi if (!thp_migration_supported()) 1515e8db67ebSNaoya Horiguchi follflags |= FOLL_SPLIT; 1516e8db67ebSNaoya Horiguchi page = follow_page(vma, pp->addr, follflags); 151789f5b7daSLinus Torvalds 151889f5b7daSLinus Torvalds err = PTR_ERR(page); 151989f5b7daSLinus Torvalds if (IS_ERR(page)) 152089f5b7daSLinus Torvalds goto set_status; 152189f5b7daSLinus Torvalds 1522742755a1SChristoph Lameter err = -ENOENT; 1523742755a1SChristoph Lameter if (!page) 1524742755a1SChristoph Lameter goto set_status; 1525742755a1SChristoph Lameter 1526742755a1SChristoph Lameter err = page_to_nid(page); 1527742755a1SChristoph Lameter 1528742755a1SChristoph Lameter if (err == pp->node) 1529742755a1SChristoph Lameter /* 1530742755a1SChristoph Lameter * Node already in the right place 1531742755a1SChristoph Lameter */ 1532742755a1SChristoph Lameter goto put_and_set; 1533742755a1SChristoph Lameter 1534742755a1SChristoph Lameter err = -EACCES; 1535742755a1SChristoph Lameter if (page_mapcount(page) > 1 && 1536742755a1SChristoph Lameter !migrate_all) 1537742755a1SChristoph Lameter goto put_and_set; 1538742755a1SChristoph Lameter 1539e632a938SNaoya Horiguchi if (PageHuge(page)) { 1540e8db67ebSNaoya Horiguchi if (PageHead(page)) { 1541e632a938SNaoya Horiguchi isolate_huge_page(page, &pagelist); 1542e8db67ebSNaoya Horiguchi err = 0; 1543e8db67ebSNaoya Horiguchi pp->page = page; 1544e8db67ebSNaoya Horiguchi } 1545e632a938SNaoya Horiguchi goto put_and_set; 1546e632a938SNaoya Horiguchi } 1547e632a938SNaoya Horiguchi 1548e8db67ebSNaoya Horiguchi pp->page = compound_head(page); 1549e8db67ebSNaoya Horiguchi head = compound_head(page); 1550e8db67ebSNaoya Horiguchi err = isolate_lru_page(head); 15516d9c285aSKOSAKI Motohiro if (!err) { 1552e8db67ebSNaoya Horiguchi list_add_tail(&head->lru, &pagelist); 1553e8db67ebSNaoya Horiguchi mod_node_page_state(page_pgdat(head), 1554e8db67ebSNaoya Horiguchi NR_ISOLATED_ANON + page_is_file_cache(head), 1555e8db67ebSNaoya Horiguchi hpage_nr_pages(head)); 15566d9c285aSKOSAKI Motohiro } 1557742755a1SChristoph Lameter put_and_set: 1558742755a1SChristoph Lameter /* 1559742755a1SChristoph Lameter * Either remove 
the duplicate refcount from 1560742755a1SChristoph Lameter * isolate_lru_page() or drop the page ref if it was 1561742755a1SChristoph Lameter * not isolated. 1562742755a1SChristoph Lameter */ 1563742755a1SChristoph Lameter put_page(page); 1564742755a1SChristoph Lameter set_status: 1565742755a1SChristoph Lameter pp->status = err; 1566742755a1SChristoph Lameter } 1567742755a1SChristoph Lameter 1568e78bbfa8SBrice Goglin err = 0; 1569cf608ac1SMinchan Kim if (!list_empty(&pagelist)) { 157068711a74SDavid Rientjes err = migrate_pages(&pagelist, new_page_node, NULL, 15719c620e2bSHugh Dickins (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL); 1572cf608ac1SMinchan Kim if (err) 1573e632a938SNaoya Horiguchi putback_movable_pages(&pagelist); 1574cf608ac1SMinchan Kim } 1575742755a1SChristoph Lameter 1576742755a1SChristoph Lameter up_read(&mm->mmap_sem); 1577742755a1SChristoph Lameter return err; 1578742755a1SChristoph Lameter } 1579742755a1SChristoph Lameter 1580742755a1SChristoph Lameter /* 15815e9a0f02SBrice Goglin * Migrate an array of page address onto an array of nodes and fill 15825e9a0f02SBrice Goglin * the corresponding array of status. 15835e9a0f02SBrice Goglin */ 15843268c63eSChristoph Lameter static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, 15855e9a0f02SBrice Goglin unsigned long nr_pages, 15865e9a0f02SBrice Goglin const void __user * __user *pages, 15875e9a0f02SBrice Goglin const int __user *nodes, 15885e9a0f02SBrice Goglin int __user *status, int flags) 15895e9a0f02SBrice Goglin { 15903140a227SBrice Goglin struct page_to_node *pm; 15913140a227SBrice Goglin unsigned long chunk_nr_pages; 15923140a227SBrice Goglin unsigned long chunk_start; 15933140a227SBrice Goglin int err; 15945e9a0f02SBrice Goglin 15955e9a0f02SBrice Goglin err = -ENOMEM; 15963140a227SBrice Goglin pm = (struct page_to_node *)__get_free_page(GFP_KERNEL); 15973140a227SBrice Goglin if (!pm) 15985e9a0f02SBrice Goglin goto out; 159935282a2dSBrice Goglin 160035282a2dSBrice Goglin migrate_prep(); 160135282a2dSBrice Goglin 16025e9a0f02SBrice Goglin /* 16033140a227SBrice Goglin * Store a chunk of page_to_node array in a page, 16043140a227SBrice Goglin * but keep the last one as a marker 16055e9a0f02SBrice Goglin */ 16063140a227SBrice Goglin chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1; 16073140a227SBrice Goglin 16083140a227SBrice Goglin for (chunk_start = 0; 16093140a227SBrice Goglin chunk_start < nr_pages; 16103140a227SBrice Goglin chunk_start += chunk_nr_pages) { 16113140a227SBrice Goglin int j; 16123140a227SBrice Goglin 16133140a227SBrice Goglin if (chunk_start + chunk_nr_pages > nr_pages) 16143140a227SBrice Goglin chunk_nr_pages = nr_pages - chunk_start; 16153140a227SBrice Goglin 16163140a227SBrice Goglin /* fill the chunk pm with addrs and nodes from user-space */ 16173140a227SBrice Goglin for (j = 0; j < chunk_nr_pages; j++) { 16185e9a0f02SBrice Goglin const void __user *p; 16195e9a0f02SBrice Goglin int node; 16205e9a0f02SBrice Goglin 16213140a227SBrice Goglin err = -EFAULT; 16223140a227SBrice Goglin if (get_user(p, pages + j + chunk_start)) 16233140a227SBrice Goglin goto out_pm; 16243140a227SBrice Goglin pm[j].addr = (unsigned long) p; 16253140a227SBrice Goglin 16263140a227SBrice Goglin if (get_user(node, nodes + j + chunk_start)) 16275e9a0f02SBrice Goglin goto out_pm; 16285e9a0f02SBrice Goglin 16295e9a0f02SBrice Goglin err = -ENODEV; 16306f5a55f1SLinus Torvalds if (node < 0 || node >= MAX_NUMNODES) 16316f5a55f1SLinus Torvalds goto out_pm; 16326f5a55f1SLinus Torvalds 1633389162c2SLai Jiangshan 
if (!node_state(node, N_MEMORY)) 16345e9a0f02SBrice Goglin goto out_pm; 16355e9a0f02SBrice Goglin 16365e9a0f02SBrice Goglin err = -EACCES; 16375e9a0f02SBrice Goglin if (!node_isset(node, task_nodes)) 16385e9a0f02SBrice Goglin goto out_pm; 16395e9a0f02SBrice Goglin 16403140a227SBrice Goglin pm[j].node = node; 16415e9a0f02SBrice Goglin } 16425e9a0f02SBrice Goglin 16433140a227SBrice Goglin /* End marker for this chunk */ 16443140a227SBrice Goglin pm[chunk_nr_pages].node = MAX_NUMNODES; 16453140a227SBrice Goglin 16463140a227SBrice Goglin /* Migrate this chunk */ 16473140a227SBrice Goglin err = do_move_page_to_node_array(mm, pm, 16483140a227SBrice Goglin flags & MPOL_MF_MOVE_ALL); 16493140a227SBrice Goglin if (err < 0) 16503140a227SBrice Goglin goto out_pm; 16513140a227SBrice Goglin 16525e9a0f02SBrice Goglin /* Return status information */ 16533140a227SBrice Goglin for (j = 0; j < chunk_nr_pages; j++) 16543140a227SBrice Goglin if (put_user(pm[j].status, status + j + chunk_start)) { 16555e9a0f02SBrice Goglin err = -EFAULT; 16563140a227SBrice Goglin goto out_pm; 16573140a227SBrice Goglin } 16583140a227SBrice Goglin } 16593140a227SBrice Goglin err = 0; 16605e9a0f02SBrice Goglin 16615e9a0f02SBrice Goglin out_pm: 16623140a227SBrice Goglin free_page((unsigned long)pm); 16635e9a0f02SBrice Goglin out: 16645e9a0f02SBrice Goglin return err; 16655e9a0f02SBrice Goglin } 16665e9a0f02SBrice Goglin 16675e9a0f02SBrice Goglin /* 16682f007e74SBrice Goglin * Determine the nodes of an array of pages and store it in an array of status. 1669742755a1SChristoph Lameter */ 167080bba129SBrice Goglin static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, 167180bba129SBrice Goglin const void __user **pages, int *status) 1672742755a1SChristoph Lameter { 16732f007e74SBrice Goglin unsigned long i; 1674742755a1SChristoph Lameter 16752f007e74SBrice Goglin down_read(&mm->mmap_sem); 16762f007e74SBrice Goglin 16772f007e74SBrice Goglin for (i = 0; i < nr_pages; i++) { 167880bba129SBrice Goglin unsigned long addr = (unsigned long)(*pages); 16792f007e74SBrice Goglin struct vm_area_struct *vma; 16802f007e74SBrice Goglin struct page *page; 1681c095adbcSKOSAKI Motohiro int err = -EFAULT; 16822f007e74SBrice Goglin 16832f007e74SBrice Goglin vma = find_vma(mm, addr); 168470384dc6SGleb Natapov if (!vma || addr < vma->vm_start) 1685742755a1SChristoph Lameter goto set_status; 1686742755a1SChristoph Lameter 1687d899844eSKirill A. Shutemov /* FOLL_DUMP to ignore special (like zero) pages */ 1688d899844eSKirill A. Shutemov page = follow_page(vma, addr, FOLL_DUMP); 168989f5b7daSLinus Torvalds 169089f5b7daSLinus Torvalds err = PTR_ERR(page); 169189f5b7daSLinus Torvalds if (IS_ERR(page)) 169289f5b7daSLinus Torvalds goto set_status; 169389f5b7daSLinus Torvalds 1694d899844eSKirill A. Shutemov err = page ? page_to_nid(page) : -ENOENT; 1695742755a1SChristoph Lameter set_status: 169680bba129SBrice Goglin *status = err; 169780bba129SBrice Goglin 169880bba129SBrice Goglin pages++; 169980bba129SBrice Goglin status++; 170080bba129SBrice Goglin } 170180bba129SBrice Goglin 170280bba129SBrice Goglin up_read(&mm->mmap_sem); 170380bba129SBrice Goglin } 170480bba129SBrice Goglin 170580bba129SBrice Goglin /* 170680bba129SBrice Goglin * Determine the nodes of a user array of pages and store it in 170780bba129SBrice Goglin * a user array of status. 
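 *
 * This is what services move_pages(2) when userspace passes
 * nodes == NULL, i.e. a pure placement query. A minimal userspace
 * sketch (libnuma prototype; some_addr is a placeholder, error
 * handling elided):
 *
 *	void *pages[1] = { some_addr };
 *	int status[1];
 *
 *	move_pages(0, 1, pages, NULL, status, 0);
 *
 * afterwards status[0] holds the id of the node backing some_addr, or
 * a negative errno such as -ENOENT for an unmapped address.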
170880bba129SBrice Goglin */ 170980bba129SBrice Goglin static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages, 171080bba129SBrice Goglin const void __user * __user *pages, 171180bba129SBrice Goglin int __user *status) 171280bba129SBrice Goglin { 171380bba129SBrice Goglin #define DO_PAGES_STAT_CHUNK_NR 16 171480bba129SBrice Goglin const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR]; 171580bba129SBrice Goglin int chunk_status[DO_PAGES_STAT_CHUNK_NR]; 171680bba129SBrice Goglin 171787b8d1adSH. Peter Anvin while (nr_pages) { 171887b8d1adSH. Peter Anvin unsigned long chunk_nr; 171980bba129SBrice Goglin 172087b8d1adSH. Peter Anvin chunk_nr = nr_pages; 172187b8d1adSH. Peter Anvin if (chunk_nr > DO_PAGES_STAT_CHUNK_NR) 172287b8d1adSH. Peter Anvin chunk_nr = DO_PAGES_STAT_CHUNK_NR; 172387b8d1adSH. Peter Anvin 172487b8d1adSH. Peter Anvin if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages))) 172587b8d1adSH. Peter Anvin break; 172680bba129SBrice Goglin 172780bba129SBrice Goglin do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status); 172880bba129SBrice Goglin 172987b8d1adSH. Peter Anvin if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status))) 173087b8d1adSH. Peter Anvin break; 1731742755a1SChristoph Lameter 173287b8d1adSH. Peter Anvin pages += chunk_nr; 173387b8d1adSH. Peter Anvin status += chunk_nr; 173487b8d1adSH. Peter Anvin nr_pages -= chunk_nr; 173587b8d1adSH. Peter Anvin } 173687b8d1adSH. Peter Anvin return nr_pages ? -EFAULT : 0; 1737742755a1SChristoph Lameter } 1738742755a1SChristoph Lameter 1739742755a1SChristoph Lameter /* 1740742755a1SChristoph Lameter * Move a list of pages in the address space of the currently executing 1741742755a1SChristoph Lameter * process. 1742742755a1SChristoph Lameter */ 1743938bb9f5SHeiko Carstens SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, 1744938bb9f5SHeiko Carstens const void __user * __user *, pages, 1745938bb9f5SHeiko Carstens const int __user *, nodes, 1746938bb9f5SHeiko Carstens int __user *, status, int, flags) 1747742755a1SChristoph Lameter { 1748742755a1SChristoph Lameter struct task_struct *task; 1749742755a1SChristoph Lameter struct mm_struct *mm; 17505e9a0f02SBrice Goglin int err; 17513268c63eSChristoph Lameter nodemask_t task_nodes; 1752742755a1SChristoph Lameter 1753742755a1SChristoph Lameter /* Check flags */ 1754742755a1SChristoph Lameter if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL)) 1755742755a1SChristoph Lameter return -EINVAL; 1756742755a1SChristoph Lameter 1757742755a1SChristoph Lameter if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 1758742755a1SChristoph Lameter return -EPERM; 1759742755a1SChristoph Lameter 1760742755a1SChristoph Lameter /* Find the mm_struct */ 1761a879bf58SGreg Thelen rcu_read_lock(); 1762228ebcbeSPavel Emelyanov task = pid ? find_task_by_vpid(pid) : current; 1763742755a1SChristoph Lameter if (!task) { 1764a879bf58SGreg Thelen rcu_read_unlock(); 1765742755a1SChristoph Lameter return -ESRCH; 1766742755a1SChristoph Lameter } 17673268c63eSChristoph Lameter get_task_struct(task); 1768742755a1SChristoph Lameter 1769742755a1SChristoph Lameter /* 1770742755a1SChristoph Lameter * Check if this process has the right to modify the specified 1771197e7e52SLinus Torvalds * process. Use the regular "ptrace_may_access()" checks. 
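 *
 * Concretely: pid == 0 targets the calling process itself, moving
 * pages of another process requires ptrace-level access to it, and
 * MPOL_MF_MOVE_ALL additionally requires CAP_SYS_NICE (checked near
 * the top of this function).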
1772742755a1SChristoph Lameter 	 */
1773197e7e52SLinus Torvalds 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1774c69e8d9cSDavid Howells 		rcu_read_unlock();
1775742755a1SChristoph Lameter 		err = -EPERM;
17765e9a0f02SBrice Goglin 		goto out;
1777742755a1SChristoph Lameter 	}
1778c69e8d9cSDavid Howells 	rcu_read_unlock();
1779742755a1SChristoph Lameter 
178086c3a764SDavid Quigley 	err = security_task_movememory(task);
178186c3a764SDavid Quigley 	if (err)
1782742755a1SChristoph Lameter 		goto out;
1783742755a1SChristoph Lameter 
17843268c63eSChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
17853268c63eSChristoph Lameter 	mm = get_task_mm(task);
17863268c63eSChristoph Lameter 	put_task_struct(task);
17873268c63eSChristoph Lameter 
17886e8b09eaSSasha Levin 	if (!mm)
17896e8b09eaSSasha Levin 		return -EINVAL;
17906e8b09eaSSasha Levin 
17913268c63eSChristoph Lameter 	if (nodes)
17923268c63eSChristoph Lameter 		err = do_pages_move(mm, task_nodes, nr_pages, pages,
17933268c63eSChristoph Lameter 				    nodes, status, flags);
17943268c63eSChristoph Lameter 	else
17955e9a0f02SBrice Goglin 		err = do_pages_stat(mm, nr_pages, pages, status);
17963268c63eSChristoph Lameter 
17973268c63eSChristoph Lameter 	mmput(mm);
17983268c63eSChristoph Lameter 	return err;
1799742755a1SChristoph Lameter 
1800742755a1SChristoph Lameter out:
18013268c63eSChristoph Lameter 	put_task_struct(task);
1802742755a1SChristoph Lameter 	return err;
1803742755a1SChristoph Lameter }
1804742755a1SChristoph Lameter 
18057039e1dbSPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
18067039e1dbSPeter Zijlstra /*
18077039e1dbSPeter Zijlstra  * Returns true if this is a safe migration target node for misplaced NUMA
18087039e1dbSPeter Zijlstra  * pages. Currently it only checks the watermarks, which is crude.
18097039e1dbSPeter Zijlstra  */
18107039e1dbSPeter Zijlstra static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
18113abef4e6SMel Gorman 				   unsigned long nr_migrate_pages)
18127039e1dbSPeter Zijlstra {
18137039e1dbSPeter Zijlstra 	int z;
1814599d0c95SMel Gorman 
18157039e1dbSPeter Zijlstra 	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
18167039e1dbSPeter Zijlstra 		struct zone *zone = pgdat->node_zones + z;
18177039e1dbSPeter Zijlstra 
18187039e1dbSPeter Zijlstra 		if (!populated_zone(zone))
18197039e1dbSPeter Zijlstra 			continue;
18207039e1dbSPeter Zijlstra 
18217039e1dbSPeter Zijlstra 		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
18227039e1dbSPeter Zijlstra 		if (!zone_watermark_ok(zone, 0,
18237039e1dbSPeter Zijlstra 				       high_wmark_pages(zone) +
18247039e1dbSPeter Zijlstra 				       nr_migrate_pages,
18257039e1dbSPeter Zijlstra 				       0, 0))
18267039e1dbSPeter Zijlstra 			continue;
18277039e1dbSPeter Zijlstra 		return true;
18287039e1dbSPeter Zijlstra 	}
18297039e1dbSPeter Zijlstra 	return false;
18307039e1dbSPeter Zijlstra }
18317039e1dbSPeter Zijlstra 
18327039e1dbSPeter Zijlstra static struct page *alloc_misplaced_dst_page(struct page *page,
18337039e1dbSPeter Zijlstra 					   unsigned long data,
18347039e1dbSPeter Zijlstra 					   int **result)
18357039e1dbSPeter Zijlstra {
18367039e1dbSPeter Zijlstra 	int nid = (int) data;
18377039e1dbSPeter Zijlstra 	struct page *newpage;
18387039e1dbSPeter Zijlstra 
183996db800fSVlastimil Babka 	newpage = __alloc_pages_node(nid,
1840e97ca8e5SJohannes Weiner 					 (GFP_HIGHUSER_MOVABLE |
1841e97ca8e5SJohannes Weiner 					  __GFP_THISNODE | __GFP_NOMEMALLOC |
1842e97ca8e5SJohannes Weiner 					  __GFP_NORETRY | __GFP_NOWARN) &
18438479eba7SMel Gorman 					 ~__GFP_RECLAIM, 0);
1844bac0382cSHillf Danton 
18457039e1dbSPeter Zijlstra 	return newpage;
18467039e1dbSPeter Zijlstra }
18477039e1dbSPeter Zijlstra 
18487039e1dbSPeter Zijlstra /*
1849a8f60772SMel Gorman  * page migration rate limiting control.
1850a8f60772SMel Gorman  * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
1851a8f60772SMel Gorman  * window of time. Default here says do not migrate more than 1280M per second.
1852a8f60772SMel Gorman  */
1853a8f60772SMel Gorman static unsigned int migrate_interval_millisecs __read_mostly = 100;
1854a8f60772SMel Gorman static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
1855a8f60772SMel Gorman 
1856b32967ffSMel Gorman /* Returns true if the node is migrate rate-limited after the update */
18571c30e017SMel Gorman static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
18581c30e017SMel Gorman 					unsigned long nr_pages)
1859b32967ffSMel Gorman {
1860b32967ffSMel Gorman 	/*
1861b32967ffSMel Gorman 	 * Rate-limit the amount of data that is being migrated to a node.
1862b32967ffSMel Gorman 	 * Optimal placement is no good if the memory bus is saturated and
1863b32967ffSMel Gorman 	 * all the time is being spent migrating!
1864b32967ffSMel Gorman 	 */
1865b32967ffSMel Gorman 	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
18661c5e9c27SMel Gorman 		spin_lock(&pgdat->numabalancing_migrate_lock);
1867b32967ffSMel Gorman 		pgdat->numabalancing_migrate_nr_pages = 0;
1868b32967ffSMel Gorman 		pgdat->numabalancing_migrate_next_window = jiffies +
1869b32967ffSMel Gorman 			msecs_to_jiffies(migrate_interval_millisecs);
18701c5e9c27SMel Gorman 		spin_unlock(&pgdat->numabalancing_migrate_lock);
1871b32967ffSMel Gorman 	}
1872af1839d7SMel Gorman 	if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
1873af1839d7SMel Gorman 		trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
1874af1839d7SMel Gorman 								nr_pages);
18751c5e9c27SMel Gorman 		return true;
1876af1839d7SMel Gorman 	}
1877b32967ffSMel Gorman 
18781c5e9c27SMel Gorman 	/*
18791c5e9c27SMel Gorman 	 * This is an unlocked non-atomic update so errors are possible.
18801c5e9c27SMel Gorman 	 * The consequence is failing to migrate when we potentially should
18811c5e9c27SMel Gorman 	 * have, which is not severe enough to warrant locking. If it is ever
18821c5e9c27SMel Gorman 	 * a problem, it can be converted to a per-cpu counter.
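 *
 * To make the default above concrete (assuming 4K base pages):
 * ratelimit_pages = 128 << (20 - 12) = 32768 pages, i.e. 128MB per
 * 100ms window, which is the "1280M per second" quoted earlier.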
18831c5e9c27SMel Gorman */ 18841c5e9c27SMel Gorman pgdat->numabalancing_migrate_nr_pages += nr_pages; 18851c5e9c27SMel Gorman return false; 1886b32967ffSMel Gorman } 1887b32967ffSMel Gorman 18881c30e017SMel Gorman static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) 1889b32967ffSMel Gorman { 1890340ef390SHugh Dickins int page_lru; 1891b32967ffSMel Gorman 1892309381feSSasha Levin VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); 18933abef4e6SMel Gorman 1894b32967ffSMel Gorman /* Avoid migrating to a node that is nearly full */ 1895340ef390SHugh Dickins if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page))) 1896340ef390SHugh Dickins return 0; 1897b32967ffSMel Gorman 1898340ef390SHugh Dickins if (isolate_lru_page(page)) 1899340ef390SHugh Dickins return 0; 1900340ef390SHugh Dickins 1901340ef390SHugh Dickins /* 1902340ef390SHugh Dickins * migrate_misplaced_transhuge_page() skips page migration's usual 1903340ef390SHugh Dickins * check on page_count(), so we must do it here, now that the page 1904340ef390SHugh Dickins * has been isolated: a GUP pin, or any other pin, prevents migration. 1905340ef390SHugh Dickins * The expected page count is 3: 1 for page's mapcount and 1 for the 1906340ef390SHugh Dickins * caller's pin and 1 for the reference taken by isolate_lru_page(). 1907340ef390SHugh Dickins */ 1908340ef390SHugh Dickins if (PageTransHuge(page) && page_count(page) != 3) { 1909340ef390SHugh Dickins putback_lru_page(page); 1910b32967ffSMel Gorman return 0; 1911b32967ffSMel Gorman } 1912b32967ffSMel Gorman 1913b32967ffSMel Gorman page_lru = page_is_file_cache(page); 1914599d0c95SMel Gorman mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru, 1915340ef390SHugh Dickins hpage_nr_pages(page)); 1916b32967ffSMel Gorman 1917b32967ffSMel Gorman /* 1918340ef390SHugh Dickins * Isolating the page has taken another reference, so the 1919340ef390SHugh Dickins * caller's reference can be safely dropped without the page 1920340ef390SHugh Dickins * disappearing underneath us during migration. 1921b32967ffSMel Gorman */ 1922b32967ffSMel Gorman put_page(page); 1923340ef390SHugh Dickins return 1; 1924b32967ffSMel Gorman } 1925b32967ffSMel Gorman 1926de466bd6SMel Gorman bool pmd_trans_migrating(pmd_t pmd) 1927de466bd6SMel Gorman { 1928de466bd6SMel Gorman struct page *page = pmd_page(pmd); 1929de466bd6SMel Gorman return PageLocked(page); 1930de466bd6SMel Gorman } 1931de466bd6SMel Gorman 1932a8f60772SMel Gorman /* 19337039e1dbSPeter Zijlstra * Attempt to migrate a misplaced page to the specified destination 19347039e1dbSPeter Zijlstra * node. Caller is expected to have an elevated reference count on 19357039e1dbSPeter Zijlstra * the page that will be dropped by this function before returning. 19367039e1dbSPeter Zijlstra */ 19371bc115d8SMel Gorman int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, 19381bc115d8SMel Gorman int node) 19397039e1dbSPeter Zijlstra { 1940a8f60772SMel Gorman pg_data_t *pgdat = NODE_DATA(node); 1941340ef390SHugh Dickins int isolated; 1942b32967ffSMel Gorman int nr_remaining; 19437039e1dbSPeter Zijlstra LIST_HEAD(migratepages); 19447039e1dbSPeter Zijlstra 19457039e1dbSPeter Zijlstra /* 19461bc115d8SMel Gorman * Don't migrate file pages that are mapped in multiple processes 19471bc115d8SMel Gorman * with execute permissions as they are probably shared libraries. 
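 * The text pages of a shared C library are the canonical example:
 * file backed, executable and mapped by nearly every process, so
 * they are best left where they are.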
19487039e1dbSPeter Zijlstra */ 19491bc115d8SMel Gorman if (page_mapcount(page) != 1 && page_is_file_cache(page) && 19501bc115d8SMel Gorman (vma->vm_flags & VM_EXEC)) 19517039e1dbSPeter Zijlstra goto out; 19527039e1dbSPeter Zijlstra 1953a8f60772SMel Gorman /* 1954a8f60772SMel Gorman * Rate-limit the amount of data that is being migrated to a node. 1955a8f60772SMel Gorman * Optimal placement is no good if the memory bus is saturated and 1956a8f60772SMel Gorman * all the time is being spent migrating! 1957a8f60772SMel Gorman */ 1958340ef390SHugh Dickins if (numamigrate_update_ratelimit(pgdat, 1)) 1959a8f60772SMel Gorman goto out; 1960a8f60772SMel Gorman 1961b32967ffSMel Gorman isolated = numamigrate_isolate_page(pgdat, page); 1962b32967ffSMel Gorman if (!isolated) 19637039e1dbSPeter Zijlstra goto out; 19647039e1dbSPeter Zijlstra 19657039e1dbSPeter Zijlstra list_add(&page->lru, &migratepages); 19669c620e2bSHugh Dickins nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page, 196768711a74SDavid Rientjes NULL, node, MIGRATE_ASYNC, 196868711a74SDavid Rientjes MR_NUMA_MISPLACED); 19697039e1dbSPeter Zijlstra if (nr_remaining) { 197059c82b70SJoonsoo Kim if (!list_empty(&migratepages)) { 197159c82b70SJoonsoo Kim list_del(&page->lru); 1972599d0c95SMel Gorman dec_node_page_state(page, NR_ISOLATED_ANON + 197359c82b70SJoonsoo Kim page_is_file_cache(page)); 197459c82b70SJoonsoo Kim putback_lru_page(page); 197559c82b70SJoonsoo Kim } 19767039e1dbSPeter Zijlstra isolated = 0; 197703c5a6e1SMel Gorman } else 197803c5a6e1SMel Gorman count_vm_numa_event(NUMA_PAGE_MIGRATE); 19797039e1dbSPeter Zijlstra BUG_ON(!list_empty(&migratepages)); 19807039e1dbSPeter Zijlstra return isolated; 1981340ef390SHugh Dickins 1982340ef390SHugh Dickins out: 1983340ef390SHugh Dickins put_page(page); 1984340ef390SHugh Dickins return 0; 19857039e1dbSPeter Zijlstra } 1986220018d3SMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 1987b32967ffSMel Gorman 1988220018d3SMel Gorman #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE) 1989340ef390SHugh Dickins /* 1990340ef390SHugh Dickins * Migrates a THP to a given target node. page must be locked and is unlocked 1991340ef390SHugh Dickins * before returning. 1992340ef390SHugh Dickins */ 1993b32967ffSMel Gorman int migrate_misplaced_transhuge_page(struct mm_struct *mm, 1994b32967ffSMel Gorman struct vm_area_struct *vma, 1995b32967ffSMel Gorman pmd_t *pmd, pmd_t entry, 1996b32967ffSMel Gorman unsigned long address, 1997b32967ffSMel Gorman struct page *page, int node) 1998b32967ffSMel Gorman { 1999c4088ebdSKirill A. Shutemov spinlock_t *ptl; 2000b32967ffSMel Gorman pg_data_t *pgdat = NODE_DATA(node); 2001b32967ffSMel Gorman int isolated = 0; 2002b32967ffSMel Gorman struct page *new_page = NULL; 2003b32967ffSMel Gorman int page_lru = page_is_file_cache(page); 2004f714f4f2SMel Gorman unsigned long mmun_start = address & HPAGE_PMD_MASK; 2005f714f4f2SMel Gorman unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE; 2006b32967ffSMel Gorman 2007b32967ffSMel Gorman /* 2008b32967ffSMel Gorman * Rate-limit the amount of data that is being migrated to a node. 2009b32967ffSMel Gorman * Optimal placement is no good if the memory bus is saturated and 2010b32967ffSMel Gorman * all the time is being spent migrating! 
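 * Note that a single THP is charged as HPAGE_PMD_NR base pages (512
 * with 4K pages) against the window, see the
 * numamigrate_update_ratelimit() call below.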
2011b32967ffSMel Gorman */ 2012d28d4335SMel Gorman if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR)) 2013b32967ffSMel Gorman goto out_dropref; 2014b32967ffSMel Gorman 2015b32967ffSMel Gorman new_page = alloc_pages_node(node, 201625160354SVlastimil Babka (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE), 2017e97ca8e5SJohannes Weiner HPAGE_PMD_ORDER); 2018340ef390SHugh Dickins if (!new_page) 2019340ef390SHugh Dickins goto out_fail; 20209a982250SKirill A. Shutemov prep_transhuge_page(new_page); 2021340ef390SHugh Dickins 2022b32967ffSMel Gorman isolated = numamigrate_isolate_page(pgdat, page); 2023340ef390SHugh Dickins if (!isolated) { 2024b32967ffSMel Gorman put_page(new_page); 2025340ef390SHugh Dickins goto out_fail; 2026b32967ffSMel Gorman } 2027b0943d61SMel Gorman 2028b32967ffSMel Gorman /* Prepare a page as a migration target */ 202948c935adSKirill A. Shutemov __SetPageLocked(new_page); 2030d44d363fSShaohua Li if (PageSwapBacked(page)) 2031fa9949daSHugh Dickins __SetPageSwapBacked(new_page); 2032b32967ffSMel Gorman 2033b32967ffSMel Gorman /* anon mapping, we can simply copy page->mapping to the new page: */ 2034b32967ffSMel Gorman new_page->mapping = page->mapping; 2035b32967ffSMel Gorman new_page->index = page->index; 2036b32967ffSMel Gorman migrate_page_copy(new_page, page); 2037b32967ffSMel Gorman WARN_ON(PageLRU(new_page)); 2038b32967ffSMel Gorman 2039b32967ffSMel Gorman /* Recheck the target PMD */ 2040f714f4f2SMel Gorman mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2041c4088ebdSKirill A. Shutemov ptl = pmd_lock(mm, pmd); 2042f4e177d1SWill Deacon if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) { 2043c4088ebdSKirill A. Shutemov spin_unlock(ptl); 2044f714f4f2SMel Gorman mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2045b32967ffSMel Gorman 2046b32967ffSMel Gorman /* Reverse changes made by migrate_page_copy() */ 2047b32967ffSMel Gorman if (TestClearPageActive(new_page)) 2048b32967ffSMel Gorman SetPageActive(page); 2049b32967ffSMel Gorman if (TestClearPageUnevictable(new_page)) 2050b32967ffSMel Gorman SetPageUnevictable(page); 2051b32967ffSMel Gorman 2052b32967ffSMel Gorman unlock_page(new_page); 2053b32967ffSMel Gorman put_page(new_page); /* Free it */ 2054b32967ffSMel Gorman 2055a54a407fSMel Gorman /* Retake the callers reference and putback on LRU */ 2056a54a407fSMel Gorman get_page(page); 2057b32967ffSMel Gorman putback_lru_page(page); 2058599d0c95SMel Gorman mod_node_page_state(page_pgdat(page), 2059a54a407fSMel Gorman NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR); 2060eb4489f6SMel Gorman 2061eb4489f6SMel Gorman goto out_unlock; 2062b32967ffSMel Gorman } 2063b32967ffSMel Gorman 206410102459SKirill A. Shutemov entry = mk_huge_pmd(new_page, vma->vm_page_prot); 20652b4847e7SMel Gorman entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 2066b32967ffSMel Gorman 20672b4847e7SMel Gorman /* 20682b4847e7SMel Gorman * Clear the old entry under pagetable lock and establish the new PTE. 20692b4847e7SMel Gorman * Any parallel GUP will either observe the old page blocking on the 20702b4847e7SMel Gorman * page lock, block on the page table lock or observe the new page. 20712b4847e7SMel Gorman * The SetPageUptodate on the new page and page_add_new_anon_rmap 20722b4847e7SMel Gorman * guarantee the copy is visible before the pagetable update. 20732b4847e7SMel Gorman */ 2074f714f4f2SMel Gorman flush_cache_range(vma, mmun_start, mmun_end); 2075d281ee61SKirill A. 
Shutemov page_add_anon_rmap(new_page, vma, mmun_start, true); 20768809aa2dSAneesh Kumar K.V pmdp_huge_clear_flush_notify(vma, mmun_start, pmd); 2077f714f4f2SMel Gorman set_pmd_at(mm, mmun_start, pmd, entry); 2078ce4a9cc5SStephen Rothwell update_mmu_cache_pmd(vma, address, &entry); 20792b4847e7SMel Gorman 2080f4e177d1SWill Deacon page_ref_unfreeze(page, 2); 208151afb12bSHugh Dickins mlock_migrate_page(new_page, page); 2082d281ee61SKirill A. Shutemov page_remove_rmap(page, true); 20837cd12b4aSVlastimil Babka set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED); 20842b4847e7SMel Gorman 2085c4088ebdSKirill A. Shutemov spin_unlock(ptl); 2086f714f4f2SMel Gorman mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2087b32967ffSMel Gorman 208811de9927SMel Gorman /* Take an "isolate" reference and put new page on the LRU. */ 208911de9927SMel Gorman get_page(new_page); 209011de9927SMel Gorman putback_lru_page(new_page); 209111de9927SMel Gorman 2092b32967ffSMel Gorman unlock_page(new_page); 2093b32967ffSMel Gorman unlock_page(page); 2094b32967ffSMel Gorman put_page(page); /* Drop the rmap reference */ 2095b32967ffSMel Gorman put_page(page); /* Drop the LRU isolation reference */ 2096b32967ffSMel Gorman 2097b32967ffSMel Gorman count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR); 2098b32967ffSMel Gorman count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR); 2099b32967ffSMel Gorman 2100599d0c95SMel Gorman mod_node_page_state(page_pgdat(page), 2101b32967ffSMel Gorman NR_ISOLATED_ANON + page_lru, 2102b32967ffSMel Gorman -HPAGE_PMD_NR); 2103b32967ffSMel Gorman return isolated; 2104b32967ffSMel Gorman 2105340ef390SHugh Dickins out_fail: 2106340ef390SHugh Dickins count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); 2107b32967ffSMel Gorman out_dropref: 21082b4847e7SMel Gorman ptl = pmd_lock(mm, pmd); 21092b4847e7SMel Gorman if (pmd_same(*pmd, entry)) { 21104d942466SMel Gorman entry = pmd_modify(entry, vma->vm_page_prot); 2111f714f4f2SMel Gorman set_pmd_at(mm, mmun_start, pmd, entry); 2112a54a407fSMel Gorman update_mmu_cache_pmd(vma, address, &entry); 21132b4847e7SMel Gorman } 21142b4847e7SMel Gorman spin_unlock(ptl); 2115a54a407fSMel Gorman 2116eb4489f6SMel Gorman out_unlock: 2117340ef390SHugh Dickins unlock_page(page); 2118b32967ffSMel Gorman put_page(page); 2119b32967ffSMel Gorman return 0; 2120b32967ffSMel Gorman } 21217039e1dbSPeter Zijlstra #endif /* CONFIG_NUMA_BALANCING */ 21227039e1dbSPeter Zijlstra 21237039e1dbSPeter Zijlstra #endif /* CONFIG_NUMA */ 21248763cb45SJérôme Glisse 21258763cb45SJérôme Glisse 21268763cb45SJérôme Glisse struct migrate_vma { 21278763cb45SJérôme Glisse struct vm_area_struct *vma; 21288763cb45SJérôme Glisse unsigned long *dst; 21298763cb45SJérôme Glisse unsigned long *src; 21308763cb45SJérôme Glisse unsigned long cpages; 21318763cb45SJérôme Glisse unsigned long npages; 21328763cb45SJérôme Glisse unsigned long start; 21338763cb45SJérôme Glisse unsigned long end; 21348763cb45SJérôme Glisse }; 21358763cb45SJérôme Glisse 21368763cb45SJérôme Glisse static int migrate_vma_collect_hole(unsigned long start, 21378763cb45SJérôme Glisse unsigned long end, 21388763cb45SJérôme Glisse struct mm_walk *walk) 21398763cb45SJérôme Glisse { 21408763cb45SJérôme Glisse struct migrate_vma *migrate = walk->private; 21418763cb45SJérôme Glisse unsigned long addr; 21428763cb45SJérôme Glisse 21438763cb45SJérôme Glisse for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { 21448763cb45SJérôme Glisse migrate->dst[migrate->npages] = 0; 21458763cb45SJérôme Glisse migrate->src[migrate->npages++] 
= 0; 21468763cb45SJérôme Glisse } 21478763cb45SJérôme Glisse 21488763cb45SJérôme Glisse return 0; 21498763cb45SJérôme Glisse } 21508763cb45SJérôme Glisse 21518763cb45SJérôme Glisse static int migrate_vma_collect_pmd(pmd_t *pmdp, 21528763cb45SJérôme Glisse unsigned long start, 21538763cb45SJérôme Glisse unsigned long end, 21548763cb45SJérôme Glisse struct mm_walk *walk) 21558763cb45SJérôme Glisse { 21568763cb45SJérôme Glisse struct migrate_vma *migrate = walk->private; 21578763cb45SJérôme Glisse struct vm_area_struct *vma = walk->vma; 21588763cb45SJérôme Glisse struct mm_struct *mm = vma->vm_mm; 21598c3328f1SJérôme Glisse unsigned long addr = start, unmapped = 0; 21608763cb45SJérôme Glisse spinlock_t *ptl; 21618763cb45SJérôme Glisse pte_t *ptep; 21628763cb45SJérôme Glisse 21638763cb45SJérôme Glisse again: 21648763cb45SJérôme Glisse if (pmd_none(*pmdp)) 21658763cb45SJérôme Glisse return migrate_vma_collect_hole(start, end, walk); 21668763cb45SJérôme Glisse 21678763cb45SJérôme Glisse if (pmd_trans_huge(*pmdp)) { 21688763cb45SJérôme Glisse struct page *page; 21698763cb45SJérôme Glisse 21708763cb45SJérôme Glisse ptl = pmd_lock(mm, pmdp); 21718763cb45SJérôme Glisse if (unlikely(!pmd_trans_huge(*pmdp))) { 21728763cb45SJérôme Glisse spin_unlock(ptl); 21738763cb45SJérôme Glisse goto again; 21748763cb45SJérôme Glisse } 21758763cb45SJérôme Glisse 21768763cb45SJérôme Glisse page = pmd_page(*pmdp); 21778763cb45SJérôme Glisse if (is_huge_zero_page(page)) { 21788763cb45SJérôme Glisse spin_unlock(ptl); 21798763cb45SJérôme Glisse split_huge_pmd(vma, pmdp, addr); 21808763cb45SJérôme Glisse if (pmd_trans_unstable(pmdp)) 21818763cb45SJérôme Glisse return migrate_vma_collect_hole(start, end, 21828763cb45SJérôme Glisse walk); 21838763cb45SJérôme Glisse } else { 21848763cb45SJérôme Glisse int ret; 21858763cb45SJérôme Glisse 21868763cb45SJérôme Glisse get_page(page); 21878763cb45SJérôme Glisse spin_unlock(ptl); 21888763cb45SJérôme Glisse if (unlikely(!trylock_page(page))) 21898763cb45SJérôme Glisse return migrate_vma_collect_hole(start, end, 21908763cb45SJérôme Glisse walk); 21918763cb45SJérôme Glisse ret = split_huge_page(page); 21928763cb45SJérôme Glisse unlock_page(page); 21938763cb45SJérôme Glisse put_page(page); 21948763cb45SJérôme Glisse if (ret || pmd_none(*pmdp)) 21958763cb45SJérôme Glisse return migrate_vma_collect_hole(start, end, 21968763cb45SJérôme Glisse walk); 21978763cb45SJérôme Glisse } 21988763cb45SJérôme Glisse } 21998763cb45SJérôme Glisse 22008763cb45SJérôme Glisse if (unlikely(pmd_bad(*pmdp))) 22018763cb45SJérôme Glisse return migrate_vma_collect_hole(start, end, walk); 22028763cb45SJérôme Glisse 22038763cb45SJérôme Glisse ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); 22048c3328f1SJérôme Glisse arch_enter_lazy_mmu_mode(); 22058c3328f1SJérôme Glisse 22068763cb45SJérôme Glisse for (; addr < end; addr += PAGE_SIZE, ptep++) { 22078763cb45SJérôme Glisse unsigned long mpfn, pfn; 22088763cb45SJérôme Glisse struct page *page; 22098c3328f1SJérôme Glisse swp_entry_t entry; 22108763cb45SJérôme Glisse pte_t pte; 22118763cb45SJérôme Glisse 22128763cb45SJérôme Glisse pte = *ptep; 22138763cb45SJérôme Glisse pfn = pte_pfn(pte); 22148763cb45SJérôme Glisse 2215*a5430ddaSJérôme Glisse if (pte_none(pte)) { 22168763cb45SJérôme Glisse mpfn = pfn = 0; 22178763cb45SJérôme Glisse goto next; 22188763cb45SJérôme Glisse } 22198763cb45SJérôme Glisse 2220*a5430ddaSJérôme Glisse if (!pte_present(pte)) { 2221*a5430ddaSJérôme Glisse mpfn = pfn = 0; 2222*a5430ddaSJérôme Glisse 2223*a5430ddaSJérôme Glisse /* 
2224*a5430ddaSJérôme Glisse 			 * Only care about unaddressable device page special
2225*a5430ddaSJérôme Glisse 			 * page table entry. Other special swap entries are not
2226*a5430ddaSJérôme Glisse 			 * migratable, and we ignore regular swapped pages.
2227*a5430ddaSJérôme Glisse 			 */
2228*a5430ddaSJérôme Glisse 			entry = pte_to_swp_entry(pte);
2229*a5430ddaSJérôme Glisse 			if (!is_device_private_entry(entry))
2230*a5430ddaSJérôme Glisse 				goto next;
2231*a5430ddaSJérôme Glisse 
2232*a5430ddaSJérôme Glisse 			page = device_private_entry_to_page(entry);
2233*a5430ddaSJérôme Glisse 			mpfn = migrate_pfn(page_to_pfn(page)) |
2234*a5430ddaSJérôme Glisse 				MIGRATE_PFN_DEVICE | MIGRATE_PFN_MIGRATE;
2235*a5430ddaSJérôme Glisse 			if (is_write_device_private_entry(entry))
2236*a5430ddaSJérôme Glisse 				mpfn |= MIGRATE_PFN_WRITE;
2237*a5430ddaSJérôme Glisse 		} else {
22388763cb45SJérôme Glisse 			page = vm_normal_page(migrate->vma, addr, pte);
2239*a5430ddaSJérôme Glisse 			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2240*a5430ddaSJérôme Glisse 			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2241*a5430ddaSJérôme Glisse 		}
2242*a5430ddaSJérôme Glisse 
2243*a5430ddaSJérôme Glisse 		/* FIXME support THP */
22448763cb45SJérôme Glisse 		if (!page || !page->mapping || PageTransCompound(page)) {
22458763cb45SJérôme Glisse 			mpfn = pfn = 0;
22468763cb45SJérôme Glisse 			goto next;
22478763cb45SJérôme Glisse 		}
2248*a5430ddaSJérôme Glisse 		pfn = page_to_pfn(page);
22498763cb45SJérôme Glisse 
22508763cb45SJérôme Glisse 		/*
22518763cb45SJérôme Glisse 		 * By getting a reference on the page we pin it and that blocks
22528763cb45SJérôme Glisse 		 * any kind of migration. Side effect is that it "freezes" the
22538763cb45SJérôme Glisse 		 * pte.
22548763cb45SJérôme Glisse 		 *
22558763cb45SJérôme Glisse 		 * We drop this reference after isolating the page from the lru
22568763cb45SJérôme Glisse 		 * for non-device pages (device pages are not on the lru and thus
22578763cb45SJérôme Glisse 		 * can't be dropped from it).
22588763cb45SJérôme Glisse 		 */
22598763cb45SJérôme Glisse 		get_page(page);
22608763cb45SJérôme Glisse 		migrate->cpages++;
22618763cb45SJérôme Glisse 
22628c3328f1SJérôme Glisse 		/*
22638c3328f1SJérôme Glisse 		 * Optimize for the common case where the page is only mapped once
22648c3328f1SJérôme Glisse 		 * in one process. If we can lock the page, then we can safely
22658c3328f1SJérôme Glisse 		 * set up a special migration page table entry now.
22668c3328f1SJérôme Glisse 		 */
22678c3328f1SJérôme Glisse 		if (trylock_page(page)) {
22688c3328f1SJérôme Glisse 			pte_t swp_pte;
22698c3328f1SJérôme Glisse 
22708c3328f1SJérôme Glisse 			mpfn |= MIGRATE_PFN_LOCKED;
22718c3328f1SJérôme Glisse 			ptep_get_and_clear(mm, addr, ptep);
22728c3328f1SJérôme Glisse 
22738c3328f1SJérôme Glisse 			/* Setup special migration page table entry */
22748c3328f1SJérôme Glisse 			entry = make_migration_entry(page, pte_write(pte));
22758c3328f1SJérôme Glisse 			swp_pte = swp_entry_to_pte(entry);
22768c3328f1SJérôme Glisse 			if (pte_soft_dirty(pte))
22778c3328f1SJérôme Glisse 				swp_pte = pte_swp_mksoft_dirty(swp_pte);
22788c3328f1SJérôme Glisse 			set_pte_at(mm, addr, ptep, swp_pte);
22798c3328f1SJérôme Glisse 
22808c3328f1SJérôme Glisse 			/*
22818c3328f1SJérôme Glisse 			 * This is like a regular unmap: we remove the rmap and
22828c3328f1SJérôme Glisse 			 * drop the page refcount. The page won't be freed, as we took
22838c3328f1SJérôme Glisse 			 * a reference just above.
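 *
 * Refcount bookkeeping at this point, in sketch form: the pte's
 * reference and the rmap are gone, but the get_page() taken earlier
 * in this loop plus MIGRATE_PFN_LOCKED keep the page alive and
 * locked for the later phases of the migrate_vma sequence.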
22848c3328f1SJérôme Glisse */ 22858c3328f1SJérôme Glisse page_remove_rmap(page, false); 22868c3328f1SJérôme Glisse put_page(page); 2287*a5430ddaSJérôme Glisse 2288*a5430ddaSJérôme Glisse if (pte_present(pte)) 22898c3328f1SJérôme Glisse unmapped++; 22908c3328f1SJérôme Glisse } 22918c3328f1SJérôme Glisse 22928763cb45SJérôme Glisse next: 2293*a5430ddaSJérôme Glisse migrate->dst[migrate->npages] = 0; 22948763cb45SJérôme Glisse migrate->src[migrate->npages++] = mpfn; 22958763cb45SJérôme Glisse } 22968c3328f1SJérôme Glisse arch_leave_lazy_mmu_mode(); 22978763cb45SJérôme Glisse pte_unmap_unlock(ptep - 1, ptl); 22988763cb45SJérôme Glisse 22998c3328f1SJérôme Glisse /* Only flush the TLB if we actually modified any entries */ 23008c3328f1SJérôme Glisse if (unmapped) 23018c3328f1SJérôme Glisse flush_tlb_range(walk->vma, start, end); 23028c3328f1SJérôme Glisse 23038763cb45SJérôme Glisse return 0; 23048763cb45SJérôme Glisse } 23058763cb45SJérôme Glisse 23068763cb45SJérôme Glisse /* 23078763cb45SJérôme Glisse * migrate_vma_collect() - collect pages over a range of virtual addresses 23088763cb45SJérôme Glisse * @migrate: migrate struct containing all migration information 23098763cb45SJérôme Glisse * 23108763cb45SJérôme Glisse * This will walk the CPU page table. For each virtual address backed by a 23118763cb45SJérôme Glisse * valid page, it updates the src array and takes a reference on the page, in 23128763cb45SJérôme Glisse * order to pin the page until we lock it and unmap it. 23138763cb45SJérôme Glisse */ 23148763cb45SJérôme Glisse static void migrate_vma_collect(struct migrate_vma *migrate) 23158763cb45SJérôme Glisse { 23168763cb45SJérôme Glisse struct mm_walk mm_walk; 23178763cb45SJérôme Glisse 23188763cb45SJérôme Glisse mm_walk.pmd_entry = migrate_vma_collect_pmd; 23198763cb45SJérôme Glisse mm_walk.pte_entry = NULL; 23208763cb45SJérôme Glisse mm_walk.pte_hole = migrate_vma_collect_hole; 23218763cb45SJérôme Glisse mm_walk.hugetlb_entry = NULL; 23228763cb45SJérôme Glisse mm_walk.test_walk = NULL; 23238763cb45SJérôme Glisse mm_walk.vma = migrate->vma; 23248763cb45SJérôme Glisse mm_walk.mm = migrate->vma->vm_mm; 23258763cb45SJérôme Glisse mm_walk.private = migrate; 23268763cb45SJérôme Glisse 23278c3328f1SJérôme Glisse mmu_notifier_invalidate_range_start(mm_walk.mm, 23288c3328f1SJérôme Glisse migrate->start, 23298c3328f1SJérôme Glisse migrate->end); 23308763cb45SJérôme Glisse walk_page_range(migrate->start, migrate->end, &mm_walk); 23318c3328f1SJérôme Glisse mmu_notifier_invalidate_range_end(mm_walk.mm, 23328c3328f1SJérôme Glisse migrate->start, 23338c3328f1SJérôme Glisse migrate->end); 23348763cb45SJérôme Glisse 23358763cb45SJérôme Glisse migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT); 23368763cb45SJérôme Glisse } 23378763cb45SJérôme Glisse 23388763cb45SJérôme Glisse /* 23398763cb45SJérôme Glisse * migrate_vma_check_page() - check if page is pinned or not 23408763cb45SJérôme Glisse * @page: struct page to check 23418763cb45SJérôme Glisse * 23428763cb45SJérôme Glisse * Pinned pages cannot be migrated. This is the same test as in 23438763cb45SJérôme Glisse * migrate_page_move_mapping(), except that here we allow migration of a 23448763cb45SJérôme Glisse * ZONE_DEVICE page. 
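 *
 * A worked example of the refcount test below (assuming a typical anonymous
 * page; the exact counts differ for pages in the swap cache): a page mapped
 * in exactly one process has page_mapcount() == 1 and, with the one extra
 * reference our caller holds, page_count() == 2, so
 * (page_count() - extra) == page_mapcount() and the page is considered
 * migratable. A concurrent get_user_pages() pin raises page_count() to 3,
 * making (page_count() - extra) > page_mapcount() true, so the page is
 * treated as pinned and migration is aborted for it.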
23458763cb45SJérôme Glisse */
23468763cb45SJérôme Glisse static bool migrate_vma_check_page(struct page *page)
23478763cb45SJérôme Glisse {
23488763cb45SJérôme Glisse /*
23498763cb45SJérôme Glisse * One extra ref because the caller holds an extra reference, either from
23508763cb45SJérôme Glisse * isolate_lru_page() for a regular page, or migrate_vma_collect() for
23518763cb45SJérôme Glisse * a device page.
23528763cb45SJérôme Glisse */
23538763cb45SJérôme Glisse int extra = 1;
23548763cb45SJérôme Glisse 
23558763cb45SJérôme Glisse /*
23568763cb45SJérôme Glisse * FIXME support THP (transparent huge pages); they are a bit more
23578763cb45SJérôme Glisse * complex to check than regular pages, because they can be mapped
23588763cb45SJérôme Glisse * with a pmd or with a pte (split pte mapping).
23598763cb45SJérôme Glisse */
23608763cb45SJérôme Glisse if (PageCompound(page))
23618763cb45SJérôme Glisse return false;
23628763cb45SJérôme Glisse 
2363*a5430ddaSJérôme Glisse /* Pages from ZONE_DEVICE have one extra reference */
2364*a5430ddaSJérôme Glisse if (is_zone_device_page(page)) {
2365*a5430ddaSJérôme Glisse /*
2366*a5430ddaSJérôme Glisse * Private pages can never be pinned, as they have no valid pte
2367*a5430ddaSJérôme Glisse * and GUP will fail for them. Yet if there is a pending
2368*a5430ddaSJérôme Glisse * migration, a thread might try to wait on the pte migration
2369*a5430ddaSJérôme Glisse * entry and will bump the page reference count. Sadly, there is
2370*a5430ddaSJérôme Glisse * no way to differentiate a regular pin from a migration wait,
2371*a5430ddaSJérôme Glisse * so to avoid two racing threads trying to migrate back to the
2372*a5430ddaSJérôme Glisse * CPU and entering an infinite loop (one stopping the migration
2373*a5430ddaSJérôme Glisse * because the other is waiting on the pte migration entry), we
2374*a5430ddaSJérôme Glisse * always return true here.
2375*a5430ddaSJérôme Glisse * FIXME: the proper solution is to rework migration_entry_wait()
2376*a5430ddaSJérôme Glisse * so it does not need to take a reference on the page.
2377*a5430ddaSJérôme Glisse */
2378*a5430ddaSJérôme Glisse if (is_device_private_page(page))
2379*a5430ddaSJérôme Glisse return true;
2380*a5430ddaSJérôme Glisse 
2381*a5430ddaSJérôme Glisse /* Other ZONE_DEVICE memory types are not supported */
2382*a5430ddaSJérôme Glisse return false;
2383*a5430ddaSJérôme Glisse }
2384*a5430ddaSJérôme Glisse 
23858763cb45SJérôme Glisse if ((page_count(page) - extra) > page_mapcount(page))
23868763cb45SJérôme Glisse return false;
23878763cb45SJérôme Glisse 
23888763cb45SJérôme Glisse return true;
23898763cb45SJérôme Glisse }
23908763cb45SJérôme Glisse 
23918763cb45SJérôme Glisse /*
23928763cb45SJérôme Glisse * migrate_vma_prepare() - lock pages and isolate them from the lru
23938763cb45SJérôme Glisse * @migrate: migrate struct containing all migration information
23948763cb45SJérôme Glisse *
23958763cb45SJérôme Glisse * This locks pages that have been collected by migrate_vma_collect(). Once each
23968763cb45SJérôme Glisse * page is locked it is isolated from the lru (for non-device pages). Finally,
23978763cb45SJérôme Glisse * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
23988763cb45SJérôme Glisse * migrated by concurrent kernel threads.
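 *
 * Pages for which locking or lru isolation fails are dropped from the
 * migration: if a special migration pte was already installed for such a
 * page by migrate_vma_collect_pmd(), the original pte is restored by the
 * "restore" pass at the end of this function; otherwise the page is simply
 * unlocked and its reference dropped.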
23998763cb45SJérôme Glisse */
24008763cb45SJérôme Glisse static void migrate_vma_prepare(struct migrate_vma *migrate)
24018763cb45SJérôme Glisse {
24028763cb45SJérôme Glisse const unsigned long npages = migrate->npages;
24038c3328f1SJérôme Glisse const unsigned long start = migrate->start;
24048c3328f1SJérôme Glisse unsigned long addr, i, restore = 0;
24058763cb45SJérôme Glisse bool allow_drain = true;
24068763cb45SJérôme Glisse 
24078763cb45SJérôme Glisse lru_add_drain();
24088763cb45SJérôme Glisse 
24098763cb45SJérôme Glisse for (i = 0; (i < npages) && migrate->cpages; i++) {
24108763cb45SJérôme Glisse struct page *page = migrate_pfn_to_page(migrate->src[i]);
24118c3328f1SJérôme Glisse bool remap = true;
24128763cb45SJérôme Glisse 
24138763cb45SJérôme Glisse if (!page)
24148763cb45SJérôme Glisse continue;
24158763cb45SJérôme Glisse 
24168c3328f1SJérôme Glisse if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
24178763cb45SJérôme Glisse /*
24188763cb45SJérôme Glisse * Because we are migrating several pages there can be
24198763cb45SJérôme Glisse * a deadlock between two concurrent migrations where
24208763cb45SJérôme Glisse * each is waiting on the other's page lock.
24218763cb45SJérôme Glisse *
24228763cb45SJérôme Glisse * Make migrate_vma() a best-effort thing and back off
24238763cb45SJérôme Glisse * for any page we cannot lock right away.
24248763cb45SJérôme Glisse */
24258763cb45SJérôme Glisse if (!trylock_page(page)) {
24268763cb45SJérôme Glisse migrate->src[i] = 0;
24278763cb45SJérôme Glisse migrate->cpages--;
24288763cb45SJérôme Glisse put_page(page);
24298763cb45SJérôme Glisse continue;
24308763cb45SJérôme Glisse }
24318c3328f1SJérôme Glisse remap = false;
24328763cb45SJérôme Glisse migrate->src[i] |= MIGRATE_PFN_LOCKED;
24338c3328f1SJérôme Glisse }
24348763cb45SJérôme Glisse 
2435*a5430ddaSJérôme Glisse /* ZONE_DEVICE pages are not on LRU */
2436*a5430ddaSJérôme Glisse if (!is_zone_device_page(page)) {
24378763cb45SJérôme Glisse if (!PageLRU(page) && allow_drain) {
24388763cb45SJérôme Glisse /* Drain CPU's pagevec */
24398763cb45SJérôme Glisse lru_add_drain_all();
24408763cb45SJérôme Glisse allow_drain = false;
24418763cb45SJérôme Glisse }
24428763cb45SJérôme Glisse 
24438763cb45SJérôme Glisse if (isolate_lru_page(page)) {
24448c3328f1SJérôme Glisse if (remap) {
24458c3328f1SJérôme Glisse migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
24468c3328f1SJérôme Glisse migrate->cpages--;
24478c3328f1SJérôme Glisse restore++;
24488c3328f1SJérôme Glisse } else {
24498763cb45SJérôme Glisse migrate->src[i] = 0;
24508763cb45SJérôme Glisse unlock_page(page);
24518763cb45SJérôme Glisse migrate->cpages--;
24528763cb45SJérôme Glisse put_page(page);
24538c3328f1SJérôme Glisse }
24548763cb45SJérôme Glisse continue;
24558763cb45SJérôme Glisse }
24568763cb45SJérôme Glisse 
2457*a5430ddaSJérôme Glisse /* Drop the reference we took in collect */
2458*a5430ddaSJérôme Glisse put_page(page);
2459*a5430ddaSJérôme Glisse }
2460*a5430ddaSJérôme Glisse 
24618763cb45SJérôme Glisse if (!migrate_vma_check_page(page)) {
24628c3328f1SJérôme Glisse if (remap) {
24638c3328f1SJérôme Glisse migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
24648c3328f1SJérôme Glisse migrate->cpages--;
24658c3328f1SJérôme Glisse restore++;
24668c3328f1SJérôme Glisse 
2467*a5430ddaSJérôme Glisse if (!is_zone_device_page(page)) {
24688c3328f1SJérôme Glisse get_page(page);
24698c3328f1SJérôme Glisse putback_lru_page(page);
2470*a5430ddaSJérôme Glisse }
24718c3328f1SJérôme Glisse } else {
24728763cb45SJérôme Glisse migrate->src[i] = 0;
24738763cb45SJérôme Glisse unlock_page(page);
24748763cb45SJérôme Glisse migrate->cpages--;
24758763cb45SJérôme Glisse 
2476*a5430ddaSJérôme Glisse if (!is_zone_device_page(page))
24778763cb45SJérôme Glisse putback_lru_page(page);
2478*a5430ddaSJérôme Glisse else
2479*a5430ddaSJérôme Glisse put_page(page);
24808763cb45SJérôme Glisse }
24818763cb45SJérôme Glisse }
24828763cb45SJérôme Glisse }
24838763cb45SJérôme Glisse 
24848c3328f1SJérôme Glisse for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
24858c3328f1SJérôme Glisse struct page *page = migrate_pfn_to_page(migrate->src[i]);
24868c3328f1SJérôme Glisse 
24878c3328f1SJérôme Glisse if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
24888c3328f1SJérôme Glisse continue;
24898c3328f1SJérôme Glisse 
24908c3328f1SJérôme Glisse remove_migration_pte(page, migrate->vma, addr, page);
24918c3328f1SJérôme Glisse 
24928c3328f1SJérôme Glisse migrate->src[i] = 0;
24938c3328f1SJérôme Glisse unlock_page(page);
24948c3328f1SJérôme Glisse put_page(page);
24958c3328f1SJérôme Glisse restore--;
24968c3328f1SJérôme Glisse }
24978c3328f1SJérôme Glisse }
24988c3328f1SJérôme Glisse 
24998763cb45SJérôme Glisse /*
25008763cb45SJérôme Glisse * migrate_vma_unmap() - replace page mapping with special migration pte entry
25018763cb45SJérôme Glisse * @migrate: migrate struct containing all migration information
25028763cb45SJérôme Glisse *
25038763cb45SJérôme Glisse * Replace the page mapping (CPU page table pte) with a special migration pte
25048763cb45SJérôme Glisse * entry and check again if it has been pinned. Pinned pages are restored
25058763cb45SJérôme Glisse * because we cannot migrate them.
25068763cb45SJérôme Glisse *
25078763cb45SJérôme Glisse * This is the last step before we call the device driver callback to allocate
25088763cb45SJérôme Glisse * destination memory and copy the contents of the original page over to the new page.
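 *
 * Pages that are still mapped after try_to_unmap(), or that fail the pin
 * check, are restored: their special migration pte entries are replaced
 * with the original mapping by remove_migration_ptes(), and the pages are
 * unlocked and put back (on the lru for regular pages, or by dropping the
 * reference for device pages).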
25098763cb45SJérôme Glisse */ 25108763cb45SJérôme Glisse static void migrate_vma_unmap(struct migrate_vma *migrate) 25118763cb45SJérôme Glisse { 25128763cb45SJérôme Glisse int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; 25138763cb45SJérôme Glisse const unsigned long npages = migrate->npages; 25148763cb45SJérôme Glisse const unsigned long start = migrate->start; 25158763cb45SJérôme Glisse unsigned long addr, i, restore = 0; 25168763cb45SJérôme Glisse 25178763cb45SJérôme Glisse for (i = 0; i < npages; i++) { 25188763cb45SJérôme Glisse struct page *page = migrate_pfn_to_page(migrate->src[i]); 25198763cb45SJérôme Glisse 25208763cb45SJérôme Glisse if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE)) 25218763cb45SJérôme Glisse continue; 25228763cb45SJérôme Glisse 25238c3328f1SJérôme Glisse if (page_mapped(page)) { 25248763cb45SJérôme Glisse try_to_unmap(page, flags); 25258c3328f1SJérôme Glisse if (page_mapped(page)) 25268c3328f1SJérôme Glisse goto restore; 25278c3328f1SJérôme Glisse } 25288c3328f1SJérôme Glisse 25298c3328f1SJérôme Glisse if (migrate_vma_check_page(page)) 25308c3328f1SJérôme Glisse continue; 25318c3328f1SJérôme Glisse 25328c3328f1SJérôme Glisse restore: 25338763cb45SJérôme Glisse migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 25348763cb45SJérôme Glisse migrate->cpages--; 25358763cb45SJérôme Glisse restore++; 25368763cb45SJérôme Glisse } 25378763cb45SJérôme Glisse 25388763cb45SJérôme Glisse for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) { 25398763cb45SJérôme Glisse struct page *page = migrate_pfn_to_page(migrate->src[i]); 25408763cb45SJérôme Glisse 25418763cb45SJérôme Glisse if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) 25428763cb45SJérôme Glisse continue; 25438763cb45SJérôme Glisse 25448763cb45SJérôme Glisse remove_migration_ptes(page, page, false); 25458763cb45SJérôme Glisse 25468763cb45SJérôme Glisse migrate->src[i] = 0; 25478763cb45SJérôme Glisse unlock_page(page); 25488763cb45SJérôme Glisse restore--; 25498763cb45SJérôme Glisse 2550*a5430ddaSJérôme Glisse if (is_zone_device_page(page)) 2551*a5430ddaSJérôme Glisse put_page(page); 2552*a5430ddaSJérôme Glisse else 25538763cb45SJérôme Glisse putback_lru_page(page); 25548763cb45SJérôme Glisse } 25558763cb45SJérôme Glisse } 25568763cb45SJérôme Glisse 25578763cb45SJérôme Glisse /* 25588763cb45SJérôme Glisse * migrate_vma_pages() - migrate meta-data from src page to dst page 25598763cb45SJérôme Glisse * @migrate: migrate struct containing all migration information 25608763cb45SJérôme Glisse * 25618763cb45SJérôme Glisse * This migrates struct page meta-data from source struct page to destination 25628763cb45SJérôme Glisse * struct page. This effectively finishes the migration from source page to the 25638763cb45SJérôme Glisse * destination page. 
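 *
 * Note that the page contents themselves are not copied here: the driver's
 * alloc_and_copy() callback has already copied (or arranged a device copy
 * of) the data, which is why migrate_page() is called with the
 * MIGRATE_SYNC_NO_COPY mode.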
25648763cb45SJérôme Glisse */
25658763cb45SJérôme Glisse static void migrate_vma_pages(struct migrate_vma *migrate)
25668763cb45SJérôme Glisse {
25678763cb45SJérôme Glisse const unsigned long npages = migrate->npages;
25688763cb45SJérôme Glisse const unsigned long start = migrate->start;
25698763cb45SJérôme Glisse unsigned long addr, i;
25708763cb45SJérôme Glisse 
25718763cb45SJérôme Glisse for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
25728763cb45SJérôme Glisse struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
25738763cb45SJérôme Glisse struct page *page = migrate_pfn_to_page(migrate->src[i]);
25748763cb45SJérôme Glisse struct address_space *mapping;
25758763cb45SJérôme Glisse int r;
25768763cb45SJérôme Glisse 
25778763cb45SJérôme Glisse if (!page || !newpage)
25788763cb45SJérôme Glisse continue;
25798763cb45SJérôme Glisse if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
25808763cb45SJérôme Glisse continue;
25818763cb45SJérôme Glisse 
25828763cb45SJérôme Glisse mapping = page_mapping(page);
25838763cb45SJérôme Glisse 
2584*a5430ddaSJérôme Glisse if (is_zone_device_page(newpage)) {
2585*a5430ddaSJérôme Glisse if (is_device_private_page(newpage)) {
2586*a5430ddaSJérôme Glisse /*
2587*a5430ddaSJérôme Glisse * For now we only support private anonymous memory
2588*a5430ddaSJérôme Glisse * when migrating to un-addressable device memory.
2589*a5430ddaSJérôme Glisse */
2590*a5430ddaSJérôme Glisse if (mapping) {
2591*a5430ddaSJérôme Glisse migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2592*a5430ddaSJérôme Glisse continue;
2593*a5430ddaSJérôme Glisse }
2594*a5430ddaSJérôme Glisse } else {
2595*a5430ddaSJérôme Glisse /*
2596*a5430ddaSJérôme Glisse * Other types of ZONE_DEVICE pages are not
2597*a5430ddaSJérôme Glisse * supported.
2598*a5430ddaSJérôme Glisse */
2599*a5430ddaSJérôme Glisse migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2600*a5430ddaSJérôme Glisse continue;
2601*a5430ddaSJérôme Glisse }
2602*a5430ddaSJérôme Glisse }
2603*a5430ddaSJérôme Glisse 
26048763cb45SJérôme Glisse r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
26058763cb45SJérôme Glisse if (r != MIGRATEPAGE_SUCCESS)
26068763cb45SJérôme Glisse migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
26078763cb45SJérôme Glisse }
26088763cb45SJérôme Glisse }
26098763cb45SJérôme Glisse 
26108763cb45SJérôme Glisse /*
26118763cb45SJérôme Glisse * migrate_vma_finalize() - restore CPU page table entry
26128763cb45SJérôme Glisse * @migrate: migrate struct containing all migration information
26138763cb45SJérôme Glisse *
26148763cb45SJérôme Glisse * This replaces the special migration pte entry with either a mapping to the
26158763cb45SJérôme Glisse * new page if migration was successful for that page, or to the original page
26168763cb45SJérôme Glisse * otherwise.
26178763cb45SJérôme Glisse *
26188763cb45SJérôme Glisse * This also unlocks the pages and puts them back on the lru, or drops the extra
26198763cb45SJérôme Glisse * refcount, for device pages.
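 *
 * For a page whose migration failed (MIGRATE_PFN_MIGRATE cleared, or no
 * valid destination page), the destination page is discarded and the CPU
 * page table entry is pointed back at the original source page.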
26208763cb45SJérôme Glisse */
26218763cb45SJérôme Glisse static void migrate_vma_finalize(struct migrate_vma *migrate)
26228763cb45SJérôme Glisse {
26238763cb45SJérôme Glisse const unsigned long npages = migrate->npages;
26248763cb45SJérôme Glisse unsigned long i;
26258763cb45SJérôme Glisse 
26268763cb45SJérôme Glisse for (i = 0; i < npages; i++) {
26278763cb45SJérôme Glisse struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
26288763cb45SJérôme Glisse struct page *page = migrate_pfn_to_page(migrate->src[i]);
26298763cb45SJérôme Glisse 
26308763cb45SJérôme Glisse if (!page)
26318763cb45SJérôme Glisse continue;
26328763cb45SJérôme Glisse if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
26338763cb45SJérôme Glisse if (newpage) {
26348763cb45SJérôme Glisse unlock_page(newpage);
26358763cb45SJérôme Glisse put_page(newpage);
26368763cb45SJérôme Glisse }
26378763cb45SJérôme Glisse newpage = page;
26388763cb45SJérôme Glisse }
26398763cb45SJérôme Glisse 
26408763cb45SJérôme Glisse remove_migration_ptes(page, newpage, false);
26418763cb45SJérôme Glisse unlock_page(page);
26428763cb45SJérôme Glisse migrate->cpages--;
26438763cb45SJérôme Glisse 
2644*a5430ddaSJérôme Glisse if (is_zone_device_page(page))
2645*a5430ddaSJérôme Glisse put_page(page);
2646*a5430ddaSJérôme Glisse else
26478763cb45SJérôme Glisse putback_lru_page(page);
26488763cb45SJérôme Glisse 
26498763cb45SJérôme Glisse if (newpage != page) {
26508763cb45SJérôme Glisse unlock_page(newpage);
2651*a5430ddaSJérôme Glisse if (is_zone_device_page(newpage))
2652*a5430ddaSJérôme Glisse put_page(newpage);
2653*a5430ddaSJérôme Glisse else
26548763cb45SJérôme Glisse putback_lru_page(newpage);
26558763cb45SJérôme Glisse }
26568763cb45SJérôme Glisse }
26578763cb45SJérôme Glisse }
26588763cb45SJérôme Glisse 
26598763cb45SJérôme Glisse /*
26608763cb45SJérôme Glisse * migrate_vma() - migrate a range of memory inside vma
26618763cb45SJérôme Glisse *
26628763cb45SJérôme Glisse * @ops: migration callback for allocating destination memory and copying
26638763cb45SJérôme Glisse * @vma: virtual memory area containing the range to be migrated
26648763cb45SJérôme Glisse * @start: start address of the range to migrate (inclusive)
26658763cb45SJérôme Glisse * @end: end address of the range to migrate (exclusive)
26668763cb45SJérôme Glisse * @src: array of unsigned long holding source pfns encoded as migrate pfns
26678763cb45SJérôme Glisse * @dst: array of unsigned long holding destination pfns encoded as migrate pfns
26688763cb45SJérôme Glisse * @private: pointer passed back to each of the callbacks
26698763cb45SJérôme Glisse * Returns: 0 on success, error code otherwise
26708763cb45SJérôme Glisse *
26718763cb45SJérôme Glisse * This function tries to migrate a range of virtual addresses, using
26728763cb45SJérôme Glisse * callbacks to allocate and copy memory from source to destination. First it
26738763cb45SJérôme Glisse * collects all the pages backing each virtual address in the range, saving this
26748763cb45SJérôme Glisse * inside the src array. Then it locks those pages and unmaps them. Once the pages
26758763cb45SJérôme Glisse * are locked and unmapped, it checks whether each page is pinned or not. Pages
26768763cb45SJérôme Glisse * that aren't pinned have the MIGRATE_PFN_MIGRATE flag set (by this function)
26778763cb45SJérôme Glisse * in the corresponding src array entry. It then restores any pages that are
26788763cb45SJérôme Glisse * pinned, by remapping and unlocking those pages.
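 *
 * Each src/dst entry encodes a pfn shifted by MIGRATE_PFN_SHIFT, combined
 * with MIGRATE_PFN_* flag bits; migrate_pfn() builds such an entry and
 * migrate_pfn_to_page() converts one back to a struct page (see
 * include/linux/migrate.h).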
26798763cb45SJérôme Glisse *
26808763cb45SJérôme Glisse * At this point it calls the alloc_and_copy() callback. For documentation on
26818763cb45SJérôme Glisse * what is expected from that callback, see the struct migrate_vma_ops comments
26828763cb45SJérôme Glisse * in include/linux/migrate.h.
26838763cb45SJérôme Glisse *
26848763cb45SJérôme Glisse * After the alloc_and_copy() callback, this function goes over each entry in
26858763cb45SJérôme Glisse * the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags
26868763cb45SJérôme Glisse * set. If the corresponding entry in the dst array has the MIGRATE_PFN_VALID
26878763cb45SJérôme Glisse * flag set, then the function tries to migrate struct page information from
26888763cb45SJérôme Glisse * the source struct page to the destination struct page. If it fails to
26898763cb45SJérôme Glisse * migrate the struct page information, then it clears the MIGRATE_PFN_MIGRATE
26908763cb45SJérôme Glisse * flag in the src array.
26918763cb45SJérôme Glisse *
26928763cb45SJérôme Glisse * At this point all successfully migrated pages have an entry in the src
26938763cb45SJérôme Glisse * array with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags set and the
26948763cb45SJérôme Glisse * dst array entry with the MIGRATE_PFN_VALID flag set.
26958763cb45SJérôme Glisse *
26968763cb45SJérôme Glisse * It then calls the finalize_and_map() callback. See the comments for struct
26978763cb45SJérôme Glisse * migrate_vma_ops in include/linux/migrate.h for details about
26988763cb45SJérôme Glisse * finalize_and_map() behavior.
26998763cb45SJérôme Glisse *
27008763cb45SJérôme Glisse * After the finalize_and_map() callback, for successfully migrated pages, this
27018763cb45SJérôme Glisse * function updates the CPU page table to point to the new pages, otherwise it
27028763cb45SJérôme Glisse * restores the CPU page table to point to the original source pages.
27038763cb45SJérôme Glisse *
27048763cb45SJérôme Glisse * This function returns 0 after the above steps, even if no pages were
27058763cb45SJérôme Glisse * migrated (it only returns an error if any of the arguments are invalid).
27068763cb45SJérôme Glisse *
27078763cb45SJérôme Glisse * Both the src and dst arrays must be big enough for (end - start) >>
27088763cb45SJérôme Glisse * PAGE_SHIFT unsigned long entries.
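 *
 * A minimal usage sketch follows. It is illustrative only: the ops structure,
 * callback names and private data are hypothetical driver code, not part of
 * this file; the caller must hold mmap_sem for read, as the page table walk
 * requires it.
 *
 *	static const struct migrate_vma_ops my_migrate_ops = {
 *		.alloc_and_copy		= my_alloc_and_copy,
 *		.finalize_and_map	= my_finalize_and_map,
 *	};
 *
 *	unsigned long src[64], dst[64];
 *	unsigned long start = addr & PAGE_MASK;
 *	unsigned long end = start + 64 * PAGE_SIZE;
 *	int ret;
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma_intersection(mm, start, end);
 *	ret = migrate_vma(&my_migrate_ops, vma, start, end, src, dst,
 *			  my_private_data);
 *	up_read(&mm->mmap_sem);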
27098763cb45SJérôme Glisse */
27108763cb45SJérôme Glisse int migrate_vma(const struct migrate_vma_ops *ops,
27118763cb45SJérôme Glisse struct vm_area_struct *vma,
27128763cb45SJérôme Glisse unsigned long start,
27138763cb45SJérôme Glisse unsigned long end,
27148763cb45SJérôme Glisse unsigned long *src,
27158763cb45SJérôme Glisse unsigned long *dst,
27168763cb45SJérôme Glisse void *private)
27178763cb45SJérôme Glisse {
27188763cb45SJérôme Glisse struct migrate_vma migrate;
27198763cb45SJérôme Glisse 
27208763cb45SJérôme Glisse /* Sanity check the arguments */
27218763cb45SJérôme Glisse start &= PAGE_MASK;
27228763cb45SJérôme Glisse end &= PAGE_MASK;
27238763cb45SJérôme Glisse if (!vma || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL))
27248763cb45SJérôme Glisse return -EINVAL;
27258763cb45SJérôme Glisse if (start < vma->vm_start || start >= vma->vm_end)
27268763cb45SJérôme Glisse return -EINVAL;
27278763cb45SJérôme Glisse if (end <= vma->vm_start || end > vma->vm_end)
27288763cb45SJérôme Glisse return -EINVAL;
27298763cb45SJérôme Glisse if (!ops || !src || !dst || start >= end)
27308763cb45SJérôme Glisse return -EINVAL;
27318763cb45SJérôme Glisse 
27328763cb45SJérôme Glisse memset(src, 0, sizeof(*src) * ((end - start) >> PAGE_SHIFT));
27338763cb45SJérôme Glisse migrate.src = src;
27348763cb45SJérôme Glisse migrate.dst = dst;
27358763cb45SJérôme Glisse migrate.start = start;
27368763cb45SJérôme Glisse migrate.npages = 0;
27378763cb45SJérôme Glisse migrate.cpages = 0;
27388763cb45SJérôme Glisse migrate.end = end;
27398763cb45SJérôme Glisse migrate.vma = vma;
27408763cb45SJérôme Glisse 
27418763cb45SJérôme Glisse /* Collect, and try to unmap source pages */
27428763cb45SJérôme Glisse migrate_vma_collect(&migrate);
27438763cb45SJérôme Glisse if (!migrate.cpages)
27448763cb45SJérôme Glisse return 0;
27458763cb45SJérôme Glisse 
27468763cb45SJérôme Glisse /* Lock and isolate pages */
27478763cb45SJérôme Glisse migrate_vma_prepare(&migrate);
27488763cb45SJérôme Glisse if (!migrate.cpages)
27498763cb45SJérôme Glisse return 0;
27508763cb45SJérôme Glisse 
27518763cb45SJérôme Glisse /* Unmap pages */
27528763cb45SJérôme Glisse migrate_vma_unmap(&migrate);
27538763cb45SJérôme Glisse if (!migrate.cpages)
27548763cb45SJérôme Glisse return 0;
27558763cb45SJérôme Glisse 
27568763cb45SJérôme Glisse /*
27578763cb45SJérôme Glisse * At this point pages are locked and unmapped, and thus they have
27588763cb45SJérôme Glisse * stable content and can safely be copied to destination memory that
27598763cb45SJérôme Glisse * is allocated by the callback.
27608763cb45SJérôme Glisse *
27618763cb45SJérôme Glisse * Note that migration can fail in migrate_vma_pages() for each
27628763cb45SJérôme Glisse * individual page.
27638763cb45SJérôme Glisse */
27648763cb45SJérôme Glisse ops->alloc_and_copy(vma, src, dst, start, end, private);
27658763cb45SJérôme Glisse 
27668763cb45SJérôme Glisse /* This does the real migration of struct page */
27678763cb45SJérôme Glisse migrate_vma_pages(&migrate);
27688763cb45SJérôme Glisse 
27698763cb45SJérôme Glisse ops->finalize_and_map(vma, src, dst, start, end, private);
27708763cb45SJérôme Glisse 
27718763cb45SJérôme Glisse /* Unlock and remap pages */
27728763cb45SJérôme Glisse migrate_vma_finalize(&migrate);
27738763cb45SJérôme Glisse 
27748763cb45SJérôme Glisse return 0;
27758763cb45SJérôme Glisse }
27768763cb45SJérôme Glisse EXPORT_SYMBOL(migrate_vma);