11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * mm/rmap.c - physical to virtual reverse mappings 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright 2001, Rik van Riel <riel@conectiva.com.br> 51da177e4SLinus Torvalds * Released under the General Public License (GPL). 61da177e4SLinus Torvalds * 71da177e4SLinus Torvalds * Simple, low overhead reverse mapping scheme. 81da177e4SLinus Torvalds * Please try to keep this thing as modular as possible. 91da177e4SLinus Torvalds * 101da177e4SLinus Torvalds * Provides methods for unmapping each kind of mapped page: 111da177e4SLinus Torvalds * the anon methods track anonymous pages, and 121da177e4SLinus Torvalds * the file methods track pages belonging to an inode. 131da177e4SLinus Torvalds * 141da177e4SLinus Torvalds * Original design by Rik van Riel <riel@conectiva.com.br> 2001 151da177e4SLinus Torvalds * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004 161da177e4SLinus Torvalds * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004 1798f32602SHugh Dickins * Contributions by Hugh Dickins 2003, 2004 181da177e4SLinus Torvalds */ 191da177e4SLinus Torvalds 201da177e4SLinus Torvalds /* 211da177e4SLinus Torvalds * Lock ordering in mm: 221da177e4SLinus Torvalds * 231b1dcc1bSJes Sorensen * inode->i_mutex (while writing or truncating, not reading or faulting) 241da177e4SLinus Torvalds * mm->mmap_sem 251da177e4SLinus Torvalds * page->flags PG_locked (lock_page) 263d48ae45SPeter Zijlstra * mapping->i_mmap_mutex 272b575eb6SPeter Zijlstra * anon_vma->mutex 28b8072f09SHugh Dickins * mm->page_table_lock or pte_lock 29053837fcSNick Piggin * zone->lru_lock (in mark_page_accessed, isolate_lru_page) 305d337b91SHugh Dickins * swap_lock (in swap_duplicate, swap_info_get) 311da177e4SLinus Torvalds * mmlist_lock (in mmput, drain_mmlist and others) 321da177e4SLinus Torvalds * mapping->private_lock (in __set_page_dirty_buffers) 33250df6edSDave Chinner * inode->i_lock (in set_page_dirty's __mark_inode_dirty) 34f758eeabSChristoph Hellwig * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty) 351da177e4SLinus Torvalds * sb_lock (within inode_lock in fs/fs-writeback.c) 361da177e4SLinus Torvalds * mapping->tree_lock (widely used, in set_page_dirty, 371da177e4SLinus Torvalds * in arch-dependent flush_dcache_mmap_lock, 38f758eeabSChristoph Hellwig * within bdi.wb->list_lock in __sync_single_inode) 396a46079cSAndi Kleen * 409b679320SPeter Zijlstra * anon_vma->mutex,mapping->i_mutex (memory_failure, collect_procs_anon) 416a46079cSAndi Kleen * ->tasklist_lock 426a46079cSAndi Kleen * pte map lock 431da177e4SLinus Torvalds */ 441da177e4SLinus Torvalds 451da177e4SLinus Torvalds #include <linux/mm.h> 461da177e4SLinus Torvalds #include <linux/pagemap.h> 471da177e4SLinus Torvalds #include <linux/swap.h> 481da177e4SLinus Torvalds #include <linux/swapops.h> 491da177e4SLinus Torvalds #include <linux/slab.h> 501da177e4SLinus Torvalds #include <linux/init.h> 515ad64688SHugh Dickins #include <linux/ksm.h> 521da177e4SLinus Torvalds #include <linux/rmap.h> 531da177e4SLinus Torvalds #include <linux/rcupdate.h> 54b95f1b31SPaul Gortmaker #include <linux/export.h> 558a9f3ccdSBalbir Singh #include <linux/memcontrol.h> 56cddb8a5cSAndrea Arcangeli #include <linux/mmu_notifier.h> 5764cdd548SKOSAKI Motohiro #include <linux/migrate.h> 580fe6e20bSNaoya Horiguchi #include <linux/hugetlb.h> 59*ef5d437fSJan Kara #include <linux/backing-dev.h> 601da177e4SLinus Torvalds 611da177e4SLinus Torvalds #include <asm/tlbflush.h> 621da177e4SLinus Torvalds 63b291f000SNick Piggin #include "internal.h" 64b291f000SNick Piggin 65fdd2e5f8SAdrian Bunk static struct kmem_cache *anon_vma_cachep; 665beb4930SRik van Riel static struct kmem_cache *anon_vma_chain_cachep; 67fdd2e5f8SAdrian Bunk 68fdd2e5f8SAdrian Bunk static inline struct anon_vma *anon_vma_alloc(void) 69fdd2e5f8SAdrian Bunk { 7001d8b20dSPeter Zijlstra struct anon_vma *anon_vma; 7101d8b20dSPeter Zijlstra 7201d8b20dSPeter Zijlstra anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); 7301d8b20dSPeter Zijlstra if (anon_vma) { 7401d8b20dSPeter Zijlstra atomic_set(&anon_vma->refcount, 1); 7501d8b20dSPeter Zijlstra /* 7601d8b20dSPeter Zijlstra * Initialise the anon_vma root to point to itself. If called 7701d8b20dSPeter Zijlstra * from fork, the root will be reset to the parents anon_vma. 7801d8b20dSPeter Zijlstra */ 7901d8b20dSPeter Zijlstra anon_vma->root = anon_vma; 80fdd2e5f8SAdrian Bunk } 81fdd2e5f8SAdrian Bunk 8201d8b20dSPeter Zijlstra return anon_vma; 8301d8b20dSPeter Zijlstra } 8401d8b20dSPeter Zijlstra 8501d8b20dSPeter Zijlstra static inline void anon_vma_free(struct anon_vma *anon_vma) 86fdd2e5f8SAdrian Bunk { 8701d8b20dSPeter Zijlstra VM_BUG_ON(atomic_read(&anon_vma->refcount)); 8888c22088SPeter Zijlstra 8988c22088SPeter Zijlstra /* 9088c22088SPeter Zijlstra * Synchronize against page_lock_anon_vma() such that 9188c22088SPeter Zijlstra * we can safely hold the lock without the anon_vma getting 9288c22088SPeter Zijlstra * freed. 9388c22088SPeter Zijlstra * 9488c22088SPeter Zijlstra * Relies on the full mb implied by the atomic_dec_and_test() from 9588c22088SPeter Zijlstra * put_anon_vma() against the acquire barrier implied by 9688c22088SPeter Zijlstra * mutex_trylock() from page_lock_anon_vma(). This orders: 9788c22088SPeter Zijlstra * 9888c22088SPeter Zijlstra * page_lock_anon_vma() VS put_anon_vma() 9988c22088SPeter Zijlstra * mutex_trylock() atomic_dec_and_test() 10088c22088SPeter Zijlstra * LOCK MB 10188c22088SPeter Zijlstra * atomic_read() mutex_is_locked() 10288c22088SPeter Zijlstra * 10388c22088SPeter Zijlstra * LOCK should suffice since the actual taking of the lock must 10488c22088SPeter Zijlstra * happen _before_ what follows. 10588c22088SPeter Zijlstra */ 10688c22088SPeter Zijlstra if (mutex_is_locked(&anon_vma->root->mutex)) { 10788c22088SPeter Zijlstra anon_vma_lock(anon_vma); 10888c22088SPeter Zijlstra anon_vma_unlock(anon_vma); 10988c22088SPeter Zijlstra } 11088c22088SPeter Zijlstra 111fdd2e5f8SAdrian Bunk kmem_cache_free(anon_vma_cachep, anon_vma); 112fdd2e5f8SAdrian Bunk } 1131da177e4SLinus Torvalds 114dd34739cSLinus Torvalds static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) 1155beb4930SRik van Riel { 116dd34739cSLinus Torvalds return kmem_cache_alloc(anon_vma_chain_cachep, gfp); 1175beb4930SRik van Riel } 1185beb4930SRik van Riel 119e574b5fdSNamhyung Kim static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) 1205beb4930SRik van Riel { 1215beb4930SRik van Riel kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); 1225beb4930SRik van Riel } 1235beb4930SRik van Riel 1246583a843SKautuk Consul static void anon_vma_chain_link(struct vm_area_struct *vma, 1256583a843SKautuk Consul struct anon_vma_chain *avc, 1266583a843SKautuk Consul struct anon_vma *anon_vma) 1276583a843SKautuk Consul { 1286583a843SKautuk Consul avc->vma = vma; 1296583a843SKautuk Consul avc->anon_vma = anon_vma; 1306583a843SKautuk Consul list_add(&avc->same_vma, &vma->anon_vma_chain); 131bf181b9fSMichel Lespinasse anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); 1326583a843SKautuk Consul } 1336583a843SKautuk Consul 134d9d332e0SLinus Torvalds /** 135d9d332e0SLinus Torvalds * anon_vma_prepare - attach an anon_vma to a memory region 136d9d332e0SLinus Torvalds * @vma: the memory region in question 137d9d332e0SLinus Torvalds * 138d9d332e0SLinus Torvalds * This makes sure the memory mapping described by 'vma' has 139d9d332e0SLinus Torvalds * an 'anon_vma' attached to it, so that we can associate the 140d9d332e0SLinus Torvalds * anonymous pages mapped into it with that anon_vma. 141d9d332e0SLinus Torvalds * 142d9d332e0SLinus Torvalds * The common case will be that we already have one, but if 14323a0790aSFigo.zhang * not we either need to find an adjacent mapping that we 144d9d332e0SLinus Torvalds * can re-use the anon_vma from (very common when the only 145d9d332e0SLinus Torvalds * reason for splitting a vma has been mprotect()), or we 146d9d332e0SLinus Torvalds * allocate a new one. 147d9d332e0SLinus Torvalds * 148d9d332e0SLinus Torvalds * Anon-vma allocations are very subtle, because we may have 149d9d332e0SLinus Torvalds * optimistically looked up an anon_vma in page_lock_anon_vma() 150d9d332e0SLinus Torvalds * and that may actually touch the spinlock even in the newly 151d9d332e0SLinus Torvalds * allocated vma (it depends on RCU to make sure that the 152d9d332e0SLinus Torvalds * anon_vma isn't actually destroyed). 153d9d332e0SLinus Torvalds * 154d9d332e0SLinus Torvalds * As a result, we need to do proper anon_vma locking even 155d9d332e0SLinus Torvalds * for the new allocation. At the same time, we do not want 156d9d332e0SLinus Torvalds * to do any locking for the common case of already having 157d9d332e0SLinus Torvalds * an anon_vma. 158d9d332e0SLinus Torvalds * 159d9d332e0SLinus Torvalds * This must be called with the mmap_sem held for reading. 160d9d332e0SLinus Torvalds */ 1611da177e4SLinus Torvalds int anon_vma_prepare(struct vm_area_struct *vma) 1621da177e4SLinus Torvalds { 1631da177e4SLinus Torvalds struct anon_vma *anon_vma = vma->anon_vma; 1645beb4930SRik van Riel struct anon_vma_chain *avc; 1651da177e4SLinus Torvalds 1661da177e4SLinus Torvalds might_sleep(); 1671da177e4SLinus Torvalds if (unlikely(!anon_vma)) { 1681da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 169d9d332e0SLinus Torvalds struct anon_vma *allocated; 1701da177e4SLinus Torvalds 171dd34739cSLinus Torvalds avc = anon_vma_chain_alloc(GFP_KERNEL); 1725beb4930SRik van Riel if (!avc) 1735beb4930SRik van Riel goto out_enomem; 1745beb4930SRik van Riel 1751da177e4SLinus Torvalds anon_vma = find_mergeable_anon_vma(vma); 1761da177e4SLinus Torvalds allocated = NULL; 177d9d332e0SLinus Torvalds if (!anon_vma) { 1781da177e4SLinus Torvalds anon_vma = anon_vma_alloc(); 1791da177e4SLinus Torvalds if (unlikely(!anon_vma)) 1805beb4930SRik van Riel goto out_enomem_free_avc; 1811da177e4SLinus Torvalds allocated = anon_vma; 1821da177e4SLinus Torvalds } 1831da177e4SLinus Torvalds 184cba48b98SRik van Riel anon_vma_lock(anon_vma); 1851da177e4SLinus Torvalds /* page_table_lock to protect against threads */ 1861da177e4SLinus Torvalds spin_lock(&mm->page_table_lock); 1871da177e4SLinus Torvalds if (likely(!vma->anon_vma)) { 1881da177e4SLinus Torvalds vma->anon_vma = anon_vma; 1896583a843SKautuk Consul anon_vma_chain_link(vma, avc, anon_vma); 1901da177e4SLinus Torvalds allocated = NULL; 19131f2b0ebSOleg Nesterov avc = NULL; 1921da177e4SLinus Torvalds } 1931da177e4SLinus Torvalds spin_unlock(&mm->page_table_lock); 194cba48b98SRik van Riel anon_vma_unlock(anon_vma); 19531f2b0ebSOleg Nesterov 19631f2b0ebSOleg Nesterov if (unlikely(allocated)) 19701d8b20dSPeter Zijlstra put_anon_vma(allocated); 19831f2b0ebSOleg Nesterov if (unlikely(avc)) 1995beb4930SRik van Riel anon_vma_chain_free(avc); 2005beb4930SRik van Riel } 2011da177e4SLinus Torvalds return 0; 2025beb4930SRik van Riel 2035beb4930SRik van Riel out_enomem_free_avc: 2045beb4930SRik van Riel anon_vma_chain_free(avc); 2055beb4930SRik van Riel out_enomem: 2065beb4930SRik van Riel return -ENOMEM; 2071da177e4SLinus Torvalds } 2081da177e4SLinus Torvalds 209bb4aa396SLinus Torvalds /* 210bb4aa396SLinus Torvalds * This is a useful helper function for locking the anon_vma root as 211bb4aa396SLinus Torvalds * we traverse the vma->anon_vma_chain, looping over anon_vma's that 212bb4aa396SLinus Torvalds * have the same vma. 213bb4aa396SLinus Torvalds * 214bb4aa396SLinus Torvalds * Such anon_vma's should have the same root, so you'd expect to see 215bb4aa396SLinus Torvalds * just a single mutex_lock for the whole traversal. 216bb4aa396SLinus Torvalds */ 217bb4aa396SLinus Torvalds static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma) 218bb4aa396SLinus Torvalds { 219bb4aa396SLinus Torvalds struct anon_vma *new_root = anon_vma->root; 220bb4aa396SLinus Torvalds if (new_root != root) { 221bb4aa396SLinus Torvalds if (WARN_ON_ONCE(root)) 222bb4aa396SLinus Torvalds mutex_unlock(&root->mutex); 223bb4aa396SLinus Torvalds root = new_root; 224bb4aa396SLinus Torvalds mutex_lock(&root->mutex); 225bb4aa396SLinus Torvalds } 226bb4aa396SLinus Torvalds return root; 227bb4aa396SLinus Torvalds } 228bb4aa396SLinus Torvalds 229bb4aa396SLinus Torvalds static inline void unlock_anon_vma_root(struct anon_vma *root) 230bb4aa396SLinus Torvalds { 231bb4aa396SLinus Torvalds if (root) 232bb4aa396SLinus Torvalds mutex_unlock(&root->mutex); 233bb4aa396SLinus Torvalds } 234bb4aa396SLinus Torvalds 2355beb4930SRik van Riel /* 2365beb4930SRik van Riel * Attach the anon_vmas from src to dst. 2375beb4930SRik van Riel * Returns 0 on success, -ENOMEM on failure. 2385beb4930SRik van Riel */ 2395beb4930SRik van Riel int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) 2405beb4930SRik van Riel { 2415beb4930SRik van Riel struct anon_vma_chain *avc, *pavc; 242bb4aa396SLinus Torvalds struct anon_vma *root = NULL; 2435beb4930SRik van Riel 244646d87b4SLinus Torvalds list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { 245bb4aa396SLinus Torvalds struct anon_vma *anon_vma; 246bb4aa396SLinus Torvalds 247dd34739cSLinus Torvalds avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); 248dd34739cSLinus Torvalds if (unlikely(!avc)) { 249dd34739cSLinus Torvalds unlock_anon_vma_root(root); 250dd34739cSLinus Torvalds root = NULL; 251dd34739cSLinus Torvalds avc = anon_vma_chain_alloc(GFP_KERNEL); 2525beb4930SRik van Riel if (!avc) 2535beb4930SRik van Riel goto enomem_failure; 254dd34739cSLinus Torvalds } 255bb4aa396SLinus Torvalds anon_vma = pavc->anon_vma; 256bb4aa396SLinus Torvalds root = lock_anon_vma_root(root, anon_vma); 257bb4aa396SLinus Torvalds anon_vma_chain_link(dst, avc, anon_vma); 2585beb4930SRik van Riel } 259bb4aa396SLinus Torvalds unlock_anon_vma_root(root); 2605beb4930SRik van Riel return 0; 2615beb4930SRik van Riel 2625beb4930SRik van Riel enomem_failure: 2635beb4930SRik van Riel unlink_anon_vmas(dst); 2645beb4930SRik van Riel return -ENOMEM; 2651da177e4SLinus Torvalds } 2661da177e4SLinus Torvalds 2675beb4930SRik van Riel /* 2685beb4930SRik van Riel * Attach vma to its own anon_vma, as well as to the anon_vmas that 2695beb4930SRik van Riel * the corresponding VMA in the parent process is attached to. 2705beb4930SRik van Riel * Returns 0 on success, non-zero on failure. 2715beb4930SRik van Riel */ 2725beb4930SRik van Riel int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) 2731da177e4SLinus Torvalds { 2745beb4930SRik van Riel struct anon_vma_chain *avc; 2755beb4930SRik van Riel struct anon_vma *anon_vma; 2765beb4930SRik van Riel 2775beb4930SRik van Riel /* Don't bother if the parent process has no anon_vma here. */ 2785beb4930SRik van Riel if (!pvma->anon_vma) 2795beb4930SRik van Riel return 0; 2805beb4930SRik van Riel 2815beb4930SRik van Riel /* 2825beb4930SRik van Riel * First, attach the new VMA to the parent VMA's anon_vmas, 2835beb4930SRik van Riel * so rmap can find non-COWed pages in child processes. 2845beb4930SRik van Riel */ 2855beb4930SRik van Riel if (anon_vma_clone(vma, pvma)) 2865beb4930SRik van Riel return -ENOMEM; 2875beb4930SRik van Riel 2885beb4930SRik van Riel /* Then add our own anon_vma. */ 2895beb4930SRik van Riel anon_vma = anon_vma_alloc(); 2905beb4930SRik van Riel if (!anon_vma) 2915beb4930SRik van Riel goto out_error; 292dd34739cSLinus Torvalds avc = anon_vma_chain_alloc(GFP_KERNEL); 2935beb4930SRik van Riel if (!avc) 2945beb4930SRik van Riel goto out_error_free_anon_vma; 2955c341ee1SRik van Riel 2965c341ee1SRik van Riel /* 2975c341ee1SRik van Riel * The root anon_vma's spinlock is the lock actually used when we 2985c341ee1SRik van Riel * lock any of the anon_vmas in this anon_vma tree. 2995c341ee1SRik van Riel */ 3005c341ee1SRik van Riel anon_vma->root = pvma->anon_vma->root; 30176545066SRik van Riel /* 30201d8b20dSPeter Zijlstra * With refcounts, an anon_vma can stay around longer than the 30301d8b20dSPeter Zijlstra * process it belongs to. The root anon_vma needs to be pinned until 30401d8b20dSPeter Zijlstra * this anon_vma is freed, because the lock lives in the root. 30576545066SRik van Riel */ 30676545066SRik van Riel get_anon_vma(anon_vma->root); 3075beb4930SRik van Riel /* Mark this anon_vma as the one where our new (COWed) pages go. */ 3085beb4930SRik van Riel vma->anon_vma = anon_vma; 309bb4aa396SLinus Torvalds anon_vma_lock(anon_vma); 3105c341ee1SRik van Riel anon_vma_chain_link(vma, avc, anon_vma); 311bb4aa396SLinus Torvalds anon_vma_unlock(anon_vma); 3125beb4930SRik van Riel 3135beb4930SRik van Riel return 0; 3145beb4930SRik van Riel 3155beb4930SRik van Riel out_error_free_anon_vma: 31601d8b20dSPeter Zijlstra put_anon_vma(anon_vma); 3175beb4930SRik van Riel out_error: 3184946d54cSRik van Riel unlink_anon_vmas(vma); 3195beb4930SRik van Riel return -ENOMEM; 3205beb4930SRik van Riel } 3215beb4930SRik van Riel 3225beb4930SRik van Riel void unlink_anon_vmas(struct vm_area_struct *vma) 3235beb4930SRik van Riel { 3245beb4930SRik van Riel struct anon_vma_chain *avc, *next; 325eee2acbaSPeter Zijlstra struct anon_vma *root = NULL; 3265beb4930SRik van Riel 3275c341ee1SRik van Riel /* 3285c341ee1SRik van Riel * Unlink each anon_vma chained to the VMA. This list is ordered 3295c341ee1SRik van Riel * from newest to oldest, ensuring the root anon_vma gets freed last. 3305c341ee1SRik van Riel */ 3315beb4930SRik van Riel list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 332eee2acbaSPeter Zijlstra struct anon_vma *anon_vma = avc->anon_vma; 333eee2acbaSPeter Zijlstra 334eee2acbaSPeter Zijlstra root = lock_anon_vma_root(root, anon_vma); 335bf181b9fSMichel Lespinasse anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); 336eee2acbaSPeter Zijlstra 337eee2acbaSPeter Zijlstra /* 338eee2acbaSPeter Zijlstra * Leave empty anon_vmas on the list - we'll need 339eee2acbaSPeter Zijlstra * to free them outside the lock. 340eee2acbaSPeter Zijlstra */ 341bf181b9fSMichel Lespinasse if (RB_EMPTY_ROOT(&anon_vma->rb_root)) 342eee2acbaSPeter Zijlstra continue; 343eee2acbaSPeter Zijlstra 344eee2acbaSPeter Zijlstra list_del(&avc->same_vma); 345eee2acbaSPeter Zijlstra anon_vma_chain_free(avc); 346eee2acbaSPeter Zijlstra } 347eee2acbaSPeter Zijlstra unlock_anon_vma_root(root); 348eee2acbaSPeter Zijlstra 349eee2acbaSPeter Zijlstra /* 350eee2acbaSPeter Zijlstra * Iterate the list once more, it now only contains empty and unlinked 351eee2acbaSPeter Zijlstra * anon_vmas, destroy them. Could not do before due to __put_anon_vma() 352eee2acbaSPeter Zijlstra * needing to acquire the anon_vma->root->mutex. 353eee2acbaSPeter Zijlstra */ 354eee2acbaSPeter Zijlstra list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 355eee2acbaSPeter Zijlstra struct anon_vma *anon_vma = avc->anon_vma; 356eee2acbaSPeter Zijlstra 357eee2acbaSPeter Zijlstra put_anon_vma(anon_vma); 358eee2acbaSPeter Zijlstra 3595beb4930SRik van Riel list_del(&avc->same_vma); 3605beb4930SRik van Riel anon_vma_chain_free(avc); 3615beb4930SRik van Riel } 3625beb4930SRik van Riel } 3635beb4930SRik van Riel 36451cc5068SAlexey Dobriyan static void anon_vma_ctor(void *data) 3651da177e4SLinus Torvalds { 3661da177e4SLinus Torvalds struct anon_vma *anon_vma = data; 3671da177e4SLinus Torvalds 3682b575eb6SPeter Zijlstra mutex_init(&anon_vma->mutex); 36983813267SPeter Zijlstra atomic_set(&anon_vma->refcount, 0); 370bf181b9fSMichel Lespinasse anon_vma->rb_root = RB_ROOT; 3711da177e4SLinus Torvalds } 3721da177e4SLinus Torvalds 3731da177e4SLinus Torvalds void __init anon_vma_init(void) 3741da177e4SLinus Torvalds { 3751da177e4SLinus Torvalds anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), 37620c2df83SPaul Mundt 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor); 3775beb4930SRik van Riel anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC); 3781da177e4SLinus Torvalds } 3791da177e4SLinus Torvalds 3801da177e4SLinus Torvalds /* 3816111e4caSPeter Zijlstra * Getting a lock on a stable anon_vma from a page off the LRU is tricky! 3826111e4caSPeter Zijlstra * 3836111e4caSPeter Zijlstra * Since there is no serialization what so ever against page_remove_rmap() 3846111e4caSPeter Zijlstra * the best this function can do is return a locked anon_vma that might 3856111e4caSPeter Zijlstra * have been relevant to this page. 3866111e4caSPeter Zijlstra * 3876111e4caSPeter Zijlstra * The page might have been remapped to a different anon_vma or the anon_vma 3886111e4caSPeter Zijlstra * returned may already be freed (and even reused). 3896111e4caSPeter Zijlstra * 390bc658c96SPeter Zijlstra * In case it was remapped to a different anon_vma, the new anon_vma will be a 391bc658c96SPeter Zijlstra * child of the old anon_vma, and the anon_vma lifetime rules will therefore 392bc658c96SPeter Zijlstra * ensure that any anon_vma obtained from the page will still be valid for as 393bc658c96SPeter Zijlstra * long as we observe page_mapped() [ hence all those page_mapped() tests ]. 394bc658c96SPeter Zijlstra * 3956111e4caSPeter Zijlstra * All users of this function must be very careful when walking the anon_vma 3966111e4caSPeter Zijlstra * chain and verify that the page in question is indeed mapped in it 3976111e4caSPeter Zijlstra * [ something equivalent to page_mapped_in_vma() ]. 3986111e4caSPeter Zijlstra * 3996111e4caSPeter Zijlstra * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap() 4006111e4caSPeter Zijlstra * that the anon_vma pointer from page->mapping is valid if there is a 4016111e4caSPeter Zijlstra * mapcount, we can dereference the anon_vma after observing those. 4021da177e4SLinus Torvalds */ 403746b18d4SPeter Zijlstra struct anon_vma *page_get_anon_vma(struct page *page) 4041da177e4SLinus Torvalds { 405746b18d4SPeter Zijlstra struct anon_vma *anon_vma = NULL; 4061da177e4SLinus Torvalds unsigned long anon_mapping; 4071da177e4SLinus Torvalds 4081da177e4SLinus Torvalds rcu_read_lock(); 40980e14822SHugh Dickins anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping); 4103ca7b3c5SHugh Dickins if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) 4111da177e4SLinus Torvalds goto out; 4121da177e4SLinus Torvalds if (!page_mapped(page)) 4131da177e4SLinus Torvalds goto out; 4141da177e4SLinus Torvalds 4151da177e4SLinus Torvalds anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); 416746b18d4SPeter Zijlstra if (!atomic_inc_not_zero(&anon_vma->refcount)) { 417746b18d4SPeter Zijlstra anon_vma = NULL; 418746b18d4SPeter Zijlstra goto out; 419746b18d4SPeter Zijlstra } 420f1819427SHugh Dickins 421f1819427SHugh Dickins /* 422f1819427SHugh Dickins * If this page is still mapped, then its anon_vma cannot have been 423746b18d4SPeter Zijlstra * freed. But if it has been unmapped, we have no security against the 424746b18d4SPeter Zijlstra * anon_vma structure being freed and reused (for another anon_vma: 425746b18d4SPeter Zijlstra * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero() 426746b18d4SPeter Zijlstra * above cannot corrupt). 427f1819427SHugh Dickins */ 428746b18d4SPeter Zijlstra if (!page_mapped(page)) { 429746b18d4SPeter Zijlstra put_anon_vma(anon_vma); 430746b18d4SPeter Zijlstra anon_vma = NULL; 431746b18d4SPeter Zijlstra } 4321da177e4SLinus Torvalds out: 4331da177e4SLinus Torvalds rcu_read_unlock(); 434746b18d4SPeter Zijlstra 435746b18d4SPeter Zijlstra return anon_vma; 436746b18d4SPeter Zijlstra } 437746b18d4SPeter Zijlstra 43888c22088SPeter Zijlstra /* 43988c22088SPeter Zijlstra * Similar to page_get_anon_vma() except it locks the anon_vma. 44088c22088SPeter Zijlstra * 44188c22088SPeter Zijlstra * Its a little more complex as it tries to keep the fast path to a single 44288c22088SPeter Zijlstra * atomic op -- the trylock. If we fail the trylock, we fall back to getting a 44388c22088SPeter Zijlstra * reference like with page_get_anon_vma() and then block on the mutex. 44488c22088SPeter Zijlstra */ 445746b18d4SPeter Zijlstra struct anon_vma *page_lock_anon_vma(struct page *page) 446746b18d4SPeter Zijlstra { 44788c22088SPeter Zijlstra struct anon_vma *anon_vma = NULL; 448eee0f252SHugh Dickins struct anon_vma *root_anon_vma; 44988c22088SPeter Zijlstra unsigned long anon_mapping; 450746b18d4SPeter Zijlstra 45188c22088SPeter Zijlstra rcu_read_lock(); 45288c22088SPeter Zijlstra anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping); 45388c22088SPeter Zijlstra if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) 45488c22088SPeter Zijlstra goto out; 45588c22088SPeter Zijlstra if (!page_mapped(page)) 45688c22088SPeter Zijlstra goto out; 45788c22088SPeter Zijlstra 45888c22088SPeter Zijlstra anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); 459eee0f252SHugh Dickins root_anon_vma = ACCESS_ONCE(anon_vma->root); 460eee0f252SHugh Dickins if (mutex_trylock(&root_anon_vma->mutex)) { 46188c22088SPeter Zijlstra /* 462eee0f252SHugh Dickins * If the page is still mapped, then this anon_vma is still 463eee0f252SHugh Dickins * its anon_vma, and holding the mutex ensures that it will 464bc658c96SPeter Zijlstra * not go away, see anon_vma_free(). 46588c22088SPeter Zijlstra */ 466eee0f252SHugh Dickins if (!page_mapped(page)) { 467eee0f252SHugh Dickins mutex_unlock(&root_anon_vma->mutex); 46888c22088SPeter Zijlstra anon_vma = NULL; 46988c22088SPeter Zijlstra } 47088c22088SPeter Zijlstra goto out; 47188c22088SPeter Zijlstra } 47288c22088SPeter Zijlstra 47388c22088SPeter Zijlstra /* trylock failed, we got to sleep */ 47488c22088SPeter Zijlstra if (!atomic_inc_not_zero(&anon_vma->refcount)) { 47588c22088SPeter Zijlstra anon_vma = NULL; 47688c22088SPeter Zijlstra goto out; 47788c22088SPeter Zijlstra } 47888c22088SPeter Zijlstra 47988c22088SPeter Zijlstra if (!page_mapped(page)) { 48088c22088SPeter Zijlstra put_anon_vma(anon_vma); 48188c22088SPeter Zijlstra anon_vma = NULL; 48288c22088SPeter Zijlstra goto out; 48388c22088SPeter Zijlstra } 48488c22088SPeter Zijlstra 48588c22088SPeter Zijlstra /* we pinned the anon_vma, its safe to sleep */ 48688c22088SPeter Zijlstra rcu_read_unlock(); 487746b18d4SPeter Zijlstra anon_vma_lock(anon_vma); 488746b18d4SPeter Zijlstra 48988c22088SPeter Zijlstra if (atomic_dec_and_test(&anon_vma->refcount)) { 49088c22088SPeter Zijlstra /* 49188c22088SPeter Zijlstra * Oops, we held the last refcount, release the lock 49288c22088SPeter Zijlstra * and bail -- can't simply use put_anon_vma() because 49388c22088SPeter Zijlstra * we'll deadlock on the anon_vma_lock() recursion. 49488c22088SPeter Zijlstra */ 49588c22088SPeter Zijlstra anon_vma_unlock(anon_vma); 49688c22088SPeter Zijlstra __put_anon_vma(anon_vma); 49788c22088SPeter Zijlstra anon_vma = NULL; 49888c22088SPeter Zijlstra } 49988c22088SPeter Zijlstra 50088c22088SPeter Zijlstra return anon_vma; 50188c22088SPeter Zijlstra 50288c22088SPeter Zijlstra out: 50388c22088SPeter Zijlstra rcu_read_unlock(); 504746b18d4SPeter Zijlstra return anon_vma; 50534bbd704SOleg Nesterov } 50634bbd704SOleg Nesterov 50710be22dfSAndi Kleen void page_unlock_anon_vma(struct anon_vma *anon_vma) 50834bbd704SOleg Nesterov { 509cba48b98SRik van Riel anon_vma_unlock(anon_vma); 5101da177e4SLinus Torvalds } 5111da177e4SLinus Torvalds 5121da177e4SLinus Torvalds /* 5133ad33b24SLee Schermerhorn * At what user virtual address is page expected in @vma? 5141da177e4SLinus Torvalds */ 51586c2ad19SMichel Lespinasse static inline unsigned long 51686c2ad19SMichel Lespinasse __vma_address(struct page *page, struct vm_area_struct *vma) 5171da177e4SLinus Torvalds { 5181da177e4SLinus Torvalds pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 5191da177e4SLinus Torvalds 5200fe6e20bSNaoya Horiguchi if (unlikely(is_vm_hugetlb_page(vma))) 5210fe6e20bSNaoya Horiguchi pgoff = page->index << huge_page_order(page_hstate(page)); 52286c2ad19SMichel Lespinasse 52386c2ad19SMichel Lespinasse return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 5241da177e4SLinus Torvalds } 52586c2ad19SMichel Lespinasse 52686c2ad19SMichel Lespinasse inline unsigned long 52786c2ad19SMichel Lespinasse vma_address(struct page *page, struct vm_area_struct *vma) 52886c2ad19SMichel Lespinasse { 52986c2ad19SMichel Lespinasse unsigned long address = __vma_address(page, vma); 53086c2ad19SMichel Lespinasse 53186c2ad19SMichel Lespinasse /* page should be within @vma mapping range */ 53286c2ad19SMichel Lespinasse VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); 53386c2ad19SMichel Lespinasse 5341da177e4SLinus Torvalds return address; 5351da177e4SLinus Torvalds } 5361da177e4SLinus Torvalds 5371da177e4SLinus Torvalds /* 538bf89c8c8SHuang Shijie * At what user virtual address is page expected in vma? 539ab941e0fSNaoya Horiguchi * Caller should check the page is actually part of the vma. 5401da177e4SLinus Torvalds */ 5411da177e4SLinus Torvalds unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 5421da177e4SLinus Torvalds { 54386c2ad19SMichel Lespinasse unsigned long address; 54421d0d443SAndrea Arcangeli if (PageAnon(page)) { 5454829b906SHugh Dickins struct anon_vma *page__anon_vma = page_anon_vma(page); 5464829b906SHugh Dickins /* 5474829b906SHugh Dickins * Note: swapoff's unuse_vma() is more efficient with this 5484829b906SHugh Dickins * check, and needs it to match anon_vma when KSM is active. 5494829b906SHugh Dickins */ 5504829b906SHugh Dickins if (!vma->anon_vma || !page__anon_vma || 5514829b906SHugh Dickins vma->anon_vma->root != page__anon_vma->root) 55221d0d443SAndrea Arcangeli return -EFAULT; 55321d0d443SAndrea Arcangeli } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { 554ee498ed7SHugh Dickins if (!vma->vm_file || 555ee498ed7SHugh Dickins vma->vm_file->f_mapping != page->mapping) 5561da177e4SLinus Torvalds return -EFAULT; 5571da177e4SLinus Torvalds } else 5581da177e4SLinus Torvalds return -EFAULT; 55986c2ad19SMichel Lespinasse address = __vma_address(page, vma); 56086c2ad19SMichel Lespinasse if (unlikely(address < vma->vm_start || address >= vma->vm_end)) 56186c2ad19SMichel Lespinasse return -EFAULT; 56286c2ad19SMichel Lespinasse return address; 5631da177e4SLinus Torvalds } 5641da177e4SLinus Torvalds 5651da177e4SLinus Torvalds /* 56681b4082dSNikita Danilov * Check that @page is mapped at @address into @mm. 56781b4082dSNikita Danilov * 568479db0bfSNick Piggin * If @sync is false, page_check_address may perform a racy check to avoid 569479db0bfSNick Piggin * the page table lock when the pte is not present (helpful when reclaiming 570479db0bfSNick Piggin * highly shared pages). 571479db0bfSNick Piggin * 572b8072f09SHugh Dickins * On success returns with pte mapped and locked. 57381b4082dSNikita Danilov */ 574e9a81a82SNamhyung Kim pte_t *__page_check_address(struct page *page, struct mm_struct *mm, 575479db0bfSNick Piggin unsigned long address, spinlock_t **ptlp, int sync) 57681b4082dSNikita Danilov { 57781b4082dSNikita Danilov pgd_t *pgd; 57881b4082dSNikita Danilov pud_t *pud; 57981b4082dSNikita Danilov pmd_t *pmd; 58081b4082dSNikita Danilov pte_t *pte; 581c0718806SHugh Dickins spinlock_t *ptl; 58281b4082dSNikita Danilov 5830fe6e20bSNaoya Horiguchi if (unlikely(PageHuge(page))) { 5840fe6e20bSNaoya Horiguchi pte = huge_pte_offset(mm, address); 5850fe6e20bSNaoya Horiguchi ptl = &mm->page_table_lock; 5860fe6e20bSNaoya Horiguchi goto check; 5870fe6e20bSNaoya Horiguchi } 5880fe6e20bSNaoya Horiguchi 58981b4082dSNikita Danilov pgd = pgd_offset(mm, address); 590c0718806SHugh Dickins if (!pgd_present(*pgd)) 591c0718806SHugh Dickins return NULL; 592c0718806SHugh Dickins 59381b4082dSNikita Danilov pud = pud_offset(pgd, address); 594c0718806SHugh Dickins if (!pud_present(*pud)) 595c0718806SHugh Dickins return NULL; 596c0718806SHugh Dickins 59781b4082dSNikita Danilov pmd = pmd_offset(pud, address); 598c0718806SHugh Dickins if (!pmd_present(*pmd)) 599c0718806SHugh Dickins return NULL; 60071e3aac0SAndrea Arcangeli if (pmd_trans_huge(*pmd)) 60171e3aac0SAndrea Arcangeli return NULL; 602c0718806SHugh Dickins 60381b4082dSNikita Danilov pte = pte_offset_map(pmd, address); 604c0718806SHugh Dickins /* Make a quick check before getting the lock */ 605479db0bfSNick Piggin if (!sync && !pte_present(*pte)) { 60681b4082dSNikita Danilov pte_unmap(pte); 607c0718806SHugh Dickins return NULL; 60881b4082dSNikita Danilov } 609c0718806SHugh Dickins 6104c21e2f2SHugh Dickins ptl = pte_lockptr(mm, pmd); 6110fe6e20bSNaoya Horiguchi check: 612c0718806SHugh Dickins spin_lock(ptl); 613c0718806SHugh Dickins if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) { 614c0718806SHugh Dickins *ptlp = ptl; 615c0718806SHugh Dickins return pte; 61681b4082dSNikita Danilov } 617c0718806SHugh Dickins pte_unmap_unlock(pte, ptl); 618c0718806SHugh Dickins return NULL; 61981b4082dSNikita Danilov } 62081b4082dSNikita Danilov 621b291f000SNick Piggin /** 622b291f000SNick Piggin * page_mapped_in_vma - check whether a page is really mapped in a VMA 623b291f000SNick Piggin * @page: the page to test 624b291f000SNick Piggin * @vma: the VMA to test 625b291f000SNick Piggin * 626b291f000SNick Piggin * Returns 1 if the page is mapped into the page tables of the VMA, 0 627b291f000SNick Piggin * if the page is not mapped into the page tables of this VMA. Only 628b291f000SNick Piggin * valid for normal file or anonymous VMAs. 629b291f000SNick Piggin */ 6306a46079cSAndi Kleen int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) 631b291f000SNick Piggin { 632b291f000SNick Piggin unsigned long address; 633b291f000SNick Piggin pte_t *pte; 634b291f000SNick Piggin spinlock_t *ptl; 635b291f000SNick Piggin 63686c2ad19SMichel Lespinasse address = __vma_address(page, vma); 63786c2ad19SMichel Lespinasse if (unlikely(address < vma->vm_start || address >= vma->vm_end)) 638b291f000SNick Piggin return 0; 639b291f000SNick Piggin pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); 640b291f000SNick Piggin if (!pte) /* the page is not in this mm */ 641b291f000SNick Piggin return 0; 642b291f000SNick Piggin pte_unmap_unlock(pte, ptl); 643b291f000SNick Piggin 644b291f000SNick Piggin return 1; 645b291f000SNick Piggin } 646b291f000SNick Piggin 64781b4082dSNikita Danilov /* 6481da177e4SLinus Torvalds * Subfunctions of page_referenced: page_referenced_one called 6491da177e4SLinus Torvalds * repeatedly from either page_referenced_anon or page_referenced_file. 6501da177e4SLinus Torvalds */ 6515ad64688SHugh Dickins int page_referenced_one(struct page *page, struct vm_area_struct *vma, 6521cb1729bSHugh Dickins unsigned long address, unsigned int *mapcount, 6536fe6b7e3SWu Fengguang unsigned long *vm_flags) 6541da177e4SLinus Torvalds { 6551da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 6561da177e4SLinus Torvalds int referenced = 0; 6571da177e4SLinus Torvalds 6582da28bfdSAndrea Arcangeli if (unlikely(PageTransHuge(page))) { 6592da28bfdSAndrea Arcangeli pmd_t *pmd; 6602da28bfdSAndrea Arcangeli 6612da28bfdSAndrea Arcangeli spin_lock(&mm->page_table_lock); 662b291f000SNick Piggin /* 6632da28bfdSAndrea Arcangeli * rmap might return false positives; we must filter 6642da28bfdSAndrea Arcangeli * these out using page_check_address_pmd(). 665b291f000SNick Piggin */ 6662da28bfdSAndrea Arcangeli pmd = page_check_address_pmd(page, mm, address, 6672da28bfdSAndrea Arcangeli PAGE_CHECK_ADDRESS_PMD_FLAG); 6682da28bfdSAndrea Arcangeli if (!pmd) { 6692da28bfdSAndrea Arcangeli spin_unlock(&mm->page_table_lock); 6702da28bfdSAndrea Arcangeli goto out; 6712da28bfdSAndrea Arcangeli } 6722da28bfdSAndrea Arcangeli 6735a9bbdcdSHugh Dickins if (vma->vm_flags & VM_LOCKED) { 6742da28bfdSAndrea Arcangeli spin_unlock(&mm->page_table_lock); 67571e3aac0SAndrea Arcangeli *mapcount = 0; /* break early from loop */ 67603ef83afSMinchan Kim *vm_flags |= VM_LOCKED; 67771e3aac0SAndrea Arcangeli goto out; 678b291f000SNick Piggin } 679b291f000SNick Piggin 6802da28bfdSAndrea Arcangeli /* go ahead even if the pmd is pmd_trans_splitting() */ 6812da28bfdSAndrea Arcangeli if (pmdp_clear_flush_young_notify(vma, address, pmd)) 68271e3aac0SAndrea Arcangeli referenced++; 68371e3aac0SAndrea Arcangeli spin_unlock(&mm->page_table_lock); 68471e3aac0SAndrea Arcangeli } else { 68571e3aac0SAndrea Arcangeli pte_t *pte; 68671e3aac0SAndrea Arcangeli spinlock_t *ptl; 68771e3aac0SAndrea Arcangeli 6882da28bfdSAndrea Arcangeli /* 6892da28bfdSAndrea Arcangeli * rmap might return false positives; we must filter 6902da28bfdSAndrea Arcangeli * these out using page_check_address(). 6912da28bfdSAndrea Arcangeli */ 69271e3aac0SAndrea Arcangeli pte = page_check_address(page, mm, address, &ptl, 0); 69371e3aac0SAndrea Arcangeli if (!pte) 69471e3aac0SAndrea Arcangeli goto out; 69571e3aac0SAndrea Arcangeli 6962da28bfdSAndrea Arcangeli if (vma->vm_flags & VM_LOCKED) { 6972da28bfdSAndrea Arcangeli pte_unmap_unlock(pte, ptl); 6982da28bfdSAndrea Arcangeli *mapcount = 0; /* break early from loop */ 6992da28bfdSAndrea Arcangeli *vm_flags |= VM_LOCKED; 7002da28bfdSAndrea Arcangeli goto out; 7012da28bfdSAndrea Arcangeli } 7022da28bfdSAndrea Arcangeli 7034917e5d0SJohannes Weiner if (ptep_clear_flush_young_notify(vma, address, pte)) { 7044917e5d0SJohannes Weiner /* 7054917e5d0SJohannes Weiner * Don't treat a reference through a sequentially read 7064917e5d0SJohannes Weiner * mapping as such. If the page has been used in 7074917e5d0SJohannes Weiner * another mapping, we will catch it; if this other 7084917e5d0SJohannes Weiner * mapping is already gone, the unmap path will have 7094917e5d0SJohannes Weiner * set PG_referenced or activated the page. 7104917e5d0SJohannes Weiner */ 7114917e5d0SJohannes Weiner if (likely(!VM_SequentialReadHint(vma))) 7121da177e4SLinus Torvalds referenced++; 7134917e5d0SJohannes Weiner } 714c0718806SHugh Dickins pte_unmap_unlock(pte, ptl); 71571e3aac0SAndrea Arcangeli } 71671e3aac0SAndrea Arcangeli 71771e3aac0SAndrea Arcangeli (*mapcount)--; 718273f047eSHuang Shijie 7196fe6b7e3SWu Fengguang if (referenced) 7206fe6b7e3SWu Fengguang *vm_flags |= vma->vm_flags; 721273f047eSHuang Shijie out: 7221da177e4SLinus Torvalds return referenced; 7231da177e4SLinus Torvalds } 7241da177e4SLinus Torvalds 725bed7161aSBalbir Singh static int page_referenced_anon(struct page *page, 72672835c86SJohannes Weiner struct mem_cgroup *memcg, 7276fe6b7e3SWu Fengguang unsigned long *vm_flags) 7281da177e4SLinus Torvalds { 7291da177e4SLinus Torvalds unsigned int mapcount; 7301da177e4SLinus Torvalds struct anon_vma *anon_vma; 731bf181b9fSMichel Lespinasse pgoff_t pgoff; 7325beb4930SRik van Riel struct anon_vma_chain *avc; 7331da177e4SLinus Torvalds int referenced = 0; 7341da177e4SLinus Torvalds 7351da177e4SLinus Torvalds anon_vma = page_lock_anon_vma(page); 7361da177e4SLinus Torvalds if (!anon_vma) 7371da177e4SLinus Torvalds return referenced; 7381da177e4SLinus Torvalds 7391da177e4SLinus Torvalds mapcount = page_mapcount(page); 740bf181b9fSMichel Lespinasse pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 741bf181b9fSMichel Lespinasse anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { 7425beb4930SRik van Riel struct vm_area_struct *vma = avc->vma; 7431cb1729bSHugh Dickins unsigned long address = vma_address(page, vma); 744bed7161aSBalbir Singh /* 745bed7161aSBalbir Singh * If we are reclaiming on behalf of a cgroup, skip 746bed7161aSBalbir Singh * counting on behalf of references from different 747bed7161aSBalbir Singh * cgroups 748bed7161aSBalbir Singh */ 74972835c86SJohannes Weiner if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) 750bed7161aSBalbir Singh continue; 7511cb1729bSHugh Dickins referenced += page_referenced_one(page, vma, address, 7526fe6b7e3SWu Fengguang &mapcount, vm_flags); 7531da177e4SLinus Torvalds if (!mapcount) 7541da177e4SLinus Torvalds break; 7551da177e4SLinus Torvalds } 75634bbd704SOleg Nesterov 75734bbd704SOleg Nesterov page_unlock_anon_vma(anon_vma); 7581da177e4SLinus Torvalds return referenced; 7591da177e4SLinus Torvalds } 7601da177e4SLinus Torvalds 7611da177e4SLinus Torvalds /** 7621da177e4SLinus Torvalds * page_referenced_file - referenced check for object-based rmap 7631da177e4SLinus Torvalds * @page: the page we're checking references on. 76472835c86SJohannes Weiner * @memcg: target memory control group 7656fe6b7e3SWu Fengguang * @vm_flags: collect encountered vma->vm_flags who actually referenced the page 7661da177e4SLinus Torvalds * 7671da177e4SLinus Torvalds * For an object-based mapped page, find all the places it is mapped and 7681da177e4SLinus Torvalds * check/clear the referenced flag. This is done by following the page->mapping 7691da177e4SLinus Torvalds * pointer, then walking the chain of vmas it holds. It returns the number 7701da177e4SLinus Torvalds * of references it found. 7711da177e4SLinus Torvalds * 7721da177e4SLinus Torvalds * This function is only called from page_referenced for object-based pages. 7731da177e4SLinus Torvalds */ 774bed7161aSBalbir Singh static int page_referenced_file(struct page *page, 77572835c86SJohannes Weiner struct mem_cgroup *memcg, 7766fe6b7e3SWu Fengguang unsigned long *vm_flags) 7771da177e4SLinus Torvalds { 7781da177e4SLinus Torvalds unsigned int mapcount; 7791da177e4SLinus Torvalds struct address_space *mapping = page->mapping; 7801da177e4SLinus Torvalds pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 7811da177e4SLinus Torvalds struct vm_area_struct *vma; 7821da177e4SLinus Torvalds int referenced = 0; 7831da177e4SLinus Torvalds 7841da177e4SLinus Torvalds /* 7851da177e4SLinus Torvalds * The caller's checks on page->mapping and !PageAnon have made 7861da177e4SLinus Torvalds * sure that this is a file page: the check for page->mapping 7871da177e4SLinus Torvalds * excludes the case just before it gets set on an anon page. 7881da177e4SLinus Torvalds */ 7891da177e4SLinus Torvalds BUG_ON(PageAnon(page)); 7901da177e4SLinus Torvalds 7911da177e4SLinus Torvalds /* 7921da177e4SLinus Torvalds * The page lock not only makes sure that page->mapping cannot 7931da177e4SLinus Torvalds * suddenly be NULLified by truncation, it makes sure that the 7941da177e4SLinus Torvalds * structure at mapping cannot be freed and reused yet, 7953d48ae45SPeter Zijlstra * so we can safely take mapping->i_mmap_mutex. 7961da177e4SLinus Torvalds */ 7971da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 7981da177e4SLinus Torvalds 7993d48ae45SPeter Zijlstra mutex_lock(&mapping->i_mmap_mutex); 8001da177e4SLinus Torvalds 8011da177e4SLinus Torvalds /* 8023d48ae45SPeter Zijlstra * i_mmap_mutex does not stabilize mapcount at all, but mapcount 8031da177e4SLinus Torvalds * is more likely to be accurate if we note it after spinning. 8041da177e4SLinus Torvalds */ 8051da177e4SLinus Torvalds mapcount = page_mapcount(page); 8061da177e4SLinus Torvalds 8076b2dbba8SMichel Lespinasse vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 8081cb1729bSHugh Dickins unsigned long address = vma_address(page, vma); 809bed7161aSBalbir Singh /* 810bed7161aSBalbir Singh * If we are reclaiming on behalf of a cgroup, skip 811bed7161aSBalbir Singh * counting on behalf of references from different 812bed7161aSBalbir Singh * cgroups 813bed7161aSBalbir Singh */ 81472835c86SJohannes Weiner if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) 815bed7161aSBalbir Singh continue; 8161cb1729bSHugh Dickins referenced += page_referenced_one(page, vma, address, 8176fe6b7e3SWu Fengguang &mapcount, vm_flags); 8181da177e4SLinus Torvalds if (!mapcount) 8191da177e4SLinus Torvalds break; 8201da177e4SLinus Torvalds } 8211da177e4SLinus Torvalds 8223d48ae45SPeter Zijlstra mutex_unlock(&mapping->i_mmap_mutex); 8231da177e4SLinus Torvalds return referenced; 8241da177e4SLinus Torvalds } 8251da177e4SLinus Torvalds 8261da177e4SLinus Torvalds /** 8271da177e4SLinus Torvalds * page_referenced - test if the page was referenced 8281da177e4SLinus Torvalds * @page: the page to test 8291da177e4SLinus Torvalds * @is_locked: caller holds lock on the page 83072835c86SJohannes Weiner * @memcg: target memory cgroup 8316fe6b7e3SWu Fengguang * @vm_flags: collect encountered vma->vm_flags who actually referenced the page 8321da177e4SLinus Torvalds * 8331da177e4SLinus Torvalds * Quick test_and_clear_referenced for all mappings to a page, 8341da177e4SLinus Torvalds * returns the number of ptes which referenced the page. 8351da177e4SLinus Torvalds */ 8366fe6b7e3SWu Fengguang int page_referenced(struct page *page, 8376fe6b7e3SWu Fengguang int is_locked, 83872835c86SJohannes Weiner struct mem_cgroup *memcg, 8396fe6b7e3SWu Fengguang unsigned long *vm_flags) 8401da177e4SLinus Torvalds { 8411da177e4SLinus Torvalds int referenced = 0; 8425ad64688SHugh Dickins int we_locked = 0; 8431da177e4SLinus Torvalds 8446fe6b7e3SWu Fengguang *vm_flags = 0; 8453ca7b3c5SHugh Dickins if (page_mapped(page) && page_rmapping(page)) { 8465ad64688SHugh Dickins if (!is_locked && (!PageAnon(page) || PageKsm(page))) { 8475ad64688SHugh Dickins we_locked = trylock_page(page); 8485ad64688SHugh Dickins if (!we_locked) { 8495ad64688SHugh Dickins referenced++; 8505ad64688SHugh Dickins goto out; 8515ad64688SHugh Dickins } 8525ad64688SHugh Dickins } 8535ad64688SHugh Dickins if (unlikely(PageKsm(page))) 85472835c86SJohannes Weiner referenced += page_referenced_ksm(page, memcg, 8555ad64688SHugh Dickins vm_flags); 8565ad64688SHugh Dickins else if (PageAnon(page)) 85772835c86SJohannes Weiner referenced += page_referenced_anon(page, memcg, 8586fe6b7e3SWu Fengguang vm_flags); 8595ad64688SHugh Dickins else if (page->mapping) 86072835c86SJohannes Weiner referenced += page_referenced_file(page, memcg, 8616fe6b7e3SWu Fengguang vm_flags); 8625ad64688SHugh Dickins if (we_locked) 8631da177e4SLinus Torvalds unlock_page(page); 86450a15981SMartin Schwidefsky 8652d42552dSMartin Schwidefsky if (page_test_and_clear_young(page_to_pfn(page))) 8665b7baf05SChristian Borntraeger referenced++; 86750a15981SMartin Schwidefsky } 86850a15981SMartin Schwidefsky out: 8691da177e4SLinus Torvalds return referenced; 8701da177e4SLinus Torvalds } 8711da177e4SLinus Torvalds 8721cb1729bSHugh Dickins static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, 8731cb1729bSHugh Dickins unsigned long address) 874d08b3851SPeter Zijlstra { 875d08b3851SPeter Zijlstra struct mm_struct *mm = vma->vm_mm; 876c2fda5feSPeter Zijlstra pte_t *pte; 877d08b3851SPeter Zijlstra spinlock_t *ptl; 878d08b3851SPeter Zijlstra int ret = 0; 879d08b3851SPeter Zijlstra 880479db0bfSNick Piggin pte = page_check_address(page, mm, address, &ptl, 1); 881d08b3851SPeter Zijlstra if (!pte) 882d08b3851SPeter Zijlstra goto out; 883d08b3851SPeter Zijlstra 884c2fda5feSPeter Zijlstra if (pte_dirty(*pte) || pte_write(*pte)) { 885c2fda5feSPeter Zijlstra pte_t entry; 886d08b3851SPeter Zijlstra 887c2fda5feSPeter Zijlstra flush_cache_page(vma, address, pte_pfn(*pte)); 8882ec74c3eSSagi Grimberg entry = ptep_clear_flush(vma, address, pte); 889d08b3851SPeter Zijlstra entry = pte_wrprotect(entry); 890c2fda5feSPeter Zijlstra entry = pte_mkclean(entry); 891d6e88e67SAl Viro set_pte_at(mm, address, pte, entry); 892d08b3851SPeter Zijlstra ret = 1; 893c2fda5feSPeter Zijlstra } 894d08b3851SPeter Zijlstra 895d08b3851SPeter Zijlstra pte_unmap_unlock(pte, ptl); 8962ec74c3eSSagi Grimberg 8972ec74c3eSSagi Grimberg if (ret) 8982ec74c3eSSagi Grimberg mmu_notifier_invalidate_page(mm, address); 899d08b3851SPeter Zijlstra out: 900d08b3851SPeter Zijlstra return ret; 901d08b3851SPeter Zijlstra } 902d08b3851SPeter Zijlstra 903d08b3851SPeter Zijlstra static int page_mkclean_file(struct address_space *mapping, struct page *page) 904d08b3851SPeter Zijlstra { 905d08b3851SPeter Zijlstra pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 906d08b3851SPeter Zijlstra struct vm_area_struct *vma; 907d08b3851SPeter Zijlstra int ret = 0; 908d08b3851SPeter Zijlstra 909d08b3851SPeter Zijlstra BUG_ON(PageAnon(page)); 910d08b3851SPeter Zijlstra 9113d48ae45SPeter Zijlstra mutex_lock(&mapping->i_mmap_mutex); 9126b2dbba8SMichel Lespinasse vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 9131cb1729bSHugh Dickins if (vma->vm_flags & VM_SHARED) { 9141cb1729bSHugh Dickins unsigned long address = vma_address(page, vma); 9151cb1729bSHugh Dickins ret += page_mkclean_one(page, vma, address); 9161cb1729bSHugh Dickins } 917d08b3851SPeter Zijlstra } 9183d48ae45SPeter Zijlstra mutex_unlock(&mapping->i_mmap_mutex); 919d08b3851SPeter Zijlstra return ret; 920d08b3851SPeter Zijlstra } 921d08b3851SPeter Zijlstra 922d08b3851SPeter Zijlstra int page_mkclean(struct page *page) 923d08b3851SPeter Zijlstra { 924d08b3851SPeter Zijlstra int ret = 0; 925d08b3851SPeter Zijlstra 926d08b3851SPeter Zijlstra BUG_ON(!PageLocked(page)); 927d08b3851SPeter Zijlstra 928d08b3851SPeter Zijlstra if (page_mapped(page)) { 929d08b3851SPeter Zijlstra struct address_space *mapping = page_mapping(page); 930*ef5d437fSJan Kara if (mapping) 931d08b3851SPeter Zijlstra ret = page_mkclean_file(mapping, page); 9326c210482SMartin Schwidefsky } 933d08b3851SPeter Zijlstra 934d08b3851SPeter Zijlstra return ret; 935d08b3851SPeter Zijlstra } 93660b59beaSJaya Kumar EXPORT_SYMBOL_GPL(page_mkclean); 937d08b3851SPeter Zijlstra 9381da177e4SLinus Torvalds /** 939c44b6743SRik van Riel * page_move_anon_rmap - move a page to our anon_vma 940c44b6743SRik van Riel * @page: the page to move to our anon_vma 941c44b6743SRik van Riel * @vma: the vma the page belongs to 942c44b6743SRik van Riel * @address: the user virtual address mapped 943c44b6743SRik van Riel * 944c44b6743SRik van Riel * When a page belongs exclusively to one process after a COW event, 945c44b6743SRik van Riel * that page can be moved into the anon_vma that belongs to just that 946c44b6743SRik van Riel * process, so the rmap code will not search the parent or sibling 947c44b6743SRik van Riel * processes. 948c44b6743SRik van Riel */ 949c44b6743SRik van Riel void page_move_anon_rmap(struct page *page, 950c44b6743SRik van Riel struct vm_area_struct *vma, unsigned long address) 951c44b6743SRik van Riel { 952c44b6743SRik van Riel struct anon_vma *anon_vma = vma->anon_vma; 953c44b6743SRik van Riel 954c44b6743SRik van Riel VM_BUG_ON(!PageLocked(page)); 955c44b6743SRik van Riel VM_BUG_ON(!anon_vma); 956c44b6743SRik van Riel VM_BUG_ON(page->index != linear_page_index(vma, address)); 957c44b6743SRik van Riel 958c44b6743SRik van Riel anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 959c44b6743SRik van Riel page->mapping = (struct address_space *) anon_vma; 960c44b6743SRik van Riel } 961c44b6743SRik van Riel 962c44b6743SRik van Riel /** 96343d8eac4SRandy Dunlap * __page_set_anon_rmap - set up new anonymous rmap 9644e1c1975SAndi Kleen * @page: Page to add to rmap 9654e1c1975SAndi Kleen * @vma: VM area to add page to. 9664e1c1975SAndi Kleen * @address: User virtual address of the mapping 967e8a03febSRik van Riel * @exclusive: the page is exclusively owned by the current process 9681da177e4SLinus Torvalds */ 9699617d95eSNick Piggin static void __page_set_anon_rmap(struct page *page, 970e8a03febSRik van Riel struct vm_area_struct *vma, unsigned long address, int exclusive) 9711da177e4SLinus Torvalds { 972e8a03febSRik van Riel struct anon_vma *anon_vma = vma->anon_vma; 9732822c1aaSNick Piggin 974e8a03febSRik van Riel BUG_ON(!anon_vma); 975ea90002bSLinus Torvalds 9764e1c1975SAndi Kleen if (PageAnon(page)) 9774e1c1975SAndi Kleen return; 9784e1c1975SAndi Kleen 979ea90002bSLinus Torvalds /* 980e8a03febSRik van Riel * If the page isn't exclusively mapped into this vma, 981e8a03febSRik van Riel * we must use the _oldest_ possible anon_vma for the 982e8a03febSRik van Riel * page mapping! 983ea90002bSLinus Torvalds */ 9844e1c1975SAndi Kleen if (!exclusive) 985288468c3SAndrea Arcangeli anon_vma = anon_vma->root; 986ea90002bSLinus Torvalds 9871da177e4SLinus Torvalds anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 9882822c1aaSNick Piggin page->mapping = (struct address_space *) anon_vma; 9894d7670e0SNick Piggin page->index = linear_page_index(vma, address); 9901da177e4SLinus Torvalds } 9919617d95eSNick Piggin 9929617d95eSNick Piggin /** 99343d8eac4SRandy Dunlap * __page_check_anon_rmap - sanity check anonymous rmap addition 994c97a9e10SNick Piggin * @page: the page to add the mapping to 995c97a9e10SNick Piggin * @vma: the vm area in which the mapping is added 996c97a9e10SNick Piggin * @address: the user virtual address mapped 997c97a9e10SNick Piggin */ 998c97a9e10SNick Piggin static void __page_check_anon_rmap(struct page *page, 999c97a9e10SNick Piggin struct vm_area_struct *vma, unsigned long address) 1000c97a9e10SNick Piggin { 1001c97a9e10SNick Piggin #ifdef CONFIG_DEBUG_VM 1002c97a9e10SNick Piggin /* 1003c97a9e10SNick Piggin * The page's anon-rmap details (mapping and index) are guaranteed to 1004c97a9e10SNick Piggin * be set up correctly at this point. 1005c97a9e10SNick Piggin * 1006c97a9e10SNick Piggin * We have exclusion against page_add_anon_rmap because the caller 1007c97a9e10SNick Piggin * always holds the page locked, except if called from page_dup_rmap, 1008c97a9e10SNick Piggin * in which case the page is already known to be setup. 1009c97a9e10SNick Piggin * 1010c97a9e10SNick Piggin * We have exclusion against page_add_new_anon_rmap because those pages 1011c97a9e10SNick Piggin * are initially only visible via the pagetables, and the pte is locked 1012c97a9e10SNick Piggin * over the call to page_add_new_anon_rmap. 1013c97a9e10SNick Piggin */ 101444ab57a0SAndrea Arcangeli BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root); 1015c97a9e10SNick Piggin BUG_ON(page->index != linear_page_index(vma, address)); 1016c97a9e10SNick Piggin #endif 1017c97a9e10SNick Piggin } 1018c97a9e10SNick Piggin 1019c97a9e10SNick Piggin /** 10209617d95eSNick Piggin * page_add_anon_rmap - add pte mapping to an anonymous page 10219617d95eSNick Piggin * @page: the page to add the mapping to 10229617d95eSNick Piggin * @vma: the vm area in which the mapping is added 10239617d95eSNick Piggin * @address: the user virtual address mapped 10249617d95eSNick Piggin * 10255ad64688SHugh Dickins * The caller needs to hold the pte lock, and the page must be locked in 102680e14822SHugh Dickins * the anon_vma case: to serialize mapping,index checking after setting, 102780e14822SHugh Dickins * and to ensure that PageAnon is not being upgraded racily to PageKsm 102880e14822SHugh Dickins * (but PageKsm is never downgraded to PageAnon). 10299617d95eSNick Piggin */ 10309617d95eSNick Piggin void page_add_anon_rmap(struct page *page, 10319617d95eSNick Piggin struct vm_area_struct *vma, unsigned long address) 10329617d95eSNick Piggin { 1033ad8c2ee8SRik van Riel do_page_add_anon_rmap(page, vma, address, 0); 1034ad8c2ee8SRik van Riel } 1035ad8c2ee8SRik van Riel 1036ad8c2ee8SRik van Riel /* 1037ad8c2ee8SRik van Riel * Special version of the above for do_swap_page, which often runs 1038ad8c2ee8SRik van Riel * into pages that are exclusively owned by the current process. 1039ad8c2ee8SRik van Riel * Everybody else should continue to use page_add_anon_rmap above. 1040ad8c2ee8SRik van Riel */ 1041ad8c2ee8SRik van Riel void do_page_add_anon_rmap(struct page *page, 1042ad8c2ee8SRik van Riel struct vm_area_struct *vma, unsigned long address, int exclusive) 1043ad8c2ee8SRik van Riel { 10445ad64688SHugh Dickins int first = atomic_inc_and_test(&page->_mapcount); 104579134171SAndrea Arcangeli if (first) { 104679134171SAndrea Arcangeli if (!PageTransHuge(page)) 10475ad64688SHugh Dickins __inc_zone_page_state(page, NR_ANON_PAGES); 104879134171SAndrea Arcangeli else 104979134171SAndrea Arcangeli __inc_zone_page_state(page, 105079134171SAndrea Arcangeli NR_ANON_TRANSPARENT_HUGEPAGES); 105179134171SAndrea Arcangeli } 10525ad64688SHugh Dickins if (unlikely(PageKsm(page))) 10535ad64688SHugh Dickins return; 10545ad64688SHugh Dickins 1055c97a9e10SNick Piggin VM_BUG_ON(!PageLocked(page)); 10565dbe0af4SHugh Dickins /* address might be in next vma when migration races vma_adjust */ 10575ad64688SHugh Dickins if (first) 1058ad8c2ee8SRik van Riel __page_set_anon_rmap(page, vma, address, exclusive); 105969029cd5SKAMEZAWA Hiroyuki else 1060c97a9e10SNick Piggin __page_check_anon_rmap(page, vma, address); 10611da177e4SLinus Torvalds } 10621da177e4SLinus Torvalds 106343d8eac4SRandy Dunlap /** 10649617d95eSNick Piggin * page_add_new_anon_rmap - add pte mapping to a new anonymous page 10659617d95eSNick Piggin * @page: the page to add the mapping to 10669617d95eSNick Piggin * @vma: the vm area in which the mapping is added 10679617d95eSNick Piggin * @address: the user virtual address mapped 10689617d95eSNick Piggin * 10699617d95eSNick Piggin * Same as page_add_anon_rmap but must only be called on *new* pages. 10709617d95eSNick Piggin * This means the inc-and-test can be bypassed. 1071c97a9e10SNick Piggin * Page does not have to be locked. 10729617d95eSNick Piggin */ 10739617d95eSNick Piggin void page_add_new_anon_rmap(struct page *page, 10749617d95eSNick Piggin struct vm_area_struct *vma, unsigned long address) 10759617d95eSNick Piggin { 1076b5934c53SHugh Dickins VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); 1077cbf84b7aSHugh Dickins SetPageSwapBacked(page); 1078cbf84b7aSHugh Dickins atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ 107979134171SAndrea Arcangeli if (!PageTransHuge(page)) 10805ad64688SHugh Dickins __inc_zone_page_state(page, NR_ANON_PAGES); 108179134171SAndrea Arcangeli else 108279134171SAndrea Arcangeli __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); 1083e8a03febSRik van Riel __page_set_anon_rmap(page, vma, address, 1); 108439b5f29aSHugh Dickins if (!mlocked_vma_newpage(vma, page)) 1085cbf84b7aSHugh Dickins lru_cache_add_lru(page, LRU_ACTIVE_ANON); 1086b5934c53SHugh Dickins else 1087b5934c53SHugh Dickins add_page_to_unevictable_list(page); 10889617d95eSNick Piggin } 10899617d95eSNick Piggin 10901da177e4SLinus Torvalds /** 10911da177e4SLinus Torvalds * page_add_file_rmap - add pte mapping to a file page 10921da177e4SLinus Torvalds * @page: the page to add the mapping to 10931da177e4SLinus Torvalds * 1094b8072f09SHugh Dickins * The caller needs to hold the pte lock. 10951da177e4SLinus Torvalds */ 10961da177e4SLinus Torvalds void page_add_file_rmap(struct page *page) 10971da177e4SLinus Torvalds { 109889c06bd5SKAMEZAWA Hiroyuki bool locked; 109989c06bd5SKAMEZAWA Hiroyuki unsigned long flags; 110089c06bd5SKAMEZAWA Hiroyuki 110189c06bd5SKAMEZAWA Hiroyuki mem_cgroup_begin_update_page_stat(page, &locked, &flags); 1102d69b042fSBalbir Singh if (atomic_inc_and_test(&page->_mapcount)) { 110365ba55f5SChristoph Lameter __inc_zone_page_state(page, NR_FILE_MAPPED); 11042a7106f2SGreg Thelen mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED); 1105d69b042fSBalbir Singh } 110689c06bd5SKAMEZAWA Hiroyuki mem_cgroup_end_update_page_stat(page, &locked, &flags); 11071da177e4SLinus Torvalds } 11081da177e4SLinus Torvalds 11091da177e4SLinus Torvalds /** 11101da177e4SLinus Torvalds * page_remove_rmap - take down pte mapping from a page 11111da177e4SLinus Torvalds * @page: page to remove mapping from 11121da177e4SLinus Torvalds * 1113b8072f09SHugh Dickins * The caller needs to hold the pte lock. 11141da177e4SLinus Torvalds */ 1115edc315fdSHugh Dickins void page_remove_rmap(struct page *page) 11161da177e4SLinus Torvalds { 1117*ef5d437fSJan Kara struct address_space *mapping = page_mapping(page); 111889c06bd5SKAMEZAWA Hiroyuki bool anon = PageAnon(page); 111989c06bd5SKAMEZAWA Hiroyuki bool locked; 112089c06bd5SKAMEZAWA Hiroyuki unsigned long flags; 112189c06bd5SKAMEZAWA Hiroyuki 112289c06bd5SKAMEZAWA Hiroyuki /* 112389c06bd5SKAMEZAWA Hiroyuki * The anon case has no mem_cgroup page_stat to update; but may 112489c06bd5SKAMEZAWA Hiroyuki * uncharge_page() below, where the lock ordering can deadlock if 112589c06bd5SKAMEZAWA Hiroyuki * we hold the lock against page_stat move: so avoid it on anon. 112689c06bd5SKAMEZAWA Hiroyuki */ 112789c06bd5SKAMEZAWA Hiroyuki if (!anon) 112889c06bd5SKAMEZAWA Hiroyuki mem_cgroup_begin_update_page_stat(page, &locked, &flags); 112989c06bd5SKAMEZAWA Hiroyuki 1130b904dcfeSKOSAKI Motohiro /* page still mapped by someone else? */ 1131b904dcfeSKOSAKI Motohiro if (!atomic_add_negative(-1, &page->_mapcount)) 113289c06bd5SKAMEZAWA Hiroyuki goto out; 1133b904dcfeSKOSAKI Motohiro 11341da177e4SLinus Torvalds /* 113516f8c5b2SHugh Dickins * Now that the last pte has gone, s390 must transfer dirty 113616f8c5b2SHugh Dickins * flag from storage key to struct page. We can usually skip 113716f8c5b2SHugh Dickins * this if the page is anon, so about to be freed; but perhaps 113816f8c5b2SHugh Dickins * not if it's in swapcache - there might be another pte slot 113916f8c5b2SHugh Dickins * containing the swap entry, but page not yet written to swap. 1140*ef5d437fSJan Kara * 1141*ef5d437fSJan Kara * And we can skip it on file pages, so long as the filesystem 1142*ef5d437fSJan Kara * participates in dirty tracking; but need to catch shm and tmpfs 1143*ef5d437fSJan Kara * and ramfs pages which have been modified since creation by read 1144*ef5d437fSJan Kara * fault. 1145*ef5d437fSJan Kara * 1146*ef5d437fSJan Kara * Note that mapping must be decided above, before decrementing 1147*ef5d437fSJan Kara * mapcount (which luckily provides a barrier): once page is unmapped, 1148*ef5d437fSJan Kara * it could be truncated and page->mapping reset to NULL at any moment. 1149*ef5d437fSJan Kara * Note also that we are relying on page_mapping(page) to set mapping 1150*ef5d437fSJan Kara * to &swapper_space when PageSwapCache(page). 115116f8c5b2SHugh Dickins */ 1152*ef5d437fSJan Kara if (mapping && !mapping_cap_account_dirty(mapping) && 11532d42552dSMartin Schwidefsky page_test_and_clear_dirty(page_to_pfn(page), 1)) 115416f8c5b2SHugh Dickins set_page_dirty(page); 11550fe6e20bSNaoya Horiguchi /* 11560fe6e20bSNaoya Horiguchi * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED 11570fe6e20bSNaoya Horiguchi * and not charged by memcg for now. 11580fe6e20bSNaoya Horiguchi */ 11590fe6e20bSNaoya Horiguchi if (unlikely(PageHuge(page))) 116089c06bd5SKAMEZAWA Hiroyuki goto out; 116189c06bd5SKAMEZAWA Hiroyuki if (anon) { 116216f8c5b2SHugh Dickins mem_cgroup_uncharge_page(page); 116379134171SAndrea Arcangeli if (!PageTransHuge(page)) 1164b904dcfeSKOSAKI Motohiro __dec_zone_page_state(page, NR_ANON_PAGES); 116579134171SAndrea Arcangeli else 116679134171SAndrea Arcangeli __dec_zone_page_state(page, 116779134171SAndrea Arcangeli NR_ANON_TRANSPARENT_HUGEPAGES); 1168b904dcfeSKOSAKI Motohiro } else { 1169b904dcfeSKOSAKI Motohiro __dec_zone_page_state(page, NR_FILE_MAPPED); 11702a7106f2SGreg Thelen mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED); 1171e6c509f8SHugh Dickins mem_cgroup_end_update_page_stat(page, &locked, &flags); 1172b904dcfeSKOSAKI Motohiro } 1173e6c509f8SHugh Dickins if (unlikely(PageMlocked(page))) 1174e6c509f8SHugh Dickins clear_page_mlock(page); 117516f8c5b2SHugh Dickins /* 11761da177e4SLinus Torvalds * It would be tidy to reset the PageAnon mapping here, 11771da177e4SLinus Torvalds * but that might overwrite a racing page_add_anon_rmap 11781da177e4SLinus Torvalds * which increments mapcount after us but sets mapping 11791da177e4SLinus Torvalds * before us: so leave the reset to free_hot_cold_page, 11801da177e4SLinus Torvalds * and remember that it's only reliable while mapped. 11811da177e4SLinus Torvalds * Leaving it set also helps swapoff to reinstate ptes 11821da177e4SLinus Torvalds * faster for those pages still in swapcache. 11831da177e4SLinus Torvalds */ 1184e6c509f8SHugh Dickins return; 118589c06bd5SKAMEZAWA Hiroyuki out: 118689c06bd5SKAMEZAWA Hiroyuki if (!anon) 118789c06bd5SKAMEZAWA Hiroyuki mem_cgroup_end_update_page_stat(page, &locked, &flags); 11881da177e4SLinus Torvalds } 11891da177e4SLinus Torvalds 11901da177e4SLinus Torvalds /* 11911da177e4SLinus Torvalds * Subfunctions of try_to_unmap: try_to_unmap_one called 119299ef0315SWanlong Gao * repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file. 11931da177e4SLinus Torvalds */ 11945ad64688SHugh Dickins int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, 11951cb1729bSHugh Dickins unsigned long address, enum ttu_flags flags) 11961da177e4SLinus Torvalds { 11971da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 11981da177e4SLinus Torvalds pte_t *pte; 11991da177e4SLinus Torvalds pte_t pteval; 1200c0718806SHugh Dickins spinlock_t *ptl; 12011da177e4SLinus Torvalds int ret = SWAP_AGAIN; 12021da177e4SLinus Torvalds 1203479db0bfSNick Piggin pte = page_check_address(page, mm, address, &ptl, 0); 1204c0718806SHugh Dickins if (!pte) 120581b4082dSNikita Danilov goto out; 12061da177e4SLinus Torvalds 12071da177e4SLinus Torvalds /* 12081da177e4SLinus Torvalds * If the page is mlock()d, we cannot swap it out. 12091da177e4SLinus Torvalds * If it's recently referenced (perhaps page_referenced 12101da177e4SLinus Torvalds * skipped over this mm) then we should reactivate it. 12111da177e4SLinus Torvalds */ 121214fa31b8SAndi Kleen if (!(flags & TTU_IGNORE_MLOCK)) { 1213caed0f48SKOSAKI Motohiro if (vma->vm_flags & VM_LOCKED) 1214caed0f48SKOSAKI Motohiro goto out_mlock; 1215caed0f48SKOSAKI Motohiro 1216af8e3354SHugh Dickins if (TTU_ACTION(flags) == TTU_MUNLOCK) 121753f79acbSHugh Dickins goto out_unmap; 121814fa31b8SAndi Kleen } 121914fa31b8SAndi Kleen if (!(flags & TTU_IGNORE_ACCESS)) { 1220b291f000SNick Piggin if (ptep_clear_flush_young_notify(vma, address, pte)) { 12211da177e4SLinus Torvalds ret = SWAP_FAIL; 12221da177e4SLinus Torvalds goto out_unmap; 12231da177e4SLinus Torvalds } 1224b291f000SNick Piggin } 12251da177e4SLinus Torvalds 12261da177e4SLinus Torvalds /* Nuke the page table entry. */ 12271da177e4SLinus Torvalds flush_cache_page(vma, address, page_to_pfn(page)); 12282ec74c3eSSagi Grimberg pteval = ptep_clear_flush(vma, address, pte); 12291da177e4SLinus Torvalds 12301da177e4SLinus Torvalds /* Move the dirty bit to the physical page now the pte is gone. */ 12311da177e4SLinus Torvalds if (pte_dirty(pteval)) 12321da177e4SLinus Torvalds set_page_dirty(page); 12331da177e4SLinus Torvalds 1234365e9c87SHugh Dickins /* Update high watermark before we lower rss */ 1235365e9c87SHugh Dickins update_hiwater_rss(mm); 1236365e9c87SHugh Dickins 1237888b9f7cSAndi Kleen if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { 1238888b9f7cSAndi Kleen if (PageAnon(page)) 1239d559db08SKAMEZAWA Hiroyuki dec_mm_counter(mm, MM_ANONPAGES); 1240888b9f7cSAndi Kleen else 1241d559db08SKAMEZAWA Hiroyuki dec_mm_counter(mm, MM_FILEPAGES); 1242888b9f7cSAndi Kleen set_pte_at(mm, address, pte, 1243888b9f7cSAndi Kleen swp_entry_to_pte(make_hwpoison_entry(page))); 1244888b9f7cSAndi Kleen } else if (PageAnon(page)) { 12454c21e2f2SHugh Dickins swp_entry_t entry = { .val = page_private(page) }; 12460697212aSChristoph Lameter 12470697212aSChristoph Lameter if (PageSwapCache(page)) { 12481da177e4SLinus Torvalds /* 12491da177e4SLinus Torvalds * Store the swap location in the pte. 12501da177e4SLinus Torvalds * See handle_pte_fault() ... 12511da177e4SLinus Torvalds */ 1252570a335bSHugh Dickins if (swap_duplicate(entry) < 0) { 1253570a335bSHugh Dickins set_pte_at(mm, address, pte, pteval); 1254570a335bSHugh Dickins ret = SWAP_FAIL; 1255570a335bSHugh Dickins goto out_unmap; 1256570a335bSHugh Dickins } 12571da177e4SLinus Torvalds if (list_empty(&mm->mmlist)) { 12581da177e4SLinus Torvalds spin_lock(&mmlist_lock); 1259f412ac08SHugh Dickins if (list_empty(&mm->mmlist)) 12601da177e4SLinus Torvalds list_add(&mm->mmlist, &init_mm.mmlist); 12611da177e4SLinus Torvalds spin_unlock(&mmlist_lock); 12621da177e4SLinus Torvalds } 1263d559db08SKAMEZAWA Hiroyuki dec_mm_counter(mm, MM_ANONPAGES); 1264b084d435SKAMEZAWA Hiroyuki inc_mm_counter(mm, MM_SWAPENTS); 1265ce1744f4SKonstantin Khlebnikov } else if (IS_ENABLED(CONFIG_MIGRATION)) { 12660697212aSChristoph Lameter /* 12670697212aSChristoph Lameter * Store the pfn of the page in a special migration 12680697212aSChristoph Lameter * pte. do_swap_page() will wait until the migration 12690697212aSChristoph Lameter * pte is removed and then restart fault handling. 12700697212aSChristoph Lameter */ 127114fa31b8SAndi Kleen BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); 12720697212aSChristoph Lameter entry = make_migration_entry(page, pte_write(pteval)); 12730697212aSChristoph Lameter } 12741da177e4SLinus Torvalds set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); 12751da177e4SLinus Torvalds BUG_ON(pte_file(*pte)); 1276ce1744f4SKonstantin Khlebnikov } else if (IS_ENABLED(CONFIG_MIGRATION) && 1277ce1744f4SKonstantin Khlebnikov (TTU_ACTION(flags) == TTU_MIGRATION)) { 127804e62a29SChristoph Lameter /* Establish migration entry for a file page */ 127904e62a29SChristoph Lameter swp_entry_t entry; 128004e62a29SChristoph Lameter entry = make_migration_entry(page, pte_write(pteval)); 128104e62a29SChristoph Lameter set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); 128204e62a29SChristoph Lameter } else 1283d559db08SKAMEZAWA Hiroyuki dec_mm_counter(mm, MM_FILEPAGES); 12841da177e4SLinus Torvalds 1285edc315fdSHugh Dickins page_remove_rmap(page); 12861da177e4SLinus Torvalds page_cache_release(page); 12871da177e4SLinus Torvalds 12881da177e4SLinus Torvalds out_unmap: 1289c0718806SHugh Dickins pte_unmap_unlock(pte, ptl); 12902ec74c3eSSagi Grimberg if (ret != SWAP_FAIL) 12912ec74c3eSSagi Grimberg mmu_notifier_invalidate_page(mm, address); 1292caed0f48SKOSAKI Motohiro out: 1293caed0f48SKOSAKI Motohiro return ret; 129453f79acbSHugh Dickins 1295caed0f48SKOSAKI Motohiro out_mlock: 1296caed0f48SKOSAKI Motohiro pte_unmap_unlock(pte, ptl); 1297caed0f48SKOSAKI Motohiro 1298caed0f48SKOSAKI Motohiro 1299caed0f48SKOSAKI Motohiro /* 1300caed0f48SKOSAKI Motohiro * We need mmap_sem locking, Otherwise VM_LOCKED check makes 1301caed0f48SKOSAKI Motohiro * unstable result and race. Plus, We can't wait here because 13022b575eb6SPeter Zijlstra * we now hold anon_vma->mutex or mapping->i_mmap_mutex. 1303caed0f48SKOSAKI Motohiro * if trylock failed, the page remain in evictable lru and later 1304caed0f48SKOSAKI Motohiro * vmscan could retry to move the page to unevictable lru if the 1305caed0f48SKOSAKI Motohiro * page is actually mlocked. 1306caed0f48SKOSAKI Motohiro */ 130753f79acbSHugh Dickins if (down_read_trylock(&vma->vm_mm->mmap_sem)) { 130853f79acbSHugh Dickins if (vma->vm_flags & VM_LOCKED) { 130953f79acbSHugh Dickins mlock_vma_page(page); 131053f79acbSHugh Dickins ret = SWAP_MLOCK; 131153f79acbSHugh Dickins } 131253f79acbSHugh Dickins up_read(&vma->vm_mm->mmap_sem); 131353f79acbSHugh Dickins } 13141da177e4SLinus Torvalds return ret; 13151da177e4SLinus Torvalds } 13161da177e4SLinus Torvalds 13171da177e4SLinus Torvalds /* 13181da177e4SLinus Torvalds * objrmap doesn't work for nonlinear VMAs because the assumption that 13191da177e4SLinus Torvalds * offset-into-file correlates with offset-into-virtual-addresses does not hold. 13201da177e4SLinus Torvalds * Consequently, given a particular page and its ->index, we cannot locate the 13211da177e4SLinus Torvalds * ptes which are mapping that page without an exhaustive linear search. 13221da177e4SLinus Torvalds * 13231da177e4SLinus Torvalds * So what this code does is a mini "virtual scan" of each nonlinear VMA which 13241da177e4SLinus Torvalds * maps the file to which the target page belongs. The ->vm_private_data field 13251da177e4SLinus Torvalds * holds the current cursor into that scan. Successive searches will circulate 13261da177e4SLinus Torvalds * around the vma's virtual address space. 13271da177e4SLinus Torvalds * 13281da177e4SLinus Torvalds * So as more replacement pressure is applied to the pages in a nonlinear VMA, 13291da177e4SLinus Torvalds * more scanning pressure is placed against them as well. Eventually pages 13301da177e4SLinus Torvalds * will become fully unmapped and are eligible for eviction. 13311da177e4SLinus Torvalds * 13321da177e4SLinus Torvalds * For very sparsely populated VMAs this is a little inefficient - chances are 13331da177e4SLinus Torvalds * there there won't be many ptes located within the scan cluster. In this case 13341da177e4SLinus Torvalds * maybe we could scan further - to the end of the pte page, perhaps. 1335b291f000SNick Piggin * 1336b291f000SNick Piggin * Mlocked pages: check VM_LOCKED under mmap_sem held for read, if we can 1337b291f000SNick Piggin * acquire it without blocking. If vma locked, mlock the pages in the cluster, 1338b291f000SNick Piggin * rather than unmapping them. If we encounter the "check_page" that vmscan is 1339b291f000SNick Piggin * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN. 13401da177e4SLinus Torvalds */ 13411da177e4SLinus Torvalds #define CLUSTER_SIZE min(32*PAGE_SIZE, PMD_SIZE) 13421da177e4SLinus Torvalds #define CLUSTER_MASK (~(CLUSTER_SIZE - 1)) 13431da177e4SLinus Torvalds 1344b291f000SNick Piggin static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, 1345b291f000SNick Piggin struct vm_area_struct *vma, struct page *check_page) 13461da177e4SLinus Torvalds { 13471da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 13481da177e4SLinus Torvalds pgd_t *pgd; 13491da177e4SLinus Torvalds pud_t *pud; 13501da177e4SLinus Torvalds pmd_t *pmd; 1351c0718806SHugh Dickins pte_t *pte; 13521da177e4SLinus Torvalds pte_t pteval; 1353c0718806SHugh Dickins spinlock_t *ptl; 13541da177e4SLinus Torvalds struct page *page; 13551da177e4SLinus Torvalds unsigned long address; 13562ec74c3eSSagi Grimberg unsigned long mmun_start; /* For mmu_notifiers */ 13572ec74c3eSSagi Grimberg unsigned long mmun_end; /* For mmu_notifiers */ 13581da177e4SLinus Torvalds unsigned long end; 1359b291f000SNick Piggin int ret = SWAP_AGAIN; 1360b291f000SNick Piggin int locked_vma = 0; 13611da177e4SLinus Torvalds 13621da177e4SLinus Torvalds address = (vma->vm_start + cursor) & CLUSTER_MASK; 13631da177e4SLinus Torvalds end = address + CLUSTER_SIZE; 13641da177e4SLinus Torvalds if (address < vma->vm_start) 13651da177e4SLinus Torvalds address = vma->vm_start; 13661da177e4SLinus Torvalds if (end > vma->vm_end) 13671da177e4SLinus Torvalds end = vma->vm_end; 13681da177e4SLinus Torvalds 13691da177e4SLinus Torvalds pgd = pgd_offset(mm, address); 13701da177e4SLinus Torvalds if (!pgd_present(*pgd)) 1371b291f000SNick Piggin return ret; 13721da177e4SLinus Torvalds 13731da177e4SLinus Torvalds pud = pud_offset(pgd, address); 13741da177e4SLinus Torvalds if (!pud_present(*pud)) 1375b291f000SNick Piggin return ret; 13761da177e4SLinus Torvalds 13771da177e4SLinus Torvalds pmd = pmd_offset(pud, address); 13781da177e4SLinus Torvalds if (!pmd_present(*pmd)) 1379b291f000SNick Piggin return ret; 1380b291f000SNick Piggin 13812ec74c3eSSagi Grimberg mmun_start = address; 13822ec74c3eSSagi Grimberg mmun_end = end; 13832ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 13842ec74c3eSSagi Grimberg 1385b291f000SNick Piggin /* 1386af8e3354SHugh Dickins * If we can acquire the mmap_sem for read, and vma is VM_LOCKED, 1387b291f000SNick Piggin * keep the sem while scanning the cluster for mlocking pages. 1388b291f000SNick Piggin */ 1389af8e3354SHugh Dickins if (down_read_trylock(&vma->vm_mm->mmap_sem)) { 1390b291f000SNick Piggin locked_vma = (vma->vm_flags & VM_LOCKED); 1391b291f000SNick Piggin if (!locked_vma) 1392b291f000SNick Piggin up_read(&vma->vm_mm->mmap_sem); /* don't need it */ 1393b291f000SNick Piggin } 1394c0718806SHugh Dickins 1395c0718806SHugh Dickins pte = pte_offset_map_lock(mm, pmd, address, &ptl); 13961da177e4SLinus Torvalds 1397365e9c87SHugh Dickins /* Update high watermark before we lower rss */ 1398365e9c87SHugh Dickins update_hiwater_rss(mm); 1399365e9c87SHugh Dickins 1400c0718806SHugh Dickins for (; address < end; pte++, address += PAGE_SIZE) { 14011da177e4SLinus Torvalds if (!pte_present(*pte)) 14021da177e4SLinus Torvalds continue; 14036aab341eSLinus Torvalds page = vm_normal_page(vma, address, *pte); 14046aab341eSLinus Torvalds BUG_ON(!page || PageAnon(page)); 14051da177e4SLinus Torvalds 1406b291f000SNick Piggin if (locked_vma) { 1407b291f000SNick Piggin mlock_vma_page(page); /* no-op if already mlocked */ 1408b291f000SNick Piggin if (page == check_page) 1409b291f000SNick Piggin ret = SWAP_MLOCK; 1410b291f000SNick Piggin continue; /* don't unmap */ 1411b291f000SNick Piggin } 1412b291f000SNick Piggin 1413cddb8a5cSAndrea Arcangeli if (ptep_clear_flush_young_notify(vma, address, pte)) 14141da177e4SLinus Torvalds continue; 14151da177e4SLinus Torvalds 14161da177e4SLinus Torvalds /* Nuke the page table entry. */ 1417eca35133SBen Collins flush_cache_page(vma, address, pte_pfn(*pte)); 14182ec74c3eSSagi Grimberg pteval = ptep_clear_flush(vma, address, pte); 14191da177e4SLinus Torvalds 14201da177e4SLinus Torvalds /* If nonlinear, store the file page offset in the pte. */ 14211da177e4SLinus Torvalds if (page->index != linear_page_index(vma, address)) 14221da177e4SLinus Torvalds set_pte_at(mm, address, pte, pgoff_to_pte(page->index)); 14231da177e4SLinus Torvalds 14241da177e4SLinus Torvalds /* Move the dirty bit to the physical page now the pte is gone. */ 14251da177e4SLinus Torvalds if (pte_dirty(pteval)) 14261da177e4SLinus Torvalds set_page_dirty(page); 14271da177e4SLinus Torvalds 1428edc315fdSHugh Dickins page_remove_rmap(page); 14291da177e4SLinus Torvalds page_cache_release(page); 1430d559db08SKAMEZAWA Hiroyuki dec_mm_counter(mm, MM_FILEPAGES); 14311da177e4SLinus Torvalds (*mapcount)--; 14321da177e4SLinus Torvalds } 1433c0718806SHugh Dickins pte_unmap_unlock(pte - 1, ptl); 14342ec74c3eSSagi Grimberg mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1435b291f000SNick Piggin if (locked_vma) 1436b291f000SNick Piggin up_read(&vma->vm_mm->mmap_sem); 1437b291f000SNick Piggin return ret; 14381da177e4SLinus Torvalds } 14391da177e4SLinus Torvalds 144071e3aac0SAndrea Arcangeli bool is_vma_temporary_stack(struct vm_area_struct *vma) 1441a8bef8ffSMel Gorman { 1442a8bef8ffSMel Gorman int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); 1443a8bef8ffSMel Gorman 1444a8bef8ffSMel Gorman if (!maybe_stack) 1445a8bef8ffSMel Gorman return false; 1446a8bef8ffSMel Gorman 1447a8bef8ffSMel Gorman if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == 1448a8bef8ffSMel Gorman VM_STACK_INCOMPLETE_SETUP) 1449a8bef8ffSMel Gorman return true; 1450a8bef8ffSMel Gorman 1451a8bef8ffSMel Gorman return false; 1452a8bef8ffSMel Gorman } 1453a8bef8ffSMel Gorman 1454b291f000SNick Piggin /** 1455b291f000SNick Piggin * try_to_unmap_anon - unmap or unlock anonymous page using the object-based 1456b291f000SNick Piggin * rmap method 1457b291f000SNick Piggin * @page: the page to unmap/unlock 14588051be5eSHuang Shijie * @flags: action and flags 1459b291f000SNick Piggin * 1460b291f000SNick Piggin * Find all the mappings of a page using the mapping pointer and the vma chains 1461b291f000SNick Piggin * contained in the anon_vma struct it points to. 1462b291f000SNick Piggin * 1463b291f000SNick Piggin * This function is only called from try_to_unmap/try_to_munlock for 1464b291f000SNick Piggin * anonymous pages. 1465b291f000SNick Piggin * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1466b291f000SNick Piggin * where the page was found will be held for write. So, we won't recheck 1467b291f000SNick Piggin * vm_flags for that VMA. That should be OK, because that vma shouldn't be 1468b291f000SNick Piggin * 'LOCKED. 1469b291f000SNick Piggin */ 147014fa31b8SAndi Kleen static int try_to_unmap_anon(struct page *page, enum ttu_flags flags) 14711da177e4SLinus Torvalds { 14721da177e4SLinus Torvalds struct anon_vma *anon_vma; 1473bf181b9fSMichel Lespinasse pgoff_t pgoff; 14745beb4930SRik van Riel struct anon_vma_chain *avc; 14751da177e4SLinus Torvalds int ret = SWAP_AGAIN; 1476b291f000SNick Piggin 14771da177e4SLinus Torvalds anon_vma = page_lock_anon_vma(page); 14781da177e4SLinus Torvalds if (!anon_vma) 14791da177e4SLinus Torvalds return ret; 14801da177e4SLinus Torvalds 1481bf181b9fSMichel Lespinasse pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 1482bf181b9fSMichel Lespinasse anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { 14835beb4930SRik van Riel struct vm_area_struct *vma = avc->vma; 1484a8bef8ffSMel Gorman unsigned long address; 1485a8bef8ffSMel Gorman 1486a8bef8ffSMel Gorman /* 1487a8bef8ffSMel Gorman * During exec, a temporary VMA is setup and later moved. 1488a8bef8ffSMel Gorman * The VMA is moved under the anon_vma lock but not the 1489a8bef8ffSMel Gorman * page tables leading to a race where migration cannot 1490a8bef8ffSMel Gorman * find the migration ptes. Rather than increasing the 1491a8bef8ffSMel Gorman * locking requirements of exec(), migration skips 1492a8bef8ffSMel Gorman * temporary VMAs until after exec() completes. 1493a8bef8ffSMel Gorman */ 1494ce1744f4SKonstantin Khlebnikov if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) && 1495a8bef8ffSMel Gorman is_vma_temporary_stack(vma)) 1496a8bef8ffSMel Gorman continue; 1497a8bef8ffSMel Gorman 1498a8bef8ffSMel Gorman address = vma_address(page, vma); 14991cb1729bSHugh Dickins ret = try_to_unmap_one(page, vma, address, flags); 150053f79acbSHugh Dickins if (ret != SWAP_AGAIN || !page_mapped(page)) 15011da177e4SLinus Torvalds break; 15021da177e4SLinus Torvalds } 150334bbd704SOleg Nesterov 150434bbd704SOleg Nesterov page_unlock_anon_vma(anon_vma); 15051da177e4SLinus Torvalds return ret; 15061da177e4SLinus Torvalds } 15071da177e4SLinus Torvalds 15081da177e4SLinus Torvalds /** 1509b291f000SNick Piggin * try_to_unmap_file - unmap/unlock file page using the object-based rmap method 1510b291f000SNick Piggin * @page: the page to unmap/unlock 151114fa31b8SAndi Kleen * @flags: action and flags 15121da177e4SLinus Torvalds * 15131da177e4SLinus Torvalds * Find all the mappings of a page using the mapping pointer and the vma chains 15141da177e4SLinus Torvalds * contained in the address_space struct it points to. 15151da177e4SLinus Torvalds * 1516b291f000SNick Piggin * This function is only called from try_to_unmap/try_to_munlock for 1517b291f000SNick Piggin * object-based pages. 1518b291f000SNick Piggin * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1519b291f000SNick Piggin * where the page was found will be held for write. So, we won't recheck 1520b291f000SNick Piggin * vm_flags for that VMA. That should be OK, because that vma shouldn't be 1521b291f000SNick Piggin * 'LOCKED. 15221da177e4SLinus Torvalds */ 152314fa31b8SAndi Kleen static int try_to_unmap_file(struct page *page, enum ttu_flags flags) 15241da177e4SLinus Torvalds { 15251da177e4SLinus Torvalds struct address_space *mapping = page->mapping; 15261da177e4SLinus Torvalds pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 15271da177e4SLinus Torvalds struct vm_area_struct *vma; 15281da177e4SLinus Torvalds int ret = SWAP_AGAIN; 15291da177e4SLinus Torvalds unsigned long cursor; 15301da177e4SLinus Torvalds unsigned long max_nl_cursor = 0; 15311da177e4SLinus Torvalds unsigned long max_nl_size = 0; 15321da177e4SLinus Torvalds unsigned int mapcount; 15331da177e4SLinus Torvalds 15343d48ae45SPeter Zijlstra mutex_lock(&mapping->i_mmap_mutex); 15356b2dbba8SMichel Lespinasse vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 15361cb1729bSHugh Dickins unsigned long address = vma_address(page, vma); 15371cb1729bSHugh Dickins ret = try_to_unmap_one(page, vma, address, flags); 153853f79acbSHugh Dickins if (ret != SWAP_AGAIN || !page_mapped(page)) 15391da177e4SLinus Torvalds goto out; 15401da177e4SLinus Torvalds } 1541b291f000SNick Piggin 15421da177e4SLinus Torvalds if (list_empty(&mapping->i_mmap_nonlinear)) 15431da177e4SLinus Torvalds goto out; 15441da177e4SLinus Torvalds 154553f79acbSHugh Dickins /* 154653f79acbSHugh Dickins * We don't bother to try to find the munlocked page in nonlinears. 154753f79acbSHugh Dickins * It's costly. Instead, later, page reclaim logic may call 154853f79acbSHugh Dickins * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily. 154953f79acbSHugh Dickins */ 155053f79acbSHugh Dickins if (TTU_ACTION(flags) == TTU_MUNLOCK) 155153f79acbSHugh Dickins goto out; 155253f79acbSHugh Dickins 15531da177e4SLinus Torvalds list_for_each_entry(vma, &mapping->i_mmap_nonlinear, 15546b2dbba8SMichel Lespinasse shared.nonlinear) { 15551da177e4SLinus Torvalds cursor = (unsigned long) vma->vm_private_data; 15561da177e4SLinus Torvalds if (cursor > max_nl_cursor) 15571da177e4SLinus Torvalds max_nl_cursor = cursor; 15581da177e4SLinus Torvalds cursor = vma->vm_end - vma->vm_start; 15591da177e4SLinus Torvalds if (cursor > max_nl_size) 15601da177e4SLinus Torvalds max_nl_size = cursor; 15611da177e4SLinus Torvalds } 15621da177e4SLinus Torvalds 1563b291f000SNick Piggin if (max_nl_size == 0) { /* all nonlinears locked or reserved ? */ 15641da177e4SLinus Torvalds ret = SWAP_FAIL; 15651da177e4SLinus Torvalds goto out; 15661da177e4SLinus Torvalds } 15671da177e4SLinus Torvalds 15681da177e4SLinus Torvalds /* 15691da177e4SLinus Torvalds * We don't try to search for this page in the nonlinear vmas, 15701da177e4SLinus Torvalds * and page_referenced wouldn't have found it anyway. Instead 15711da177e4SLinus Torvalds * just walk the nonlinear vmas trying to age and unmap some. 15721da177e4SLinus Torvalds * The mapcount of the page we came in with is irrelevant, 15731da177e4SLinus Torvalds * but even so use it as a guide to how hard we should try? 15741da177e4SLinus Torvalds */ 15751da177e4SLinus Torvalds mapcount = page_mapcount(page); 15761da177e4SLinus Torvalds if (!mapcount) 15771da177e4SLinus Torvalds goto out; 15783d48ae45SPeter Zijlstra cond_resched(); 15791da177e4SLinus Torvalds 15801da177e4SLinus Torvalds max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK; 15811da177e4SLinus Torvalds if (max_nl_cursor == 0) 15821da177e4SLinus Torvalds max_nl_cursor = CLUSTER_SIZE; 15831da177e4SLinus Torvalds 15841da177e4SLinus Torvalds do { 15851da177e4SLinus Torvalds list_for_each_entry(vma, &mapping->i_mmap_nonlinear, 15866b2dbba8SMichel Lespinasse shared.nonlinear) { 15871da177e4SLinus Torvalds cursor = (unsigned long) vma->vm_private_data; 1588839b9685SHugh Dickins while ( cursor < max_nl_cursor && 15891da177e4SLinus Torvalds cursor < vma->vm_end - vma->vm_start) { 159053f79acbSHugh Dickins if (try_to_unmap_cluster(cursor, &mapcount, 159153f79acbSHugh Dickins vma, page) == SWAP_MLOCK) 159253f79acbSHugh Dickins ret = SWAP_MLOCK; 15931da177e4SLinus Torvalds cursor += CLUSTER_SIZE; 15941da177e4SLinus Torvalds vma->vm_private_data = (void *) cursor; 15951da177e4SLinus Torvalds if ((int)mapcount <= 0) 15961da177e4SLinus Torvalds goto out; 15971da177e4SLinus Torvalds } 15981da177e4SLinus Torvalds vma->vm_private_data = (void *) max_nl_cursor; 15991da177e4SLinus Torvalds } 16003d48ae45SPeter Zijlstra cond_resched(); 16011da177e4SLinus Torvalds max_nl_cursor += CLUSTER_SIZE; 16021da177e4SLinus Torvalds } while (max_nl_cursor <= max_nl_size); 16031da177e4SLinus Torvalds 16041da177e4SLinus Torvalds /* 16051da177e4SLinus Torvalds * Don't loop forever (perhaps all the remaining pages are 16061da177e4SLinus Torvalds * in locked vmas). Reset cursor on all unreserved nonlinear 16071da177e4SLinus Torvalds * vmas, now forgetting on which ones it had fallen behind. 16081da177e4SLinus Torvalds */ 16096b2dbba8SMichel Lespinasse list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear) 16101da177e4SLinus Torvalds vma->vm_private_data = NULL; 16111da177e4SLinus Torvalds out: 16123d48ae45SPeter Zijlstra mutex_unlock(&mapping->i_mmap_mutex); 16131da177e4SLinus Torvalds return ret; 16141da177e4SLinus Torvalds } 16151da177e4SLinus Torvalds 16161da177e4SLinus Torvalds /** 16171da177e4SLinus Torvalds * try_to_unmap - try to remove all page table mappings to a page 16181da177e4SLinus Torvalds * @page: the page to get unmapped 161914fa31b8SAndi Kleen * @flags: action and flags 16201da177e4SLinus Torvalds * 16211da177e4SLinus Torvalds * Tries to remove all the page table entries which are mapping this 16221da177e4SLinus Torvalds * page, used in the pageout path. Caller must hold the page lock. 16231da177e4SLinus Torvalds * Return values are: 16241da177e4SLinus Torvalds * 16251da177e4SLinus Torvalds * SWAP_SUCCESS - we succeeded in removing all mappings 16261da177e4SLinus Torvalds * SWAP_AGAIN - we missed a mapping, try again later 16271da177e4SLinus Torvalds * SWAP_FAIL - the page is unswappable 1628b291f000SNick Piggin * SWAP_MLOCK - page is mlocked. 16291da177e4SLinus Torvalds */ 163014fa31b8SAndi Kleen int try_to_unmap(struct page *page, enum ttu_flags flags) 16311da177e4SLinus Torvalds { 16321da177e4SLinus Torvalds int ret; 16331da177e4SLinus Torvalds 16341da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 163591600e9eSAndrea Arcangeli VM_BUG_ON(!PageHuge(page) && PageTransHuge(page)); 16361da177e4SLinus Torvalds 16375ad64688SHugh Dickins if (unlikely(PageKsm(page))) 16385ad64688SHugh Dickins ret = try_to_unmap_ksm(page, flags); 16395ad64688SHugh Dickins else if (PageAnon(page)) 164014fa31b8SAndi Kleen ret = try_to_unmap_anon(page, flags); 16411da177e4SLinus Torvalds else 164214fa31b8SAndi Kleen ret = try_to_unmap_file(page, flags); 1643b291f000SNick Piggin if (ret != SWAP_MLOCK && !page_mapped(page)) 16441da177e4SLinus Torvalds ret = SWAP_SUCCESS; 16451da177e4SLinus Torvalds return ret; 16461da177e4SLinus Torvalds } 164781b4082dSNikita Danilov 1648b291f000SNick Piggin /** 1649b291f000SNick Piggin * try_to_munlock - try to munlock a page 1650b291f000SNick Piggin * @page: the page to be munlocked 1651b291f000SNick Piggin * 1652b291f000SNick Piggin * Called from munlock code. Checks all of the VMAs mapping the page 1653b291f000SNick Piggin * to make sure nobody else has this page mlocked. The page will be 1654b291f000SNick Piggin * returned with PG_mlocked cleared if no other vmas have it mlocked. 1655b291f000SNick Piggin * 1656b291f000SNick Piggin * Return values are: 1657b291f000SNick Piggin * 165853f79acbSHugh Dickins * SWAP_AGAIN - no vma is holding page mlocked, or, 1659b291f000SNick Piggin * SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem 16605ad64688SHugh Dickins * SWAP_FAIL - page cannot be located at present 1661b291f000SNick Piggin * SWAP_MLOCK - page is now mlocked. 1662b291f000SNick Piggin */ 1663b291f000SNick Piggin int try_to_munlock(struct page *page) 1664b291f000SNick Piggin { 1665b291f000SNick Piggin VM_BUG_ON(!PageLocked(page) || PageLRU(page)); 1666b291f000SNick Piggin 16675ad64688SHugh Dickins if (unlikely(PageKsm(page))) 16685ad64688SHugh Dickins return try_to_unmap_ksm(page, TTU_MUNLOCK); 16695ad64688SHugh Dickins else if (PageAnon(page)) 167014fa31b8SAndi Kleen return try_to_unmap_anon(page, TTU_MUNLOCK); 1671b291f000SNick Piggin else 167214fa31b8SAndi Kleen return try_to_unmap_file(page, TTU_MUNLOCK); 1673b291f000SNick Piggin } 1674e9995ef9SHugh Dickins 167501d8b20dSPeter Zijlstra void __put_anon_vma(struct anon_vma *anon_vma) 167676545066SRik van Riel { 167776545066SRik van Riel struct anon_vma *root = anon_vma->root; 167876545066SRik van Riel 167901d8b20dSPeter Zijlstra if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 168076545066SRik van Riel anon_vma_free(root); 168101d8b20dSPeter Zijlstra 168201d8b20dSPeter Zijlstra anon_vma_free(anon_vma); 168376545066SRik van Riel } 168476545066SRik van Riel 1685e9995ef9SHugh Dickins #ifdef CONFIG_MIGRATION 1686e9995ef9SHugh Dickins /* 1687e9995ef9SHugh Dickins * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file(): 1688e9995ef9SHugh Dickins * Called by migrate.c to remove migration ptes, but might be used more later. 1689e9995ef9SHugh Dickins */ 1690e9995ef9SHugh Dickins static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *, 1691e9995ef9SHugh Dickins struct vm_area_struct *, unsigned long, void *), void *arg) 1692e9995ef9SHugh Dickins { 1693e9995ef9SHugh Dickins struct anon_vma *anon_vma; 1694bf181b9fSMichel Lespinasse pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 16955beb4930SRik van Riel struct anon_vma_chain *avc; 1696e9995ef9SHugh Dickins int ret = SWAP_AGAIN; 1697e9995ef9SHugh Dickins 1698e9995ef9SHugh Dickins /* 1699e9995ef9SHugh Dickins * Note: remove_migration_ptes() cannot use page_lock_anon_vma() 1700e9995ef9SHugh Dickins * because that depends on page_mapped(); but not all its usages 17013f6c8272SMel Gorman * are holding mmap_sem. Users without mmap_sem are required to 17023f6c8272SMel Gorman * take a reference count to prevent the anon_vma disappearing 1703e9995ef9SHugh Dickins */ 1704e9995ef9SHugh Dickins anon_vma = page_anon_vma(page); 1705e9995ef9SHugh Dickins if (!anon_vma) 1706e9995ef9SHugh Dickins return ret; 1707cba48b98SRik van Riel anon_vma_lock(anon_vma); 1708bf181b9fSMichel Lespinasse anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { 17095beb4930SRik van Riel struct vm_area_struct *vma = avc->vma; 1710e9995ef9SHugh Dickins unsigned long address = vma_address(page, vma); 1711e9995ef9SHugh Dickins ret = rmap_one(page, vma, address, arg); 1712e9995ef9SHugh Dickins if (ret != SWAP_AGAIN) 1713e9995ef9SHugh Dickins break; 1714e9995ef9SHugh Dickins } 1715cba48b98SRik van Riel anon_vma_unlock(anon_vma); 1716e9995ef9SHugh Dickins return ret; 1717e9995ef9SHugh Dickins } 1718e9995ef9SHugh Dickins 1719e9995ef9SHugh Dickins static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *, 1720e9995ef9SHugh Dickins struct vm_area_struct *, unsigned long, void *), void *arg) 1721e9995ef9SHugh Dickins { 1722e9995ef9SHugh Dickins struct address_space *mapping = page->mapping; 1723e9995ef9SHugh Dickins pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 1724e9995ef9SHugh Dickins struct vm_area_struct *vma; 1725e9995ef9SHugh Dickins int ret = SWAP_AGAIN; 1726e9995ef9SHugh Dickins 1727e9995ef9SHugh Dickins if (!mapping) 1728e9995ef9SHugh Dickins return ret; 17293d48ae45SPeter Zijlstra mutex_lock(&mapping->i_mmap_mutex); 17306b2dbba8SMichel Lespinasse vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 1731e9995ef9SHugh Dickins unsigned long address = vma_address(page, vma); 1732e9995ef9SHugh Dickins ret = rmap_one(page, vma, address, arg); 1733e9995ef9SHugh Dickins if (ret != SWAP_AGAIN) 1734e9995ef9SHugh Dickins break; 1735e9995ef9SHugh Dickins } 1736e9995ef9SHugh Dickins /* 1737e9995ef9SHugh Dickins * No nonlinear handling: being always shared, nonlinear vmas 1738e9995ef9SHugh Dickins * never contain migration ptes. Decide what to do about this 1739e9995ef9SHugh Dickins * limitation to linear when we need rmap_walk() on nonlinear. 1740e9995ef9SHugh Dickins */ 17413d48ae45SPeter Zijlstra mutex_unlock(&mapping->i_mmap_mutex); 1742e9995ef9SHugh Dickins return ret; 1743e9995ef9SHugh Dickins } 1744e9995ef9SHugh Dickins 1745e9995ef9SHugh Dickins int rmap_walk(struct page *page, int (*rmap_one)(struct page *, 1746e9995ef9SHugh Dickins struct vm_area_struct *, unsigned long, void *), void *arg) 1747e9995ef9SHugh Dickins { 1748e9995ef9SHugh Dickins VM_BUG_ON(!PageLocked(page)); 1749e9995ef9SHugh Dickins 1750e9995ef9SHugh Dickins if (unlikely(PageKsm(page))) 1751e9995ef9SHugh Dickins return rmap_walk_ksm(page, rmap_one, arg); 1752e9995ef9SHugh Dickins else if (PageAnon(page)) 1753e9995ef9SHugh Dickins return rmap_walk_anon(page, rmap_one, arg); 1754e9995ef9SHugh Dickins else 1755e9995ef9SHugh Dickins return rmap_walk_file(page, rmap_one, arg); 1756e9995ef9SHugh Dickins } 1757e9995ef9SHugh Dickins #endif /* CONFIG_MIGRATION */ 17580fe6e20bSNaoya Horiguchi 1759e3390f67SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE 17600fe6e20bSNaoya Horiguchi /* 17610fe6e20bSNaoya Horiguchi * The following three functions are for anonymous (private mapped) hugepages. 17620fe6e20bSNaoya Horiguchi * Unlike common anonymous pages, anonymous hugepages have no accounting code 17630fe6e20bSNaoya Horiguchi * and no lru code, because we handle hugepages differently from common pages. 17640fe6e20bSNaoya Horiguchi */ 17650fe6e20bSNaoya Horiguchi static void __hugepage_set_anon_rmap(struct page *page, 17660fe6e20bSNaoya Horiguchi struct vm_area_struct *vma, unsigned long address, int exclusive) 17670fe6e20bSNaoya Horiguchi { 17680fe6e20bSNaoya Horiguchi struct anon_vma *anon_vma = vma->anon_vma; 1769433abed6SNaoya Horiguchi 17700fe6e20bSNaoya Horiguchi BUG_ON(!anon_vma); 1771433abed6SNaoya Horiguchi 1772433abed6SNaoya Horiguchi if (PageAnon(page)) 1773433abed6SNaoya Horiguchi return; 1774433abed6SNaoya Horiguchi if (!exclusive) 1775433abed6SNaoya Horiguchi anon_vma = anon_vma->root; 1776433abed6SNaoya Horiguchi 17770fe6e20bSNaoya Horiguchi anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 17780fe6e20bSNaoya Horiguchi page->mapping = (struct address_space *) anon_vma; 17790fe6e20bSNaoya Horiguchi page->index = linear_page_index(vma, address); 17800fe6e20bSNaoya Horiguchi } 17810fe6e20bSNaoya Horiguchi 17820fe6e20bSNaoya Horiguchi void hugepage_add_anon_rmap(struct page *page, 17830fe6e20bSNaoya Horiguchi struct vm_area_struct *vma, unsigned long address) 17840fe6e20bSNaoya Horiguchi { 17850fe6e20bSNaoya Horiguchi struct anon_vma *anon_vma = vma->anon_vma; 17860fe6e20bSNaoya Horiguchi int first; 1787a850ea30SNaoya Horiguchi 1788a850ea30SNaoya Horiguchi BUG_ON(!PageLocked(page)); 17890fe6e20bSNaoya Horiguchi BUG_ON(!anon_vma); 17905dbe0af4SHugh Dickins /* address might be in next vma when migration races vma_adjust */ 17910fe6e20bSNaoya Horiguchi first = atomic_inc_and_test(&page->_mapcount); 17920fe6e20bSNaoya Horiguchi if (first) 17930fe6e20bSNaoya Horiguchi __hugepage_set_anon_rmap(page, vma, address, 0); 17940fe6e20bSNaoya Horiguchi } 17950fe6e20bSNaoya Horiguchi 17960fe6e20bSNaoya Horiguchi void hugepage_add_new_anon_rmap(struct page *page, 17970fe6e20bSNaoya Horiguchi struct vm_area_struct *vma, unsigned long address) 17980fe6e20bSNaoya Horiguchi { 17990fe6e20bSNaoya Horiguchi BUG_ON(address < vma->vm_start || address >= vma->vm_end); 18000fe6e20bSNaoya Horiguchi atomic_set(&page->_mapcount, 0); 18010fe6e20bSNaoya Horiguchi __hugepage_set_anon_rmap(page, vma, address, 1); 18020fe6e20bSNaoya Horiguchi } 1803e3390f67SNaoya Horiguchi #endif /* CONFIG_HUGETLB_PAGE */ 1804