/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_mutex
 *         anon_vma->rwsem
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *               bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mutex	(memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>

#include <asm/tlbflush.h>
"internal.h" 64b291f000SNick Piggin 65fdd2e5f8SAdrian Bunk static struct kmem_cache *anon_vma_cachep; 665beb4930SRik van Riel static struct kmem_cache *anon_vma_chain_cachep; 67fdd2e5f8SAdrian Bunk 68fdd2e5f8SAdrian Bunk static inline struct anon_vma *anon_vma_alloc(void) 69fdd2e5f8SAdrian Bunk { 7001d8b20dSPeter Zijlstra struct anon_vma *anon_vma; 7101d8b20dSPeter Zijlstra 7201d8b20dSPeter Zijlstra anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); 7301d8b20dSPeter Zijlstra if (anon_vma) { 7401d8b20dSPeter Zijlstra atomic_set(&anon_vma->refcount, 1); 7501d8b20dSPeter Zijlstra /* 7601d8b20dSPeter Zijlstra * Initialise the anon_vma root to point to itself. If called 7701d8b20dSPeter Zijlstra * from fork, the root will be reset to the parents anon_vma. 7801d8b20dSPeter Zijlstra */ 7901d8b20dSPeter Zijlstra anon_vma->root = anon_vma; 80fdd2e5f8SAdrian Bunk } 81fdd2e5f8SAdrian Bunk 8201d8b20dSPeter Zijlstra return anon_vma; 8301d8b20dSPeter Zijlstra } 8401d8b20dSPeter Zijlstra 8501d8b20dSPeter Zijlstra static inline void anon_vma_free(struct anon_vma *anon_vma) 86fdd2e5f8SAdrian Bunk { 8701d8b20dSPeter Zijlstra VM_BUG_ON(atomic_read(&anon_vma->refcount)); 8888c22088SPeter Zijlstra 8988c22088SPeter Zijlstra /* 904fc3f1d6SIngo Molnar * Synchronize against page_lock_anon_vma_read() such that 9188c22088SPeter Zijlstra * we can safely hold the lock without the anon_vma getting 9288c22088SPeter Zijlstra * freed. 9388c22088SPeter Zijlstra * 9488c22088SPeter Zijlstra * Relies on the full mb implied by the atomic_dec_and_test() from 9588c22088SPeter Zijlstra * put_anon_vma() against the acquire barrier implied by 964fc3f1d6SIngo Molnar * down_read_trylock() from page_lock_anon_vma_read(). This orders: 9788c22088SPeter Zijlstra * 984fc3f1d6SIngo Molnar * page_lock_anon_vma_read() VS put_anon_vma() 994fc3f1d6SIngo Molnar * down_read_trylock() atomic_dec_and_test() 10088c22088SPeter Zijlstra * LOCK MB 1014fc3f1d6SIngo Molnar * atomic_read() rwsem_is_locked() 10288c22088SPeter Zijlstra * 10388c22088SPeter Zijlstra * LOCK should suffice since the actual taking of the lock must 10488c22088SPeter Zijlstra * happen _before_ what follows. 
static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}
/**
 * anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, but if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	struct anon_vma_chain *avc;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated;

		avc = anon_vma_chain_alloc(GFP_KERNEL);
		if (!avc)
			goto out_enomem;

		anon_vma = find_mergeable_anon_vma(vma);
		allocated = NULL;
		if (!anon_vma) {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				goto out_enomem_free_avc;
			allocated = anon_vma;
		}

		anon_vma_lock_write(anon_vma);
		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			anon_vma_chain_link(vma, avc, anon_vma);
			allocated = NULL;
			avc = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		anon_vma_unlock_write(anon_vma);

		if (unlikely(allocated))
			put_anon_vma(allocated);
		if (unlikely(avc))
			anon_vma_chain_free(avc);
	}
	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
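/*
 * Editor's sketch (hypothetical caller, not in this file): the anonymous
 * fault path calls anon_vma_prepare() before the first anonymous page can
 * be mapped, with mmap_sem held for reading as the comment above requires.
 */
static int __maybe_unused example_fault_prepare(struct vm_area_struct *vma)
{
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;
	/* ... now safe to allocate a page and page_add_new_anon_rmap() it ... */
	return 0;
}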
/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);
	}
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	unlink_anon_vmas(dst);
	return -ENOMEM;
}
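/*
 * Editor's sketch (hypothetical caller, simplified from the vma-split
 * path): a new vma carved out of an old one inherits every anon_vma the
 * old vma is chained to, so rmap can find existing pages through either.
 */
static int __maybe_unused example_split_inherits_rmap(struct vm_area_struct *new,
						      struct vm_area_struct *old)
{
	INIT_LIST_HEAD(&new->anon_vma_chain);
	return anon_vma_clone(new, old);	/* 0 or -ENOMEM */
}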
/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	if (anon_vma_clone(vma, pvma))
		return -ENOMEM;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's spinlock is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}
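/*
 * Editor's sketch (hypothetical, simplified from the fork path): for each
 * vma copied into the child, anon_vma_fork() links it to the parent's
 * anon_vmas (covering already-shared pages) and gives it a fresh anon_vma
 * of its own for pages COWed after the fork.
 */
static int __maybe_unused example_fork_one_vma(struct vm_area_struct *child,
					       struct vm_area_struct *parent)
{
	INIT_LIST_HEAD(&child->anon_vma_chain);
	return anon_vma_fork(child, parent);	/* 0 or -ENOMEM */
}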
void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA.  This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root))
			continue;

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more; it now contains only empty and
	 * unlinked anon_vmas. Destroy them: this could not be done in the
	 * first pass because __put_anon_vma() needs to write-acquire the
	 * anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
}
/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
 * that the anon_vma pointer from page->mapping is valid if there is a
 * mapcount, we can dereference the anon_vma after observing those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed.  But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}
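/*
 * Editor's sketch (hypothetical caller): the contract spelled out above,
 * in code: take the reference, re-verify the page is still mapped in
 * whatever vma the walk visits, and always drop the reference.
 */
static void __maybe_unused example_get_then_put(struct page *page)
{
	struct anon_vma *anon_vma = page_get_anon_vma(page);

	if (!anon_vma)
		return;		/* not anon, or no longer mapped */
	/* ... walk, rechecking page_mapped_in_vma() for each vma ... */
	put_anon_vma(anon_vma);
}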
/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = ACCESS_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we have to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}
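/*
 * Editor's sketch (hypothetical caller): the lock/walk/unlock pattern the
 * rmap walkers use. While the read lock is held the interval tree is
 * stable, so the page's vmas can be visited safely.
 */
static void __maybe_unused example_locked_walk(struct page *page)
{
	struct anon_vma *anon_vma = page_lock_anon_vma_read(page);

	if (!anon_vma)
		return;
	/* ... iterate the anon_vma's rb_root interval tree here ... */
	page_unlock_anon_vma_read(anon_vma);
}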
/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	if (unlikely(is_vm_hugetlb_page(vma)))
		pgoff = page->index << huge_page_order(page_hstate(page));

	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address = __vma_address(page, vma);

	/* page should be within @vma mapping range */
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);

	return address;
}

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}
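/*
 * Editor's note, a worked example of the arithmetic above (hypothetical
 * numbers): for a vma with vm_start = 0x400000 and vm_pgoff = 0, a page
 * with index 3 is expected at
 *
 *	0x400000 + ((3 - 0) << PAGE_SHIFT) = 0x403000
 *
 * with 4K pages, i.e. the inverse of linear_page_index().
 */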
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		pmd = NULL;
out:
	return pmd;
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, page_check_address may perform a racy check to avoid
 * the page table lock when the pte is not present (helpful when reclaiming
 * highly shared pages).
 *
 * On success returns with pte mapped and locked.
 */
pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp, int sync)
{
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(page))) {
		/* when pud is not present, pte will be NULL */
		pte = huge_pte_offset(mm, address);
		if (!pte)
			return NULL;

		ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
		goto check;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		return NULL;

	if (pmd_trans_huge(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
check:
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
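/*
 * Editor's sketch (hypothetical caller) of the locking contract above:
 * on success the pte comes back mapped and locked, so the caller must
 * finish with pte_unmap_unlock(). Uses the page_check_address() wrapper
 * from <linux/rmap.h>.
 */
static int __maybe_unused example_pte_is_writable(struct page *page,
		struct mm_struct *mm, unsigned long address)
{
	spinlock_t *ptl;
	pte_t *pte;
	int writable;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		return 0;	/* page not mapped there */
	writable = !!pte_write(*pte);
	pte_unmap_unlock(pte, ptl);
	return writable;
}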
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;

	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return 0;
	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
	if (!pte)			/* the page is not in this mm */
		return 0;
	pte_unmap_unlock(pte, ptl);

	return 1;
}

struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};

/*
 * @arg: a struct page_referenced_arg is passed in here
 */
int page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	int referenced = 0;
	struct page_referenced_arg *pra = arg;

	if (unlikely(PageTransHuge(page))) {
		pmd_t *pmd;

		/*
		 * rmap might return false positives; we must filter
		 * these out using page_check_address_pmd().
		 */
		pmd = page_check_address_pmd(page, mm, address,
					     PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl);
		if (!pmd)
			return SWAP_AGAIN;

		if (vma->vm_flags & VM_LOCKED) {
			spin_unlock(ptl);
			pra->vm_flags |= VM_LOCKED;
			return SWAP_FAIL; /* To break the loop */
		}

		/* go ahead even if the pmd is pmd_trans_splitting() */
		if (pmdp_clear_flush_young_notify(vma, address, pmd))
			referenced++;
		spin_unlock(ptl);
	} else {
		pte_t *pte;

		/*
		 * rmap might return false positives; we must filter
		 * these out using page_check_address().
		 */
		pte = page_check_address(page, mm, address, &ptl, 0);
		if (!pte)
			return SWAP_AGAIN;

		if (vma->vm_flags & VM_LOCKED) {
			pte_unmap_unlock(pte, ptl);
			pra->vm_flags |= VM_LOCKED;
			return SWAP_FAIL; /* To break the loop */
		}

		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			/*
			 * Don't treat a reference through a sequentially read
			 * mapping as such.  If the page has been used in
			 * another mapping, we will catch it; if this other
			 * mapping is already gone, the unmap path will have
			 * set PG_referenced or activated the page.
			 */
			if (likely(!(vma->vm_flags & VM_SEQ_READ)))
				referenced++;
		}
		pte_unmap_unlock(pte, ptl);
	}

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	pra->mapcount--;
	if (!pra->mapcount)
		return SWAP_SUCCESS; /* To break the loop */

	return SWAP_AGAIN;
}
static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect the vm_flags of the vmas that actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int ret;
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = page_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!page_mapped(page))
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups.
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

	ret = rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}
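/*
 * Editor's sketch (hypothetical, vmscan-flavoured caller): the returned
 * count and the collected vm_flags drive the keep-or-reclaim decision.
 */
static int __maybe_unused example_should_keep(struct page *page,
					      struct mem_cgroup *memcg)
{
	unsigned long vm_flags;
	int referenced;

	referenced = page_referenced(page, PageLocked(page), memcg, &vm_flags);
	if (vm_flags & VM_LOCKED)
		return 1;	/* mlocked somewhere: never reclaim */
	return referenced > 0;
}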
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;
	int *cleaned = arg;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);

	if (ret) {
		mmu_notifier_invalidate_page(mm, address);
		(*cleaned)++;
	}
out:
	return SWAP_AGAIN;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);
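/*
 * Editor's sketch (hypothetical, loosely following the writeback path):
 * write-protect every pte mapping the page so future writes refault and
 * re-dirty it, fold any pte dirt back into the page, then atomically
 * test-and-clear the page's own dirty bit before starting I/O.
 */
static int __maybe_unused example_clean_for_io(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (page_mkclean(page))
		set_page_dirty(page);	/* transfer pte dirt to the page */
	return TestClearPageDirty(page);
}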
/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 * @address:	the user virtual address mapped
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON(!anon_vma);
	VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	Page to add to rmap
 * @vma:	VM area to add page to
 * @address:	User virtual address of the mapping
 * @exclusive:	the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}
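/*
 * Editor's sketch: how the encoding written above is read back (this
 * mirrors PageAnon()/page_anon_vma()): the PAGE_MAPPING_ANON bit of
 * page->mapping distinguishes an anon_vma pointer from a file's
 * address_space pointer.
 */
static struct anon_vma *__maybe_unused example_decode_mapping(struct page *page)
{
	unsigned long mapping = (unsigned long)page->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;	/* file-backed (or KSM) page */
	return (struct anon_vma *)(mapping - PAGE_MAPPING_ANON);
}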
/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	do_page_add_anon_rmap(page, vma, address, 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	int first = atomic_inc_and_test(&page->_mapcount);
	if (first) {
		if (PageTransHuge(page))
			__inc_zone_page_state(page,
					      NR_ANON_TRANSPARENT_HUGEPAGES);
		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
				hpage_nr_pages(page));
	}
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address, exclusive);
	else
		__page_check_anon_rmap(page, vma, address);
}
/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	SetPageSwapBacked(page);
	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
	if (PageTransHuge(page))
		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
			hpage_nr_pages(page));
	__page_set_anon_rmap(page, vma, address, 1);
	if (!mlocked_vma_newpage(vma, page)) {
		SetPageActive(page);
		lru_cache_add(page);
	} else
		add_page_to_unevictable_list(page);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	bool locked;
	unsigned long flags;

	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
	if (atomic_inc_and_test(&page->_mapcount)) {
		__inc_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
	}
	mem_cgroup_end_update_page_stat(page, &locked, &flags);
}
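/*
 * Editor's sketch (hypothetical fault-path helper): which rmap-add call
 * a mapping takes. Brand-new anonymous pages may skip the locking rules
 * via page_add_new_anon_rmap(); ptes pointing at file pages are counted
 * with page_add_file_rmap().
 */
static void __maybe_unused example_account_mapping(struct page *page,
		struct vm_area_struct *vma, unsigned long address, bool new_anon)
{
	if (new_anon)
		page_add_new_anon_rmap(page, vma, address);
	else
		page_add_file_rmap(page);
}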
/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	bool anon = PageAnon(page);
	bool locked;
	unsigned long flags;

	/*
	 * The anon case has no mem_cgroup page_stat to update; but may
	 * uncharge_page() below, where the lock ordering can deadlock if
	 * we hold the lock against page_stat move: so avoid it on anon.
	 */
	if (!anon)
		mem_cgroup_begin_update_page_stat(page, &locked, &flags);

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		goto out;

	/*
	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
	 * and not charged by memcg for now.
	 */
	if (unlikely(PageHuge(page)))
		goto out;
	if (anon) {
		mem_cgroup_uncharge_page(page);
		if (PageTransHuge(page))
			__dec_zone_page_state(page,
					      NR_ANON_TRANSPARENT_HUGEPAGES);
		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
				-hpage_nr_pages(page));
	} else {
		__dec_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
		mem_cgroup_end_update_page_stat(page, &locked, &flags);
	}
	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
	return;
out:
	if (!anon)
		mem_cgroup_end_update_page_stat(page, &locked, &flags);
}
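/*
 * Editor's note, the _mapcount convention used above as a worked example:
 * the counter starts at -1 for an unmapped page, so the first map drives
 * it to 0 and atomic_inc_and_test() fires ("first"), while the last unmap
 * drives it back to -1 and atomic_add_negative(-1, ...) fires ("last").
 * A page with N ptes mapping it therefore has _mapcount == N - 1.
 */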
/*
 * @arg: the enum ttu_flags that the caller passed to try_to_unmap() or
 * try_to_munlock(), forwarded through rmap_walk_control as a void *.
 */
int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;
	enum ttu_flags flags = (enum ttu_flags)arg;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!(flags & TTU_IGNORE_MLOCK)) {
		if (vma->vm_flags & VM_LOCKED)
			goto out_mlock;

		if (TTU_ACTION(flags) == TTU_MUNLOCK)
			goto out_unmap;
	}
	if (!(flags & TTU_IGNORE_ACCESS)) {
		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			ret = SWAP_FAIL;
			goto out_unmap;
		}
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
		if (!PageHuge(page)) {
			if (PageAnon(page))
				dec_mm_counter(mm, MM_ANONPAGES);
			else
				dec_mm_counter(mm, MM_FILEPAGES);
		}
		set_pte_at(mm, address, pte,
			   swp_entry_to_pte(make_hwpoison_entry(page)));
	} else if (pte_unused(pteval)) {
		/*
		 * The guest indicated that the page content is of no
		 * interest anymore. Simply discard the pte, vmscan
		 * will take care of the rest.
		 */
		if (PageAnon(page))
			dec_mm_counter(mm, MM_ANONPAGES);
		else
			dec_mm_counter(mm, MM_FILEPAGES);
	} else if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		pte_t swp_pte;

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pte, pteval);
				ret = SWAP_FAIL;
				goto out_unmap;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
			entry = make_migration_entry(page, pte_write(pteval));
		}
		swp_pte = swp_entry_to_pte(entry);
		if (pte_soft_dirty(pteval))
			swp_pte = pte_swp_mksoft_dirty(swp_pte);
		set_pte_at(mm, address, pte, swp_pte);
		BUG_ON(pte_file(*pte));
	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
		   (TTU_ACTION(flags) == TTU_MIGRATION)) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
		dec_mm_counter(mm, MM_FILEPAGES);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret != SWAP_FAIL)
		mmu_notifier_invalidate_page(mm, address);
out:
	return ret;

out_mlock:
	pte_unmap_unlock(pte, ptl);

	/*
	 * We need mmap_sem locking: otherwise the VM_LOCKED check is racy
	 * and gives an unstable result. We can't block here, because we
	 * already hold anon_vma->rwsem or mapping->i_mmap_mutex. If the
	 * trylock fails, the page stays on the evictable LRU; if it is
	 * actually mlocked, vmscan will encounter it again later and can
	 * then move it to the unevictable LRU.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		if (vma->vm_flags & VM_LOCKED) {
			mlock_vma_page(page);
			ret = SWAP_MLOCK;
		}
		up_read(&vma->vm_mm->mmap_sem);
	}
	return ret;
}
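
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the migration-entry round trip that try_to_unmap_one() relies on, using
 * the existing helpers from <linux/swapops.h> and assuming
 * CONFIG_MIGRATION. example_migration_entry_roundtrip is a hypothetical
 * name, shown only to make the encode/decode pairing explicit.
 */
static bool example_migration_entry_roundtrip(struct page *page, int writable)
{
	swp_entry_t entry = make_migration_entry(page, writable);
	pte_t swp_pte = swp_entry_to_pte(entry);

	/* The encoded pte must look neither present nor like a file pte... */
	if (pte_present(swp_pte) || pte_file(swp_pte))
		return false;
	/* ...and must decode back to the page it was built from. */
	entry = pte_to_swp_entry(swp_pte);
	return is_migration_entry(entry) &&
	       migration_entry_to_page(entry) == page;
}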
/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs. The ->vm_private_data field
 * holds the current cursor into that scan. Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well. Eventually pages
 * will become fully unmapped and become eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster. In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 *
 * Mlocked pages: check VM_LOCKED under mmap_sem held for read, if we can
 * acquire it without blocking. If vma locked, mlock the pages in the cluster,
 * rather than unmapping them. If we encounter the "check_page" that vmscan is
 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
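
/*
 * Worked example (editor's illustration, assuming x86_64 with 4KB pages
 * and 2MB PMDs): CLUSTER_SIZE = min(32 * 4096, 2MB) = 128KB, i.e. 32 ptes
 * per cluster, and CLUSTER_MASK = ~0x1ffff. With a suitably aligned
 * vm_start, a cursor of 0x21000 into the vma therefore yields a scan
 * window of [vm_start + 0x20000, vm_start + 0x40000).
 */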
static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
		struct vm_area_struct *vma, struct page *check_page)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	unsigned long end;
	int ret = SWAP_AGAIN;
	int locked_vma = 0;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		return ret;

	mmun_start = address;
	mmun_end   = end;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	/*
	 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
	 * keep the sem while scanning the cluster for mlocking pages.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		locked_vma = (vma->vm_flags & VM_LOCKED);
		if (!locked_vma)
			up_read(&vma->vm_mm->mmap_sem); /* don't need it */
	}

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (locked_vma) {
			if (page == check_page) {
				/* we know we have check_page locked */
				mlock_vma_page(page);
				ret = SWAP_MLOCK;
			} else if (trylock_page(page)) {
				/*
				 * If we can lock the page, perform mlock.
				 * Otherwise leave the page alone, it will be
				 * eventually encountered again later.
				 */
				mlock_vma_page(page);
				unlock_page(page);
			}
			continue;	/* don't unmap */
		}

		if (ptep_clear_flush_young_notify(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address)) {
			pte_t ptfile = pgoff_to_pte(page->index);
			if (pte_soft_dirty(pteval))
				ptfile = pte_file_mksoft_dirty(ptfile);
			set_pte_at(mm, address, pte, ptfile);
		}

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, MM_FILEPAGES);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	if (locked_vma)
		up_read(&vma->vm_mm->mmap_sem);
	return ret;
}
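
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the nonlinear pte encoding used above. pgoff_to_pte() packs a file
 * offset into a not-present pte; the arch-provided pte_file() and
 * pte_to_pgoff() recover it at fault time. example_nonlinear_roundtrip
 * is a hypothetical name.
 */
static bool example_nonlinear_roundtrip(pgoff_t pgoff)
{
	pte_t ptfile = pgoff_to_pte(pgoff);

	/* The encoded pte must be recognisable as a file pte... */
	if (!pte_file(ptfile))
		return false;
	/* ...and must decode back to the same file offset. */
	return pte_to_pgoff(ptfile) == pgoff;
}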
static int try_to_unmap_nonlinear(struct page *page,
		struct address_space *mapping, void *arg)
{
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	list_for_each_entry(vma,
		&mapping->i_mmap_nonlinear, shared.nonlinear) {

		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved? */
		return SWAP_FAIL;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway. Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so, use it as a rough guide to how hard we should try.
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		return ret;

	cond_resched();

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma,
			&mapping->i_mmap_nonlinear, shared.nonlinear) {

			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				if (try_to_unmap_cluster(cursor, &mapcount,
						vma, page) == SWAP_MLOCK)
					ret = SWAP_MLOCK;
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					return ret;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched();
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas). Reset cursor on all unreserved nonlinear
	 * vmas, forgetting which ones it had fallen behind on.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear)
		vma->vm_private_data = NULL;

	return ret;
}

bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return is_vma_temporary_stack(vma);
}

static int page_not_mapped(struct page *page)
{
	return !page_mapped(page);
}
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path. Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
	int ret;
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = page_not_mapped,
		.file_nonlinear = try_to_unmap_nonlinear,
		.anon_lock = page_lock_anon_vma_read,
	};

	VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);

	/*
	 * During exec, a temporary VMA is set up and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables, leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
	if (flags & TTU_MIGRATION && !PageKsm(page) && PageAnon(page))
		rwc.invalid_vma = invalid_migration_vma;

	ret = rmap_walk(page, &rwc);

	if (ret != SWAP_MLOCK && !page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
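
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a reclaim-style caller of try_to_unmap(), modelled loosely on
 * shrink_page_list(). example_try_reclaim is a hypothetical name; real
 * reclaim also handles writeback, swap cache setup and LRU bookkeeping.
 */
static bool example_try_reclaim(struct page *page)
{
	int ret;

	/* try_to_unmap() requires the page lock. */
	if (!trylock_page(page))
		return false;

	ret = try_to_unmap(page, TTU_UNMAP);
	unlock_page(page);

	switch (ret) {
	case SWAP_SUCCESS:
		return true;	/* all ptes gone; page can be reclaimed */
	case SWAP_AGAIN:	/* racing mapper; retry on a later pass */
	case SWAP_MLOCK:	/* mlocked; leave for the unevictable LRU */
	case SWAP_FAIL:		/* unswappable */
	default:
		return false;
	}
}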
/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code. Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_AGAIN	- no vma holds the page mlocked, or the page is mapped
 *		  in an mlocked vma whose mmap_sem we couldn't acquire
 * SWAP_FAIL	- page cannot be located at present
 * SWAP_MLOCK	- page is now mlocked.
 */
int try_to_munlock(struct page *page)
{
	int ret;
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)TTU_MUNLOCK,
		.done = page_not_mapped,
		/*
		 * We don't bother to try to find the munlocked page in
		 * nonlinears. It's costly. Instead, later, page reclaim logic
		 * may call try_to_unmap() and recover PG_mlocked lazily.
		 */
		.file_nonlinear = NULL,
		.anon_lock = page_lock_anon_vma_read,
	};

	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);

	ret = rmap_walk(page, &rwc);
	return ret;
}
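
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the munlock-side pairing, modelled loosely on munlock_vma_page().
 * example_munlock_page is a hypothetical name; the real code also fixes
 * up the NR_MLOCK statistics and puts the page back on the right LRU.
 * Returns true if the page ended up evictable.
 */
static bool example_munlock_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestClearPageMlocked(page))
		return true;	/* was not mlocked to begin with */

	/*
	 * If some other vma still has the page mlocked, try_to_munlock()
	 * re-sets PG_mlocked (via mlock_vma_page()) and returns SWAP_MLOCK.
	 */
	return try_to_munlock(page) != SWAP_MLOCK;
}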
void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);

	anon_vma_free(anon_vma);
}

static struct anon_vma *rmap_walk_anon_lock(struct page *page,
					struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;

	if (rwc->anon_lock)
		return rwc->anon_lock(page);

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem. Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma disappearing.
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return NULL;

	anon_vma_lock_read(anon_vma);
	return anon_vma;
}

/*
 * rmap_walk_anon - do something to anonymous page using the anon_vma-based
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write. So, we won't recheck
 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	anon_vma = rmap_walk_anon_lock(page, rwc);
	if (!anon_vma)
		return ret;

	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		ret = rwc->rmap_one(page, vma, address, rwc->arg);
		if (ret != SWAP_AGAIN)
			break;
		if (rwc->done && rwc->done(page))
			break;
	}
	anon_vma_unlock_read(anon_vma);
	return ret;
}

/*
 * rmap_walk_file - do something to file page using the object-based rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write. So, we won't recheck
 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << compound_order(page);
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_mutex.
	 */
	VM_BUG_ON(!PageLocked(page));

	if (!mapping)
		return ret;
	mutex_lock(&mapping->i_mmap_mutex);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		ret = rwc->rmap_one(page, vma, address, rwc->arg);
		if (ret != SWAP_AGAIN)
			goto done;
		if (rwc->done && rwc->done(page))
			goto done;
	}

	if (!rwc->file_nonlinear)
		goto done;

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto done;

	ret = rwc->file_nonlinear(page, mapping, rwc->arg);

done:
	mutex_unlock(&mapping->i_mmap_mutex);
	return ret;
}

int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rwc);
	else if (PageAnon(page))
		return rmap_walk_anon(page, rwc);
	else
		return rmap_walk_file(page, rwc);
}
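
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * driving rmap_walk() with a custom rmap_walk_control, here counting the
 * VMAs that currently map a page. example_count_one and
 * example_count_mappings are hypothetical names; the caller must hold
 * the page lock, as the walkers above require.
 */
static int example_count_one(struct page *page, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	int *count = arg;

	(*count)++;
	return SWAP_AGAIN;	/* keep walking */
}

static int example_count_mappings(struct page *page)
{
	int count = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = example_count_one,
		.arg = &count,
	};

	rmap_walk(page, &rwc);
	return count;
}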
#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following three functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
static void __hugepage_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
	first = atomic_inc_and_test(&page->_mapcount);
	if (first)
		__hugepage_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(&page->_mapcount, 0);
	__hugepage_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */
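
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * how a hugetlb COW path switches the rmap from the old page to a freshly
 * allocated one, modelled loosely on hugetlb_cow(). example_hugetlb_cow_rmap
 * is a hypothetical name; the pte manipulation (huge_ptep_clear_flush(),
 * set_huge_pte_at()) and the locking around it are elided.
 */
static void example_hugetlb_cow_rmap(struct page *old_page,
				     struct page *new_page,
				     struct vm_area_struct *vma,
				     unsigned long address)
{
	/* Break the old reverse mapping first... */
	page_remove_rmap(old_page);
	/* ...then attach the new, exclusively owned hugepage. */
	hugepage_add_new_anon_rmap(new_page, vma, address);
}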