/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 *         mapping->i_mmap_rwsem
 *           anon_vma->rwsem
 *             mm->page_table_lock or pte_lock
 *               pgdat->lru_lock (in mark_page_accessed, isolate_lru_page)
 *               swap_lock (in swap_duplicate, swap_info_get)
 *                 mmlist_lock (in mmput, drain_mmlist and others)
 *                 mapping->private_lock (in __set_page_dirty_buffers)
 *                   mem_cgroup_{begin,end}_page_stat (memcg->move_lock)
 *                     i_pages lock (widely used)
 *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                   sb_lock (within inode_lock in fs/fs-writeback.c)
 *                   i_pages lock (widely used, in set_page_dirty,
 *                             in arch-dependent flush_dcache_mmap_lock,
 *                             within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>

#include <asm/tlbflush.h>

#include <trace/events/tlb.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->degree = 1;	/* Reference for first vma */
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		/* vma reference or self-parent link for new root */
		anon_vma->degree++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
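/*
 * For reference, the common-case fast path lives in the anon_vma_prepare()
 * inline wrapper in include/linux/rmap.h; a minimal sketch of it:
 *
 *	static inline int anon_vma_prepare(struct vm_area_struct *vma)
 *	{
 *		if (likely(vma->anon_vma))
 *			return 0;
 *
 *		return __anon_vma_prepare(vma);
 *	}
 */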
/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * anon_vma_clone() is called by __vma_split(), __split_vma(), copy_vma() and
 * anon_vma_fork(). The first three want an exact copy of src, while the last
 * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
 * endless growth of anon_vma. Since dst->anon_vma is set to NULL before call,
 * we can identify this case by checking (!dst->anon_vma && src->anon_vma).
 *
 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
 * and reuse an existing anon_vma which has no vmas and only one child
 * anon_vma. This prevents degradation of the anon_vma hierarchy to an
 * endless linear chain in the case of a constantly forking task. On the
 * other hand, an anon_vma with more than one child isn't reused even if
 * there was no alive vma, thus the rmap walker has a good chance of
 * avoiding scanning the whole hierarchy when it searches where the page
 * is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;
	struct vm_area_struct *prev = dst->vm_prev, *pprev = src->vm_prev;

	/*
	 * If the parent shares an anon_vma with its vm_prev, keep this
	 * sharing in the child.
	 *
	 * 1. Parent has vm_prev, which implies we have vm_prev.
	 * 2. Parent and its vm_prev have the same anon_vma.
	 */
	if (!dst->anon_vma && src->anon_vma &&
	    pprev && pprev->anon_vma == src->anon_vma)
		dst->anon_vma = prev->anon_vma;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse an existing anon_vma if its degree is lower than two,
		 * which means it has no vma and only one anon_vma child.
		 *
		 * Do not choose the parent anon_vma, otherwise the first
		 * child will always reuse it. The root anon_vma is never
		 * reused: it has a self-parent reference and at least one
		 * child.
		 */
		if (!dst->anon_vma && src->anon_vma &&
		    anon_vma != src->anon_vma && anon_vma->degree < 2)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->degree++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
	 * decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}
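/*
 * An informal sketch of the invariant behind the degree checks above
 * (following the field's description in include/linux/rmap.h): degree
 * roughly counts the VMAs pointing at an anon_vma plus its child
 * anon_vmas, with the root holding an extra self-parent reference. So
 * degree < 2 means "no VMA of its own and at most one child", which is
 * exactly the kind of leftover anon_vma that is safe to reuse.
 */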
/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's spinlock is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->degree++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}
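/*
 * Sketch of the typical caller, simplified from dup_mmap() in
 * kernel/fork.c: every copied VMA in the child is hooked up to the
 * parent's anon_vma hierarchy right after being duplicated:
 *
 *	tmp = vm_area_dup(mpnt);		// mpnt is the parent VMA
 *	...
 *	if (anon_vma_fork(tmp, mpnt))
 *		goto fail_nomem_anon_vma_fork;
 */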
void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA.  This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->degree--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma)
		vma->anon_vma->degree--;
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->degree);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}
/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed.  But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}
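/*
 * Sketch of the expected caller pattern (the migration path uses this
 * shape): the reference taken above must be balanced by put_anon_vma()
 * once the caller is done with the anon_vma:
 *
 *	anon_vma = page_get_anon_vma(page);
 *	if (anon_vma) {
 *		... use the anon_vma, re-checking page_mapped() ...
 *		put_anon_vma(anon_vma);
 *	}
 */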
/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting
 * a reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}
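/*
 * A minimal sketch of how this pairs up in a caller such as the anon side
 * of rmap_walk() (assuming the lookup succeeds):
 *
 *	anon_vma = page_lock_anon_vma_read(page);
 *	if (!anon_vma)
 *		return;
 *	... walk anon_vma->rb_root under the read lock ...
 *	page_unlock_anon_vma_read(anon_vma);
 */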
void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure compiler does not re-order the setting of tlb_flush_batched
	 * before the PTE is cleared.
	 */
	barrier();
	mm->tlb_flush_batched = true;

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}
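/*
 * Sketch of the deferred-flush lifecycle (assuming an arch that selects
 * CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH): the unmap path queues the mm
 * while holding the PTL, and reclaim flushes once per batch before IO:
 *
 *	// per page, under the PTL, in try_to_unmap_one():
 *	pteval = ptep_get_and_clear(mm, address, pvmw.pte);
 *	set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
 *
 *	// later, once per batch, before starting writeback:
 *	try_to_unmap_flush_dirty();
 */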
/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer and batch the flush */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	if (mm->tlb_flush_batched) {
		flush_tlb_mm(mm);

		/*
		 * Do not allow the compiler to re-order the clearing of
		 * tlb_flush_batched before the tlb is flushed.
		 */
		barrier();
		mm->tlb_flush_batched = false;
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
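/*
 * Sketch of an "operation at risk" caller (mprotect's change_pte_range()
 * follows this shape, simplified): the pending batch is flushed right
 * after the page table lock is taken, before any PTEs are modified:
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	flush_tlb_batched_pending(mm);
 *	... modify ptes ...
 *	pte_unmap_unlock(pte, ptl);
 */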
/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping) {
		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}
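/*
 * For reference, the linear-mapping arithmetic behind __vma_address()
 * (defined in mm/internal.h) is roughly:
 *
 *	pgoff_t pgoff = page_to_pgoff(page);
 *	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 *
 * page_address_in_vma() only returns that address once it has verified
 * the page really belongs to this vma and the result lies inside it.
 */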
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(),
	 * set_pmd_at() without holding the anon_vma lock for write. So when
	 * looking for a genuine pmde (in which to find the pte), test present
	 * and !THP together.
	 */
	pmde = *pmd;
	barrier();
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}

struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: a struct page_referenced_arg is passed in via rmap_walk_control.
 */
static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if (vma->vm_flags & VM_LOCKED) {
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false; /* To break the loop */
		}

		if (pvmw.pte) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the page has been used in another mapping,
				 * we will catch it; if this other mapping is
				 * already gone, the unmap path will have set
				 * PG_referenced or activated the page.
				 */
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		clear_page_idle(page);
	if (test_and_clear_page_young(page))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	if (!pra->mapcount)
		return false; /* To break the loop */

	return true;
}

static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect the encountered vma->vm_flags which actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = total_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!pra.mapcount)
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups.
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

	rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}
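/*
 * Sketch of the main consumer, simplified from page_check_references() in
 * mm/vmscan.c: reclaim tests the locked page and collects the vm_flags of
 * the referencing mappings in a single rmap walk:
 *
 *	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
 *					  &vm_flags);
 *	if (vm_flags & VM_LOCKED)
 *		return PAGEREF_RECLAIM;
 */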
static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,
	};
	struct mmu_notifier_range range;
	int *cleaned = arg;

	/*
	 * We have to assume the worst case, i.e. pmd, for invalidation. Note
	 * that the page cannot be freed from this function.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
				0, vma, vma->vm_mm, address,
				min(vma->vm_end, address + page_size(page)));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		int ret = 0;

		address = pvmw.address;
		if (pvmw.pte) {
			pte_t entry;
			pte_t *pte = pvmw.pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
			pmd_t *pmd = pvmw.pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_page(vma, address, page_to_pfn(page));
			entry = pmdp_invalidate(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
#endif
		}

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (ret)
			(*cleaned)++;
	}

	mmu_notifier_invalidate_range_end(&range);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);
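/*
 * Sketch of the classic caller, simplified from clear_page_dirty_for_io()
 * in mm/page-writeback.c: all mappings are write-protected so that any
 * write during writeback re-dirties the page through a fresh fault:
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 */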
/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	page = compound_head(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg page_referenced()'s
	 * PageAnon()) will not see one without the other.
	 */
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	Page or Hugepage to add to rmap
 * @vma:	VM area to add page to.
 * @address:	User virtual address of the mapping
 * @exclusive:	the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be set up.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
1096c97a9e10SNick Piggin /** 10979617d95eSNick Piggin * page_add_anon_rmap - add pte mapping to an anonymous page 10989617d95eSNick Piggin * @page: the page to add the mapping to 10999617d95eSNick Piggin * @vma: the vm area in which the mapping is added 11009617d95eSNick Piggin * @address: the user virtual address mapped 1101d281ee61SKirill A. Shutemov * @compound: charge the page as compound or small page 11029617d95eSNick Piggin * 11035ad64688SHugh Dickins * The caller needs to hold the pte lock, and the page must be locked in 110480e14822SHugh Dickins * the anon_vma case: to serialize mapping,index checking after setting, 110580e14822SHugh Dickins * and to ensure that PageAnon is not being upgraded racily to PageKsm 110680e14822SHugh Dickins * (but PageKsm is never downgraded to PageAnon). 11079617d95eSNick Piggin */ 11089617d95eSNick Piggin void page_add_anon_rmap(struct page *page, 1109d281ee61SKirill A. Shutemov struct vm_area_struct *vma, unsigned long address, bool compound) 11109617d95eSNick Piggin { 1111d281ee61SKirill A. Shutemov do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0); 1112ad8c2ee8SRik van Riel } 1113ad8c2ee8SRik van Riel 1114ad8c2ee8SRik van Riel /* 1115ad8c2ee8SRik van Riel * Special version of the above for do_swap_page, which often runs 1116ad8c2ee8SRik van Riel * into pages that are exclusively owned by the current process. 1117ad8c2ee8SRik van Riel * Everybody else should continue to use page_add_anon_rmap above. 1118ad8c2ee8SRik van Riel */ 1119ad8c2ee8SRik van Riel void do_page_add_anon_rmap(struct page *page, 1120d281ee61SKirill A. Shutemov struct vm_area_struct *vma, unsigned long address, int flags) 1121ad8c2ee8SRik van Riel { 1122d281ee61SKirill A. Shutemov bool compound = flags & RMAP_COMPOUND; 112353f9263bSKirill A. Shutemov bool first; 112453f9263bSKirill A. Shutemov 112553f9263bSKirill A. Shutemov if (compound) { 112653f9263bSKirill A. Shutemov atomic_t *mapcount; 1127e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageLocked(page), page); 112853f9263bSKirill A. Shutemov VM_BUG_ON_PAGE(!PageTransHuge(page), page); 112953f9263bSKirill A. Shutemov mapcount = compound_mapcount_ptr(page); 113053f9263bSKirill A. Shutemov first = atomic_inc_and_test(mapcount); 113153f9263bSKirill A. Shutemov } else { 113253f9263bSKirill A. Shutemov first = atomic_inc_and_test(&page->_mapcount); 113353f9263bSKirill A. Shutemov } 113453f9263bSKirill A. Shutemov 113553f9263bSKirill A. Shutemov if (first) { 1136d281ee61SKirill A. Shutemov int nr = compound ? hpage_nr_pages(page) : 1; 1137bea04b07SJianyu Zhan /* 1138bea04b07SJianyu Zhan * We use the irq-unsafe __{inc|mod}_node_page_state because 1139bea04b07SJianyu Zhan * these counters are not modified in interrupt context, and 1140bea04b07SJianyu Zhan * pte lock (a spinlock) is held, which implies preemption 1141bea04b07SJianyu Zhan * disabled. 1142bea04b07SJianyu Zhan */ 114365c45377SKirill A. Shutemov if (compound) 114411fb9989SMel Gorman __inc_node_page_state(page, NR_ANON_THPS); 11454b9d0fabSMel Gorman __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr); 114679134171SAndrea Arcangeli } 11475ad64688SHugh Dickins if (unlikely(PageKsm(page))) 11485ad64688SHugh Dickins return; 11495ad64688SHugh Dickins 1150309381feSSasha Levin VM_BUG_ON_PAGE(!PageLocked(page), page); 115153f9263bSKirill A. Shutemov 11525dbe0af4SHugh Dickins /* address might be in next vma when migration races vma_adjust */ 11535ad64688SHugh Dickins if (first) 1154d281ee61SKirill A. Shutemov __page_set_anon_rmap(page, vma, address, 1155d281ee61SKirill A. Shutemov flags & RMAP_EXCLUSIVE); 115669029cd5SKAMEZAWA Hiroyuki else 1157c97a9e10SNick Piggin __page_check_anon_rmap(page, vma, address); 11581da177e4SLinus Torvalds } 11591da177e4SLinus Torvalds
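/*
 * Illustrative sketch: a do_swap_page()-style caller that has just swapped
 * a page back in can pass RMAP_EXCLUSIVE when it knows the page is owned
 * by this process alone, tying the page to this vma's own anon_vma rather
 * than the root. "vmf" and "reused" are stand-ins from a hypothetical
 * fault-handler context, not names defined here.
 */
#if 0	/* example only */
	do_page_add_anon_rmap(page, vma, vmf->address,
			      reused ? RMAP_EXCLUSIVE : 0);
#endif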
116043d8eac4SRandy Dunlap /** 11619617d95eSNick Piggin * page_add_new_anon_rmap - add pte mapping to a new anonymous page 11629617d95eSNick Piggin * @page: the page to add the mapping to 11639617d95eSNick Piggin * @vma: the vm area in which the mapping is added 11649617d95eSNick Piggin * @address: the user virtual address mapped 1165d281ee61SKirill A. Shutemov * @compound: charge the page as compound or small page 11669617d95eSNick Piggin * 11679617d95eSNick Piggin * Same as page_add_anon_rmap but must only be called on *new* pages. 11689617d95eSNick Piggin * This means the inc-and-test can be bypassed. 1169c97a9e10SNick Piggin * Page does not have to be locked. 11709617d95eSNick Piggin */ 11719617d95eSNick Piggin void page_add_new_anon_rmap(struct page *page, 1172d281ee61SKirill A. Shutemov struct vm_area_struct *vma, unsigned long address, bool compound) 11739617d95eSNick Piggin { 1174d281ee61SKirill A. Shutemov int nr = compound ? hpage_nr_pages(page) : 1; 1175d281ee61SKirill A. Shutemov 117681d1b09cSSasha Levin VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); 1177fa9949daSHugh Dickins __SetPageSwapBacked(page); 1178d281ee61SKirill A. Shutemov if (compound) { 1179d281ee61SKirill A. Shutemov VM_BUG_ON_PAGE(!PageTransHuge(page), page); 118053f9263bSKirill A. Shutemov /* increment count (starts at -1) */ 118153f9263bSKirill A. Shutemov atomic_set(compound_mapcount_ptr(page), 0); 118211fb9989SMel Gorman __inc_node_page_state(page, NR_ANON_THPS); 118353f9263bSKirill A. Shutemov } else { 118453f9263bSKirill A. Shutemov /* Anon THP always mapped first with PMD */ 118553f9263bSKirill A. Shutemov VM_BUG_ON_PAGE(PageTransCompound(page), page); 118653f9263bSKirill A. Shutemov /* increment count (starts at -1) */ 118753f9263bSKirill A. Shutemov atomic_set(&page->_mapcount, 0); 1188d281ee61SKirill A. Shutemov } 11894b9d0fabSMel Gorman __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr); 1190e8a03febSRik van Riel __page_set_anon_rmap(page, vma, address, 1); 11919617d95eSNick Piggin } 11929617d95eSNick Piggin
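/*
 * Illustrative sketch of the usual call-site shape (cf. do_anonymous_page()
 * in mm/memory.c): a freshly allocated page that no other thread can see
 * yet is added to the rmap and the LRU before its pte is installed. The
 * "vmf" fault context is assumed; charging, locking and error handling are
 * omitted.
 */
#if 0	/* example only */
	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
	__SetPageUptodate(page);
	entry = mk_pte(page, vma->vm_page_prot);
	page_add_new_anon_rmap(page, vma, vmf->address, false);
	lru_cache_add_active_or_unevictable(page, vma);
	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
#endif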
11931da177e4SLinus Torvalds /** 11941da177e4SLinus Torvalds * page_add_file_rmap - add pte mapping to a file page 11951da177e4SLinus Torvalds * @page: the page to add the mapping to 1196e8b098fcSMike Rapoport * @compound: charge the page as compound or small page 11971da177e4SLinus Torvalds * 1198b8072f09SHugh Dickins * The caller needs to hold the pte lock. 11991da177e4SLinus Torvalds */ 1200dd78feddSKirill A. Shutemov void page_add_file_rmap(struct page *page, bool compound) 12011da177e4SLinus Torvalds { 1202dd78feddSKirill A. Shutemov int i, nr = 1; 1203dd78feddSKirill A. Shutemov 1204dd78feddSKirill A. Shutemov VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); 120562cccb8cSJohannes Weiner lock_page_memcg(page); 1206dd78feddSKirill A. Shutemov if (compound && PageTransHuge(page)) { 1207dd78feddSKirill A. Shutemov for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) { 1208dd78feddSKirill A. Shutemov if (atomic_inc_and_test(&page[i]._mapcount)) 1209dd78feddSKirill A. Shutemov nr++; 1210d69b042fSBalbir Singh } 1211dd78feddSKirill A. Shutemov if (!atomic_inc_and_test(compound_mapcount_ptr(page))) 1212dd78feddSKirill A. Shutemov goto out; 121399cb0dbdSSong Liu if (PageSwapBacked(page)) 121411fb9989SMel Gorman __inc_node_page_state(page, NR_SHMEM_PMDMAPPED); 121599cb0dbdSSong Liu else 121699cb0dbdSSong Liu __inc_node_page_state(page, NR_FILE_PMDMAPPED); 1217dd78feddSKirill A. Shutemov } else { 1218c8efc390SKirill A. Shutemov if (PageTransCompound(page) && page_mapping(page)) { 1219c8efc390SKirill A. Shutemov VM_WARN_ON_ONCE(!PageLocked(page)); 1220c8efc390SKirill A. Shutemov 12219a73f61bSKirill A. Shutemov SetPageDoubleMap(compound_head(page)); 12229a73f61bSKirill A. Shutemov if (PageMlocked(page)) 12239a73f61bSKirill A. Shutemov clear_page_mlock(compound_head(page)); 12249a73f61bSKirill A. Shutemov } 1225dd78feddSKirill A. Shutemov if (!atomic_inc_and_test(&page->_mapcount)) 1226dd78feddSKirill A. Shutemov goto out; 1227dd78feddSKirill A. Shutemov } 122800f3ca2cSJohannes Weiner __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr); 1229dd78feddSKirill A. Shutemov out: 123062cccb8cSJohannes Weiner unlock_page_memcg(page); 12311da177e4SLinus Torvalds } 12321da177e4SLinus Torvalds 1233dd78feddSKirill A. Shutemov static void page_remove_file_rmap(struct page *page, bool compound) 12348186eb6aSJohannes Weiner { 1235dd78feddSKirill A. Shutemov int i, nr = 1; 1236dd78feddSKirill A. Shutemov 123757dea93aSSteve Capper VM_BUG_ON_PAGE(compound && !PageHead(page), page); 123862cccb8cSJohannes Weiner lock_page_memcg(page); 12398186eb6aSJohannes Weiner 124053f9263bSKirill A. Shutemov /* Hugepages are not counted in NR_FILE_MAPPED for now. */ 124153f9263bSKirill A. Shutemov if (unlikely(PageHuge(page))) { 124253f9263bSKirill A. Shutemov /* hugetlb pages are always mapped with pmds */ 124353f9263bSKirill A. Shutemov atomic_dec(compound_mapcount_ptr(page)); 124453f9263bSKirill A. Shutemov goto out; 124553f9263bSKirill A. Shutemov } 124653f9263bSKirill A. Shutemov 12478186eb6aSJohannes Weiner /* page still mapped by someone else? */ 1248dd78feddSKirill A. Shutemov if (compound && PageTransHuge(page)) { 1249dd78feddSKirill A. Shutemov for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) { 1250dd78feddSKirill A. Shutemov if (atomic_add_negative(-1, &page[i]._mapcount)) 1251dd78feddSKirill A. Shutemov nr++; 1252dd78feddSKirill A. Shutemov } 1253dd78feddSKirill A. Shutemov if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) 1254dd78feddSKirill A. Shutemov goto out; 125599cb0dbdSSong Liu if (PageSwapBacked(page)) 125611fb9989SMel Gorman __dec_node_page_state(page, NR_SHMEM_PMDMAPPED); 125799cb0dbdSSong Liu else 125899cb0dbdSSong Liu __dec_node_page_state(page, NR_FILE_PMDMAPPED); 1259dd78feddSKirill A. Shutemov } else { 12608186eb6aSJohannes Weiner if (!atomic_add_negative(-1, &page->_mapcount)) 12618186eb6aSJohannes Weiner goto out; 1262dd78feddSKirill A. Shutemov } 12638186eb6aSJohannes Weiner 12648186eb6aSJohannes Weiner /* 126500f3ca2cSJohannes Weiner * We use the irq-unsafe __{inc|mod}_lruvec_page_state because 12668186eb6aSJohannes Weiner * these counters are not modified in interrupt context, and 12678186eb6aSJohannes Weiner * pte lock (a spinlock) is held, which implies preemption disabled. 12688186eb6aSJohannes Weiner */ 126900f3ca2cSJohannes Weiner __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr); 12708186eb6aSJohannes Weiner 12718186eb6aSJohannes Weiner if (unlikely(PageMlocked(page))) 12728186eb6aSJohannes Weiner clear_page_mlock(page); 12738186eb6aSJohannes Weiner out: 127462cccb8cSJohannes Weiner unlock_page_memcg(page); 12758186eb6aSJohannes Weiner } 12768186eb6aSJohannes Weiner
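/*
 * Note: the NR_FILE_MAPPED counter adjusted by the two functions above is
 * what /proc/meminfo reports as "Mapped:".
 */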
127753f9263bSKirill A. Shutemov static void page_remove_anon_compound_rmap(struct page *page) 127853f9263bSKirill A. Shutemov { 127953f9263bSKirill A. Shutemov int i, nr; 128053f9263bSKirill A. Shutemov 128153f9263bSKirill A. Shutemov if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) 128253f9263bSKirill A. Shutemov return; 128353f9263bSKirill A. Shutemov 128453f9263bSKirill A. Shutemov /* Hugepages are not counted in NR_ANON_PAGES for now. */ 128553f9263bSKirill A. Shutemov if (unlikely(PageHuge(page))) 128653f9263bSKirill A. Shutemov return; 128753f9263bSKirill A. Shutemov 128853f9263bSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 128953f9263bSKirill A. Shutemov return; 129053f9263bSKirill A. Shutemov 129111fb9989SMel Gorman __dec_node_page_state(page, NR_ANON_THPS); 129253f9263bSKirill A. Shutemov 129353f9263bSKirill A. Shutemov if (TestClearPageDoubleMap(page)) { 129453f9263bSKirill A. Shutemov /* 129553f9263bSKirill A. Shutemov * Subpages can be mapped with PTEs too. Check how many of 129653f9263bSKirill A. Shutemov * them are still mapped. 129753f9263bSKirill A. Shutemov */ 129853f9263bSKirill A. Shutemov for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) { 129953f9263bSKirill A. Shutemov if (atomic_add_negative(-1, &page[i]._mapcount)) 130053f9263bSKirill A. Shutemov nr++; 130153f9263bSKirill A. Shutemov } 130253f9263bSKirill A. Shutemov } else { 130353f9263bSKirill A. Shutemov nr = HPAGE_PMD_NR; 130453f9263bSKirill A. Shutemov } 130553f9263bSKirill A. Shutemov 1306e90309c9SKirill A. Shutemov if (unlikely(PageMlocked(page))) 1307e90309c9SKirill A. Shutemov clear_page_mlock(page); 1308e90309c9SKirill A. Shutemov 13099a982250SKirill A. Shutemov if (nr) { 13104b9d0fabSMel Gorman __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr); 13119a982250SKirill A. Shutemov deferred_split_huge_page(page); 13129a982250SKirill A. Shutemov } 131353f9263bSKirill A. Shutemov } 131453f9263bSKirill A. Shutemov 13151da177e4SLinus Torvalds /** 13161da177e4SLinus Torvalds * page_remove_rmap - take down pte mapping from a page 13171da177e4SLinus Torvalds * @page: page to remove mapping from 1318d281ee61SKirill A. Shutemov * @compound: uncharge the page as compound or small page 13191da177e4SLinus Torvalds * 1320b8072f09SHugh Dickins * The caller needs to hold the pte lock. 13211da177e4SLinus Torvalds */ 1322d281ee61SKirill A. Shutemov void page_remove_rmap(struct page *page, bool compound) 13231da177e4SLinus Torvalds { 1324dd78feddSKirill A. Shutemov if (!PageAnon(page)) 1325dd78feddSKirill A. Shutemov return page_remove_file_rmap(page, compound); 132689c06bd5SKAMEZAWA Hiroyuki 132753f9263bSKirill A. Shutemov if (compound) 132853f9263bSKirill A. Shutemov return page_remove_anon_compound_rmap(page); 132953f9263bSKirill A. Shutemov 1330b904dcfeSKOSAKI Motohiro /* page still mapped by someone else? */ 1331b904dcfeSKOSAKI Motohiro if (!atomic_add_negative(-1, &page->_mapcount)) 13328186eb6aSJohannes Weiner return; 13338186eb6aSJohannes Weiner 13341da177e4SLinus Torvalds /* 1335bea04b07SJianyu Zhan * We use the irq-unsafe __{inc|mod}_node_page_state because 1336bea04b07SJianyu Zhan * these counters are not modified in interrupt context, and 1337bea04b07SJianyu Zhan * pte lock (a spinlock) is held, which implies preemption disabled. 13380fe6e20bSNaoya Horiguchi */ 13394b9d0fabSMel Gorman __dec_node_page_state(page, NR_ANON_MAPPED); 13408186eb6aSJohannes Weiner 1341e6c509f8SHugh Dickins if (unlikely(PageMlocked(page))) 1342e6c509f8SHugh Dickins clear_page_mlock(page); 13438186eb6aSJohannes Weiner 13449a982250SKirill A. Shutemov if (PageTransCompound(page)) 13459a982250SKirill A. Shutemov deferred_split_huge_page(compound_head(page)); 13469a982250SKirill A. Shutemov 134716f8c5b2SHugh Dickins /* 13481da177e4SLinus Torvalds * It would be tidy to reset the PageAnon mapping here, 13491da177e4SLinus Torvalds * but that might overwrite a racing page_add_anon_rmap 13501da177e4SLinus Torvalds * which increments mapcount after us but sets mapping 13512d4894b5SMel Gorman * before us: so leave the reset to free_unref_page, 13521da177e4SLinus Torvalds * and remember that it's only reliable while mapped. 13531da177e4SLinus Torvalds * Leaving it set also helps swapoff to reinstate ptes 13541da177e4SLinus Torvalds * faster for those pages still in swapcache. 13551da177e4SLinus Torvalds */ 13561da177e4SLinus Torvalds } 13571da177e4SLinus Torvalds
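/*
 * Illustrative sketch of the typical teardown sequence (cf. zap_pte_range()
 * in mm/memory.c): the pte is cleared first, then the reverse mapping and
 * the page reference are dropped. "ptent", "addr", "pte" and "tlb" are
 * names assumed from that context; TLB and dirty-bit details are omitted.
 */
#if 0	/* example only */
	ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
	if (pte_dirty(ptent))
		set_page_dirty(page);
	page_remove_rmap(page, false);
	put_page(page);		/* via tlb_remove_page() in the real code */
#endif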
13581da177e4SLinus Torvalds /* 135952629506SJoonsoo Kim * @arg: enum ttu_flags will be passed in this argument 13601da177e4SLinus Torvalds */ 1361e4b82222SMinchan Kim static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, 136252629506SJoonsoo Kim unsigned long address, void *arg) 13631da177e4SLinus Torvalds { 13641da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 1365c7ab0d2fSKirill A. Shutemov struct page_vma_mapped_walk pvmw = { 1366c7ab0d2fSKirill A. Shutemov .page = page, 1367c7ab0d2fSKirill A. Shutemov .vma = vma, 1368c7ab0d2fSKirill A. Shutemov .address = address, 1369c7ab0d2fSKirill A. Shutemov }; 13701da177e4SLinus Torvalds pte_t pteval; 1371c7ab0d2fSKirill A. Shutemov struct page *subpage; 1372785373b4SLinus Torvalds bool ret = true; 1373ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 1374802a3a92SShaohua Li enum ttu_flags flags = (enum ttu_flags)arg; 13751da177e4SLinus Torvalds 1376b87537d9SHugh Dickins /* munlock has nothing to gain from examining un-locked vmas */ 1377b87537d9SHugh Dickins if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED)) 1378e4b82222SMinchan Kim return true; 1379b87537d9SHugh Dickins 1380a5430ddaSJérôme Glisse if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) && 1381a5430ddaSJérôme Glisse is_zone_device_page(page) && !is_device_private_page(page)) 1382a5430ddaSJérôme Glisse return true; 1383a5430ddaSJérôme Glisse 1384fec89c10SKirill A. Shutemov if (flags & TTU_SPLIT_HUGE_PMD) { 1385fec89c10SKirill A. Shutemov split_huge_pmd_address(vma, address, 1386b5ff8161SNaoya Horiguchi flags & TTU_SPLIT_FREEZE, page); 1387fec89c10SKirill A. Shutemov } 1388fec89c10SKirill A. Shutemov 1389369ea824SJérôme Glisse /* 1390017b1660SMike Kravetz * For THP, we have to assume the worst case, i.e. pmd, for invalidation. 1391017b1660SMike Kravetz * For hugetlb, it could be much worse if we need to do pud 1392017b1660SMike Kravetz * invalidation in the case of pmd sharing. 1393017b1660SMike Kravetz * 1394017b1660SMike Kravetz * Note that the page cannot be freed in this function as call of 1395017b1660SMike Kravetz * try_to_unmap() must hold a reference on the page.
1396369ea824SJérôme Glisse */ 13977269f999SJérôme Glisse mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 13986f4f13e8SJérôme Glisse address, 1399a50b854eSMatthew Wilcox (Oracle) min(vma->vm_end, address + page_size(page))); 1400017b1660SMike Kravetz if (PageHuge(page)) { 1401017b1660SMike Kravetz /* 1402017b1660SMike Kravetz * If sharing is possible, start and end will be adjusted 1403017b1660SMike Kravetz * accordingly. 1404017b1660SMike Kravetz */ 1405ac46d4f3SJérôme Glisse adjust_range_if_pmd_sharing_possible(vma, &range.start, 1406ac46d4f3SJérôme Glisse &range.end); 1407017b1660SMike Kravetz } 1408ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 1409369ea824SJérôme Glisse
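	/*
	 * Walk every pte (or pmd, for a PMD-mapped THP) in this vma that
	 * maps some part of the page; each iteration of the loop below
	 * unmaps one such entry.
	 */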
1410c7ab0d2fSKirill A. Shutemov while (page_vma_mapped_walk(&pvmw)) { 1411616b8371SZi Yan #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 1412616b8371SZi Yan /* PMD-mapped THP migration entry */ 1413616b8371SZi Yan if (!pvmw.pte && (flags & TTU_MIGRATION)) { 1414616b8371SZi Yan VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); 1415616b8371SZi Yan 1416616b8371SZi Yan set_pmd_migration_entry(&pvmw, page); 1417616b8371SZi Yan continue; 1418616b8371SZi Yan } 1419616b8371SZi Yan #endif 1420616b8371SZi Yan 14211da177e4SLinus Torvalds /* 14221da177e4SLinus Torvalds * If the page is mlock()d, we cannot swap it out. 14231da177e4SLinus Torvalds * If it's recently referenced (perhaps page_referenced 14241da177e4SLinus Torvalds * skipped over this mm) then we should reactivate it. 14251da177e4SLinus Torvalds */ 142614fa31b8SAndi Kleen if (!(flags & TTU_IGNORE_MLOCK)) { 1427b87537d9SHugh Dickins if (vma->vm_flags & VM_LOCKED) { 14289a73f61bSKirill A. Shutemov /* PTE-mapped THP are never mlocked */ 14299a73f61bSKirill A. Shutemov if (!PageTransCompound(page)) { 14309a73f61bSKirill A. Shutemov /* 14319a73f61bSKirill A. Shutemov * Holding pte lock, we do *not* need 14329a73f61bSKirill A. Shutemov * mmap_sem here 14339a73f61bSKirill A. Shutemov */ 1434b87537d9SHugh Dickins mlock_vma_page(page); 14359a73f61bSKirill A. Shutemov } 1436e4b82222SMinchan Kim ret = false; 1437c7ab0d2fSKirill A. Shutemov page_vma_mapped_walk_done(&pvmw); 1438c7ab0d2fSKirill A. Shutemov break; 1439b87537d9SHugh Dickins } 1440daa5ba76SKonstantin Khlebnikov if (flags & TTU_MUNLOCK) 1441c7ab0d2fSKirill A. Shutemov continue; 144214fa31b8SAndi Kleen } 1443c7ab0d2fSKirill A. Shutemov 14448346242aSKirill A. Shutemov /* Unexpected PMD-mapped THP? */ 14458346242aSKirill A. Shutemov VM_BUG_ON_PAGE(!pvmw.pte, page); 14468346242aSKirill A. Shutemov 14478346242aSKirill A. Shutemov subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); 1448785373b4SLinus Torvalds address = pvmw.address; 1449785373b4SLinus Torvalds 1450017b1660SMike Kravetz if (PageHuge(page)) { 1451017b1660SMike Kravetz if (huge_pmd_unshare(mm, &address, pvmw.pte)) { 1452017b1660SMike Kravetz /* 1453017b1660SMike Kravetz * huge_pmd_unshare unmapped an entire PMD 1454017b1660SMike Kravetz * page. There is no way of knowing exactly 1455017b1660SMike Kravetz * which PMDs may be cached for this mm, so 1456017b1660SMike Kravetz * we must flush them all. start/end were 1457017b1660SMike Kravetz * already adjusted above to cover this range. 1458017b1660SMike Kravetz */ 1459ac46d4f3SJérôme Glisse flush_cache_range(vma, range.start, range.end); 1460ac46d4f3SJérôme Glisse flush_tlb_range(vma, range.start, range.end); 1461ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range(mm, range.start, 1462ac46d4f3SJérôme Glisse range.end); 1463017b1660SMike Kravetz 1464017b1660SMike Kravetz /* 1465017b1660SMike Kravetz * The ref count of the PMD page was dropped 1466017b1660SMike Kravetz * which is part of the way map counting 1467017b1660SMike Kravetz * is done for shared PMDs. Return 'true' 1468017b1660SMike Kravetz * here. When there is no other sharing, 1469017b1660SMike Kravetz * huge_pmd_unshare returns false and we will 1470017b1660SMike Kravetz * unmap the actual page and drop map count 1471017b1660SMike Kravetz * to zero. 1472017b1660SMike Kravetz */ 1473017b1660SMike Kravetz page_vma_mapped_walk_done(&pvmw); 1474017b1660SMike Kravetz break; 1475017b1660SMike Kravetz } 1476017b1660SMike Kravetz } 14778346242aSKirill A. Shutemov 1478a5430ddaSJérôme Glisse if (IS_ENABLED(CONFIG_MIGRATION) && 1479a5430ddaSJérôme Glisse (flags & TTU_MIGRATION) && 1480a5430ddaSJérôme Glisse is_zone_device_page(page)) { 1481a5430ddaSJérôme Glisse swp_entry_t entry; 1482a5430ddaSJérôme Glisse pte_t swp_pte; 1483a5430ddaSJérôme Glisse 1484a5430ddaSJérôme Glisse pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte); 1485a5430ddaSJérôme Glisse 1486a5430ddaSJérôme Glisse /* 1487a5430ddaSJérôme Glisse * Store the pfn of the page in a special migration 1488a5430ddaSJérôme Glisse * pte. do_swap_page() will wait until the migration 1489a5430ddaSJérôme Glisse * pte is removed and then restart fault handling. 1490a5430ddaSJérôme Glisse */ 1491a5430ddaSJérôme Glisse entry = make_migration_entry(page, 0); 1492a5430ddaSJérôme Glisse swp_pte = swp_entry_to_pte(entry); 1493a5430ddaSJérôme Glisse if (pte_soft_dirty(pteval)) 1494a5430ddaSJérôme Glisse swp_pte = pte_swp_mksoft_dirty(swp_pte); 1495a5430ddaSJérôme Glisse set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); 14960f10851eSJérôme Glisse /* 14970f10851eSJérôme Glisse * No need to invalidate here; it will be synchronized 14980f10851eSJérôme Glisse * against the special swap migration pte. 14991de13ee5SRalph Campbell * 15001de13ee5SRalph Campbell * The assignment to subpage above was computed from a 15011de13ee5SRalph Campbell * swap PTE which results in an invalid pointer. 15021de13ee5SRalph Campbell * Since only PAGE_SIZE pages can currently be 15031de13ee5SRalph Campbell * migrated, just set it to page. This will need to be 15041de13ee5SRalph Campbell * changed when hugepage migrations to device private 15051de13ee5SRalph Campbell * memory are supported. 15060f10851eSJérôme Glisse */ 15071de13ee5SRalph Campbell subpage = page; 1508a5430ddaSJérôme Glisse goto discard; 1509a5430ddaSJérôme Glisse } 1510a5430ddaSJérôme Glisse 151114fa31b8SAndi Kleen if (!(flags & TTU_IGNORE_ACCESS)) { 1512785373b4SLinus Torvalds if (ptep_clear_flush_young_notify(vma, address, 1513c7ab0d2fSKirill A. Shutemov pvmw.pte)) { 1514e4b82222SMinchan Kim ret = false; 1515c7ab0d2fSKirill A. Shutemov page_vma_mapped_walk_done(&pvmw); 1516c7ab0d2fSKirill A. Shutemov break; 15171da177e4SLinus Torvalds } 1518b291f000SNick Piggin } 15191da177e4SLinus Torvalds 15201da177e4SLinus Torvalds /* Nuke the page table entry. */ 1521785373b4SLinus Torvalds flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 152272b252aeSMel Gorman if (should_defer_flush(mm, flags)) { 152372b252aeSMel Gorman /* 1524c7ab0d2fSKirill A.
Shutemov * We clear the PTE but do not flush so potentially 1525c7ab0d2fSKirill A. Shutemov * a remote CPU could still be writing to the page. 1526c7ab0d2fSKirill A. Shutemov * If the entry was previously clean then the 1527c7ab0d2fSKirill A. Shutemov * architecture must guarantee that a clear->dirty 1528c7ab0d2fSKirill A. Shutemov * transition on a cached TLB entry is written through 1529c7ab0d2fSKirill A. Shutemov * and traps if the PTE is unmapped. 153072b252aeSMel Gorman */ 1531785373b4SLinus Torvalds pteval = ptep_get_and_clear(mm, address, pvmw.pte); 153272b252aeSMel Gorman 1533c7ab0d2fSKirill A. Shutemov set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); 153472b252aeSMel Gorman } else { 1535785373b4SLinus Torvalds pteval = ptep_clear_flush(vma, address, pvmw.pte); 153672b252aeSMel Gorman } 15371da177e4SLinus Torvalds 1538c7ab0d2fSKirill A. Shutemov /* Move the dirty bit to the page. Now the pte is gone. */ 15391da177e4SLinus Torvalds if (pte_dirty(pteval)) 15401da177e4SLinus Torvalds set_page_dirty(page); 15411da177e4SLinus Torvalds 1542365e9c87SHugh Dickins /* Update high watermark before we lower rss */ 1543365e9c87SHugh Dickins update_hiwater_rss(mm); 1544365e9c87SHugh Dickins 1545888b9f7cSAndi Kleen if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { 15465fd27b8eSPunit Agrawal pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 15475d317b2bSNaoya Horiguchi if (PageHuge(page)) { 1548d8c6546bSMatthew Wilcox (Oracle) hugetlb_count_sub(compound_nr(page), mm); 1549785373b4SLinus Torvalds set_huge_swap_pte_at(mm, address, 15505fd27b8eSPunit Agrawal pvmw.pte, pteval, 15515fd27b8eSPunit Agrawal vma_mmu_pagesize(vma)); 15525d317b2bSNaoya Horiguchi } else { 1553eca56ff9SJerome Marchand dec_mm_counter(mm, mm_counter(page)); 1554785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, pteval); 15555f24ae58SNaoya Horiguchi } 1556c7ab0d2fSKirill A. Shutemov 1557bce73e48SChristian Borntraeger } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 155845961722SKonstantin Weitz /* 155945961722SKonstantin Weitz * The guest indicated that the page content is of no 156045961722SKonstantin Weitz * interest anymore. Simply discard the pte, vmscan 156145961722SKonstantin Weitz * will take care of the rest. 1562bce73e48SChristian Borntraeger * A future reference will then fault in a new zero 1563bce73e48SChristian Borntraeger * page. When userfaultfd is active, we must not drop 1564bce73e48SChristian Borntraeger * this page though, as its main user (postcopy 1565bce73e48SChristian Borntraeger * migration) will not expect userfaults on already 1566bce73e48SChristian Borntraeger * copied pages. 156745961722SKonstantin Weitz */ 1568eca56ff9SJerome Marchand dec_mm_counter(mm, mm_counter(page)); 15690f10851eSJérôme Glisse /* We have to invalidate as we cleared the pte */ 15700f10851eSJérôme Glisse mmu_notifier_invalidate_range(mm, address, 15710f10851eSJérôme Glisse address + PAGE_SIZE); 1572c7ab0d2fSKirill A. 
Shutemov } else if (IS_ENABLED(CONFIG_MIGRATION) && 1573b5ff8161SNaoya Horiguchi (flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) { 1574470f119fSHugh Dickins swp_entry_t entry; 1575470f119fSHugh Dickins pte_t swp_pte; 1576ca827d55SKhalid Aziz 1577ca827d55SKhalid Aziz if (arch_unmap_one(mm, vma, address, pteval) < 0) { 1578ca827d55SKhalid Aziz set_pte_at(mm, address, pvmw.pte, pteval); 1579ca827d55SKhalid Aziz ret = false; 1580ca827d55SKhalid Aziz page_vma_mapped_walk_done(&pvmw); 1581ca827d55SKhalid Aziz break; 1582ca827d55SKhalid Aziz } 1583ca827d55SKhalid Aziz 1584470f119fSHugh Dickins /* 1585470f119fSHugh Dickins * Store the pfn of the page in a special migration 1586470f119fSHugh Dickins * pte. do_swap_page() will wait until the migration 1587470f119fSHugh Dickins * pte is removed and then restart fault handling. 1588470f119fSHugh Dickins */ 1589c7ab0d2fSKirill A. Shutemov entry = make_migration_entry(subpage, 1590c7ab0d2fSKirill A. Shutemov pte_write(pteval)); 1591470f119fSHugh Dickins swp_pte = swp_entry_to_pte(entry); 1592470f119fSHugh Dickins if (pte_soft_dirty(pteval)) 1593470f119fSHugh Dickins swp_pte = pte_swp_mksoft_dirty(swp_pte); 1594785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, swp_pte); 15950f10851eSJérôme Glisse /* 15960f10851eSJérôme Glisse * No need to invalidate here; it will be synchronized 15970f10851eSJérôme Glisse * against the special swap migration pte. 15980f10851eSJérôme Glisse */ 1599888b9f7cSAndi Kleen } else if (PageAnon(page)) { 1600c7ab0d2fSKirill A. Shutemov swp_entry_t entry = { .val = page_private(subpage) }; 1601179ef71cSCyrill Gorcunov pte_t swp_pte; 16021da177e4SLinus Torvalds /* 16031da177e4SLinus Torvalds * Store the swap location in the pte. 16041da177e4SLinus Torvalds * See handle_pte_fault() ... 16051da177e4SLinus Torvalds */ 1606eb94a878SMinchan Kim if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) { 1607eb94a878SMinchan Kim WARN_ON_ONCE(1); 160883612a94SMinchan Kim ret = false; 1609369ea824SJérôme Glisse /* We have to invalidate as we cleared the pte */ 16100f10851eSJérôme Glisse mmu_notifier_invalidate_range(mm, address, 16110f10851eSJérôme Glisse address + PAGE_SIZE); 1612eb94a878SMinchan Kim page_vma_mapped_walk_done(&pvmw); 1613eb94a878SMinchan Kim break; 1614eb94a878SMinchan Kim } 1615854e9ed0SMinchan Kim 1616802a3a92SShaohua Li /* MADV_FREE page check */ 1617802a3a92SShaohua Li if (!PageSwapBacked(page)) { 1618a128ca71SShaohua Li if (!PageDirty(page)) { 16190f10851eSJérôme Glisse /* Invalidate as we cleared the pte */ 16200f10851eSJérôme Glisse mmu_notifier_invalidate_range(mm, 16210f10851eSJérôme Glisse address, address + PAGE_SIZE); 1622854e9ed0SMinchan Kim dec_mm_counter(mm, MM_ANONPAGES); 1623854e9ed0SMinchan Kim goto discard; 1624854e9ed0SMinchan Kim } 1625854e9ed0SMinchan Kim 1626802a3a92SShaohua Li /* 1627802a3a92SShaohua Li * If the page was redirtied, it cannot be 1628802a3a92SShaohua Li * discarded. Remap the page to page table. 1629802a3a92SShaohua Li */ 1630785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, pteval); 163118863d3aSMinchan Kim SetPageSwapBacked(page); 1632e4b82222SMinchan Kim ret = false; 1633802a3a92SShaohua Li page_vma_mapped_walk_done(&pvmw); 1634802a3a92SShaohua Li break; 1635802a3a92SShaohua Li } 1636802a3a92SShaohua Li 1637570a335bSHugh Dickins if (swap_duplicate(entry) < 0) { 1638785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, pteval); 1639e4b82222SMinchan Kim ret = false; 1640c7ab0d2fSKirill A. Shutemov page_vma_mapped_walk_done(&pvmw); 1641c7ab0d2fSKirill A.
Shutemov break; 1642570a335bSHugh Dickins } 1643ca827d55SKhalid Aziz if (arch_unmap_one(mm, vma, address, pteval) < 0) { 1644ca827d55SKhalid Aziz set_pte_at(mm, address, pvmw.pte, pteval); 1645ca827d55SKhalid Aziz ret = false; 1646ca827d55SKhalid Aziz page_vma_mapped_walk_done(&pvmw); 1647ca827d55SKhalid Aziz break; 1648ca827d55SKhalid Aziz } 16491da177e4SLinus Torvalds if (list_empty(&mm->mmlist)) { 16501da177e4SLinus Torvalds spin_lock(&mmlist_lock); 1651f412ac08SHugh Dickins if (list_empty(&mm->mmlist)) 16521da177e4SLinus Torvalds list_add(&mm->mmlist, &init_mm.mmlist); 16531da177e4SLinus Torvalds spin_unlock(&mmlist_lock); 16541da177e4SLinus Torvalds } 1655d559db08SKAMEZAWA Hiroyuki dec_mm_counter(mm, MM_ANONPAGES); 1656b084d435SKAMEZAWA Hiroyuki inc_mm_counter(mm, MM_SWAPENTS); 1657179ef71cSCyrill Gorcunov swp_pte = swp_entry_to_pte(entry); 1658179ef71cSCyrill Gorcunov if (pte_soft_dirty(pteval)) 1659179ef71cSCyrill Gorcunov swp_pte = pte_swp_mksoft_dirty(swp_pte); 1660785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, swp_pte); 16610f10851eSJérôme Glisse /* Invalidate as we cleared the pte */ 1662369ea824SJérôme Glisse mmu_notifier_invalidate_range(mm, address, 1663369ea824SJérôme Glisse address + PAGE_SIZE); 16640f10851eSJérôme Glisse } else { 16650f10851eSJérôme Glisse /* 1666906f9cdfSHugh Dickins * This is a locked file-backed page, thus it cannot 1667906f9cdfSHugh Dickins * be removed from the page cache and replaced by a new 1668906f9cdfSHugh Dickins * page before mmu_notifier_invalidate_range_end, so no 16690f10851eSJérôme Glisse * concurrent thread can update its page table to 16700f10851eSJérôme Glisse * point at a new page while a device is still using this 16710f10851eSJérôme Glisse * page. 16720f10851eSJérôme Glisse * 1673ad56b738SMike Rapoport * See Documentation/vm/mmu_notifier.rst 16740f10851eSJérôme Glisse */ 16750f10851eSJérôme Glisse dec_mm_counter(mm, mm_counter_file(page)); 16760f10851eSJérôme Glisse } 16770f10851eSJérôme Glisse discard: 16780f10851eSJérôme Glisse /* 16790f10851eSJérôme Glisse * No need to call mmu_notifier_invalidate_range(); it has been 16800f10851eSJérôme Glisse * done above for all cases requiring it to happen under page 16810f10851eSJérôme Glisse * table lock before mmu_notifier_invalidate_range_end() 16820f10851eSJérôme Glisse * 1683ad56b738SMike Rapoport * See Documentation/vm/mmu_notifier.rst 16840f10851eSJérôme Glisse */ 16850f10851eSJérôme Glisse page_remove_rmap(subpage, PageHuge(page)); 16860f10851eSJérôme Glisse put_page(page); 1687c7ab0d2fSKirill A.
Shutemov } 1688369ea824SJérôme Glisse 1689ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range); 1690369ea824SJérôme Glisse 1691caed0f48SKOSAKI Motohiro return ret; 16921da177e4SLinus Torvalds } 16931da177e4SLinus Torvalds 169471e3aac0SAndrea Arcangeli bool is_vma_temporary_stack(struct vm_area_struct *vma) 1695a8bef8ffSMel Gorman { 1696a8bef8ffSMel Gorman int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); 1697a8bef8ffSMel Gorman 1698a8bef8ffSMel Gorman if (!maybe_stack) 1699a8bef8ffSMel Gorman return false; 1700a8bef8ffSMel Gorman 1701a8bef8ffSMel Gorman if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == 1702a8bef8ffSMel Gorman VM_STACK_INCOMPLETE_SETUP) 1703a8bef8ffSMel Gorman return true; 1704a8bef8ffSMel Gorman 1705a8bef8ffSMel Gorman return false; 1706a8bef8ffSMel Gorman } 1707a8bef8ffSMel Gorman 170852629506SJoonsoo Kim static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) 170952629506SJoonsoo Kim { 171052629506SJoonsoo Kim return is_vma_temporary_stack(vma); 171152629506SJoonsoo Kim } 171252629506SJoonsoo Kim 17132a52bcbcSKirill A. Shutemov static int page_mapcount_is_zero(struct page *page) 171452629506SJoonsoo Kim { 1715c7ab0d2fSKirill A. Shutemov return !total_mapcount(page); 17162a52bcbcSKirill A. Shutemov } 171752629506SJoonsoo Kim 17181da177e4SLinus Torvalds /** 17191da177e4SLinus Torvalds * try_to_unmap - try to remove all page table mappings to a page 17201da177e4SLinus Torvalds * @page: the page to get unmapped 172114fa31b8SAndi Kleen * @flags: action and flags 17221da177e4SLinus Torvalds * 17231da177e4SLinus Torvalds * Tries to remove all the page table entries which are mapping this 17241da177e4SLinus Torvalds * page, used in the pageout path. Caller must hold the page lock. 17251da177e4SLinus Torvalds * 1726666e5a40SMinchan Kim * If unmap is successful, return true. Otherwise, false. 17271da177e4SLinus Torvalds */ 1728666e5a40SMinchan Kim bool try_to_unmap(struct page *page, enum ttu_flags flags) 17291da177e4SLinus Torvalds { 173052629506SJoonsoo Kim struct rmap_walk_control rwc = { 173152629506SJoonsoo Kim .rmap_one = try_to_unmap_one, 1732802a3a92SShaohua Li .arg = (void *)flags, 17332a52bcbcSKirill A. Shutemov .done = page_mapcount_is_zero, 173452629506SJoonsoo Kim .anon_lock = page_lock_anon_vma_read, 173552629506SJoonsoo Kim }; 17361da177e4SLinus Torvalds 173752629506SJoonsoo Kim /* 173852629506SJoonsoo Kim * During exec, a temporary VMA is set up and later moved. 173952629506SJoonsoo Kim * The VMA is moved under the anon_vma lock but not the 174052629506SJoonsoo Kim * page tables leading to a race where migration cannot 174152629506SJoonsoo Kim * find the migration ptes. Rather than increasing the 174252629506SJoonsoo Kim * locking requirements of exec(), migration skips 174352629506SJoonsoo Kim * temporary VMAs until after exec() completes. 174452629506SJoonsoo Kim */ 1745b5ff8161SNaoya Horiguchi if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE)) 1746b5ff8161SNaoya Horiguchi && !PageKsm(page) && PageAnon(page)) 174752629506SJoonsoo Kim rwc.invalid_vma = invalid_migration_vma; 174852629506SJoonsoo Kim 17492a52bcbcSKirill A. Shutemov if (flags & TTU_RMAP_LOCKED) 175033fc80e2SMinchan Kim rmap_walk_locked(page, &rwc); 17512a52bcbcSKirill A. Shutemov else 175233fc80e2SMinchan Kim rmap_walk(page, &rwc); 175352629506SJoonsoo Kim 1754666e5a40SMinchan Kim return !page_mapcount(page); 17551da177e4SLinus Torvalds } 175681b4082dSNikita Danilov
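/*
 * Illustrative sketch of the reclaim-side caller (cf. shrink_page_list()
 * in mm/vmscan.c): the page is locked, unmapped from all ptes, and only
 * then considered for pageout or freeing. The flag choice here is
 * illustrative, and "activate_locked" is a label from that vmscan context.
 */
#if 0	/* example only */
	if (page_mapped(page)) {
		if (!try_to_unmap(page, TTU_BATCH_FLUSH))
			goto activate_locked;	/* someone still maps it */
	}
#endif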
17572a52bcbcSKirill A. Shutemov static int page_not_mapped(struct page *page) 17582a52bcbcSKirill A. Shutemov { 17592a52bcbcSKirill A. Shutemov return !page_mapped(page); 17602a52bcbcSKirill A. Shutemov } 17612a52bcbcSKirill A. Shutemov 1762b291f000SNick Piggin /** 1763b291f000SNick Piggin * try_to_munlock - try to munlock a page 1764b291f000SNick Piggin * @page: the page to be munlocked 1765b291f000SNick Piggin * 1766b291f000SNick Piggin * Called from munlock code. Checks all of the VMAs mapping the page 1767b291f000SNick Piggin * to make sure nobody else has this page mlocked. The page will be 1768b291f000SNick Piggin * returned with PG_mlocked cleared if no other vmas have it mlocked. 1769b291f000SNick Piggin */ 1770854e9ed0SMinchan Kim 1771192d7232SMinchan Kim void try_to_munlock(struct page *page) 1772192d7232SMinchan Kim { 1773e8351ac9SJoonsoo Kim struct rmap_walk_control rwc = { 1774e8351ac9SJoonsoo Kim .rmap_one = try_to_unmap_one, 1775802a3a92SShaohua Li .arg = (void *)TTU_MUNLOCK, 1776e8351ac9SJoonsoo Kim .done = page_not_mapped, 1777e8351ac9SJoonsoo Kim .anon_lock = page_lock_anon_vma_read, 1778e8351ac9SJoonsoo Kim 1779e8351ac9SJoonsoo Kim }; 1780e8351ac9SJoonsoo Kim 1781309381feSSasha Levin VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); 1782192d7232SMinchan Kim VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); 1783b291f000SNick Piggin 1784192d7232SMinchan Kim rmap_walk(page, &rwc); 1785b291f000SNick Piggin } 1786e9995ef9SHugh Dickins 178701d8b20dSPeter Zijlstra void __put_anon_vma(struct anon_vma *anon_vma) 178876545066SRik van Riel { 178976545066SRik van Riel struct anon_vma *root = anon_vma->root; 179076545066SRik van Riel 1791624483f3SAndrey Ryabinin anon_vma_free(anon_vma); 179201d8b20dSPeter Zijlstra if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 179376545066SRik van Riel anon_vma_free(root); 179476545066SRik van Riel } 179576545066SRik van Riel 17960dd1c7bbSJoonsoo Kim static struct anon_vma *rmap_walk_anon_lock(struct page *page, 17970dd1c7bbSJoonsoo Kim struct rmap_walk_control *rwc) 1798faecd8ddSJoonsoo Kim { 1799faecd8ddSJoonsoo Kim struct anon_vma *anon_vma; 1800faecd8ddSJoonsoo Kim 18010dd1c7bbSJoonsoo Kim if (rwc->anon_lock) 18020dd1c7bbSJoonsoo Kim return rwc->anon_lock(page); 18030dd1c7bbSJoonsoo Kim 1804faecd8ddSJoonsoo Kim /* 1805faecd8ddSJoonsoo Kim * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read() 1806faecd8ddSJoonsoo Kim * because that depends on page_mapped(); but not all its usages 1807faecd8ddSJoonsoo Kim * are holding mmap_sem. Users without mmap_sem are required to 1808faecd8ddSJoonsoo Kim * take a reference count to prevent the anon_vma disappearing 1809faecd8ddSJoonsoo Kim */ 1810faecd8ddSJoonsoo Kim anon_vma = page_anon_vma(page); 1811faecd8ddSJoonsoo Kim if (!anon_vma) 1812faecd8ddSJoonsoo Kim return NULL; 1813faecd8ddSJoonsoo Kim 1814faecd8ddSJoonsoo Kim anon_vma_lock_read(anon_vma); 1815faecd8ddSJoonsoo Kim return anon_vma; 1816faecd8ddSJoonsoo Kim } 1817faecd8ddSJoonsoo Kim 1818e9995ef9SHugh Dickins /* 1819e8351ac9SJoonsoo Kim * rmap_walk_anon - do something to anonymous page using the object-based 1820e8351ac9SJoonsoo Kim * rmap method 1821e8351ac9SJoonsoo Kim * @page: the page to be handled 1822e8351ac9SJoonsoo Kim * @rwc: control variable according to each walk type 1823e8351ac9SJoonsoo Kim * 1824e8351ac9SJoonsoo Kim * Find all the mappings of a page using the mapping pointer and the vma chains 1825e8351ac9SJoonsoo Kim * contained in the anon_vma struct it points to.
1826e8351ac9SJoonsoo Kim * 1827e8351ac9SJoonsoo Kim * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1828e8351ac9SJoonsoo Kim * where the page was found will be held for write. So, we won't recheck 1829e8351ac9SJoonsoo Kim * vm_flags for that VMA. That should be OK, because that vma shouldn't be 1830e8351ac9SJoonsoo Kim * LOCKED. 1831e9995ef9SHugh Dickins */ 18321df631aeSMinchan Kim static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, 1833b9773199SKirill A. Shutemov bool locked) 1834e9995ef9SHugh Dickins { 1835e9995ef9SHugh Dickins struct anon_vma *anon_vma; 1836a8fa41adSKirill A. Shutemov pgoff_t pgoff_start, pgoff_end; 18375beb4930SRik van Riel struct anon_vma_chain *avc; 1838e9995ef9SHugh Dickins 1839b9773199SKirill A. Shutemov if (locked) { 1840b9773199SKirill A. Shutemov anon_vma = page_anon_vma(page); 1841b9773199SKirill A. Shutemov /* anon_vma disappear under us? */ 1842b9773199SKirill A. Shutemov VM_BUG_ON_PAGE(!anon_vma, page); 1843b9773199SKirill A. Shutemov } else { 18440dd1c7bbSJoonsoo Kim anon_vma = rmap_walk_anon_lock(page, rwc); 1845b9773199SKirill A. Shutemov } 1846e9995ef9SHugh Dickins if (!anon_vma) 18471df631aeSMinchan Kim return; 1848faecd8ddSJoonsoo Kim 1849a8fa41adSKirill A. Shutemov pgoff_start = page_to_pgoff(page); 1850a8fa41adSKirill A. Shutemov pgoff_end = pgoff_start + hpage_nr_pages(page) - 1; 1851a8fa41adSKirill A. Shutemov anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, 1852a8fa41adSKirill A. Shutemov pgoff_start, pgoff_end) { 18535beb4930SRik van Riel struct vm_area_struct *vma = avc->vma; 1854e9995ef9SHugh Dickins unsigned long address = vma_address(page, vma); 18550dd1c7bbSJoonsoo Kim 1856ad12695fSAndrea Arcangeli cond_resched(); 1857ad12695fSAndrea Arcangeli 18580dd1c7bbSJoonsoo Kim if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 18590dd1c7bbSJoonsoo Kim continue; 18600dd1c7bbSJoonsoo Kim 1861e4b82222SMinchan Kim if (!rwc->rmap_one(page, vma, address, rwc->arg)) 1862e9995ef9SHugh Dickins break; 18630dd1c7bbSJoonsoo Kim if (rwc->done && rwc->done(page)) 18640dd1c7bbSJoonsoo Kim break; 1865e9995ef9SHugh Dickins } 1866b9773199SKirill A. Shutemov 1867b9773199SKirill A. Shutemov if (!locked) 18684fc3f1d6SIngo Molnar anon_vma_unlock_read(anon_vma); 1869e9995ef9SHugh Dickins } 1870e9995ef9SHugh Dickins 1871e8351ac9SJoonsoo Kim /* 1872e8351ac9SJoonsoo Kim * rmap_walk_file - do something to file page using the object-based rmap method 1873e8351ac9SJoonsoo Kim * @page: the page to be handled 1874e8351ac9SJoonsoo Kim * @rwc: control variable according to each walk type 1875e8351ac9SJoonsoo Kim * 1876e8351ac9SJoonsoo Kim * Find all the mappings of a page using the mapping pointer and the vma chains 1877e8351ac9SJoonsoo Kim * contained in the address_space struct it points to. 1878e8351ac9SJoonsoo Kim * 1879e8351ac9SJoonsoo Kim * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1880e8351ac9SJoonsoo Kim * where the page was found will be held for write. So, we won't recheck 1881e8351ac9SJoonsoo Kim * vm_flags for that VMA. That should be OK, because that vma shouldn't be 1882e8351ac9SJoonsoo Kim * LOCKED. 1883e8351ac9SJoonsoo Kim */ 18841df631aeSMinchan Kim static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, 1885b9773199SKirill A. Shutemov bool locked) 1886e9995ef9SHugh Dickins { 1887b9773199SKirill A. Shutemov struct address_space *mapping = page_mapping(page); 1888a8fa41adSKirill A. 
Shutemov pgoff_t pgoff_start, pgoff_end; 1889e9995ef9SHugh Dickins struct vm_area_struct *vma; 1890e9995ef9SHugh Dickins 18919f32624bSJoonsoo Kim /* 18929f32624bSJoonsoo Kim * The page lock not only makes sure that page->mapping cannot 18939f32624bSJoonsoo Kim * suddenly be NULLified by truncation, it makes sure that the 18949f32624bSJoonsoo Kim * structure at mapping cannot be freed and reused yet, 1895c8c06efaSDavidlohr Bueso * so we can safely take mapping->i_mmap_rwsem. 18969f32624bSJoonsoo Kim */ 189781d1b09cSSasha Levin VM_BUG_ON_PAGE(!PageLocked(page), page); 18989f32624bSJoonsoo Kim 1899e9995ef9SHugh Dickins if (!mapping) 19001df631aeSMinchan Kim return; 19013dec0ba0SDavidlohr Bueso 1902a8fa41adSKirill A. Shutemov pgoff_start = page_to_pgoff(page); 1903a8fa41adSKirill A. Shutemov pgoff_end = pgoff_start + hpage_nr_pages(page) - 1; 1904b9773199SKirill A. Shutemov if (!locked) 19053dec0ba0SDavidlohr Bueso i_mmap_lock_read(mapping); 1906a8fa41adSKirill A. Shutemov vma_interval_tree_foreach(vma, &mapping->i_mmap, 1907a8fa41adSKirill A. Shutemov pgoff_start, pgoff_end) { 1908e9995ef9SHugh Dickins unsigned long address = vma_address(page, vma); 19090dd1c7bbSJoonsoo Kim 1910ad12695fSAndrea Arcangeli cond_resched(); 1911ad12695fSAndrea Arcangeli 19120dd1c7bbSJoonsoo Kim if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 19130dd1c7bbSJoonsoo Kim continue; 19140dd1c7bbSJoonsoo Kim 1915e4b82222SMinchan Kim if (!rwc->rmap_one(page, vma, address, rwc->arg)) 19160dd1c7bbSJoonsoo Kim goto done; 19170dd1c7bbSJoonsoo Kim if (rwc->done && rwc->done(page)) 19180dd1c7bbSJoonsoo Kim goto done; 1919e9995ef9SHugh Dickins } 19200dd1c7bbSJoonsoo Kim 19210dd1c7bbSJoonsoo Kim done: 1922b9773199SKirill A. Shutemov if (!locked) 19233dec0ba0SDavidlohr Bueso i_mmap_unlock_read(mapping); 1924e9995ef9SHugh Dickins } 1925e9995ef9SHugh Dickins 19261df631aeSMinchan Kim void rmap_walk(struct page *page, struct rmap_walk_control *rwc) 1927e9995ef9SHugh Dickins { 1928e9995ef9SHugh Dickins if (unlikely(PageKsm(page))) 19291df631aeSMinchan Kim rmap_walk_ksm(page, rwc); 1930e9995ef9SHugh Dickins else if (PageAnon(page)) 19311df631aeSMinchan Kim rmap_walk_anon(page, rwc, false); 1932e9995ef9SHugh Dickins else 19331df631aeSMinchan Kim rmap_walk_file(page, rwc, false); 1934b9773199SKirill A. Shutemov } 1935b9773199SKirill A. Shutemov 1936b9773199SKirill A. Shutemov /* Like rmap_walk, but caller holds relevant rmap lock */ 19371df631aeSMinchan Kim void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc) 1938b9773199SKirill A. Shutemov { 1939b9773199SKirill A. Shutemov /* no ksm support for now */ 1940b9773199SKirill A. Shutemov VM_BUG_ON_PAGE(PageKsm(page), page); 1941b9773199SKirill A. Shutemov if (PageAnon(page)) 19421df631aeSMinchan Kim rmap_walk_anon(page, rwc, true); 1943b9773199SKirill A. Shutemov else 19441df631aeSMinchan Kim rmap_walk_file(page, rwc, true); 1945e9995ef9SHugh Dickins } 19460fe6e20bSNaoya Horiguchi 1947e3390f67SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE 19480fe6e20bSNaoya Horiguchi /* 1949451b9514SKirill Tkhai * The following two functions are for anonymous (private mapped) hugepages. 19500fe6e20bSNaoya Horiguchi * Unlike common anonymous pages, anonymous hugepages have no accounting code 19510fe6e20bSNaoya Horiguchi * and no lru code, because we handle hugepages differently from common pages. 
19520fe6e20bSNaoya Horiguchi */ 19530fe6e20bSNaoya Horiguchi void hugepage_add_anon_rmap(struct page *page, 19540fe6e20bSNaoya Horiguchi struct vm_area_struct *vma, unsigned long address) 19550fe6e20bSNaoya Horiguchi { 19560fe6e20bSNaoya Horiguchi struct anon_vma *anon_vma = vma->anon_vma; 19570fe6e20bSNaoya Horiguchi int first; 1958a850ea30SNaoya Horiguchi 1959a850ea30SNaoya Horiguchi BUG_ON(!PageLocked(page)); 19600fe6e20bSNaoya Horiguchi BUG_ON(!anon_vma); 19615dbe0af4SHugh Dickins /* address might be in next vma when migration races vma_adjust */ 196253f9263bSKirill A. Shutemov first = atomic_inc_and_test(compound_mapcount_ptr(page)); 19630fe6e20bSNaoya Horiguchi if (first) 1964451b9514SKirill Tkhai __page_set_anon_rmap(page, vma, address, 0); 19650fe6e20bSNaoya Horiguchi } 19660fe6e20bSNaoya Horiguchi 19670fe6e20bSNaoya Horiguchi void hugepage_add_new_anon_rmap(struct page *page, 19680fe6e20bSNaoya Horiguchi struct vm_area_struct *vma, unsigned long address) 19690fe6e20bSNaoya Horiguchi { 19700fe6e20bSNaoya Horiguchi BUG_ON(address < vma->vm_start || address >= vma->vm_end); 197153f9263bSKirill A. Shutemov atomic_set(compound_mapcount_ptr(page), 0); 1972451b9514SKirill Tkhai __page_set_anon_rmap(page, vma, address, 1); 19730fe6e20bSNaoya Horiguchi } 1974e3390f67SNaoya Horiguchi #endif /* CONFIG_HUGETLB_PAGE */ 1975
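/*
 * Illustrative sketch of the hugetlb-side caller (cf. hugetlb_no_page() in
 * mm/hugetlb.c): a newly allocated private hugetlb page is wired into the
 * anon rmap with hugepage_add_new_anon_rmap() before the huge pte is
 * installed. "anon_rmap", "haddr", "ptep" and "writable" are stand-ins
 * from that fault-handling context; reservation and locking are omitted.
 */
#if 0	/* example only */
	if (anon_rmap)
		hugepage_add_new_anon_rmap(page, vma, haddr);
	else
		page_dup_rmap(page, true);
	new_pte = make_huge_pte(vma, page, writable);
	set_huge_pte_at(mm, haddr, ptep, new_pte);
#endif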