/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 *         mapping->i_mmap_rwsem
 *           anon_vma->rwsem
 *             mm->page_table_lock or pte_lock
 *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
 *               swap_lock (in swap_duplicate, swap_info_get)
 *                 mmlist_lock (in mmput, drain_mmlist and others)
 *                 mapping->private_lock (in __set_page_dirty_buffers)
 *                   mem_cgroup_{begin,end}_page_stat (memcg->move_lock)
 *                     mapping->tree_lock (widely used)
 *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                   sb_lock (within inode_lock in fs/fs-writeback.c)
 *                   mapping->tree_lock (widely used, in set_page_dirty,
 *                             in arch-dependent flush_dcache_mmap_lock,
 *                             within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 */
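/*
 * Illustrative sketch, not part of this file: the ordering above means a
 * path that already holds one of these locks may only take locks listed
 * below it. A truncation path, for instance, might nest roughly like:
 *
 *	inode_lock(inode);			// i_mutex/i_rwsem, outermost
 *	i_mmap_lock_write(mapping);		// mapping->i_mmap_rwsem
 *	...					// pte locks taken per VMA inside
 *	i_mmap_unlock_write(mapping);
 *	inode_unlock(inode);
 *
 * Taking them in the opposite order in another path risks an ABBA deadlock.
 */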
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>

#include <asm/tlbflush.h>

#include <trace/events/tlb.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->degree = 1;	/* Reference for first vma */
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		/* vma reference or self-parent link for new root */
		anon_vma->degree++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
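/*
 * Illustrative sketch, not kernel code: a typical caller is the anonymous
 * fault path, which goes through the anon_vma_prepare() wrapper (declared
 * in <linux/rmap.h>) so the common already-attached case stays inline:
 *
 *	static int example_anon_fault(struct vm_area_struct *vma,
 *				      unsigned long address)
 *	{
 *		if (unlikely(anon_vma_prepare(vma)))	// __anon_vma_prepare()
 *			return VM_FAULT_OOM;		// only on first fault
 *		...			// allocate the page, then
 *		...			// page_add_new_anon_rmap(...)
 *		return 0;
 *	}
 */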
/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * If dst->anon_vma is NULL this function tries to find and reuse an existing
 * anon_vma which has no vmas and only one child anon_vma. This prevents
 * degradation of the anon_vma hierarchy into an endless linear chain in the
 * case of a constantly forking task. On the other hand, an anon_vma with more
 * than one child isn't reused even if there is no live vma, so the rmap
 * walker has a good chance of avoiding scanning the whole hierarchy when it
 * searches for where a page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse the existing anon_vma if its degree is lower than
		 * two, which means it has no vma and only one anon_vma child.
		 *
		 * Do not choose the parent anon_vma, otherwise the first
		 * child will always reuse it. The root anon_vma is never
		 * reused: it has a self-parent reference and at least one
		 * child.
		 */
		if (!dst->anon_vma && anon_vma != src->anon_vma &&
				anon_vma->degree < 2)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->degree++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
	 * decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's spinlock is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->degree++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}
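/*
 * Illustrative sketch, not kernel code: fork's dup_mmap() is the expected
 * caller, attaching each copied VMA to the parent's anon_vma hierarchy:
 *
 *	// for each parent vma 'pvma' copied into child vma 'tmp':
 *	if (anon_vma_fork(tmp, pvma))
 *		goto fail_nomem;	// fork fails with -ENOMEM
 *
 * On success, pages COWed in the child land in tmp->anon_vma, while
 * still-shared pages remain findable through the parent's anon_vmas.
 */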
void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->degree--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma)
		vma->anon_vma->degree--;
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->degree);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
 * that the anon_vma pointer from page->mapping is valid if there is a
 * mapcount, we can dereference the anon_vma after observing those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}
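/*
 * Illustrative sketch, not kernel code: the expected batching pattern in
 * the unmap path is to queue flushes while clearing PTEs and flush once
 * before any I/O is started on the batch:
 *
 *	while (more pages to unmap) {
 *		...			// clear the PTE under its pte lock
 *		set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
 *	}
 *	try_to_unmap_flush_dirty();	// or try_to_unmap_flush()
 *	...				// now safe to write back or free
 *
 * One round of IPIs then covers the whole batch instead of one per page.
 */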
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure the compiler does not re-order the setting of
	 * tlb_flush_batched before the PTE is cleared.
	 */
	barrier();
	mm->tlb_flush_batched = true;

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer and batch the flush */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	if (mm->tlb_flush_batched) {
		flush_tlb_mm(mm);

		/*
		 * Do not allow the compiler to re-order the clearing of
		 * tlb_flush_batched before the tlb is flushed.
		 */
		barrier();
		mm->tlb_flush_batched = false;
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping) {
		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}
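/*
 * Illustrative sketch, not kernel code: callers such as the memory-failure
 * path resolve a page to a user virtual address one VMA at a time, e.g.:
 *
 *	unsigned long addr = page_address_in_vma(page, vma);
 *	if (addr == -EFAULT)
 *		continue;	// page is not mapped in this vma
 *
 * Note the -EFAULT sentinel comes back in an unsigned long, so it must be
 * compared against explicitly rather than treated as a valid address.
 */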
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(),
	 * set_pmd_at() without holding the anon_vma lock for write. So when
	 * looking for a genuine pmde (in which to find the pte), test
	 * present and !THP together.
	 */
	pmde = *pmd;
	barrier();
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}

struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: page_referenced_arg will be passed
 */
static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if (vma->vm_flags & VM_LOCKED) {
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false; /* To break the loop */
		}

		if (pvmw.pte) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the page has been used in another mapping,
				 * we will catch it; if this other mapping is
				 * already gone, the unmap path will have set
				 * PG_referenced or activated the page.
				 */
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		clear_page_idle(page);
	if (test_and_clear_page_young(page))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	if (!pra->mapcount)
		return false; /* To break the loop */

	return true;
}

static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect the vm_flags of the VMAs which actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = total_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!page_mapped(page))
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

	rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}
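/*
 * Illustrative sketch, not kernel code: reclaim (mm/vmscan.c) is the main
 * consumer, using the returned count and collected vm_flags to grade a
 * page, along these lines:
 *
 *	unsigned long vm_flags;
 *	int referenced = page_referenced(page, 1, memcg, &vm_flags);
 *
 *	if (vm_flags & VM_LOCKED)
 *		...		// cull the page to the unevictable list
 *	else if (referenced)
 *		...		// keep the page, perhaps activate it
 *	else
 *		...		// candidate for eviction
 */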
static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,
	};
	unsigned long start = address, end;
	int *cleaned = arg;

	/*
	 * We have to assume the worst case, ie pmd, for invalidation. Note
	 * that the page can not be freed from this function.
	 */
	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);

	while (page_vma_mapped_walk(&pvmw)) {
		unsigned long cstart, cend;
		int ret = 0;

		cstart = address = pvmw.address;
		if (pvmw.pte) {
			pte_t entry;
			pte_t *pte = pvmw.pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			cend = cstart + PAGE_SIZE;
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
			pmd_t *pmd = pvmw.pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_page(vma, address, page_to_pfn(page));
			entry = pmdp_huge_clear_flush(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			cstart &= PMD_MASK;
			cend = cstart + PMD_SIZE;
			ret = 1;
#else
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
#endif
		}

		if (ret) {
			mmu_notifier_invalidate_range(vma->vm_mm, cstart, cend);
			(*cleaned)++;
		}
	}

	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);
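/*
 * Illustrative sketch, not kernel code: page_mkclean() write-protects all
 * mappings of a page before writeback so that a later write refaults and
 * can be tracked; this is roughly what clear_page_dirty_for_io() does:
 *
 *	lock_page(page);		// page_mkclean() requires PageLocked
 *	if (page_mkclean(page))
 *		set_page_dirty(page);	// ptes were dirty: move that to the page
 *	unlock_page(page);
 */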
/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	page = compound_head(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg page_referenced()'s
	 * PageAnon()) will not see one without the other.
	 */
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	Page to add to rmap
 * @vma:	VM area to add page to.
 * @address:	User virtual address of the mapping
 * @exclusive:	the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
#endif
}
/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @compound:	charge the page as compound or small page
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int flags)
{
	bool compound = flags & RMAP_COMPOUND;
	bool first;

	if (compound) {
		atomic_t *mapcount;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		mapcount = compound_mapcount_ptr(page);
		first = atomic_inc_and_test(mapcount);
	} else {
		first = atomic_inc_and_test(&page->_mapcount);
	}

	if (first) {
		int nr = compound ? hpage_nr_pages(page) : 1;
		/*
		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
		 * these counters are not modified in interrupt context, and
		 * the pte lock (a spinlock) is held, which implies preemption
		 * disabled.
		 */
		if (compound)
			__inc_node_page_state(page, NR_ANON_THPS);
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	}
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address,
				flags & RMAP_EXCLUSIVE);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @compound: charge the page as compound or small page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	int nr = compound ? hpage_nr_pages(page) : 1;

	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	__SetPageSwapBacked(page);
	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		/* increment count (starts at -1) */
		atomic_set(compound_mapcount_ptr(page), 0);
		__inc_node_page_state(page, NR_ANON_THPS);
	} else {
		/* Anon THP always mapped first with PMD */
		VM_BUG_ON_PAGE(PageTransCompound(page), page);
		/* increment count (starts at -1) */
		atomic_set(&page->_mapcount, 0);
	}
	__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	__page_set_anon_rmap(page, vma, address, 1);
}
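
/*
 * Illustrative caller sketch (editorial; assumes the shape of
 * do_anonymous_page() in mm/memory.c): a fault handler wiring up a
 * freshly allocated anonymous page does, under the pte lock, roughly
 *
 *	page_add_new_anon_rmap(page, vma, address, false);
 *	lru_cache_add_active_or_unevictable(page, vma);
 *	set_pte_at(mm, address, pte, entry);
 *
 * The page is not yet visible to any other thread, which is why no page
 * lock and no inc-and-test are needed here.
 */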

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
	lock_page_memcg(page);
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_inc_and_test(&page[i]._mapcount))
				nr++;
		}
		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
			goto out;
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
		__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
	} else {
		if (PageTransCompound(page) && page_mapping(page)) {
			VM_WARN_ON_ONCE(!PageLocked(page));

			SetPageDoubleMap(compound_head(page));
			if (PageMlocked(page))
				clear_page_mlock(compound_head(page));
		}
		if (!atomic_inc_and_test(&page->_mapcount))
			goto out;
	}
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
out:
	unlock_page_memcg(page);
}
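
/*
 * Editorial note (not from the original file): in the compound branch
 * above, nr counts how many subpages went from unmapped to mapped, so
 * NR_FILE_MAPPED is always maintained in small-page units even when the
 * mapping itself is a single pmd. SetPageDoubleMap() in the pte branch
 * marks the compound page as possibly being mapped with ptes in addition
 * to a pmd, so subpage _mapcounts must be honoured from then on.
 */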

static void page_remove_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
	lock_page_memcg(page);

	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
	if (unlikely(PageHuge(page))) {
		/* hugetlb pages are always mapped with pmds */
		atomic_dec(compound_mapcount_ptr(page));
		goto out;
	}

	/* page still mapped by someone else? */
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
			goto out;
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
		__dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
	} else {
		if (!atomic_add_negative(-1, &page->_mapcount))
			goto out;
	}

	/*
	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
	 * these counters are not modified in interrupt context, and
	 * the pte lock (a spinlock) is held, which implies preemption
	 * disabled.
	 */
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
out:
	unlock_page_memcg(page);
}

static void page_remove_anon_compound_rmap(struct page *page)
{
	int i, nr;

	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
		return;

	/* Hugepages are not counted in NR_ANON_PAGES for now. */
	if (unlikely(PageHuge(page)))
		return;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return;

	__dec_node_page_state(page, NR_ANON_THPS);

	if (TestClearPageDoubleMap(page)) {
		/*
		 * Subpages can be mapped with PTEs too. Check how many of
		 * them are still mapped.
		 */
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
	} else {
		nr = HPAGE_PMD_NR;
	}

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (nr) {
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
		deferred_split_huge_page(page);
	}
}
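
/*
 * Editorial note (not from the original file): deferred_split_huge_page()
 * above does not split the THP immediately; it queues the page so that a
 * shrinker can split it later, under memory pressure, once it is only
 * partially mapped. Splitting cannot safely be done from this locking
 * context, since the caller holds the pte lock.
 */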

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @compound: uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, bool compound)
{
	if (!PageAnon(page))
		return page_remove_file_rmap(page, compound);

	if (compound)
		return page_remove_anon_compound_rmap(page);

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/*
	 * We use the irq-unsafe __{inc|mod}_node_page_state because
	 * these counters are not modified in interrupt context, and
	 * the pte lock (a spinlock) is held, which implies preemption disabled.
	 */
	__dec_node_page_state(page, NR_ANON_MAPPED);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (PageTransCompound(page))
		deferred_split_huge_page(compound_head(page));

	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
}
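
/*
 * Illustrative pairing sketch (editorial; assumes the shape of
 * zap_pte_range() in mm/memory.c): unmap mirrors the add side under the
 * same pte lock, roughly
 *
 *	ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 *	...
 *	page_remove_rmap(page, false);
 *
 * so the mapcounts and the NR_*_MAPPED counters stay balanced with the
 * page_add_*_rmap() paths above.
 */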

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	pte_t pteval;
	struct page *subpage;
	bool ret = true;
	unsigned long start = address, end;
	enum ttu_flags flags = (enum ttu_flags)arg;

	/* munlock has nothing to gain from examining un-locked vmas */
	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
		return true;

	if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
	    is_zone_device_page(page) && !is_device_private_page(page))
		return true;

	if (flags & TTU_SPLIT_HUGE_PMD) {
		split_huge_pmd_address(vma, address,
				flags & TTU_SPLIT_FREEZE, page);
	}

	/*
	 * We have to assume the worst case, ie. a pmd mapping, for the
	 * invalidation range. Note that the page cannot be freed inside
	 * this function, as the caller of try_to_unmap() must hold a
	 * reference on the page.
	 */
	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);

	while (page_vma_mapped_walk(&pvmw)) {
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte && (flags & TTU_MIGRATION)) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);

			if (!PageAnon(page))
				continue;

			set_pmd_migration_entry(&pvmw, page);
			continue;
		}
#endif

		/*
		 * If the page is mlock()d, we cannot swap it out.
		 * If it's recently referenced (perhaps page_referenced
		 * skipped over this mm) then we should reactivate it.
		 */
		if (!(flags & TTU_IGNORE_MLOCK)) {
			if (vma->vm_flags & VM_LOCKED) {
				/* PTE-mapped THP are never mlocked */
				if (!PageTransCompound(page)) {
					/*
					 * Holding pte lock, we do *not* need
					 * mmap_sem here
					 */
					mlock_vma_page(page);
				}
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (flags & TTU_MUNLOCK)
				continue;
		}

		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_PAGE(!pvmw.pte, page);

		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
		address = pvmw.address;
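
		/*
		 * Editorial note (not from the original file): "page" is the
		 * head page handed to the walk, while the pte references one
		 * pfn within it. The arithmetic above converts that pfn back
		 * to its struct page, so "subpage" is the subpage this pte
		 * actually maps (equal to "page" for a small page).
		 */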

		if (IS_ENABLED(CONFIG_MIGRATION) &&
		    (flags & TTU_MIGRATION) &&
		    is_zone_device_page(page)) {
			swp_entry_t entry;
			pte_t swp_pte;

			pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);

			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_migration_entry(page, 0);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
			goto discard;
		}

		if (!(flags & TTU_IGNORE_ACCESS)) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
		if (should_defer_flush(mm, flags)) {
			/*
			 * We clear the PTE but do not flush so potentially
			 * a remote CPU could still be writing to the page.
			 * If the entry was previously clean then the
			 * architecture must guarantee that a clear->dirty
			 * transition on a cached TLB entry is written through
			 * and traps if the PTE is unmapped.
			 */
			pteval = ptep_get_and_clear(mm, address, pvmw.pte);

			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
		} else {
			pteval = ptep_clear_flush(vma, address, pvmw.pte);
		}
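
		/*
		 * Editorial note (not from the original file): when the flush
		 * is deferred above, pte_dirty() is passed along so that the
		 * batched flush can be forced before any I/O that might
		 * otherwise miss a racing write; the pending flush is issued
		 * later through try_to_unmap_flush()/try_to_unmap_flush_dirty()
		 * in the reclaim path.
		 */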

		/* Move the dirty bit to the page. Now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			if (PageHuge(page)) {
				int nr = 1 << compound_order(page);
				hugetlb_count_sub(nr, mm);
				set_huge_swap_pte_at(mm, address,
						     pvmw.pte, pteval,
						     vma_mmu_pagesize(vma));
			} else {
				dec_mm_counter(mm, mm_counter(page));
				set_pte_at(mm, address, pvmw.pte, pteval);
			}
		} else if (pte_unused(pteval)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 */
			dec_mm_counter(mm, mm_counter(page));
		} else if (IS_ENABLED(CONFIG_MIGRATION) &&
				(flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
			swp_entry_t entry;
			pte_t swp_pte;
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_migration_entry(subpage,
					pte_write(pteval));
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
		} else if (PageAnon(page)) {
			swp_entry_t entry = { .val = page_private(subpage) };
			pte_t swp_pte;
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
				WARN_ON_ONCE(1);
				ret = false;
				/* We have to invalidate as we cleared the pte */
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/* MADV_FREE page check */
			if (!PageSwapBacked(page)) {
				if (!PageDirty(page)) {
					dec_mm_counter(mm, MM_ANONPAGES);
					goto discard;
				}

				/*
				 * If the page was redirtied, it cannot be
				 * discarded. Remap the page to page table.
				 */
				set_pte_at(mm, address, pvmw.pte, pteval);
				SetPageSwapBacked(page);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
		} else
			dec_mm_counter(mm, mm_counter_file(page));
discard:
		page_remove_rmap(subpage, PageHuge(page));
		put_page(page);
		mmu_notifier_invalidate_range(mm, address,
					      address + PAGE_SIZE);
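
		/*
		 * Editorial note (not from the original file): this per-pte
		 * mmu_notifier_invalidate_range() call sits inside the
		 * invalidate_range_start()/end() pair set up before the walk,
		 * so secondary MMUs are told about each cleared pte promptly
		 * rather than only when the whole range is finished.
		 */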
	}

	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);

	return ret;
}

bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return is_vma_temporary_stack(vma);
}

static int page_mapcount_is_zero(struct page *page)
{
	return !total_mapcount(page);
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 *
 * If unmap is successful, return true. Otherwise, false.
 */
bool try_to_unmap(struct page *page, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = page_mapcount_is_zero,
		.anon_lock = page_lock_anon_vma_read,
	};

	/*
	 * During exec, a temporary VMA is set up and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
	if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
	    && !PageKsm(page) && PageAnon(page))
		rwc.invalid_vma = invalid_migration_vma;

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(page, &rwc);
	else
		rmap_walk(page, &rwc);

	return !page_mapcount(page) ? true : false;
}
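
/*
 * Illustrative caller sketch (editorial; assumes the shape of
 * shrink_page_list() in mm/vmscan.c): page reclaim holds the page lock
 * and does roughly
 *
 *	if (page_mapped(page)) {
 *		if (!try_to_unmap(page, ttu_flags))
 *			goto activate_locked;
 *	}
 *
 * before it attempts writeback or frees the page.
 */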

static int page_not_mapped(struct page *page)
{
	return !page_mapped(page);
}

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 */
void try_to_munlock(struct page *page)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)TTU_MUNLOCK,
		.done = page_not_mapped,
		.anon_lock = page_lock_anon_vma_read,
	};

	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	rmap_walk(page, &rwc);
}

void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	anon_vma_free(anon_vma);
	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);
}

static struct anon_vma *rmap_walk_anon_lock(struct page *page,
					struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;

	if (rwc->anon_lock)
		return rwc->anon_lock(page);

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem. Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma disappearing.
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return NULL;

	anon_vma_lock_read(anon_vma);
	return anon_vma;
}
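
/*
 * Illustrative sketch of the walk API implemented below (editorial; the
 * callback and its name are hypothetical, not from this file): a client
 * packages a per-vma callback into a struct rmap_walk_control and lets
 * rmap_walk() fan it out over every vma that maps the page:
 *
 *	static bool my_rmap_one(struct page *page,
 *			struct vm_area_struct *vma,
 *			unsigned long address, void *arg)
 *	{
 *		... inspect or modify the mapping at address ...
 *		return true;	(true means: keep walking)
 *	}
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one = my_rmap_one,
 *	};
 *	rmap_walk(page, &rwc);
 */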

/*
 * rmap_walk_anon - do something to anonymous page using the object-based
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write. So, we won't recheck
 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
 * LOCKED.
 */
static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct anon_vma *anon_vma;
	pgoff_t pgoff_start, pgoff_end;
	struct anon_vma_chain *avc;

	if (locked) {
		anon_vma = page_anon_vma(page);
		/* anon_vma disappeared under us? */
		VM_BUG_ON_PAGE(!anon_vma, page);
	} else {
		anon_vma = rmap_walk_anon_lock(page, rwc);
	}
	if (!anon_vma)
		return;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
			pgoff_start, pgoff_end) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(page, vma, address, rwc->arg))
			break;
		if (rwc->done && rwc->done(page))
			break;
	}

	if (!locked)
		anon_vma_unlock_read(anon_vma);
}
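
/*
 * Editorial note (not from the original file): the pgoff range above is
 * widened by hpage_nr_pages() - 1, so for a THP the interval-tree lookup
 * finds every vma that overlaps any subpage of the compound page, not
 * just those covering the head page. The file walk below does the same.
 */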

/*
 * rmap_walk_file - do something to file page using the object-based rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write. So, we won't recheck
 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
 * LOCKED.
 */
static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct address_space *mapping = page_mapping(page);
	pgoff_t pgoff_start, pgoff_end;
	struct vm_area_struct *vma;

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_rwsem.
	 */
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!mapping)
		return;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
	if (!locked)
		i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap,
			pgoff_start, pgoff_end) {
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(page, vma, address, rwc->arg))
			goto done;
		if (rwc->done && rwc->done(page))
			goto done;
	}

done:
	if (!locked)
		i_mmap_unlock_read(mapping);
}

void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		rmap_walk_ksm(page, rwc);
	else if (PageAnon(page))
		rmap_walk_anon(page, rwc, false);
	else
		rmap_walk_file(page, rwc, false);
}

/* Like rmap_walk, but caller holds relevant rmap lock */
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
{
	/* no ksm support for now */
	VM_BUG_ON_PAGE(PageKsm(page), page);
	if (PageAnon(page))
		rmap_walk_anon(page, rwc, true);
	else
		rmap_walk_file(page, rwc, true);
}
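
/*
 * Editorial note (not from the original file): rmap_walk_locked() exists
 * for callers that already hold the relevant rmap lock (the anon_vma
 * rwsem or mapping->i_mmap_rwsem); for example, remove_migration_ptes()
 * is invoked this way from the THP split path while the anon_vma lock is
 * held, so the walk must not try to take that lock again.
 */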

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following three functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
static void __hugepage_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
	first = atomic_inc_and_test(compound_mapcount_ptr(page));
	if (first)
		__hugepage_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(compound_mapcount_ptr(page), 0);
	__hugepage_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */