/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   mm->mmap_lock
 *     page->flags PG_locked (lock_page)   * (see hugetlbfs below)
 *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 *         mapping->i_mmap_rwsem
 *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *           anon_vma->rwsem
 *             mm->page_table_lock or pte_lock
 *               swap_lock (in swap_duplicate, swap_info_get)
 *                 mmlist_lock (in mmput, drain_mmlist and others)
 *                 mapping->private_lock (in __set_page_dirty_buffers)
 *                   lock_page_memcg move_lock (in __set_page_dirty_buffers)
 *                     i_pages lock (widely used)
 *                       lruvec->lru_lock (in lock_page_lruvec_irq)
 *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                   sb_lock (within inode_lock in fs/fs-writeback.c)
 *                   i_pages lock (widely used, in set_page_dirty,
 *                             in arch-dependent flush_dcache_mmap_lock,
 *                             within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 *
 * * hugetlbfs PageHuge() pages take locks in this order:
 *         mapping->i_mmap_rwsem
 *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *             page->flags PG_locked (lock_page)
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
601da177e4SLinus Torvalds #include <linux/slab.h> 611da177e4SLinus Torvalds #include <linux/init.h> 625ad64688SHugh Dickins #include <linux/ksm.h> 631da177e4SLinus Torvalds #include <linux/rmap.h> 641da177e4SLinus Torvalds #include <linux/rcupdate.h> 65b95f1b31SPaul Gortmaker #include <linux/export.h> 668a9f3ccdSBalbir Singh #include <linux/memcontrol.h> 67cddb8a5cSAndrea Arcangeli #include <linux/mmu_notifier.h> 6864cdd548SKOSAKI Motohiro #include <linux/migrate.h> 690fe6e20bSNaoya Horiguchi #include <linux/hugetlb.h> 70444f84fdSBen Dooks #include <linux/huge_mm.h> 71ef5d437fSJan Kara #include <linux/backing-dev.h> 7233c3fc71SVladimir Davydov #include <linux/page_idle.h> 73a5430ddaSJérôme Glisse #include <linux/memremap.h> 74bce73e48SChristian Borntraeger #include <linux/userfaultfd_k.h> 751da177e4SLinus Torvalds 761da177e4SLinus Torvalds #include <asm/tlbflush.h> 771da177e4SLinus Torvalds 7872b252aeSMel Gorman #include <trace/events/tlb.h> 7972b252aeSMel Gorman 80b291f000SNick Piggin #include "internal.h" 81b291f000SNick Piggin 82fdd2e5f8SAdrian Bunk static struct kmem_cache *anon_vma_cachep; 835beb4930SRik van Riel static struct kmem_cache *anon_vma_chain_cachep; 84fdd2e5f8SAdrian Bunk 85fdd2e5f8SAdrian Bunk static inline struct anon_vma *anon_vma_alloc(void) 86fdd2e5f8SAdrian Bunk { 8701d8b20dSPeter Zijlstra struct anon_vma *anon_vma; 8801d8b20dSPeter Zijlstra 8901d8b20dSPeter Zijlstra anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); 9001d8b20dSPeter Zijlstra if (anon_vma) { 9101d8b20dSPeter Zijlstra atomic_set(&anon_vma->refcount, 1); 927a3ef208SKonstantin Khlebnikov anon_vma->degree = 1; /* Reference for first vma */ 937a3ef208SKonstantin Khlebnikov anon_vma->parent = anon_vma; 9401d8b20dSPeter Zijlstra /* 9501d8b20dSPeter Zijlstra * Initialise the anon_vma root to point to itself. If called 9601d8b20dSPeter Zijlstra * from fork, the root will be reset to the parents anon_vma. 9701d8b20dSPeter Zijlstra */ 9801d8b20dSPeter Zijlstra anon_vma->root = anon_vma; 99fdd2e5f8SAdrian Bunk } 100fdd2e5f8SAdrian Bunk 10101d8b20dSPeter Zijlstra return anon_vma; 10201d8b20dSPeter Zijlstra } 10301d8b20dSPeter Zijlstra 10401d8b20dSPeter Zijlstra static inline void anon_vma_free(struct anon_vma *anon_vma) 105fdd2e5f8SAdrian Bunk { 10601d8b20dSPeter Zijlstra VM_BUG_ON(atomic_read(&anon_vma->refcount)); 10788c22088SPeter Zijlstra 10888c22088SPeter Zijlstra /* 1094fc3f1d6SIngo Molnar * Synchronize against page_lock_anon_vma_read() such that 11088c22088SPeter Zijlstra * we can safely hold the lock without the anon_vma getting 11188c22088SPeter Zijlstra * freed. 11288c22088SPeter Zijlstra * 11388c22088SPeter Zijlstra * Relies on the full mb implied by the atomic_dec_and_test() from 11488c22088SPeter Zijlstra * put_anon_vma() against the acquire barrier implied by 1154fc3f1d6SIngo Molnar * down_read_trylock() from page_lock_anon_vma_read(). This orders: 11688c22088SPeter Zijlstra * 1174fc3f1d6SIngo Molnar * page_lock_anon_vma_read() VS put_anon_vma() 1184fc3f1d6SIngo Molnar * down_read_trylock() atomic_dec_and_test() 11988c22088SPeter Zijlstra * LOCK MB 1204fc3f1d6SIngo Molnar * atomic_read() rwsem_is_locked() 12188c22088SPeter Zijlstra * 12288c22088SPeter Zijlstra * LOCK should suffice since the actual taking of the lock must 12388c22088SPeter Zijlstra * happen _before_ what follows. 
12488c22088SPeter Zijlstra */ 1257f39dda9SHugh Dickins might_sleep(); 1265a505085SIngo Molnar if (rwsem_is_locked(&anon_vma->root->rwsem)) { 1274fc3f1d6SIngo Molnar anon_vma_lock_write(anon_vma); 12808b52706SKonstantin Khlebnikov anon_vma_unlock_write(anon_vma); 12988c22088SPeter Zijlstra } 13088c22088SPeter Zijlstra 131fdd2e5f8SAdrian Bunk kmem_cache_free(anon_vma_cachep, anon_vma); 132fdd2e5f8SAdrian Bunk } 1331da177e4SLinus Torvalds 134dd34739cSLinus Torvalds static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) 1355beb4930SRik van Riel { 136dd34739cSLinus Torvalds return kmem_cache_alloc(anon_vma_chain_cachep, gfp); 1375beb4930SRik van Riel } 1385beb4930SRik van Riel 139e574b5fdSNamhyung Kim static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) 1405beb4930SRik van Riel { 1415beb4930SRik van Riel kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); 1425beb4930SRik van Riel } 1435beb4930SRik van Riel 1446583a843SKautuk Consul static void anon_vma_chain_link(struct vm_area_struct *vma, 1456583a843SKautuk Consul struct anon_vma_chain *avc, 1466583a843SKautuk Consul struct anon_vma *anon_vma) 1476583a843SKautuk Consul { 1486583a843SKautuk Consul avc->vma = vma; 1496583a843SKautuk Consul avc->anon_vma = anon_vma; 1506583a843SKautuk Consul list_add(&avc->same_vma, &vma->anon_vma_chain); 151bf181b9fSMichel Lespinasse anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); 1526583a843SKautuk Consul } 1536583a843SKautuk Consul 154d9d332e0SLinus Torvalds /** 155d5a187daSVlastimil Babka * __anon_vma_prepare - attach an anon_vma to a memory region 156d9d332e0SLinus Torvalds * @vma: the memory region in question 157d9d332e0SLinus Torvalds * 158d9d332e0SLinus Torvalds * This makes sure the memory mapping described by 'vma' has 159d9d332e0SLinus Torvalds * an 'anon_vma' attached to it, so that we can associate the 160d9d332e0SLinus Torvalds * anonymous pages mapped into it with that anon_vma. 161d9d332e0SLinus Torvalds * 162d5a187daSVlastimil Babka * The common case will be that we already have one, which 163d5a187daSVlastimil Babka * is handled inline by anon_vma_prepare(). But if 16423a0790aSFigo.zhang * not we either need to find an adjacent mapping that we 165d9d332e0SLinus Torvalds * can re-use the anon_vma from (very common when the only 166d9d332e0SLinus Torvalds * reason for splitting a vma has been mprotect()), or we 167d9d332e0SLinus Torvalds * allocate a new one. 168d9d332e0SLinus Torvalds * 169d9d332e0SLinus Torvalds * Anon-vma allocations are very subtle, because we may have 1704fc3f1d6SIngo Molnar * optimistically looked up an anon_vma in page_lock_anon_vma_read() 171aaf1f990SMiaohe Lin * and that may actually touch the rwsem even in the newly 172d9d332e0SLinus Torvalds * allocated vma (it depends on RCU to make sure that the 173d9d332e0SLinus Torvalds * anon_vma isn't actually destroyed). 174d9d332e0SLinus Torvalds * 175d9d332e0SLinus Torvalds * As a result, we need to do proper anon_vma locking even 176d9d332e0SLinus Torvalds * for the new allocation. At the same time, we do not want 177d9d332e0SLinus Torvalds * to do any locking for the common case of already having 178d9d332e0SLinus Torvalds * an anon_vma. 179d9d332e0SLinus Torvalds * 180c1e8d7c6SMichel Lespinasse * This must be called with the mmap_lock held for reading. 
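 *
 * For reference, the fast path lives in the anon_vma_prepare() wrapper
 * (in <linux/rmap.h>), which is roughly:
 *
 *	if (likely(vma->anon_vma))
 *		return 0;
 *	return __anon_vma_prepare(vma);
 *
 * so this slow path only runs for the first anonymous fault in a vma.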
181d9d332e0SLinus Torvalds */ 182d5a187daSVlastimil Babka int __anon_vma_prepare(struct vm_area_struct *vma) 1831da177e4SLinus Torvalds { 184d5a187daSVlastimil Babka struct mm_struct *mm = vma->vm_mm; 185d5a187daSVlastimil Babka struct anon_vma *anon_vma, *allocated; 1865beb4930SRik van Riel struct anon_vma_chain *avc; 1871da177e4SLinus Torvalds 1881da177e4SLinus Torvalds might_sleep(); 1891da177e4SLinus Torvalds 190dd34739cSLinus Torvalds avc = anon_vma_chain_alloc(GFP_KERNEL); 1915beb4930SRik van Riel if (!avc) 1925beb4930SRik van Riel goto out_enomem; 1935beb4930SRik van Riel 1941da177e4SLinus Torvalds anon_vma = find_mergeable_anon_vma(vma); 1951da177e4SLinus Torvalds allocated = NULL; 196d9d332e0SLinus Torvalds if (!anon_vma) { 1971da177e4SLinus Torvalds anon_vma = anon_vma_alloc(); 1981da177e4SLinus Torvalds if (unlikely(!anon_vma)) 1995beb4930SRik van Riel goto out_enomem_free_avc; 2001da177e4SLinus Torvalds allocated = anon_vma; 2011da177e4SLinus Torvalds } 2021da177e4SLinus Torvalds 2034fc3f1d6SIngo Molnar anon_vma_lock_write(anon_vma); 2041da177e4SLinus Torvalds /* page_table_lock to protect against threads */ 2051da177e4SLinus Torvalds spin_lock(&mm->page_table_lock); 2061da177e4SLinus Torvalds if (likely(!vma->anon_vma)) { 2071da177e4SLinus Torvalds vma->anon_vma = anon_vma; 2086583a843SKautuk Consul anon_vma_chain_link(vma, avc, anon_vma); 2097a3ef208SKonstantin Khlebnikov /* vma reference or self-parent link for new root */ 2107a3ef208SKonstantin Khlebnikov anon_vma->degree++; 2111da177e4SLinus Torvalds allocated = NULL; 21231f2b0ebSOleg Nesterov avc = NULL; 2131da177e4SLinus Torvalds } 2141da177e4SLinus Torvalds spin_unlock(&mm->page_table_lock); 21508b52706SKonstantin Khlebnikov anon_vma_unlock_write(anon_vma); 21631f2b0ebSOleg Nesterov 21731f2b0ebSOleg Nesterov if (unlikely(allocated)) 21801d8b20dSPeter Zijlstra put_anon_vma(allocated); 21931f2b0ebSOleg Nesterov if (unlikely(avc)) 2205beb4930SRik van Riel anon_vma_chain_free(avc); 221d5a187daSVlastimil Babka 2221da177e4SLinus Torvalds return 0; 2235beb4930SRik van Riel 2245beb4930SRik van Riel out_enomem_free_avc: 2255beb4930SRik van Riel anon_vma_chain_free(avc); 2265beb4930SRik van Riel out_enomem: 2275beb4930SRik van Riel return -ENOMEM; 2281da177e4SLinus Torvalds } 2291da177e4SLinus Torvalds 230bb4aa396SLinus Torvalds /* 231bb4aa396SLinus Torvalds * This is a useful helper function for locking the anon_vma root as 232bb4aa396SLinus Torvalds * we traverse the vma->anon_vma_chain, looping over anon_vma's that 233bb4aa396SLinus Torvalds * have the same vma. 234bb4aa396SLinus Torvalds * 235bb4aa396SLinus Torvalds * Such anon_vma's should have the same root, so you'd expect to see 236bb4aa396SLinus Torvalds * just a single mutex_lock for the whole traversal. 
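 *
 * Typical use, as in anon_vma_clone() and unlink_anon_vmas() below (sketch):
 *
 *	struct anon_vma *root = NULL;
 *
 *	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
 *		root = lock_anon_vma_root(root, avc->anon_vma);
 *	unlock_anon_vma_root(root);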
237bb4aa396SLinus Torvalds */ 238bb4aa396SLinus Torvalds static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma) 239bb4aa396SLinus Torvalds { 240bb4aa396SLinus Torvalds struct anon_vma *new_root = anon_vma->root; 241bb4aa396SLinus Torvalds if (new_root != root) { 242bb4aa396SLinus Torvalds if (WARN_ON_ONCE(root)) 2435a505085SIngo Molnar up_write(&root->rwsem); 244bb4aa396SLinus Torvalds root = new_root; 2455a505085SIngo Molnar down_write(&root->rwsem); 246bb4aa396SLinus Torvalds } 247bb4aa396SLinus Torvalds return root; 248bb4aa396SLinus Torvalds } 249bb4aa396SLinus Torvalds 250bb4aa396SLinus Torvalds static inline void unlock_anon_vma_root(struct anon_vma *root) 251bb4aa396SLinus Torvalds { 252bb4aa396SLinus Torvalds if (root) 2535a505085SIngo Molnar up_write(&root->rwsem); 254bb4aa396SLinus Torvalds } 255bb4aa396SLinus Torvalds 2565beb4930SRik van Riel /* 2575beb4930SRik van Riel * Attach the anon_vmas from src to dst. 2585beb4930SRik van Riel * Returns 0 on success, -ENOMEM on failure. 2597a3ef208SKonstantin Khlebnikov * 260cb152a1aSShijie Luo * anon_vma_clone() is called by __vma_adjust(), __split_vma(), copy_vma() and 26147b390d2SWei Yang * anon_vma_fork(). The first three want an exact copy of src, while the last 26247b390d2SWei Yang * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent 26347b390d2SWei Yang * endless growth of anon_vma. Since dst->anon_vma is set to NULL before call, 26447b390d2SWei Yang * we can identify this case by checking (!dst->anon_vma && src->anon_vma). 26547b390d2SWei Yang * 26647b390d2SWei Yang * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find 26747b390d2SWei Yang * and reuse existing anon_vma which has no vmas and only one child anon_vma. 26847b390d2SWei Yang * This prevents degradation of anon_vma hierarchy to endless linear chain in 26947b390d2SWei Yang * case of constantly forking task. On the other hand, an anon_vma with more 27047b390d2SWei Yang * than one child isn't reused even if there was no alive vma, thus rmap 27147b390d2SWei Yang * walker has a good chance of avoiding scanning the whole hierarchy when it 27247b390d2SWei Yang * searches where page is mapped. 2735beb4930SRik van Riel */ 2745beb4930SRik van Riel int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) 2755beb4930SRik van Riel { 2765beb4930SRik van Riel struct anon_vma_chain *avc, *pavc; 277bb4aa396SLinus Torvalds struct anon_vma *root = NULL; 2785beb4930SRik van Riel 279646d87b4SLinus Torvalds list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { 280bb4aa396SLinus Torvalds struct anon_vma *anon_vma; 281bb4aa396SLinus Torvalds 282dd34739cSLinus Torvalds avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); 283dd34739cSLinus Torvalds if (unlikely(!avc)) { 284dd34739cSLinus Torvalds unlock_anon_vma_root(root); 285dd34739cSLinus Torvalds root = NULL; 286dd34739cSLinus Torvalds avc = anon_vma_chain_alloc(GFP_KERNEL); 2875beb4930SRik van Riel if (!avc) 2885beb4930SRik van Riel goto enomem_failure; 289dd34739cSLinus Torvalds } 290bb4aa396SLinus Torvalds anon_vma = pavc->anon_vma; 291bb4aa396SLinus Torvalds root = lock_anon_vma_root(root, anon_vma); 292bb4aa396SLinus Torvalds anon_vma_chain_link(dst, avc, anon_vma); 2937a3ef208SKonstantin Khlebnikov 2947a3ef208SKonstantin Khlebnikov /* 2957a3ef208SKonstantin Khlebnikov * Reuse existing anon_vma if its degree lower than two, 2967a3ef208SKonstantin Khlebnikov * that means it has no vma and only one anon_vma child. 
2977a3ef208SKonstantin Khlebnikov * 2987a3ef208SKonstantin Khlebnikov * Do not chose parent anon_vma, otherwise first child 2997a3ef208SKonstantin Khlebnikov * will always reuse it. Root anon_vma is never reused: 3007a3ef208SKonstantin Khlebnikov * it has self-parent reference and at least one child. 3017a3ef208SKonstantin Khlebnikov */ 30247b390d2SWei Yang if (!dst->anon_vma && src->anon_vma && 30347b390d2SWei Yang anon_vma != src->anon_vma && anon_vma->degree < 2) 3047a3ef208SKonstantin Khlebnikov dst->anon_vma = anon_vma; 3055beb4930SRik van Riel } 3067a3ef208SKonstantin Khlebnikov if (dst->anon_vma) 3077a3ef208SKonstantin Khlebnikov dst->anon_vma->degree++; 308bb4aa396SLinus Torvalds unlock_anon_vma_root(root); 3095beb4930SRik van Riel return 0; 3105beb4930SRik van Riel 3115beb4930SRik van Riel enomem_failure: 3123fe89b3eSLeon Yu /* 3133fe89b3eSLeon Yu * dst->anon_vma is dropped here otherwise its degree can be incorrectly 3143fe89b3eSLeon Yu * decremented in unlink_anon_vmas(). 3153fe89b3eSLeon Yu * We can safely do this because callers of anon_vma_clone() don't care 3163fe89b3eSLeon Yu * about dst->anon_vma if anon_vma_clone() failed. 3173fe89b3eSLeon Yu */ 3183fe89b3eSLeon Yu dst->anon_vma = NULL; 3195beb4930SRik van Riel unlink_anon_vmas(dst); 3205beb4930SRik van Riel return -ENOMEM; 3211da177e4SLinus Torvalds } 3221da177e4SLinus Torvalds 3235beb4930SRik van Riel /* 3245beb4930SRik van Riel * Attach vma to its own anon_vma, as well as to the anon_vmas that 3255beb4930SRik van Riel * the corresponding VMA in the parent process is attached to. 3265beb4930SRik van Riel * Returns 0 on success, non-zero on failure. 3275beb4930SRik van Riel */ 3285beb4930SRik van Riel int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) 3291da177e4SLinus Torvalds { 3305beb4930SRik van Riel struct anon_vma_chain *avc; 3315beb4930SRik van Riel struct anon_vma *anon_vma; 332c4ea95d7SDaniel Forrest int error; 3335beb4930SRik van Riel 3345beb4930SRik van Riel /* Don't bother if the parent process has no anon_vma here. */ 3355beb4930SRik van Riel if (!pvma->anon_vma) 3365beb4930SRik van Riel return 0; 3375beb4930SRik van Riel 3387a3ef208SKonstantin Khlebnikov /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ 3397a3ef208SKonstantin Khlebnikov vma->anon_vma = NULL; 3407a3ef208SKonstantin Khlebnikov 3415beb4930SRik van Riel /* 3425beb4930SRik van Riel * First, attach the new VMA to the parent VMA's anon_vmas, 3435beb4930SRik van Riel * so rmap can find non-COWed pages in child processes. 3445beb4930SRik van Riel */ 345c4ea95d7SDaniel Forrest error = anon_vma_clone(vma, pvma); 346c4ea95d7SDaniel Forrest if (error) 347c4ea95d7SDaniel Forrest return error; 3485beb4930SRik van Riel 3497a3ef208SKonstantin Khlebnikov /* An existing anon_vma has been reused, all done then. */ 3507a3ef208SKonstantin Khlebnikov if (vma->anon_vma) 3517a3ef208SKonstantin Khlebnikov return 0; 3527a3ef208SKonstantin Khlebnikov 3535beb4930SRik van Riel /* Then add our own anon_vma. */ 3545beb4930SRik van Riel anon_vma = anon_vma_alloc(); 3555beb4930SRik van Riel if (!anon_vma) 3565beb4930SRik van Riel goto out_error; 357dd34739cSLinus Torvalds avc = anon_vma_chain_alloc(GFP_KERNEL); 3585beb4930SRik van Riel if (!avc) 3595beb4930SRik van Riel goto out_error_free_anon_vma; 3605c341ee1SRik van Riel 3615c341ee1SRik van Riel /* 362aaf1f990SMiaohe Lin * The root anon_vma's rwsem is the lock actually used when we 3635c341ee1SRik van Riel * lock any of the anon_vmas in this anon_vma tree. 
3645c341ee1SRik van Riel */ 3655c341ee1SRik van Riel anon_vma->root = pvma->anon_vma->root; 3667a3ef208SKonstantin Khlebnikov anon_vma->parent = pvma->anon_vma; 36776545066SRik van Riel /* 36801d8b20dSPeter Zijlstra * With refcounts, an anon_vma can stay around longer than the 36901d8b20dSPeter Zijlstra * process it belongs to. The root anon_vma needs to be pinned until 37001d8b20dSPeter Zijlstra * this anon_vma is freed, because the lock lives in the root. 37176545066SRik van Riel */ 37276545066SRik van Riel get_anon_vma(anon_vma->root); 3735beb4930SRik van Riel /* Mark this anon_vma as the one where our new (COWed) pages go. */ 3745beb4930SRik van Riel vma->anon_vma = anon_vma; 3754fc3f1d6SIngo Molnar anon_vma_lock_write(anon_vma); 3765c341ee1SRik van Riel anon_vma_chain_link(vma, avc, anon_vma); 3777a3ef208SKonstantin Khlebnikov anon_vma->parent->degree++; 37808b52706SKonstantin Khlebnikov anon_vma_unlock_write(anon_vma); 3795beb4930SRik van Riel 3805beb4930SRik van Riel return 0; 3815beb4930SRik van Riel 3825beb4930SRik van Riel out_error_free_anon_vma: 38301d8b20dSPeter Zijlstra put_anon_vma(anon_vma); 3845beb4930SRik van Riel out_error: 3854946d54cSRik van Riel unlink_anon_vmas(vma); 3865beb4930SRik van Riel return -ENOMEM; 3875beb4930SRik van Riel } 3885beb4930SRik van Riel 3895beb4930SRik van Riel void unlink_anon_vmas(struct vm_area_struct *vma) 3905beb4930SRik van Riel { 3915beb4930SRik van Riel struct anon_vma_chain *avc, *next; 392eee2acbaSPeter Zijlstra struct anon_vma *root = NULL; 3935beb4930SRik van Riel 3945c341ee1SRik van Riel /* 3955c341ee1SRik van Riel * Unlink each anon_vma chained to the VMA. This list is ordered 3965c341ee1SRik van Riel * from newest to oldest, ensuring the root anon_vma gets freed last. 3975c341ee1SRik van Riel */ 3985beb4930SRik van Riel list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 399eee2acbaSPeter Zijlstra struct anon_vma *anon_vma = avc->anon_vma; 400eee2acbaSPeter Zijlstra 401eee2acbaSPeter Zijlstra root = lock_anon_vma_root(root, anon_vma); 402bf181b9fSMichel Lespinasse anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); 403eee2acbaSPeter Zijlstra 404eee2acbaSPeter Zijlstra /* 405eee2acbaSPeter Zijlstra * Leave empty anon_vmas on the list - we'll need 406eee2acbaSPeter Zijlstra * to free them outside the lock. 407eee2acbaSPeter Zijlstra */ 408f808c13fSDavidlohr Bueso if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) { 4097a3ef208SKonstantin Khlebnikov anon_vma->parent->degree--; 410eee2acbaSPeter Zijlstra continue; 4117a3ef208SKonstantin Khlebnikov } 412eee2acbaSPeter Zijlstra 413eee2acbaSPeter Zijlstra list_del(&avc->same_vma); 414eee2acbaSPeter Zijlstra anon_vma_chain_free(avc); 415eee2acbaSPeter Zijlstra } 416ee8ab190SLi Xinhai if (vma->anon_vma) { 4177a3ef208SKonstantin Khlebnikov vma->anon_vma->degree--; 418ee8ab190SLi Xinhai 419ee8ab190SLi Xinhai /* 420ee8ab190SLi Xinhai * vma would still be needed after unlink, and anon_vma will be prepared 421ee8ab190SLi Xinhai * when handle fault. 422ee8ab190SLi Xinhai */ 423ee8ab190SLi Xinhai vma->anon_vma = NULL; 424ee8ab190SLi Xinhai } 425eee2acbaSPeter Zijlstra unlock_anon_vma_root(root); 426eee2acbaSPeter Zijlstra 427eee2acbaSPeter Zijlstra /* 428eee2acbaSPeter Zijlstra * Iterate the list once more, it now only contains empty and unlinked 429eee2acbaSPeter Zijlstra * anon_vmas, destroy them. Could not do before due to __put_anon_vma() 4305a505085SIngo Molnar * needing to write-acquire the anon_vma->root->rwsem. 
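	 *
	 * (The first pass above deliberately kept exactly those chains whose
	 * anon_vma became empty, so this loop only ever sees anon_vmas with
	 * no remaining users.)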
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->degree);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a refcount-elevated anon_vma
 * that might have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
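 *
 * A typical caller therefore looks roughly like this (sketch):
 *
 *	anon_vma = page_get_anon_vma(page);
 *	if (!anon_vma)
 *		return;		/* no longer anon, or no longer mapped */
 *	... walk the anon_vma, re-checking page_mapped() as needed ...
 *	put_anon_vma(anon_vma);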
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
54888c22088SPeter Zijlstra */ 549eee0f252SHugh Dickins if (!page_mapped(page)) { 5504fc3f1d6SIngo Molnar up_read(&root_anon_vma->rwsem); 55188c22088SPeter Zijlstra anon_vma = NULL; 55288c22088SPeter Zijlstra } 55388c22088SPeter Zijlstra goto out; 55488c22088SPeter Zijlstra } 55588c22088SPeter Zijlstra 55688c22088SPeter Zijlstra /* trylock failed, we got to sleep */ 55788c22088SPeter Zijlstra if (!atomic_inc_not_zero(&anon_vma->refcount)) { 55888c22088SPeter Zijlstra anon_vma = NULL; 55988c22088SPeter Zijlstra goto out; 56088c22088SPeter Zijlstra } 56188c22088SPeter Zijlstra 56288c22088SPeter Zijlstra if (!page_mapped(page)) { 5637f39dda9SHugh Dickins rcu_read_unlock(); 56488c22088SPeter Zijlstra put_anon_vma(anon_vma); 5657f39dda9SHugh Dickins return NULL; 56688c22088SPeter Zijlstra } 56788c22088SPeter Zijlstra 56888c22088SPeter Zijlstra /* we pinned the anon_vma, its safe to sleep */ 56988c22088SPeter Zijlstra rcu_read_unlock(); 5704fc3f1d6SIngo Molnar anon_vma_lock_read(anon_vma); 571746b18d4SPeter Zijlstra 57288c22088SPeter Zijlstra if (atomic_dec_and_test(&anon_vma->refcount)) { 57388c22088SPeter Zijlstra /* 57488c22088SPeter Zijlstra * Oops, we held the last refcount, release the lock 57588c22088SPeter Zijlstra * and bail -- can't simply use put_anon_vma() because 5764fc3f1d6SIngo Molnar * we'll deadlock on the anon_vma_lock_write() recursion. 57788c22088SPeter Zijlstra */ 5784fc3f1d6SIngo Molnar anon_vma_unlock_read(anon_vma); 57988c22088SPeter Zijlstra __put_anon_vma(anon_vma); 58088c22088SPeter Zijlstra anon_vma = NULL; 58188c22088SPeter Zijlstra } 58288c22088SPeter Zijlstra 58388c22088SPeter Zijlstra return anon_vma; 58488c22088SPeter Zijlstra 58588c22088SPeter Zijlstra out: 58688c22088SPeter Zijlstra rcu_read_unlock(); 587746b18d4SPeter Zijlstra return anon_vma; 58834bbd704SOleg Nesterov } 58934bbd704SOleg Nesterov 5904fc3f1d6SIngo Molnar void page_unlock_anon_vma_read(struct anon_vma *anon_vma) 59134bbd704SOleg Nesterov { 5924fc3f1d6SIngo Molnar anon_vma_unlock_read(anon_vma); 5931da177e4SLinus Torvalds } 5941da177e4SLinus Torvalds 59572b252aeSMel Gorman #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH 59672b252aeSMel Gorman /* 59772b252aeSMel Gorman * Flush TLB entries for recently unmapped pages from remote CPUs. It is 59872b252aeSMel Gorman * important if a PTE was dirty when it was unmapped that it's flushed 59972b252aeSMel Gorman * before any IO is initiated on the page to prevent lost writes. Similarly, 60072b252aeSMel Gorman * it must be flushed before freeing to prevent data leakage. 60172b252aeSMel Gorman */ 60272b252aeSMel Gorman void try_to_unmap_flush(void) 60372b252aeSMel Gorman { 60472b252aeSMel Gorman struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; 60572b252aeSMel Gorman 60672b252aeSMel Gorman if (!tlb_ubc->flush_required) 60772b252aeSMel Gorman return; 60872b252aeSMel Gorman 609e73ad5ffSAndy Lutomirski arch_tlbbatch_flush(&tlb_ubc->arch); 61072b252aeSMel Gorman tlb_ubc->flush_required = false; 611d950c947SMel Gorman tlb_ubc->writable = false; 61272b252aeSMel Gorman } 61372b252aeSMel Gorman 614d950c947SMel Gorman /* Flush iff there are potentially writable TLB entries that can race with IO */ 615d950c947SMel Gorman void try_to_unmap_flush_dirty(void) 616d950c947SMel Gorman { 617d950c947SMel Gorman struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; 618d950c947SMel Gorman 619d950c947SMel Gorman if (tlb_ubc->writable) 620d950c947SMel Gorman try_to_unmap_flush(); 621d950c947SMel Gorman } 622d950c947SMel Gorman 623c7ab0d2fSKirill A. 
Shutemov static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable) 62472b252aeSMel Gorman { 62572b252aeSMel Gorman struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; 62672b252aeSMel Gorman 627e73ad5ffSAndy Lutomirski arch_tlbbatch_add_mm(&tlb_ubc->arch, mm); 62872b252aeSMel Gorman tlb_ubc->flush_required = true; 629d950c947SMel Gorman 630d950c947SMel Gorman /* 6313ea27719SMel Gorman * Ensure compiler does not re-order the setting of tlb_flush_batched 6323ea27719SMel Gorman * before the PTE is cleared. 6333ea27719SMel Gorman */ 6343ea27719SMel Gorman barrier(); 6353ea27719SMel Gorman mm->tlb_flush_batched = true; 6363ea27719SMel Gorman 6373ea27719SMel Gorman /* 638d950c947SMel Gorman * If the PTE was dirty then it's best to assume it's writable. The 639d950c947SMel Gorman * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() 640d950c947SMel Gorman * before the page is queued for IO. 641d950c947SMel Gorman */ 642d950c947SMel Gorman if (writable) 643d950c947SMel Gorman tlb_ubc->writable = true; 64472b252aeSMel Gorman } 64572b252aeSMel Gorman 64672b252aeSMel Gorman /* 64772b252aeSMel Gorman * Returns true if the TLB flush should be deferred to the end of a batch of 64872b252aeSMel Gorman * unmap operations to reduce IPIs. 64972b252aeSMel Gorman */ 65072b252aeSMel Gorman static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) 65172b252aeSMel Gorman { 65272b252aeSMel Gorman bool should_defer = false; 65372b252aeSMel Gorman 65472b252aeSMel Gorman if (!(flags & TTU_BATCH_FLUSH)) 65572b252aeSMel Gorman return false; 65672b252aeSMel Gorman 65772b252aeSMel Gorman /* If remote CPUs need to be flushed then defer batch the flush */ 65872b252aeSMel Gorman if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids) 65972b252aeSMel Gorman should_defer = true; 66072b252aeSMel Gorman put_cpu(); 66172b252aeSMel Gorman 66272b252aeSMel Gorman return should_defer; 66372b252aeSMel Gorman } 6643ea27719SMel Gorman 6653ea27719SMel Gorman /* 6663ea27719SMel Gorman * Reclaim unmaps pages under the PTL but do not flush the TLB prior to 6673ea27719SMel Gorman * releasing the PTL if TLB flushes are batched. It's possible for a parallel 6683ea27719SMel Gorman * operation such as mprotect or munmap to race between reclaim unmapping 6693ea27719SMel Gorman * the page and flushing the page. If this race occurs, it potentially allows 6703ea27719SMel Gorman * access to data via a stale TLB entry. Tracking all mm's that have TLB 6713ea27719SMel Gorman * batching in flight would be expensive during reclaim so instead track 6723ea27719SMel Gorman * whether TLB batching occurred in the past and if so then do a flush here 6733ea27719SMel Gorman * if required. This will cost one additional flush per reclaim cycle paid 6743ea27719SMel Gorman * by the first operation at risk such as mprotect and mumap. 6753ea27719SMel Gorman * 6763ea27719SMel Gorman * This must be called under the PTL so that an access to tlb_flush_batched 6773ea27719SMel Gorman * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise 6783ea27719SMel Gorman * via the PTL. 6793ea27719SMel Gorman */ 6803ea27719SMel Gorman void flush_tlb_batched_pending(struct mm_struct *mm) 6813ea27719SMel Gorman { 6829c1177b6SQian Cai if (data_race(mm->tlb_flush_batched)) { 6833ea27719SMel Gorman flush_tlb_mm(mm); 6843ea27719SMel Gorman 6853ea27719SMel Gorman /* 6863ea27719SMel Gorman * Do not allow the compiler to re-order the clearing of 6873ea27719SMel Gorman * tlb_flush_batched before the tlb is flushed. 
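		 * (This is the clearing-side counterpart of the barrier() in
		 * set_tlb_ubc_flush_pending() above.)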
6883ea27719SMel Gorman */ 6893ea27719SMel Gorman barrier(); 6903ea27719SMel Gorman mm->tlb_flush_batched = false; 6913ea27719SMel Gorman } 6923ea27719SMel Gorman } 69372b252aeSMel Gorman #else 694c7ab0d2fSKirill A. Shutemov static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable) 69572b252aeSMel Gorman { 69672b252aeSMel Gorman } 69772b252aeSMel Gorman 69872b252aeSMel Gorman static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) 69972b252aeSMel Gorman { 70072b252aeSMel Gorman return false; 70172b252aeSMel Gorman } 70272b252aeSMel Gorman #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ 70372b252aeSMel Gorman 7041da177e4SLinus Torvalds /* 705bf89c8c8SHuang Shijie * At what user virtual address is page expected in vma? 706ab941e0fSNaoya Horiguchi * Caller should check the page is actually part of the vma. 7071da177e4SLinus Torvalds */ 7081da177e4SLinus Torvalds unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 7091da177e4SLinus Torvalds { 71021d0d443SAndrea Arcangeli if (PageAnon(page)) { 7114829b906SHugh Dickins struct anon_vma *page__anon_vma = page_anon_vma(page); 7124829b906SHugh Dickins /* 7134829b906SHugh Dickins * Note: swapoff's unuse_vma() is more efficient with this 7144829b906SHugh Dickins * check, and needs it to match anon_vma when KSM is active. 7154829b906SHugh Dickins */ 7164829b906SHugh Dickins if (!vma->anon_vma || !page__anon_vma || 7174829b906SHugh Dickins vma->anon_vma->root != page__anon_vma->root) 71821d0d443SAndrea Arcangeli return -EFAULT; 71931657170SJue Wang } else if (!vma->vm_file) { 7201da177e4SLinus Torvalds return -EFAULT; 72131657170SJue Wang } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) { 7221da177e4SLinus Torvalds return -EFAULT; 72331657170SJue Wang } 724494334e4SHugh Dickins 725494334e4SHugh Dickins return vma_address(page, vma); 7261da177e4SLinus Torvalds } 7271da177e4SLinus Torvalds 7286219049aSBob Liu pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) 7296219049aSBob Liu { 7306219049aSBob Liu pgd_t *pgd; 731c2febafcSKirill A. Shutemov p4d_t *p4d; 7326219049aSBob Liu pud_t *pud; 7336219049aSBob Liu pmd_t *pmd = NULL; 734f72e7dcdSHugh Dickins pmd_t pmde; 7356219049aSBob Liu 7366219049aSBob Liu pgd = pgd_offset(mm, address); 7376219049aSBob Liu if (!pgd_present(*pgd)) 7386219049aSBob Liu goto out; 7396219049aSBob Liu 740c2febafcSKirill A. Shutemov p4d = p4d_offset(pgd, address); 741c2febafcSKirill A. Shutemov if (!p4d_present(*p4d)) 742c2febafcSKirill A. Shutemov goto out; 743c2febafcSKirill A. Shutemov 744c2febafcSKirill A. Shutemov pud = pud_offset(p4d, address); 7456219049aSBob Liu if (!pud_present(*pud)) 7466219049aSBob Liu goto out; 7476219049aSBob Liu 7486219049aSBob Liu pmd = pmd_offset(pud, address); 749f72e7dcdSHugh Dickins /* 7508809aa2dSAneesh Kumar K.V * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at() 751f72e7dcdSHugh Dickins * without holding anon_vma lock for write. So when looking for a 752f72e7dcdSHugh Dickins * genuine pmde (in which to find pte), test present and !THP together. 
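	 *
	 * Hence the local "pmde" snapshot plus barrier() below: the compiler
	 * must not re-read *pmd between the pmd_present() and
	 * pmd_trans_huge() checks.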
753f72e7dcdSHugh Dickins */ 754e37c6982SChristian Borntraeger pmde = *pmd; 755e37c6982SChristian Borntraeger barrier(); 756f72e7dcdSHugh Dickins if (!pmd_present(pmde) || pmd_trans_huge(pmde)) 7576219049aSBob Liu pmd = NULL; 7586219049aSBob Liu out: 7596219049aSBob Liu return pmd; 7606219049aSBob Liu } 7616219049aSBob Liu 7629f32624bSJoonsoo Kim struct page_referenced_arg { 7639f32624bSJoonsoo Kim int mapcount; 7649f32624bSJoonsoo Kim int referenced; 7659f32624bSJoonsoo Kim unsigned long vm_flags; 7669f32624bSJoonsoo Kim struct mem_cgroup *memcg; 7679f32624bSJoonsoo Kim }; 76881b4082dSNikita Danilov /* 7699f32624bSJoonsoo Kim * arg: page_referenced_arg will be passed 7701da177e4SLinus Torvalds */ 771e4b82222SMinchan Kim static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, 7729f32624bSJoonsoo Kim unsigned long address, void *arg) 7731da177e4SLinus Torvalds { 7749f32624bSJoonsoo Kim struct page_referenced_arg *pra = arg; 7758eaededeSKirill A. Shutemov struct page_vma_mapped_walk pvmw = { 7768eaededeSKirill A. Shutemov .page = page, 7778eaededeSKirill A. Shutemov .vma = vma, 7788eaededeSKirill A. Shutemov .address = address, 7798eaededeSKirill A. Shutemov }; 7808749cfeaSVladimir Davydov int referenced = 0; 7812da28bfdSAndrea Arcangeli 7828eaededeSKirill A. Shutemov while (page_vma_mapped_walk(&pvmw)) { 7838eaededeSKirill A. Shutemov address = pvmw.address; 7842da28bfdSAndrea Arcangeli 785b20ce5e0SKirill A. Shutemov if (vma->vm_flags & VM_LOCKED) { 7868eaededeSKirill A. Shutemov page_vma_mapped_walk_done(&pvmw); 7879f32624bSJoonsoo Kim pra->vm_flags |= VM_LOCKED; 788e4b82222SMinchan Kim return false; /* To break the loop */ 7892da28bfdSAndrea Arcangeli } 7902da28bfdSAndrea Arcangeli 7918eaededeSKirill A. Shutemov if (pvmw.pte) { 7928eaededeSKirill A. Shutemov if (ptep_clear_flush_young_notify(vma, address, 7938eaededeSKirill A. Shutemov pvmw.pte)) { 7944917e5d0SJohannes Weiner /* 7958eaededeSKirill A. Shutemov * Don't treat a reference through 7968eaededeSKirill A. Shutemov * a sequentially read mapping as such. 7978eaededeSKirill A. Shutemov * If the page has been used in another mapping, 7988eaededeSKirill A. Shutemov * we will catch it; if this other mapping is 7998eaededeSKirill A. Shutemov * already gone, the unmap path will have set 8008eaededeSKirill A. Shutemov * PG_referenced or activated the page. 8014917e5d0SJohannes Weiner */ 80264363aadSJoe Perches if (likely(!(vma->vm_flags & VM_SEQ_READ))) 8031da177e4SLinus Torvalds referenced++; 8044917e5d0SJohannes Weiner } 8058749cfeaSVladimir Davydov } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { 8068eaededeSKirill A. Shutemov if (pmdp_clear_flush_young_notify(vma, address, 8078eaededeSKirill A. Shutemov pvmw.pmd)) 8088749cfeaSVladimir Davydov referenced++; 8098749cfeaSVladimir Davydov } else { 8108749cfeaSVladimir Davydov /* unexpected pmd-mapped page? */ 8118749cfeaSVladimir Davydov WARN_ON_ONCE(1); 8128749cfeaSVladimir Davydov } 8138eaededeSKirill A. Shutemov 8148eaededeSKirill A. Shutemov pra->mapcount--; 8158eaededeSKirill A. 
Shutemov } 81671e3aac0SAndrea Arcangeli 81733c3fc71SVladimir Davydov if (referenced) 81833c3fc71SVladimir Davydov clear_page_idle(page); 81933c3fc71SVladimir Davydov if (test_and_clear_page_young(page)) 82033c3fc71SVladimir Davydov referenced++; 82133c3fc71SVladimir Davydov 8229f32624bSJoonsoo Kim if (referenced) { 8239f32624bSJoonsoo Kim pra->referenced++; 8249f32624bSJoonsoo Kim pra->vm_flags |= vma->vm_flags; 8251da177e4SLinus Torvalds } 8261da177e4SLinus Torvalds 8279f32624bSJoonsoo Kim if (!pra->mapcount) 828e4b82222SMinchan Kim return false; /* To break the loop */ 8299f32624bSJoonsoo Kim 830e4b82222SMinchan Kim return true; 8319f32624bSJoonsoo Kim } 8329f32624bSJoonsoo Kim 8339f32624bSJoonsoo Kim static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg) 8341da177e4SLinus Torvalds { 8359f32624bSJoonsoo Kim struct page_referenced_arg *pra = arg; 8369f32624bSJoonsoo Kim struct mem_cgroup *memcg = pra->memcg; 8371da177e4SLinus Torvalds 8389f32624bSJoonsoo Kim if (!mm_match_cgroup(vma->vm_mm, memcg)) 8399f32624bSJoonsoo Kim return true; 8401da177e4SLinus Torvalds 8419f32624bSJoonsoo Kim return false; 8421da177e4SLinus Torvalds } 8431da177e4SLinus Torvalds 8441da177e4SLinus Torvalds /** 8451da177e4SLinus Torvalds * page_referenced - test if the page was referenced 8461da177e4SLinus Torvalds * @page: the page to test 8471da177e4SLinus Torvalds * @is_locked: caller holds lock on the page 84872835c86SJohannes Weiner * @memcg: target memory cgroup 8496fe6b7e3SWu Fengguang * @vm_flags: collect encountered vma->vm_flags who actually referenced the page 8501da177e4SLinus Torvalds * 8511da177e4SLinus Torvalds * Quick test_and_clear_referenced for all mappings to a page, 8521da177e4SLinus Torvalds * returns the number of ptes which referenced the page. 8531da177e4SLinus Torvalds */ 8546fe6b7e3SWu Fengguang int page_referenced(struct page *page, 8556fe6b7e3SWu Fengguang int is_locked, 85672835c86SJohannes Weiner struct mem_cgroup *memcg, 8576fe6b7e3SWu Fengguang unsigned long *vm_flags) 8581da177e4SLinus Torvalds { 8595ad64688SHugh Dickins int we_locked = 0; 8609f32624bSJoonsoo Kim struct page_referenced_arg pra = { 861b20ce5e0SKirill A. 
Shutemov .mapcount = total_mapcount(page), 8629f32624bSJoonsoo Kim .memcg = memcg, 8639f32624bSJoonsoo Kim }; 8649f32624bSJoonsoo Kim struct rmap_walk_control rwc = { 8659f32624bSJoonsoo Kim .rmap_one = page_referenced_one, 8669f32624bSJoonsoo Kim .arg = (void *)&pra, 8679f32624bSJoonsoo Kim .anon_lock = page_lock_anon_vma_read, 8689f32624bSJoonsoo Kim }; 8691da177e4SLinus Torvalds 8706fe6b7e3SWu Fengguang *vm_flags = 0; 871059d8442SHuang Shijie if (!pra.mapcount) 8729f32624bSJoonsoo Kim return 0; 8739f32624bSJoonsoo Kim 8749f32624bSJoonsoo Kim if (!page_rmapping(page)) 8759f32624bSJoonsoo Kim return 0; 8769f32624bSJoonsoo Kim 8775ad64688SHugh Dickins if (!is_locked && (!PageAnon(page) || PageKsm(page))) { 8785ad64688SHugh Dickins we_locked = trylock_page(page); 8799f32624bSJoonsoo Kim if (!we_locked) 8809f32624bSJoonsoo Kim return 1; 8815ad64688SHugh Dickins } 8829f32624bSJoonsoo Kim 8839f32624bSJoonsoo Kim /* 8849f32624bSJoonsoo Kim * If we are reclaiming on behalf of a cgroup, skip 8859f32624bSJoonsoo Kim * counting on behalf of references from different 8869f32624bSJoonsoo Kim * cgroups 8879f32624bSJoonsoo Kim */ 8889f32624bSJoonsoo Kim if (memcg) { 8899f32624bSJoonsoo Kim rwc.invalid_vma = invalid_page_referenced_vma; 8905ad64688SHugh Dickins } 8919f32624bSJoonsoo Kim 892c24f386cSMinchan Kim rmap_walk(page, &rwc); 8939f32624bSJoonsoo Kim *vm_flags = pra.vm_flags; 8949f32624bSJoonsoo Kim 8955ad64688SHugh Dickins if (we_locked) 8961da177e4SLinus Torvalds unlock_page(page); 8979f32624bSJoonsoo Kim 8989f32624bSJoonsoo Kim return pra.referenced; 8991da177e4SLinus Torvalds } 9001da177e4SLinus Torvalds 901e4b82222SMinchan Kim static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, 9029853a407SJoonsoo Kim unsigned long address, void *arg) 903d08b3851SPeter Zijlstra { 904f27176cfSKirill A. Shutemov struct page_vma_mapped_walk pvmw = { 905f27176cfSKirill A. Shutemov .page = page, 906f27176cfSKirill A. Shutemov .vma = vma, 907f27176cfSKirill A. Shutemov .address = address, 908f27176cfSKirill A. Shutemov .flags = PVMW_SYNC, 909f27176cfSKirill A. Shutemov }; 910ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 9119853a407SJoonsoo Kim int *cleaned = arg; 912d08b3851SPeter Zijlstra 913369ea824SJérôme Glisse /* 914369ea824SJérôme Glisse * We have to assume the worse case ie pmd for invalidation. Note that 915369ea824SJérôme Glisse * the page can not be free from this function. 916369ea824SJérôme Glisse */ 9177269f999SJérôme Glisse mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 9187269f999SJérôme Glisse 0, vma, vma->vm_mm, address, 919494334e4SHugh Dickins vma_address_end(page, vma)); 920ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 921369ea824SJérôme Glisse 922f27176cfSKirill A. Shutemov while (page_vma_mapped_walk(&pvmw)) { 923f27176cfSKirill A. Shutemov int ret = 0; 924369ea824SJérôme Glisse 9251f18b296SYueHaibing address = pvmw.address; 926f27176cfSKirill A. Shutemov if (pvmw.pte) { 927c2fda5feSPeter Zijlstra pte_t entry; 928f27176cfSKirill A. Shutemov pte_t *pte = pvmw.pte; 929f27176cfSKirill A. Shutemov 930f27176cfSKirill A. Shutemov if (!pte_dirty(*pte) && !pte_write(*pte)) 931f27176cfSKirill A. 
Shutemov continue; 932d08b3851SPeter Zijlstra 933785373b4SLinus Torvalds flush_cache_page(vma, address, pte_pfn(*pte)); 934785373b4SLinus Torvalds entry = ptep_clear_flush(vma, address, pte); 935d08b3851SPeter Zijlstra entry = pte_wrprotect(entry); 936c2fda5feSPeter Zijlstra entry = pte_mkclean(entry); 937785373b4SLinus Torvalds set_pte_at(vma->vm_mm, address, pte, entry); 938d08b3851SPeter Zijlstra ret = 1; 939f27176cfSKirill A. Shutemov } else { 940396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE 941f27176cfSKirill A. Shutemov pmd_t *pmd = pvmw.pmd; 942f27176cfSKirill A. Shutemov pmd_t entry; 943d08b3851SPeter Zijlstra 944f27176cfSKirill A. Shutemov if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) 945f27176cfSKirill A. Shutemov continue; 946f27176cfSKirill A. Shutemov 947785373b4SLinus Torvalds flush_cache_page(vma, address, page_to_pfn(page)); 948024eee0eSAneesh Kumar K.V entry = pmdp_invalidate(vma, address, pmd); 949f27176cfSKirill A. Shutemov entry = pmd_wrprotect(entry); 950f27176cfSKirill A. Shutemov entry = pmd_mkclean(entry); 951785373b4SLinus Torvalds set_pmd_at(vma->vm_mm, address, pmd, entry); 952f27176cfSKirill A. Shutemov ret = 1; 953f27176cfSKirill A. Shutemov #else 954f27176cfSKirill A. Shutemov /* unexpected pmd-mapped page? */ 955f27176cfSKirill A. Shutemov WARN_ON_ONCE(1); 956f27176cfSKirill A. Shutemov #endif 957f27176cfSKirill A. Shutemov } 9582ec74c3eSSagi Grimberg 9590f10851eSJérôme Glisse /* 9600f10851eSJérôme Glisse * No need to call mmu_notifier_invalidate_range() as we are 9610f10851eSJérôme Glisse * downgrading page table protection not changing it to point 9620f10851eSJérôme Glisse * to a new page. 9630f10851eSJérôme Glisse * 964ad56b738SMike Rapoport * See Documentation/vm/mmu_notifier.rst 9650f10851eSJérôme Glisse */ 9660f10851eSJérôme Glisse if (ret) 9679853a407SJoonsoo Kim (*cleaned)++; 9689853a407SJoonsoo Kim } 969f27176cfSKirill A. 
Shutemov 970ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range); 971369ea824SJérôme Glisse 972e4b82222SMinchan Kim return true; 973d08b3851SPeter Zijlstra } 974d08b3851SPeter Zijlstra 9759853a407SJoonsoo Kim static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) 976d08b3851SPeter Zijlstra { 9779853a407SJoonsoo Kim if (vma->vm_flags & VM_SHARED) 978871beb8cSFengguang Wu return false; 979d08b3851SPeter Zijlstra 980871beb8cSFengguang Wu return true; 981d08b3851SPeter Zijlstra } 982d08b3851SPeter Zijlstra 983d08b3851SPeter Zijlstra int page_mkclean(struct page *page) 984d08b3851SPeter Zijlstra { 9859853a407SJoonsoo Kim int cleaned = 0; 9869853a407SJoonsoo Kim struct address_space *mapping; 9879853a407SJoonsoo Kim struct rmap_walk_control rwc = { 9889853a407SJoonsoo Kim .arg = (void *)&cleaned, 9899853a407SJoonsoo Kim .rmap_one = page_mkclean_one, 9909853a407SJoonsoo Kim .invalid_vma = invalid_mkclean_vma, 9919853a407SJoonsoo Kim }; 992d08b3851SPeter Zijlstra 993d08b3851SPeter Zijlstra BUG_ON(!PageLocked(page)); 994d08b3851SPeter Zijlstra 9959853a407SJoonsoo Kim if (!page_mapped(page)) 9969853a407SJoonsoo Kim return 0; 997d08b3851SPeter Zijlstra 9989853a407SJoonsoo Kim mapping = page_mapping(page); 9999853a407SJoonsoo Kim if (!mapping) 10009853a407SJoonsoo Kim return 0; 10019853a407SJoonsoo Kim 10029853a407SJoonsoo Kim rmap_walk(page, &rwc); 10039853a407SJoonsoo Kim 10049853a407SJoonsoo Kim return cleaned; 1005d08b3851SPeter Zijlstra } 100660b59beaSJaya Kumar EXPORT_SYMBOL_GPL(page_mkclean); 1007d08b3851SPeter Zijlstra 10081da177e4SLinus Torvalds /** 1009c44b6743SRik van Riel * page_move_anon_rmap - move a page to our anon_vma 1010c44b6743SRik van Riel * @page: the page to move to our anon_vma 1011c44b6743SRik van Riel * @vma: the vma the page belongs to 1012c44b6743SRik van Riel * 1013c44b6743SRik van Riel * When a page belongs exclusively to one process after a COW event, 1014c44b6743SRik van Riel * that page can be moved into the anon_vma that belongs to just that 1015c44b6743SRik van Riel * process, so the rmap code will not search the parent or sibling 1016c44b6743SRik van Riel * processes. 1017c44b6743SRik van Riel */ 10185a49973dSHugh Dickins void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) 1019c44b6743SRik van Riel { 1020c44b6743SRik van Riel struct anon_vma *anon_vma = vma->anon_vma; 1021c44b6743SRik van Riel 10225a49973dSHugh Dickins page = compound_head(page); 10235a49973dSHugh Dickins 1024309381feSSasha Levin VM_BUG_ON_PAGE(!PageLocked(page), page); 102581d1b09cSSasha Levin VM_BUG_ON_VMA(!anon_vma, vma); 1026c44b6743SRik van Riel 1027c44b6743SRik van Riel anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 1028414e2fb8SVladimir Davydov /* 1029414e2fb8SVladimir Davydov * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written 1030414e2fb8SVladimir Davydov * simultaneously, so a concurrent reader (eg page_referenced()'s 1031414e2fb8SVladimir Davydov * PageAnon()) will not see one without the other. 1032414e2fb8SVladimir Davydov */ 1033414e2fb8SVladimir Davydov WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); 1034c44b6743SRik van Riel } 1035c44b6743SRik van Riel 1036c44b6743SRik van Riel /** 103743d8eac4SRandy Dunlap * __page_set_anon_rmap - set up new anonymous rmap 1038451b9514SKirill Tkhai * @page: Page or Hugepage to add to rmap 10394e1c1975SAndi Kleen * @vma: VM area to add page to. 
10404e1c1975SAndi Kleen * @address: User virtual address of the mapping 1041e8a03febSRik van Riel * @exclusive: the page is exclusively owned by the current process 10421da177e4SLinus Torvalds */ 10439617d95eSNick Piggin static void __page_set_anon_rmap(struct page *page, 1044e8a03febSRik van Riel struct vm_area_struct *vma, unsigned long address, int exclusive) 10451da177e4SLinus Torvalds { 1046e8a03febSRik van Riel struct anon_vma *anon_vma = vma->anon_vma; 10472822c1aaSNick Piggin 1048e8a03febSRik van Riel BUG_ON(!anon_vma); 1049ea90002bSLinus Torvalds 10504e1c1975SAndi Kleen if (PageAnon(page)) 10514e1c1975SAndi Kleen return; 10524e1c1975SAndi Kleen 1053ea90002bSLinus Torvalds /* 1054e8a03febSRik van Riel * If the page isn't exclusively mapped into this vma, 1055e8a03febSRik van Riel * we must use the _oldest_ possible anon_vma for the 1056e8a03febSRik van Riel * page mapping! 1057ea90002bSLinus Torvalds */ 10584e1c1975SAndi Kleen if (!exclusive) 1059288468c3SAndrea Arcangeli anon_vma = anon_vma->root; 1060ea90002bSLinus Torvalds 106116f5e707SAlex Shi /* 106216f5e707SAlex Shi * page_idle does a lockless/optimistic rmap scan on page->mapping. 106316f5e707SAlex Shi * Make sure the compiler doesn't split the stores of anon_vma and 106416f5e707SAlex Shi * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code 106516f5e707SAlex Shi * could mistake the mapping for a struct address_space and crash. 106616f5e707SAlex Shi */ 10671da177e4SLinus Torvalds anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 106816f5e707SAlex Shi WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); 10694d7670e0SNick Piggin page->index = linear_page_index(vma, address); 10701da177e4SLinus Torvalds } 10719617d95eSNick Piggin 10729617d95eSNick Piggin /** 107343d8eac4SRandy Dunlap * __page_check_anon_rmap - sanity check anonymous rmap addition 1074c97a9e10SNick Piggin * @page: the page to add the mapping to 1075c97a9e10SNick Piggin * @vma: the vm area in which the mapping is added 1076c97a9e10SNick Piggin * @address: the user virtual address mapped 1077c97a9e10SNick Piggin */ 1078c97a9e10SNick Piggin static void __page_check_anon_rmap(struct page *page, 1079c97a9e10SNick Piggin struct vm_area_struct *vma, unsigned long address) 1080c97a9e10SNick Piggin { 1081c97a9e10SNick Piggin /* 1082c97a9e10SNick Piggin * The page's anon-rmap details (mapping and index) are guaranteed to 1083c97a9e10SNick Piggin * be set up correctly at this point. 1084c97a9e10SNick Piggin * 1085c97a9e10SNick Piggin * We have exclusion against page_add_anon_rmap because the caller 108690aaca85SMiaohe Lin * always holds the page locked. 1087c97a9e10SNick Piggin * 1088c97a9e10SNick Piggin * We have exclusion against page_add_new_anon_rmap because those pages 1089c97a9e10SNick Piggin * are initially only visible via the pagetables, and the pte is locked 1090c97a9e10SNick Piggin * over the call to page_add_new_anon_rmap. 
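	 *
	 * (do_page_add_anon_rmap() only calls this for pages that were
	 * already mapped, i.e. when the mapcount did not just go from 0
	 * to 1.)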
1091c97a9e10SNick Piggin */ 109230c46382SYang Shi VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page); 109330c46382SYang Shi VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), 109430c46382SYang Shi page); 1095c97a9e10SNick Piggin } 1096c97a9e10SNick Piggin 1097c97a9e10SNick Piggin /** 10989617d95eSNick Piggin * page_add_anon_rmap - add pte mapping to an anonymous page 10999617d95eSNick Piggin * @page: the page to add the mapping to 11009617d95eSNick Piggin * @vma: the vm area in which the mapping is added 11019617d95eSNick Piggin * @address: the user virtual address mapped 1102d281ee61SKirill A. Shutemov * @compound: charge the page as compound or small page 11039617d95eSNick Piggin * 11045ad64688SHugh Dickins * The caller needs to hold the pte lock, and the page must be locked in 110580e14822SHugh Dickins * the anon_vma case: to serialize mapping,index checking after setting, 110680e14822SHugh Dickins * and to ensure that PageAnon is not being upgraded racily to PageKsm 110780e14822SHugh Dickins * (but PageKsm is never downgraded to PageAnon). 11089617d95eSNick Piggin */ 11099617d95eSNick Piggin void page_add_anon_rmap(struct page *page, 1110d281ee61SKirill A. Shutemov struct vm_area_struct *vma, unsigned long address, bool compound) 11119617d95eSNick Piggin { 1112d281ee61SKirill A. Shutemov do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0); 1113ad8c2ee8SRik van Riel } 1114ad8c2ee8SRik van Riel 1115ad8c2ee8SRik van Riel /* 1116ad8c2ee8SRik van Riel * Special version of the above for do_swap_page, which often runs 1117ad8c2ee8SRik van Riel * into pages that are exclusively owned by the current process. 1118ad8c2ee8SRik van Riel * Everybody else should continue to use page_add_anon_rmap above. 1119ad8c2ee8SRik van Riel */ 1120ad8c2ee8SRik van Riel void do_page_add_anon_rmap(struct page *page, 1121d281ee61SKirill A. Shutemov struct vm_area_struct *vma, unsigned long address, int flags) 1122ad8c2ee8SRik van Riel { 1123d281ee61SKirill A. Shutemov bool compound = flags & RMAP_COMPOUND; 112453f9263bSKirill A. Shutemov bool first; 112553f9263bSKirill A. Shutemov 1126be5d0a74SJohannes Weiner if (unlikely(PageKsm(page))) 1127be5d0a74SJohannes Weiner lock_page_memcg(page); 1128be5d0a74SJohannes Weiner else 1129be5d0a74SJohannes Weiner VM_BUG_ON_PAGE(!PageLocked(page), page); 1130be5d0a74SJohannes Weiner 113153f9263bSKirill A. Shutemov if (compound) { 113253f9263bSKirill A. Shutemov atomic_t *mapcount; 1133e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageLocked(page), page); 113453f9263bSKirill A. Shutemov VM_BUG_ON_PAGE(!PageTransHuge(page), page); 113553f9263bSKirill A. Shutemov mapcount = compound_mapcount_ptr(page); 113653f9263bSKirill A. Shutemov first = atomic_inc_and_test(mapcount); 113753f9263bSKirill A. Shutemov } else { 113853f9263bSKirill A. Shutemov first = atomic_inc_and_test(&page->_mapcount); 113953f9263bSKirill A. Shutemov } 114053f9263bSKirill A. Shutemov 114153f9263bSKirill A. Shutemov if (first) { 11426c357848SMatthew Wilcox (Oracle) int nr = compound ? thp_nr_pages(page) : 1; 1143bea04b07SJianyu Zhan /* 1144bea04b07SJianyu Zhan * We use the irq-unsafe __{inc|mod}_zone_page_stat because 1145bea04b07SJianyu Zhan * these counters are not modified in interrupt context, and 1146bea04b07SJianyu Zhan * pte lock(a spinlock) is held, which implies preemption 1147bea04b07SJianyu Zhan * disabled. 1148bea04b07SJianyu Zhan */ 114965c45377SKirill A. 
Shutemov if (compound) 115069473e5dSMuchun Song __mod_lruvec_page_state(page, NR_ANON_THPS, nr); 1151be5d0a74SJohannes Weiner __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); 115279134171SAndrea Arcangeli } 11535ad64688SHugh Dickins 1154be5d0a74SJohannes Weiner if (unlikely(PageKsm(page))) { 1155be5d0a74SJohannes Weiner unlock_page_memcg(page); 1156be5d0a74SJohannes Weiner return; 1157be5d0a74SJohannes Weiner } 115853f9263bSKirill A. Shutemov 11595dbe0af4SHugh Dickins /* address might be in next vma when migration races vma_adjust */ 11605ad64688SHugh Dickins if (first) 1161d281ee61SKirill A. Shutemov __page_set_anon_rmap(page, vma, address, 1162d281ee61SKirill A. Shutemov flags & RMAP_EXCLUSIVE); 116369029cd5SKAMEZAWA Hiroyuki else 1164c97a9e10SNick Piggin __page_check_anon_rmap(page, vma, address); 11651da177e4SLinus Torvalds } 11661da177e4SLinus Torvalds 116743d8eac4SRandy Dunlap /** 11689617d95eSNick Piggin * page_add_new_anon_rmap - add pte mapping to a new anonymous page 11699617d95eSNick Piggin * @page: the page to add the mapping to 11709617d95eSNick Piggin * @vma: the vm area in which the mapping is added 11719617d95eSNick Piggin * @address: the user virtual address mapped 1172d281ee61SKirill A. Shutemov * @compound: charge the page as compound or small page 11739617d95eSNick Piggin * 11749617d95eSNick Piggin * Same as page_add_anon_rmap but must only be called on *new* pages. 11759617d95eSNick Piggin * This means the inc-and-test can be bypassed. 1176c97a9e10SNick Piggin * Page does not have to be locked. 11779617d95eSNick Piggin */ 11789617d95eSNick Piggin void page_add_new_anon_rmap(struct page *page, 1179d281ee61SKirill A. Shutemov struct vm_area_struct *vma, unsigned long address, bool compound) 11809617d95eSNick Piggin { 11816c357848SMatthew Wilcox (Oracle) int nr = compound ? thp_nr_pages(page) : 1; 1182d281ee61SKirill A. Shutemov 118381d1b09cSSasha Levin VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); 1184fa9949daSHugh Dickins __SetPageSwapBacked(page); 1185d281ee61SKirill A. Shutemov if (compound) { 1186d281ee61SKirill A. Shutemov VM_BUG_ON_PAGE(!PageTransHuge(page), page); 118753f9263bSKirill A. Shutemov /* increment count (starts at -1) */ 118853f9263bSKirill A. Shutemov atomic_set(compound_mapcount_ptr(page), 0); 118947e29d32SJohn Hubbard if (hpage_pincount_available(page)) 119047e29d32SJohn Hubbard atomic_set(compound_pincount_ptr(page), 0); 119147e29d32SJohn Hubbard 119269473e5dSMuchun Song __mod_lruvec_page_state(page, NR_ANON_THPS, nr); 119353f9263bSKirill A. Shutemov } else { 119453f9263bSKirill A. Shutemov /* Anon THP always mapped first with PMD */ 119553f9263bSKirill A. Shutemov VM_BUG_ON_PAGE(PageTransCompound(page), page); 119653f9263bSKirill A. Shutemov /* increment count (starts at -1) */ 119753f9263bSKirill A. Shutemov atomic_set(&page->_mapcount, 0); 1198d281ee61SKirill A. Shutemov } 1199be5d0a74SJohannes Weiner __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); 1200e8a03febSRik van Riel __page_set_anon_rmap(page, vma, address, 1); 12019617d95eSNick Piggin } 12029617d95eSNick Piggin 12031da177e4SLinus Torvalds /** 12041da177e4SLinus Torvalds * page_add_file_rmap - add pte mapping to a file page 12051da177e4SLinus Torvalds * @page: the page to add the mapping to 1206e8b098fcSMike Rapoport * @compound: charge the page as compound or small page 12071da177e4SLinus Torvalds * 1208b8072f09SHugh Dickins * The caller needs to hold the pte lock. 12091da177e4SLinus Torvalds */ 1210dd78feddSKirill A. 
Shutemov void page_add_file_rmap(struct page *page, bool compound) 12111da177e4SLinus Torvalds { 1212dd78feddSKirill A. Shutemov int i, nr = 1; 1213dd78feddSKirill A. Shutemov 1214dd78feddSKirill A. Shutemov VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); 121562cccb8cSJohannes Weiner lock_page_memcg(page); 1216dd78feddSKirill A. Shutemov if (compound && PageTransHuge(page)) { 1217a1528e21SMuchun Song int nr_pages = thp_nr_pages(page); 1218a1528e21SMuchun Song 1219a1528e21SMuchun Song for (i = 0, nr = 0; i < nr_pages; i++) { 1220dd78feddSKirill A. Shutemov if (atomic_inc_and_test(&page[i]._mapcount)) 1221dd78feddSKirill A. Shutemov nr++; 1222d69b042fSBalbir Singh } 1223dd78feddSKirill A. Shutemov if (!atomic_inc_and_test(compound_mapcount_ptr(page))) 1224dd78feddSKirill A. Shutemov goto out; 122599cb0dbdSSong Liu if (PageSwapBacked(page)) 1226a1528e21SMuchun Song __mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED, 1227a1528e21SMuchun Song nr_pages); 122899cb0dbdSSong Liu else 1229380780e7SMuchun Song __mod_lruvec_page_state(page, NR_FILE_PMDMAPPED, 1230380780e7SMuchun Song nr_pages); 1231dd78feddSKirill A. Shutemov } else { 1232c8efc390SKirill A. Shutemov if (PageTransCompound(page) && page_mapping(page)) { 1233c8efc390SKirill A. Shutemov VM_WARN_ON_ONCE(!PageLocked(page)); 1234c8efc390SKirill A. Shutemov 12359a73f61bSKirill A. Shutemov SetPageDoubleMap(compound_head(page)); 12369a73f61bSKirill A. Shutemov if (PageMlocked(page)) 12379a73f61bSKirill A. Shutemov clear_page_mlock(compound_head(page)); 12389a73f61bSKirill A. Shutemov } 1239dd78feddSKirill A. Shutemov if (!atomic_inc_and_test(&page->_mapcount)) 1240dd78feddSKirill A. Shutemov goto out; 1241dd78feddSKirill A. Shutemov } 124200f3ca2cSJohannes Weiner __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr); 1243dd78feddSKirill A. Shutemov out: 124462cccb8cSJohannes Weiner unlock_page_memcg(page); 12451da177e4SLinus Torvalds } 12461da177e4SLinus Torvalds 1247dd78feddSKirill A. Shutemov static void page_remove_file_rmap(struct page *page, bool compound) 12488186eb6aSJohannes Weiner { 1249dd78feddSKirill A. Shutemov int i, nr = 1; 1250dd78feddSKirill A. Shutemov 125157dea93aSSteve Capper VM_BUG_ON_PAGE(compound && !PageHead(page), page); 12528186eb6aSJohannes Weiner 125353f9263bSKirill A. Shutemov /* Hugepages are not counted in NR_FILE_MAPPED for now. */ 125453f9263bSKirill A. Shutemov if (unlikely(PageHuge(page))) { 125553f9263bSKirill A. Shutemov /* hugetlb pages are always mapped with pmds */ 125653f9263bSKirill A. Shutemov atomic_dec(compound_mapcount_ptr(page)); 1257be5d0a74SJohannes Weiner return; 125853f9263bSKirill A. Shutemov } 125953f9263bSKirill A. Shutemov 12608186eb6aSJohannes Weiner /* page still mapped by someone else? */ 1261dd78feddSKirill A. Shutemov if (compound && PageTransHuge(page)) { 1262a1528e21SMuchun Song int nr_pages = thp_nr_pages(page); 1263a1528e21SMuchun Song 1264a1528e21SMuchun Song for (i = 0, nr = 0; i < nr_pages; i++) { 1265dd78feddSKirill A. Shutemov if (atomic_add_negative(-1, &page[i]._mapcount)) 1266dd78feddSKirill A. Shutemov nr++; 1267dd78feddSKirill A. Shutemov } 1268dd78feddSKirill A. 
Shutemov if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) 1269be5d0a74SJohannes Weiner return; 127099cb0dbdSSong Liu if (PageSwapBacked(page)) 1271a1528e21SMuchun Song __mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED, 1272a1528e21SMuchun Song -nr_pages); 127399cb0dbdSSong Liu else 1274380780e7SMuchun Song __mod_lruvec_page_state(page, NR_FILE_PMDMAPPED, 1275380780e7SMuchun Song -nr_pages); 1276dd78feddSKirill A. Shutemov } else { 12778186eb6aSJohannes Weiner if (!atomic_add_negative(-1, &page->_mapcount)) 1278be5d0a74SJohannes Weiner return; 1279dd78feddSKirill A. Shutemov } 12808186eb6aSJohannes Weiner 12818186eb6aSJohannes Weiner /* 128200f3ca2cSJohannes Weiner * We use the irq-unsafe __{inc|mod}_lruvec_page_state because 12838186eb6aSJohannes Weiner * these counters are not modified in interrupt context, and 12848186eb6aSJohannes Weiner * pte lock(a spinlock) is held, which implies preemption disabled. 12858186eb6aSJohannes Weiner */ 128600f3ca2cSJohannes Weiner __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr); 12878186eb6aSJohannes Weiner 12888186eb6aSJohannes Weiner if (unlikely(PageMlocked(page))) 12898186eb6aSJohannes Weiner clear_page_mlock(page); 12908186eb6aSJohannes Weiner } 12918186eb6aSJohannes Weiner 129253f9263bSKirill A. Shutemov static void page_remove_anon_compound_rmap(struct page *page) 129353f9263bSKirill A. Shutemov { 129453f9263bSKirill A. Shutemov int i, nr; 129553f9263bSKirill A. Shutemov 129653f9263bSKirill A. Shutemov if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) 129753f9263bSKirill A. Shutemov return; 129853f9263bSKirill A. Shutemov 129953f9263bSKirill A. Shutemov /* Hugepages are not counted in NR_ANON_PAGES for now. */ 130053f9263bSKirill A. Shutemov if (unlikely(PageHuge(page))) 130153f9263bSKirill A. Shutemov return; 130253f9263bSKirill A. Shutemov 130353f9263bSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 130453f9263bSKirill A. Shutemov return; 130553f9263bSKirill A. Shutemov 130669473e5dSMuchun Song __mod_lruvec_page_state(page, NR_ANON_THPS, -thp_nr_pages(page)); 130753f9263bSKirill A. Shutemov 130853f9263bSKirill A. Shutemov if (TestClearPageDoubleMap(page)) { 130953f9263bSKirill A. Shutemov /* 131053f9263bSKirill A. Shutemov * Subpages can be mapped with PTEs too. Check how many of 1311f1fe80d4SKirill A. Shutemov * them are still mapped. 131253f9263bSKirill A. Shutemov */ 13135eaf35abSMatthew Wilcox (Oracle) for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { 131453f9263bSKirill A. Shutemov if (atomic_add_negative(-1, &page[i]._mapcount)) 131553f9263bSKirill A. Shutemov nr++; 131653f9263bSKirill A. Shutemov } 1317f1fe80d4SKirill A. Shutemov 1318f1fe80d4SKirill A. Shutemov /* 1319f1fe80d4SKirill A. Shutemov * Queue the page for deferred split if at least one small 1320f1fe80d4SKirill A. Shutemov * page of the compound page is unmapped, but at least one 1321f1fe80d4SKirill A. Shutemov * small page is still mapped. 1322f1fe80d4SKirill A. Shutemov */ 13235eaf35abSMatthew Wilcox (Oracle) if (nr && nr < thp_nr_pages(page)) 1324f1fe80d4SKirill A. Shutemov deferred_split_huge_page(page); 132553f9263bSKirill A. Shutemov } else { 13265eaf35abSMatthew Wilcox (Oracle) nr = thp_nr_pages(page); 132753f9263bSKirill A. Shutemov } 132853f9263bSKirill A. Shutemov 1329e90309c9SKirill A. Shutemov if (unlikely(PageMlocked(page))) 1330e90309c9SKirill A. Shutemov clear_page_mlock(page); 1331e90309c9SKirill A. Shutemov 1332f1fe80d4SKirill A. 
Shutemov if (nr) 1333be5d0a74SJohannes Weiner __mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr); 133453f9263bSKirill A. Shutemov } 133553f9263bSKirill A. Shutemov 13361da177e4SLinus Torvalds /** 13371da177e4SLinus Torvalds * page_remove_rmap - take down pte mapping from a page 13381da177e4SLinus Torvalds * @page: page to remove mapping from 1339d281ee61SKirill A. Shutemov * @compound: uncharge the page as compound or small page 13401da177e4SLinus Torvalds * 1341b8072f09SHugh Dickins * The caller needs to hold the pte lock. 13421da177e4SLinus Torvalds */ 1343d281ee61SKirill A. Shutemov void page_remove_rmap(struct page *page, bool compound) 13441da177e4SLinus Torvalds { 1345be5d0a74SJohannes Weiner lock_page_memcg(page); 134689c06bd5SKAMEZAWA Hiroyuki 1347be5d0a74SJohannes Weiner if (!PageAnon(page)) { 1348be5d0a74SJohannes Weiner page_remove_file_rmap(page, compound); 1349be5d0a74SJohannes Weiner goto out; 1350be5d0a74SJohannes Weiner } 1351be5d0a74SJohannes Weiner 1352be5d0a74SJohannes Weiner if (compound) { 1353be5d0a74SJohannes Weiner page_remove_anon_compound_rmap(page); 1354be5d0a74SJohannes Weiner goto out; 1355be5d0a74SJohannes Weiner } 135653f9263bSKirill A. Shutemov 1357b904dcfeSKOSAKI Motohiro /* page still mapped by someone else? */ 1358b904dcfeSKOSAKI Motohiro if (!atomic_add_negative(-1, &page->_mapcount)) 1359be5d0a74SJohannes Weiner goto out; 13608186eb6aSJohannes Weiner 13611da177e4SLinus Torvalds /* 1362bea04b07SJianyu Zhan * We use the irq-unsafe __{inc|mod}_zone_page_stat because 1363bea04b07SJianyu Zhan * these counters are not modified in interrupt context, and 1364bea04b07SJianyu Zhan * pte lock(a spinlock) is held, which implies preemption disabled. 13650fe6e20bSNaoya Horiguchi */ 1366be5d0a74SJohannes Weiner __dec_lruvec_page_state(page, NR_ANON_MAPPED); 13678186eb6aSJohannes Weiner 1368e6c509f8SHugh Dickins if (unlikely(PageMlocked(page))) 1369e6c509f8SHugh Dickins clear_page_mlock(page); 13708186eb6aSJohannes Weiner 13719a982250SKirill A. Shutemov if (PageTransCompound(page)) 13729a982250SKirill A. Shutemov deferred_split_huge_page(compound_head(page)); 13739a982250SKirill A. Shutemov 137416f8c5b2SHugh Dickins /* 13751da177e4SLinus Torvalds * It would be tidy to reset the PageAnon mapping here, 13761da177e4SLinus Torvalds * but that might overwrite a racing page_add_anon_rmap 13771da177e4SLinus Torvalds * which increments mapcount after us but sets mapping 13782d4894b5SMel Gorman * before us: so leave the reset to free_unref_page, 13791da177e4SLinus Torvalds * and remember that it's only reliable while mapped. 13801da177e4SLinus Torvalds * Leaving it set also helps swapoff to reinstate ptes 13811da177e4SLinus Torvalds * faster for those pages still in swapcache. 13821da177e4SLinus Torvalds */ 1383be5d0a74SJohannes Weiner out: 1384be5d0a74SJohannes Weiner unlock_page_memcg(page); 13851da177e4SLinus Torvalds } 13861da177e4SLinus Torvalds 13871da177e4SLinus Torvalds /* 138852629506SJoonsoo Kim * @arg: enum ttu_flags will be passed to this argument 13891da177e4SLinus Torvalds */ 1390e4b82222SMinchan Kim static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, 139152629506SJoonsoo Kim unsigned long address, void *arg) 13921da177e4SLinus Torvalds { 13931da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 1394c7ab0d2fSKirill A. Shutemov struct page_vma_mapped_walk pvmw = { 1395c7ab0d2fSKirill A. Shutemov .page = page, 1396c7ab0d2fSKirill A. Shutemov .vma = vma, 1397c7ab0d2fSKirill A. Shutemov .address = address, 1398c7ab0d2fSKirill A. 
Shutemov }; 13991da177e4SLinus Torvalds pte_t pteval; 1400c7ab0d2fSKirill A. Shutemov struct page *subpage; 1401785373b4SLinus Torvalds bool ret = true; 1402ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 14034708f318SPalmer Dabbelt enum ttu_flags flags = (enum ttu_flags)(long)arg; 14041da177e4SLinus Torvalds 1405732ed558SHugh Dickins /* 1406732ed558SHugh Dickins * When racing against e.g. zap_pte_range() on another cpu, 1407732ed558SHugh Dickins * in between its ptep_get_and_clear_full() and page_remove_rmap(), 14081fb08ac6SYang Shi * try_to_unmap() may return before page_mapped() has become false, 1409732ed558SHugh Dickins * if page table locking is skipped: use TTU_SYNC to wait for that. 1410732ed558SHugh Dickins */ 1411732ed558SHugh Dickins if (flags & TTU_SYNC) 1412732ed558SHugh Dickins pvmw.flags = PVMW_SYNC; 1413732ed558SHugh Dickins 1414a98a2f0cSAlistair Popple if (flags & TTU_SPLIT_HUGE_PMD) 1415a98a2f0cSAlistair Popple split_huge_pmd_address(vma, address, false, page); 1416fec89c10SKirill A. Shutemov 1417369ea824SJérôme Glisse /* 1418017b1660SMike Kravetz * For THP, we have to assume the worse case ie pmd for invalidation. 1419017b1660SMike Kravetz * For hugetlb, it could be much worse if we need to do pud 1420017b1660SMike Kravetz * invalidation in the case of pmd sharing. 1421017b1660SMike Kravetz * 1422017b1660SMike Kravetz * Note that the page can not be free in this function as call of 1423017b1660SMike Kravetz * try_to_unmap() must hold a reference on the page. 1424369ea824SJérôme Glisse */ 1425494334e4SHugh Dickins range.end = PageKsm(page) ? 1426494334e4SHugh Dickins address + PAGE_SIZE : vma_address_end(page, vma); 14277269f999SJérôme Glisse mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 1428494334e4SHugh Dickins address, range.end); 1429017b1660SMike Kravetz if (PageHuge(page)) { 1430017b1660SMike Kravetz /* 1431017b1660SMike Kravetz * If sharing is possible, start and end will be adjusted 1432017b1660SMike Kravetz * accordingly. 1433017b1660SMike Kravetz */ 1434ac46d4f3SJérôme Glisse adjust_range_if_pmd_sharing_possible(vma, &range.start, 1435ac46d4f3SJérôme Glisse &range.end); 1436017b1660SMike Kravetz } 1437ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 1438369ea824SJérôme Glisse 1439c7ab0d2fSKirill A. Shutemov while (page_vma_mapped_walk(&pvmw)) { 14401da177e4SLinus Torvalds /* 14411da177e4SLinus Torvalds * If the page is mlock()d, we cannot swap it out. 14421da177e4SLinus Torvalds */ 144314fa31b8SAndi Kleen if (!(flags & TTU_IGNORE_MLOCK)) { 1444b87537d9SHugh Dickins if (vma->vm_flags & VM_LOCKED) { 14459a73f61bSKirill A. Shutemov /* PTE-mapped THP are never mlocked */ 14469a73f61bSKirill A. Shutemov if (!PageTransCompound(page)) { 14479a73f61bSKirill A. Shutemov /* 14489a73f61bSKirill A. Shutemov * Holding pte lock, we do *not* need 1449c1e8d7c6SMichel Lespinasse * mmap_lock here 14509a73f61bSKirill A. Shutemov */ 1451b87537d9SHugh Dickins mlock_vma_page(page); 14529a73f61bSKirill A. Shutemov } 1453e4b82222SMinchan Kim ret = false; 1454c7ab0d2fSKirill A. Shutemov page_vma_mapped_walk_done(&pvmw); 1455c7ab0d2fSKirill A. Shutemov break; 1456b87537d9SHugh Dickins } 145714fa31b8SAndi Kleen } 1458c7ab0d2fSKirill A. Shutemov 14598346242aSKirill A. Shutemov /* Unexpected PMD-mapped THP? */ 14608346242aSKirill A. Shutemov VM_BUG_ON_PAGE(!pvmw.pte, page); 14618346242aSKirill A. Shutemov 14628346242aSKirill A. 
Shutemov subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); 1463785373b4SLinus Torvalds address = pvmw.address; 1464785373b4SLinus Torvalds 1465336bf30eSMike Kravetz if (PageHuge(page) && !PageAnon(page)) { 1466c0d0381aSMike Kravetz /* 1467c0d0381aSMike Kravetz * To call huge_pmd_unshare, i_mmap_rwsem must be 1468c0d0381aSMike Kravetz * held in write mode. Caller needs to explicitly 1469c0d0381aSMike Kravetz * do this outside rmap routines. 1470c0d0381aSMike Kravetz */ 1471c0d0381aSMike Kravetz VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 147234ae204fSMike Kravetz if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) { 1473017b1660SMike Kravetz /* 1474017b1660SMike Kravetz * huge_pmd_unshare unmapped an entire PMD 1475017b1660SMike Kravetz * page. There is no way of knowing exactly 1476017b1660SMike Kravetz * which PMDs may be cached for this mm, so 1477017b1660SMike Kravetz * we must flush them all. start/end were 1478017b1660SMike Kravetz * already adjusted above to cover this range. 1479017b1660SMike Kravetz */ 1480ac46d4f3SJérôme Glisse flush_cache_range(vma, range.start, range.end); 1481ac46d4f3SJérôme Glisse flush_tlb_range(vma, range.start, range.end); 1482ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range(mm, range.start, 1483ac46d4f3SJérôme Glisse range.end); 1484017b1660SMike Kravetz 1485017b1660SMike Kravetz /* 1486017b1660SMike Kravetz * The ref count of the PMD page was dropped 1487017b1660SMike Kravetz * which is part of the way map counting 1488017b1660SMike Kravetz * is done for shared PMDs. Return 'true' 1489017b1660SMike Kravetz * here. When there is no other sharing, 1490017b1660SMike Kravetz * huge_pmd_unshare returns false and we will 1491017b1660SMike Kravetz * unmap the actual page and drop map count 1492017b1660SMike Kravetz * to zero. 1493017b1660SMike Kravetz */ 1494017b1660SMike Kravetz page_vma_mapped_walk_done(&pvmw); 1495017b1660SMike Kravetz break; 1496017b1660SMike Kravetz } 1497017b1660SMike Kravetz } 14988346242aSKirill A. Shutemov 14991da177e4SLinus Torvalds /* Nuke the page table entry. */ 1500785373b4SLinus Torvalds flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 150172b252aeSMel Gorman if (should_defer_flush(mm, flags)) { 150272b252aeSMel Gorman /* 1503c7ab0d2fSKirill A. Shutemov * We clear the PTE but do not flush so potentially 1504c7ab0d2fSKirill A. Shutemov * a remote CPU could still be writing to the page. 1505c7ab0d2fSKirill A. Shutemov * If the entry was previously clean then the 1506c7ab0d2fSKirill A. Shutemov * architecture must guarantee that a clear->dirty 1507c7ab0d2fSKirill A. Shutemov * transition on a cached TLB entry is written through 1508c7ab0d2fSKirill A. Shutemov * and traps if the PTE is unmapped. 150972b252aeSMel Gorman */ 1510785373b4SLinus Torvalds pteval = ptep_get_and_clear(mm, address, pvmw.pte); 151172b252aeSMel Gorman 1512c7ab0d2fSKirill A. Shutemov set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); 151372b252aeSMel Gorman } else { 1514785373b4SLinus Torvalds pteval = ptep_clear_flush(vma, address, pvmw.pte); 151572b252aeSMel Gorman } 15161da177e4SLinus Torvalds 1517c7ab0d2fSKirill A. Shutemov /* Move the dirty bit to the page. Now the pte is gone. 
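 * (pteval holds the value returned by ptep_get_and_clear()/ptep_clear_flush()
 * above, so the dirty bit tested below reflects the pte as it was cleared.)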
*/ 15181da177e4SLinus Torvalds if (pte_dirty(pteval)) 15191da177e4SLinus Torvalds set_page_dirty(page); 15201da177e4SLinus Torvalds 1521365e9c87SHugh Dickins /* Update high watermark before we lower rss */ 1522365e9c87SHugh Dickins update_hiwater_rss(mm); 1523365e9c87SHugh Dickins 1524888b9f7cSAndi Kleen if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { 15255fd27b8eSPunit Agrawal pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 15265d317b2bSNaoya Horiguchi if (PageHuge(page)) { 1527d8c6546bSMatthew Wilcox (Oracle) hugetlb_count_sub(compound_nr(page), mm); 1528785373b4SLinus Torvalds set_huge_swap_pte_at(mm, address, 15295fd27b8eSPunit Agrawal pvmw.pte, pteval, 15305fd27b8eSPunit Agrawal vma_mmu_pagesize(vma)); 15315d317b2bSNaoya Horiguchi } else { 1532eca56ff9SJerome Marchand dec_mm_counter(mm, mm_counter(page)); 1533785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, pteval); 15345f24ae58SNaoya Horiguchi } 1535c7ab0d2fSKirill A. Shutemov 1536bce73e48SChristian Borntraeger } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 153745961722SKonstantin Weitz /* 153845961722SKonstantin Weitz * The guest indicated that the page content is of no 153945961722SKonstantin Weitz * interest anymore. Simply discard the pte, vmscan 154045961722SKonstantin Weitz * will take care of the rest. 1541bce73e48SChristian Borntraeger * A future reference will then fault in a new zero 1542bce73e48SChristian Borntraeger * page. When userfaultfd is active, we must not drop 1543bce73e48SChristian Borntraeger * this page though, as its main user (postcopy 1544bce73e48SChristian Borntraeger * migration) will not expect userfaults on already 1545bce73e48SChristian Borntraeger * copied pages. 154645961722SKonstantin Weitz */ 1547eca56ff9SJerome Marchand dec_mm_counter(mm, mm_counter(page)); 15480f10851eSJérôme Glisse /* We have to invalidate as we cleared the pte */ 15490f10851eSJérôme Glisse mmu_notifier_invalidate_range(mm, address, 15500f10851eSJérôme Glisse address + PAGE_SIZE); 1551888b9f7cSAndi Kleen } else if (PageAnon(page)) { 1552c7ab0d2fSKirill A. Shutemov swp_entry_t entry = { .val = page_private(subpage) }; 1553179ef71cSCyrill Gorcunov pte_t swp_pte; 15541da177e4SLinus Torvalds /* 15551da177e4SLinus Torvalds * Store the swap location in the pte. 15561da177e4SLinus Torvalds * See handle_pte_fault() ... 15571da177e4SLinus Torvalds */ 1558eb94a878SMinchan Kim if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) { 1559eb94a878SMinchan Kim WARN_ON_ONCE(1); 156083612a94SMinchan Kim ret = false; 1561369ea824SJérôme Glisse /* We have to invalidate as we cleared the pte */ 15620f10851eSJérôme Glisse mmu_notifier_invalidate_range(mm, address, 15630f10851eSJérôme Glisse address + PAGE_SIZE); 1564eb94a878SMinchan Kim page_vma_mapped_walk_done(&pvmw); 1565eb94a878SMinchan Kim break; 1566eb94a878SMinchan Kim } 1567854e9ed0SMinchan Kim 1568802a3a92SShaohua Li /* MADV_FREE page check */ 1569802a3a92SShaohua Li if (!PageSwapBacked(page)) { 1570a128ca71SShaohua Li if (!PageDirty(page)) { 15710f10851eSJérôme Glisse /* Invalidate as we cleared the pte */ 15720f10851eSJérôme Glisse mmu_notifier_invalidate_range(mm, 15730f10851eSJérôme Glisse address, address + PAGE_SIZE); 1574854e9ed0SMinchan Kim dec_mm_counter(mm, MM_ANONPAGES); 1575854e9ed0SMinchan Kim goto discard; 1576854e9ed0SMinchan Kim } 1577854e9ed0SMinchan Kim 1578802a3a92SShaohua Li /* 1579802a3a92SShaohua Li * If the page was redirtied, it cannot be 1580802a3a92SShaohua Li * discarded. Remap the page to page table. 
1581802a3a92SShaohua Li */ 1582785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, pteval); 158318863d3aSMinchan Kim SetPageSwapBacked(page); 1584e4b82222SMinchan Kim ret = false; 1585802a3a92SShaohua Li page_vma_mapped_walk_done(&pvmw); 1586802a3a92SShaohua Li break; 1587802a3a92SShaohua Li } 1588802a3a92SShaohua Li 1589570a335bSHugh Dickins if (swap_duplicate(entry) < 0) { 1590785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, pteval); 1591e4b82222SMinchan Kim ret = false; 1592c7ab0d2fSKirill A. Shutemov page_vma_mapped_walk_done(&pvmw); 1593c7ab0d2fSKirill A. Shutemov break; 1594570a335bSHugh Dickins } 1595ca827d55SKhalid Aziz if (arch_unmap_one(mm, vma, address, pteval) < 0) { 1596ca827d55SKhalid Aziz set_pte_at(mm, address, pvmw.pte, pteval); 1597ca827d55SKhalid Aziz ret = false; 1598ca827d55SKhalid Aziz page_vma_mapped_walk_done(&pvmw); 1599ca827d55SKhalid Aziz break; 1600ca827d55SKhalid Aziz } 16011da177e4SLinus Torvalds if (list_empty(&mm->mmlist)) { 16021da177e4SLinus Torvalds spin_lock(&mmlist_lock); 1603f412ac08SHugh Dickins if (list_empty(&mm->mmlist)) 16041da177e4SLinus Torvalds list_add(&mm->mmlist, &init_mm.mmlist); 16051da177e4SLinus Torvalds spin_unlock(&mmlist_lock); 16061da177e4SLinus Torvalds } 1607d559db08SKAMEZAWA Hiroyuki dec_mm_counter(mm, MM_ANONPAGES); 1608b084d435SKAMEZAWA Hiroyuki inc_mm_counter(mm, MM_SWAPENTS); 1609179ef71cSCyrill Gorcunov swp_pte = swp_entry_to_pte(entry); 1610179ef71cSCyrill Gorcunov if (pte_soft_dirty(pteval)) 1611179ef71cSCyrill Gorcunov swp_pte = pte_swp_mksoft_dirty(swp_pte); 1612f45ec5ffSPeter Xu if (pte_uffd_wp(pteval)) 1613f45ec5ffSPeter Xu swp_pte = pte_swp_mkuffd_wp(swp_pte); 1614785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, swp_pte); 16150f10851eSJérôme Glisse /* Invalidate as we cleared the pte */ 1616369ea824SJérôme Glisse mmu_notifier_invalidate_range(mm, address, 1617369ea824SJérôme Glisse address + PAGE_SIZE); 16180f10851eSJérôme Glisse } else { 16190f10851eSJérôme Glisse /* 1620906f9cdfSHugh Dickins * This is a locked file-backed page, thus it cannot 1621906f9cdfSHugh Dickins * be removed from the page cache and replaced by a new 1622906f9cdfSHugh Dickins * page before mmu_notifier_invalidate_range_end, so no 16230f10851eSJérôme Glisse * concurrent thread might update its page table to 16240f10851eSJérôme Glisse * point at new page while a device still is using this 16250f10851eSJérôme Glisse * page. 16260f10851eSJérôme Glisse * 1627ad56b738SMike Rapoport * See Documentation/vm/mmu_notifier.rst 16280f10851eSJérôme Glisse */ 16290f10851eSJérôme Glisse dec_mm_counter(mm, mm_counter_file(page)); 16300f10851eSJérôme Glisse } 16310f10851eSJérôme Glisse discard: 16320f10851eSJérôme Glisse /* 16330f10851eSJérôme Glisse * No need to call mmu_notifier_invalidate_range() it has be 16340f10851eSJérôme Glisse * done above for all cases requiring it to happen under page 16350f10851eSJérôme Glisse * table lock before mmu_notifier_invalidate_range_end() 16360f10851eSJérôme Glisse * 1637ad56b738SMike Rapoport * See Documentation/vm/mmu_notifier.rst 16380f10851eSJérôme Glisse */ 16390f10851eSJérôme Glisse page_remove_rmap(subpage, PageHuge(page)); 16400f10851eSJérôme Glisse put_page(page); 1641c7ab0d2fSKirill A. 
Shutemov } 1642369ea824SJérôme Glisse 1643ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range); 1644369ea824SJérôme Glisse 1645caed0f48SKOSAKI Motohiro return ret; 16461da177e4SLinus Torvalds } 16471da177e4SLinus Torvalds 164852629506SJoonsoo Kim static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) 164952629506SJoonsoo Kim { 1650222100eeSAnshuman Khandual return vma_is_temporary_stack(vma); 165152629506SJoonsoo Kim } 165252629506SJoonsoo Kim 1653b7e188ecSMiaohe Lin static int page_not_mapped(struct page *page) 165452629506SJoonsoo Kim { 1655b7e188ecSMiaohe Lin return !page_mapped(page); 16562a52bcbcSKirill A. Shutemov } 165752629506SJoonsoo Kim 16581da177e4SLinus Torvalds /** 16591da177e4SLinus Torvalds * try_to_unmap - try to remove all page table mappings to a page 16601da177e4SLinus Torvalds * @page: the page to get unmapped 166114fa31b8SAndi Kleen * @flags: action and flags 16621da177e4SLinus Torvalds * 16631da177e4SLinus Torvalds * Tries to remove all the page table entries which are mapping this 16641da177e4SLinus Torvalds * page, used in the pageout path. Caller must hold the page lock. 16651da177e4SLinus Torvalds * 16661fb08ac6SYang Shi * It is the caller's responsibility to check if the page is still 16671fb08ac6SYang Shi * mapped when needed (use TTU_SYNC to prevent accounting races). 16681da177e4SLinus Torvalds */ 16691fb08ac6SYang Shi void try_to_unmap(struct page *page, enum ttu_flags flags) 16701da177e4SLinus Torvalds { 167152629506SJoonsoo Kim struct rmap_walk_control rwc = { 167252629506SJoonsoo Kim .rmap_one = try_to_unmap_one, 1673802a3a92SShaohua Li .arg = (void *)flags, 1674b7e188ecSMiaohe Lin .done = page_not_mapped, 167552629506SJoonsoo Kim .anon_lock = page_lock_anon_vma_read, 167652629506SJoonsoo Kim }; 16771da177e4SLinus Torvalds 1678a98a2f0cSAlistair Popple if (flags & TTU_RMAP_LOCKED) 1679a98a2f0cSAlistair Popple rmap_walk_locked(page, &rwc); 1680a98a2f0cSAlistair Popple else 1681a98a2f0cSAlistair Popple rmap_walk(page, &rwc); 1682a98a2f0cSAlistair Popple } 1683a98a2f0cSAlistair Popple 1684a98a2f0cSAlistair Popple /* 1685a98a2f0cSAlistair Popple * @arg: enum ttu_flags will be passed to this argument. 1686a98a2f0cSAlistair Popple * 1687a98a2f0cSAlistair Popple * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs 1688*64b586d1SHugh Dickins * containing migration entries. 1689a98a2f0cSAlistair Popple */ 1690a98a2f0cSAlistair Popple static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, 1691a98a2f0cSAlistair Popple unsigned long address, void *arg) 1692a98a2f0cSAlistair Popple { 1693a98a2f0cSAlistair Popple struct mm_struct *mm = vma->vm_mm; 1694a98a2f0cSAlistair Popple struct page_vma_mapped_walk pvmw = { 1695a98a2f0cSAlistair Popple .page = page, 1696a98a2f0cSAlistair Popple .vma = vma, 1697a98a2f0cSAlistair Popple .address = address, 1698a98a2f0cSAlistair Popple }; 1699a98a2f0cSAlistair Popple pte_t pteval; 1700a98a2f0cSAlistair Popple struct page *subpage; 1701a98a2f0cSAlistair Popple bool ret = true; 1702a98a2f0cSAlistair Popple struct mmu_notifier_range range; 1703a98a2f0cSAlistair Popple enum ttu_flags flags = (enum ttu_flags)(long)arg; 1704a98a2f0cSAlistair Popple 1705a98a2f0cSAlistair Popple if (is_zone_device_page(page) && !is_device_private_page(page)) 1706a98a2f0cSAlistair Popple return true; 1707a98a2f0cSAlistair Popple 1708a98a2f0cSAlistair Popple /* 1709a98a2f0cSAlistair Popple * When racing against e.g. 
zap_pte_range() on another cpu, 1710a98a2f0cSAlistair Popple * in between its ptep_get_and_clear_full() and page_remove_rmap(), 1711a98a2f0cSAlistair Popple * try_to_migrate() may return before page_mapped() has become false, 1712a98a2f0cSAlistair Popple * if page table locking is skipped: use TTU_SYNC to wait for that. 1713a98a2f0cSAlistair Popple */ 1714a98a2f0cSAlistair Popple if (flags & TTU_SYNC) 1715a98a2f0cSAlistair Popple pvmw.flags = PVMW_SYNC; 1716a98a2f0cSAlistair Popple 1717a98a2f0cSAlistair Popple /* 1718a98a2f0cSAlistair Popple * unmap_page() in mm/huge_memory.c is the only user of migration with 1719a98a2f0cSAlistair Popple * TTU_SPLIT_HUGE_PMD and it wants to freeze. 1720a98a2f0cSAlistair Popple */ 1721a98a2f0cSAlistair Popple if (flags & TTU_SPLIT_HUGE_PMD) 1722a98a2f0cSAlistair Popple split_huge_pmd_address(vma, address, true, page); 1723a98a2f0cSAlistair Popple 1724a98a2f0cSAlistair Popple /* 1725a98a2f0cSAlistair Popple * For THP, we have to assume the worse case ie pmd for invalidation. 1726a98a2f0cSAlistair Popple * For hugetlb, it could be much worse if we need to do pud 1727a98a2f0cSAlistair Popple * invalidation in the case of pmd sharing. 1728a98a2f0cSAlistair Popple * 1729a98a2f0cSAlistair Popple * Note that the page can not be free in this function as call of 1730a98a2f0cSAlistair Popple * try_to_unmap() must hold a reference on the page. 1731a98a2f0cSAlistair Popple */ 1732a98a2f0cSAlistair Popple range.end = PageKsm(page) ? 1733a98a2f0cSAlistair Popple address + PAGE_SIZE : vma_address_end(page, vma); 1734a98a2f0cSAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 1735a98a2f0cSAlistair Popple address, range.end); 1736a98a2f0cSAlistair Popple if (PageHuge(page)) { 1737a98a2f0cSAlistair Popple /* 1738a98a2f0cSAlistair Popple * If sharing is possible, start and end will be adjusted 1739a98a2f0cSAlistair Popple * accordingly. 1740a98a2f0cSAlistair Popple */ 1741a98a2f0cSAlistair Popple adjust_range_if_pmd_sharing_possible(vma, &range.start, 1742a98a2f0cSAlistair Popple &range.end); 1743a98a2f0cSAlistair Popple } 1744a98a2f0cSAlistair Popple mmu_notifier_invalidate_range_start(&range); 1745a98a2f0cSAlistair Popple 1746a98a2f0cSAlistair Popple while (page_vma_mapped_walk(&pvmw)) { 1747a98a2f0cSAlistair Popple #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 1748a98a2f0cSAlistair Popple /* PMD-mapped THP migration entry */ 1749a98a2f0cSAlistair Popple if (!pvmw.pte) { 1750a98a2f0cSAlistair Popple VM_BUG_ON_PAGE(PageHuge(page) || 1751a98a2f0cSAlistair Popple !PageTransCompound(page), page); 1752a98a2f0cSAlistair Popple 1753a98a2f0cSAlistair Popple set_pmd_migration_entry(&pvmw, page); 1754a98a2f0cSAlistair Popple continue; 1755a98a2f0cSAlistair Popple } 1756a98a2f0cSAlistair Popple #endif 1757a98a2f0cSAlistair Popple 1758a98a2f0cSAlistair Popple /* Unexpected PMD-mapped THP? */ 1759a98a2f0cSAlistair Popple VM_BUG_ON_PAGE(!pvmw.pte, page); 1760a98a2f0cSAlistair Popple 1761a98a2f0cSAlistair Popple subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); 1762a98a2f0cSAlistair Popple address = pvmw.address; 1763a98a2f0cSAlistair Popple 1764a98a2f0cSAlistair Popple if (PageHuge(page) && !PageAnon(page)) { 1765a98a2f0cSAlistair Popple /* 1766a98a2f0cSAlistair Popple * To call huge_pmd_unshare, i_mmap_rwsem must be 1767a98a2f0cSAlistair Popple * held in write mode. Caller needs to explicitly 1768a98a2f0cSAlistair Popple * do this outside rmap routines. 
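 * (TTU_RMAP_LOCKED is the caller's indication that it already holds
 * i_mmap_rwsem; the VM_BUG_ON below only checks that the flag was passed.)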
1769a98a2f0cSAlistair Popple */ 1770a98a2f0cSAlistair Popple VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 1771a98a2f0cSAlistair Popple if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) { 1772a98a2f0cSAlistair Popple /* 1773a98a2f0cSAlistair Popple * huge_pmd_unshare unmapped an entire PMD 1774a98a2f0cSAlistair Popple * page. There is no way of knowing exactly 1775a98a2f0cSAlistair Popple * which PMDs may be cached for this mm, so 1776a98a2f0cSAlistair Popple * we must flush them all. start/end were 1777a98a2f0cSAlistair Popple * already adjusted above to cover this range. 1778a98a2f0cSAlistair Popple */ 1779a98a2f0cSAlistair Popple flush_cache_range(vma, range.start, range.end); 1780a98a2f0cSAlistair Popple flush_tlb_range(vma, range.start, range.end); 1781a98a2f0cSAlistair Popple mmu_notifier_invalidate_range(mm, range.start, 1782a98a2f0cSAlistair Popple range.end); 1783a98a2f0cSAlistair Popple 1784a98a2f0cSAlistair Popple /* 1785a98a2f0cSAlistair Popple * The ref count of the PMD page was dropped 1786a98a2f0cSAlistair Popple * which is part of the way map counting 1787a98a2f0cSAlistair Popple * is done for shared PMDs. Return 'true' 1788a98a2f0cSAlistair Popple * here. When there is no other sharing, 1789a98a2f0cSAlistair Popple * huge_pmd_unshare returns false and we will 1790a98a2f0cSAlistair Popple * unmap the actual page and drop map count 1791a98a2f0cSAlistair Popple * to zero. 1792a98a2f0cSAlistair Popple */ 1793a98a2f0cSAlistair Popple page_vma_mapped_walk_done(&pvmw); 1794a98a2f0cSAlistair Popple break; 1795a98a2f0cSAlistair Popple } 1796a98a2f0cSAlistair Popple } 1797a98a2f0cSAlistair Popple 1798a98a2f0cSAlistair Popple /* Nuke the page table entry. */ 1799a98a2f0cSAlistair Popple flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 1800a98a2f0cSAlistair Popple pteval = ptep_clear_flush(vma, address, pvmw.pte); 1801a98a2f0cSAlistair Popple 1802a98a2f0cSAlistair Popple /* Move the dirty bit to the page. Now the pte is gone. */ 1803a98a2f0cSAlistair Popple if (pte_dirty(pteval)) 1804a98a2f0cSAlistair Popple set_page_dirty(page); 1805a98a2f0cSAlistair Popple 1806a98a2f0cSAlistair Popple /* Update high watermark before we lower rss */ 1807a98a2f0cSAlistair Popple update_hiwater_rss(mm); 1808a98a2f0cSAlistair Popple 1809a98a2f0cSAlistair Popple if (is_zone_device_page(page)) { 1810a98a2f0cSAlistair Popple swp_entry_t entry; 1811a98a2f0cSAlistair Popple pte_t swp_pte; 1812a98a2f0cSAlistair Popple 1813a98a2f0cSAlistair Popple /* 1814a98a2f0cSAlistair Popple * Store the pfn of the page in a special migration 1815a98a2f0cSAlistair Popple * pte. do_swap_page() will wait until the migration 1816a98a2f0cSAlistair Popple * pte is removed and then restart fault handling. 1817a98a2f0cSAlistair Popple */ 1818a98a2f0cSAlistair Popple entry = make_readable_migration_entry( 1819a98a2f0cSAlistair Popple page_to_pfn(page)); 1820a98a2f0cSAlistair Popple swp_pte = swp_entry_to_pte(entry); 1821a98a2f0cSAlistair Popple 1822a98a2f0cSAlistair Popple /* 1823a98a2f0cSAlistair Popple * pteval maps a zone device page and is therefore 1824a98a2f0cSAlistair Popple * a swap pte. 
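 * (Hence the pte_swp_soft_dirty()/pte_swp_uffd_wp() accessors below,
 * rather than pte_soft_dirty()/pte_uffd_wp().)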
1825a98a2f0cSAlistair Popple */ 1826a98a2f0cSAlistair Popple if (pte_swp_soft_dirty(pteval)) 1827a98a2f0cSAlistair Popple swp_pte = pte_swp_mksoft_dirty(swp_pte); 1828a98a2f0cSAlistair Popple if (pte_swp_uffd_wp(pteval)) 1829a98a2f0cSAlistair Popple swp_pte = pte_swp_mkuffd_wp(swp_pte); 1830a98a2f0cSAlistair Popple set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); 1831a98a2f0cSAlistair Popple /* 1832a98a2f0cSAlistair Popple * No need to invalidate here it will synchronize on 1833a98a2f0cSAlistair Popple * against the special swap migration pte. 1834a98a2f0cSAlistair Popple * 1835a98a2f0cSAlistair Popple * The assignment to subpage above was computed from a 1836a98a2f0cSAlistair Popple * swap PTE which results in an invalid pointer. 1837a98a2f0cSAlistair Popple * Since only PAGE_SIZE pages can currently be 1838a98a2f0cSAlistair Popple * migrated, just set it to page. This will need to be 1839a98a2f0cSAlistair Popple * changed when hugepage migrations to device private 1840a98a2f0cSAlistair Popple * memory are supported. 1841a98a2f0cSAlistair Popple */ 1842a98a2f0cSAlistair Popple subpage = page; 1843a98a2f0cSAlistair Popple } else if (PageHWPoison(page)) { 1844a98a2f0cSAlistair Popple pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 1845a98a2f0cSAlistair Popple if (PageHuge(page)) { 1846a98a2f0cSAlistair Popple hugetlb_count_sub(compound_nr(page), mm); 1847a98a2f0cSAlistair Popple set_huge_swap_pte_at(mm, address, 1848a98a2f0cSAlistair Popple pvmw.pte, pteval, 1849a98a2f0cSAlistair Popple vma_mmu_pagesize(vma)); 1850a98a2f0cSAlistair Popple } else { 1851a98a2f0cSAlistair Popple dec_mm_counter(mm, mm_counter(page)); 1852a98a2f0cSAlistair Popple set_pte_at(mm, address, pvmw.pte, pteval); 1853a98a2f0cSAlistair Popple } 1854a98a2f0cSAlistair Popple 1855a98a2f0cSAlistair Popple } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 1856a98a2f0cSAlistair Popple /* 1857a98a2f0cSAlistair Popple * The guest indicated that the page content is of no 1858a98a2f0cSAlistair Popple * interest anymore. Simply discard the pte, vmscan 1859a98a2f0cSAlistair Popple * will take care of the rest. 1860a98a2f0cSAlistair Popple * A future reference will then fault in a new zero 1861a98a2f0cSAlistair Popple * page. When userfaultfd is active, we must not drop 1862a98a2f0cSAlistair Popple * this page though, as its main user (postcopy 1863a98a2f0cSAlistair Popple * migration) will not expect userfaults on already 1864a98a2f0cSAlistair Popple * copied pages. 1865a98a2f0cSAlistair Popple */ 1866a98a2f0cSAlistair Popple dec_mm_counter(mm, mm_counter(page)); 1867a98a2f0cSAlistair Popple /* We have to invalidate as we cleared the pte */ 1868a98a2f0cSAlistair Popple mmu_notifier_invalidate_range(mm, address, 1869a98a2f0cSAlistair Popple address + PAGE_SIZE); 1870a98a2f0cSAlistair Popple } else { 1871a98a2f0cSAlistair Popple swp_entry_t entry; 1872a98a2f0cSAlistair Popple pte_t swp_pte; 1873a98a2f0cSAlistair Popple 1874a98a2f0cSAlistair Popple if (arch_unmap_one(mm, vma, address, pteval) < 0) { 1875a98a2f0cSAlistair Popple set_pte_at(mm, address, pvmw.pte, pteval); 1876a98a2f0cSAlistair Popple ret = false; 1877a98a2f0cSAlistair Popple page_vma_mapped_walk_done(&pvmw); 1878a98a2f0cSAlistair Popple break; 1879a98a2f0cSAlistair Popple } 1880a98a2f0cSAlistair Popple 1881a98a2f0cSAlistair Popple /* 1882a98a2f0cSAlistair Popple * Store the pfn of the page in a special migration 1883a98a2f0cSAlistair Popple * pte. 
do_swap_page() will wait until the migration 1884a98a2f0cSAlistair Popple * pte is removed and then restart fault handling. 1885a98a2f0cSAlistair Popple */ 1886a98a2f0cSAlistair Popple if (pte_write(pteval)) 1887a98a2f0cSAlistair Popple entry = make_writable_migration_entry( 1888a98a2f0cSAlistair Popple page_to_pfn(subpage)); 1889a98a2f0cSAlistair Popple else 1890a98a2f0cSAlistair Popple entry = make_readable_migration_entry( 1891a98a2f0cSAlistair Popple page_to_pfn(subpage)); 1892a98a2f0cSAlistair Popple 1893a98a2f0cSAlistair Popple swp_pte = swp_entry_to_pte(entry); 1894a98a2f0cSAlistair Popple if (pte_soft_dirty(pteval)) 1895a98a2f0cSAlistair Popple swp_pte = pte_swp_mksoft_dirty(swp_pte); 1896a98a2f0cSAlistair Popple if (pte_uffd_wp(pteval)) 1897a98a2f0cSAlistair Popple swp_pte = pte_swp_mkuffd_wp(swp_pte); 1898a98a2f0cSAlistair Popple set_pte_at(mm, address, pvmw.pte, swp_pte); 1899a98a2f0cSAlistair Popple /* 1900a98a2f0cSAlistair Popple * No need to invalidate here it will synchronize on 1901a98a2f0cSAlistair Popple * against the special swap migration pte. 1902a98a2f0cSAlistair Popple */ 1903a98a2f0cSAlistair Popple } 1904a98a2f0cSAlistair Popple 1905a98a2f0cSAlistair Popple /* 1906a98a2f0cSAlistair Popple * No need to call mmu_notifier_invalidate_range() it has be 1907a98a2f0cSAlistair Popple * done above for all cases requiring it to happen under page 1908a98a2f0cSAlistair Popple * table lock before mmu_notifier_invalidate_range_end() 1909a98a2f0cSAlistair Popple * 1910a98a2f0cSAlistair Popple * See Documentation/vm/mmu_notifier.rst 1911a98a2f0cSAlistair Popple */ 1912a98a2f0cSAlistair Popple page_remove_rmap(subpage, PageHuge(page)); 1913a98a2f0cSAlistair Popple put_page(page); 1914a98a2f0cSAlistair Popple } 1915a98a2f0cSAlistair Popple 1916a98a2f0cSAlistair Popple mmu_notifier_invalidate_range_end(&range); 1917a98a2f0cSAlistair Popple 1918a98a2f0cSAlistair Popple return ret; 1919a98a2f0cSAlistair Popple } 1920a98a2f0cSAlistair Popple 1921a98a2f0cSAlistair Popple /** 1922a98a2f0cSAlistair Popple * try_to_migrate - try to replace all page table mappings with swap entries 1923a98a2f0cSAlistair Popple * @page: the page to replace page table entries for 1924a98a2f0cSAlistair Popple * @flags: action and flags 1925a98a2f0cSAlistair Popple * 1926a98a2f0cSAlistair Popple * Tries to remove all the page table entries which are mapping this page and 1927a98a2f0cSAlistair Popple * replace them with special swap entries. Caller must hold the page lock. 1928a98a2f0cSAlistair Popple */ 1929a98a2f0cSAlistair Popple void try_to_migrate(struct page *page, enum ttu_flags flags) 1930a98a2f0cSAlistair Popple { 1931a98a2f0cSAlistair Popple struct rmap_walk_control rwc = { 1932a98a2f0cSAlistair Popple .rmap_one = try_to_migrate_one, 1933a98a2f0cSAlistair Popple .arg = (void *)flags, 1934a98a2f0cSAlistair Popple .done = page_not_mapped, 1935a98a2f0cSAlistair Popple .anon_lock = page_lock_anon_vma_read, 1936a98a2f0cSAlistair Popple }; 1937a98a2f0cSAlistair Popple 1938a98a2f0cSAlistair Popple /* 1939a98a2f0cSAlistair Popple * Migration always ignores mlock and only supports TTU_RMAP_LOCKED and 1940a98a2f0cSAlistair Popple * TTU_SPLIT_HUGE_PMD and TTU_SYNC flags. 1941a98a2f0cSAlistair Popple */ 1942a98a2f0cSAlistair Popple if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | 1943a98a2f0cSAlistair Popple TTU_SYNC))) 1944a98a2f0cSAlistair Popple return; 1945a98a2f0cSAlistair Popple 194652629506SJoonsoo Kim /* 194752629506SJoonsoo Kim * During exec, a temporary VMA is setup and later moved. 
194852629506SJoonsoo Kim * The VMA is moved under the anon_vma lock but not the 194952629506SJoonsoo Kim * page tables leading to a race where migration cannot 195052629506SJoonsoo Kim * find the migration ptes. Rather than increasing the 195152629506SJoonsoo Kim * locking requirements of exec(), migration skips 195252629506SJoonsoo Kim * temporary VMAs until after exec() completes. 195352629506SJoonsoo Kim */ 1954a98a2f0cSAlistair Popple if (!PageKsm(page) && PageAnon(page)) 195552629506SJoonsoo Kim rwc.invalid_vma = invalid_migration_vma; 195652629506SJoonsoo Kim 19572a52bcbcSKirill A. Shutemov if (flags & TTU_RMAP_LOCKED) 195833fc80e2SMinchan Kim rmap_walk_locked(page, &rwc); 19592a52bcbcSKirill A. Shutemov else 196033fc80e2SMinchan Kim rmap_walk(page, &rwc); 19611da177e4SLinus Torvalds } 196281b4082dSNikita Danilov 1963cd62734cSAlistair Popple /* 1964cd62734cSAlistair Popple * Walks the vma's mapping a page and mlocks the page if any locked vma's are 1965cd62734cSAlistair Popple * found. Once one is found the page is locked and the scan can be terminated. 1966b291f000SNick Piggin */ 1967cd62734cSAlistair Popple static bool page_mlock_one(struct page *page, struct vm_area_struct *vma, 1968cd62734cSAlistair Popple unsigned long address, void *unused) 1969cd62734cSAlistair Popple { 1970cd62734cSAlistair Popple struct page_vma_mapped_walk pvmw = { 1971cd62734cSAlistair Popple .page = page, 1972cd62734cSAlistair Popple .vma = vma, 1973cd62734cSAlistair Popple .address = address, 1974cd62734cSAlistair Popple }; 1975854e9ed0SMinchan Kim 1976cd62734cSAlistair Popple /* An un-locked vma doesn't have any pages to lock, continue the scan */ 1977cd62734cSAlistair Popple if (!(vma->vm_flags & VM_LOCKED)) 1978cd62734cSAlistair Popple return true; 1979cd62734cSAlistair Popple 1980cd62734cSAlistair Popple while (page_vma_mapped_walk(&pvmw)) { 1981cd62734cSAlistair Popple /* 1982cd62734cSAlistair Popple * Need to recheck under the ptl to serialise with 1983cd62734cSAlistair Popple * __munlock_pagevec_fill() after VM_LOCKED is cleared in 1984cd62734cSAlistair Popple * munlock_vma_pages_range(). 1985cd62734cSAlistair Popple */ 1986cd62734cSAlistair Popple if (vma->vm_flags & VM_LOCKED) { 1987cd62734cSAlistair Popple /* PTE-mapped THP are never mlocked */ 1988cd62734cSAlistair Popple if (!PageTransCompound(page)) 1989cd62734cSAlistair Popple mlock_vma_page(page); 1990cd62734cSAlistair Popple page_vma_mapped_walk_done(&pvmw); 1991cd62734cSAlistair Popple } 1992cd62734cSAlistair Popple 1993cd62734cSAlistair Popple /* 1994cd62734cSAlistair Popple * no need to continue scanning other vma's if the page has 1995cd62734cSAlistair Popple * been locked. 1996cd62734cSAlistair Popple */ 1997cd62734cSAlistair Popple return false; 1998cd62734cSAlistair Popple } 1999cd62734cSAlistair Popple 2000cd62734cSAlistair Popple return true; 2001cd62734cSAlistair Popple } 2002cd62734cSAlistair Popple 2003cd62734cSAlistair Popple /** 2004cd62734cSAlistair Popple * page_mlock - try to mlock a page 2005cd62734cSAlistair Popple * @page: the page to be mlocked 2006cd62734cSAlistair Popple * 2007cd62734cSAlistair Popple * Called from munlock code. Checks all of the VMAs mapping the page and mlocks 2008cd62734cSAlistair Popple * the page if any are found. The page will be returned with PG_mlocked cleared 2009cd62734cSAlistair Popple * if it is not mapped by any locked vmas. 
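 * (rmap_walk() stops early here: page_mlock_one() above returns false as
 * soon as it has found the page mapped in a VM_LOCKED vma.)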
2010cd62734cSAlistair Popple */ 2011cd62734cSAlistair Popple void page_mlock(struct page *page) 2012192d7232SMinchan Kim { 2013e8351ac9SJoonsoo Kim struct rmap_walk_control rwc = { 2014cd62734cSAlistair Popple .rmap_one = page_mlock_one, 2015e8351ac9SJoonsoo Kim .done = page_not_mapped, 2016e8351ac9SJoonsoo Kim .anon_lock = page_lock_anon_vma_read, 2017e8351ac9SJoonsoo Kim 2018e8351ac9SJoonsoo Kim }; 2019e8351ac9SJoonsoo Kim 2020309381feSSasha Levin VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); 2021192d7232SMinchan Kim VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); 2022b291f000SNick Piggin 2023192d7232SMinchan Kim rmap_walk(page, &rwc); 2024b291f000SNick Piggin } 2025e9995ef9SHugh Dickins 2026b756a3b5SAlistair Popple #ifdef CONFIG_DEVICE_PRIVATE 2027b756a3b5SAlistair Popple struct make_exclusive_args { 2028b756a3b5SAlistair Popple struct mm_struct *mm; 2029b756a3b5SAlistair Popple unsigned long address; 2030b756a3b5SAlistair Popple void *owner; 2031b756a3b5SAlistair Popple bool valid; 2032b756a3b5SAlistair Popple }; 2033b756a3b5SAlistair Popple 2034b756a3b5SAlistair Popple static bool page_make_device_exclusive_one(struct page *page, 2035b756a3b5SAlistair Popple struct vm_area_struct *vma, unsigned long address, void *priv) 2036b756a3b5SAlistair Popple { 2037b756a3b5SAlistair Popple struct mm_struct *mm = vma->vm_mm; 2038b756a3b5SAlistair Popple struct page_vma_mapped_walk pvmw = { 2039b756a3b5SAlistair Popple .page = page, 2040b756a3b5SAlistair Popple .vma = vma, 2041b756a3b5SAlistair Popple .address = address, 2042b756a3b5SAlistair Popple }; 2043b756a3b5SAlistair Popple struct make_exclusive_args *args = priv; 2044b756a3b5SAlistair Popple pte_t pteval; 2045b756a3b5SAlistair Popple struct page *subpage; 2046b756a3b5SAlistair Popple bool ret = true; 2047b756a3b5SAlistair Popple struct mmu_notifier_range range; 2048b756a3b5SAlistair Popple swp_entry_t entry; 2049b756a3b5SAlistair Popple pte_t swp_pte; 2050b756a3b5SAlistair Popple 2051b756a3b5SAlistair Popple mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma, 2052b756a3b5SAlistair Popple vma->vm_mm, address, min(vma->vm_end, 2053b756a3b5SAlistair Popple address + page_size(page)), args->owner); 2054b756a3b5SAlistair Popple mmu_notifier_invalidate_range_start(&range); 2055b756a3b5SAlistair Popple 2056b756a3b5SAlistair Popple while (page_vma_mapped_walk(&pvmw)) { 2057b756a3b5SAlistair Popple /* Unexpected PMD-mapped THP? */ 2058b756a3b5SAlistair Popple VM_BUG_ON_PAGE(!pvmw.pte, page); 2059b756a3b5SAlistair Popple 2060b756a3b5SAlistair Popple if (!pte_present(*pvmw.pte)) { 2061b756a3b5SAlistair Popple ret = false; 2062b756a3b5SAlistair Popple page_vma_mapped_walk_done(&pvmw); 2063b756a3b5SAlistair Popple break; 2064b756a3b5SAlistair Popple } 2065b756a3b5SAlistair Popple 2066b756a3b5SAlistair Popple subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); 2067b756a3b5SAlistair Popple address = pvmw.address; 2068b756a3b5SAlistair Popple 2069b756a3b5SAlistair Popple /* Nuke the page table entry. */ 2070b756a3b5SAlistair Popple flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 2071b756a3b5SAlistair Popple pteval = ptep_clear_flush(vma, address, pvmw.pte); 2072b756a3b5SAlistair Popple 2073b756a3b5SAlistair Popple /* Move the dirty bit to the page. Now the pte is gone. 
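 * (Unlike the migration path, the pte here is guaranteed present: the
 * walk bails out above on !pte_present().)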
*/ 2074b756a3b5SAlistair Popple if (pte_dirty(pteval)) 2075b756a3b5SAlistair Popple set_page_dirty(page); 2076b756a3b5SAlistair Popple 2077b756a3b5SAlistair Popple /* 2078b756a3b5SAlistair Popple * Check that our target page is still mapped at the expected 2079b756a3b5SAlistair Popple * address. 2080b756a3b5SAlistair Popple */ 2081b756a3b5SAlistair Popple if (args->mm == mm && args->address == address && 2082b756a3b5SAlistair Popple pte_write(pteval)) 2083b756a3b5SAlistair Popple args->valid = true; 2084b756a3b5SAlistair Popple 2085b756a3b5SAlistair Popple /* 2086b756a3b5SAlistair Popple * Store the pfn of the page in a special migration 2087b756a3b5SAlistair Popple * pte. do_swap_page() will wait until the migration 2088b756a3b5SAlistair Popple * pte is removed and then restart fault handling. 2089b756a3b5SAlistair Popple */ 2090b756a3b5SAlistair Popple if (pte_write(pteval)) 2091b756a3b5SAlistair Popple entry = make_writable_device_exclusive_entry( 2092b756a3b5SAlistair Popple page_to_pfn(subpage)); 2093b756a3b5SAlistair Popple else 2094b756a3b5SAlistair Popple entry = make_readable_device_exclusive_entry( 2095b756a3b5SAlistair Popple page_to_pfn(subpage)); 2096b756a3b5SAlistair Popple swp_pte = swp_entry_to_pte(entry); 2097b756a3b5SAlistair Popple if (pte_soft_dirty(pteval)) 2098b756a3b5SAlistair Popple swp_pte = pte_swp_mksoft_dirty(swp_pte); 2099b756a3b5SAlistair Popple if (pte_uffd_wp(pteval)) 2100b756a3b5SAlistair Popple swp_pte = pte_swp_mkuffd_wp(swp_pte); 2101b756a3b5SAlistair Popple 2102b756a3b5SAlistair Popple set_pte_at(mm, address, pvmw.pte, swp_pte); 2103b756a3b5SAlistair Popple 2104b756a3b5SAlistair Popple /* 2105b756a3b5SAlistair Popple * There is a reference on the page for the swap entry which has 2106b756a3b5SAlistair Popple * been removed, so shouldn't take another. 2107b756a3b5SAlistair Popple */ 2108b756a3b5SAlistair Popple page_remove_rmap(subpage, false); 2109b756a3b5SAlistair Popple } 2110b756a3b5SAlistair Popple 2111b756a3b5SAlistair Popple mmu_notifier_invalidate_range_end(&range); 2112b756a3b5SAlistair Popple 2113b756a3b5SAlistair Popple return ret; 2114b756a3b5SAlistair Popple } 2115b756a3b5SAlistair Popple 2116b756a3b5SAlistair Popple /** 2117b756a3b5SAlistair Popple * page_make_device_exclusive - mark the page exclusively owned by a device 2118b756a3b5SAlistair Popple * @page: the page to replace page table entries for 2119b756a3b5SAlistair Popple * @mm: the mm_struct where the page is expected to be mapped 2120b756a3b5SAlistair Popple * @address: address where the page is expected to be mapped 2121b756a3b5SAlistair Popple * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks 2122b756a3b5SAlistair Popple * 2123b756a3b5SAlistair Popple * Tries to remove all the page table entries which are mapping this page and 2124b756a3b5SAlistair Popple * replace them with special device exclusive swap entries to grant a device 2125b756a3b5SAlistair Popple * exclusive access to the page. Caller must hold the page lock. 2126b756a3b5SAlistair Popple * 2127b756a3b5SAlistair Popple * Returns false if the page is still mapped, or if it could not be unmapped 2128b756a3b5SAlistair Popple * from the expected address. Otherwise returns true (success). 
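 * (Static helper: callers go through make_device_exclusive_range() below,
 * which takes a reference on and locks the page before calling this.)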
2129b756a3b5SAlistair Popple */ 2130b756a3b5SAlistair Popple static bool page_make_device_exclusive(struct page *page, struct mm_struct *mm, 2131b756a3b5SAlistair Popple unsigned long address, void *owner) 2132b756a3b5SAlistair Popple { 2133b756a3b5SAlistair Popple struct make_exclusive_args args = { 2134b756a3b5SAlistair Popple .mm = mm, 2135b756a3b5SAlistair Popple .address = address, 2136b756a3b5SAlistair Popple .owner = owner, 2137b756a3b5SAlistair Popple .valid = false, 2138b756a3b5SAlistair Popple }; 2139b756a3b5SAlistair Popple struct rmap_walk_control rwc = { 2140b756a3b5SAlistair Popple .rmap_one = page_make_device_exclusive_one, 2141b756a3b5SAlistair Popple .done = page_not_mapped, 2142b756a3b5SAlistair Popple .anon_lock = page_lock_anon_vma_read, 2143b756a3b5SAlistair Popple .arg = &args, 2144b756a3b5SAlistair Popple }; 2145b756a3b5SAlistair Popple 2146b756a3b5SAlistair Popple /* 2147b756a3b5SAlistair Popple * Restrict to anonymous pages for now to avoid potential writeback 2148b756a3b5SAlistair Popple * issues. Also tail pages shouldn't be passed to rmap_walk so skip 2149b756a3b5SAlistair Popple * those. 2150b756a3b5SAlistair Popple */ 2151b756a3b5SAlistair Popple if (!PageAnon(page) || PageTail(page)) 2152b756a3b5SAlistair Popple return false; 2153b756a3b5SAlistair Popple 2154b756a3b5SAlistair Popple rmap_walk(page, &rwc); 2155b756a3b5SAlistair Popple 2156b756a3b5SAlistair Popple return args.valid && !page_mapcount(page); 2157b756a3b5SAlistair Popple } 2158b756a3b5SAlistair Popple 2159b756a3b5SAlistair Popple /** 2160b756a3b5SAlistair Popple * make_device_exclusive_range() - Mark a range for exclusive use by a device 2161b756a3b5SAlistair Popple * @mm: mm_struct of associated target process 2162b756a3b5SAlistair Popple * @start: start of the region to mark for exclusive device access 2163b756a3b5SAlistair Popple * @end: end address of region 2164b756a3b5SAlistair Popple * @pages: returns the pages which were successfully marked for exclusive access 2165b756a3b5SAlistair Popple * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering 2166b756a3b5SAlistair Popple * 2167b756a3b5SAlistair Popple * Returns: number of pages found in the range by GUP. A page is marked for 2168b756a3b5SAlistair Popple * exclusive access only if the page pointer is non-NULL. 2169b756a3b5SAlistair Popple * 2170b756a3b5SAlistair Popple * This function finds ptes mapping page(s) in the given address range, locks 2171b756a3b5SAlistair Popple * the pages and replaces the mappings with special swap entries preventing userspace CPU 2172b756a3b5SAlistair Popple * access. On fault these entries are replaced with the original mapping after 2173b756a3b5SAlistair Popple * calling MMU notifiers. 2174b756a3b5SAlistair Popple * 2175b756a3b5SAlistair Popple * A driver using this to program access from a device must use an mmu notifier 2176b756a3b5SAlistair Popple * critical section to hold a device-specific lock during programming. Once 2177b756a3b5SAlistair Popple * programming is complete it should drop the page lock and reference, after 2178b756a3b5SAlistair Popple * which point CPU access to the page will revoke the exclusive access (see the usage sketch appended below).
2179b756a3b5SAlistair Popple */ 2180b756a3b5SAlistair Popple int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, 2181b756a3b5SAlistair Popple unsigned long end, struct page **pages, 2182b756a3b5SAlistair Popple void *owner) 2183b756a3b5SAlistair Popple { 2184b756a3b5SAlistair Popple long npages = (end - start) >> PAGE_SHIFT; 2185b756a3b5SAlistair Popple long i; 2186b756a3b5SAlistair Popple 2187b756a3b5SAlistair Popple npages = get_user_pages_remote(mm, start, npages, 2188b756a3b5SAlistair Popple FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, 2189b756a3b5SAlistair Popple pages, NULL, NULL); 2190b756a3b5SAlistair Popple if (npages < 0) 2191b756a3b5SAlistair Popple return npages; 2192b756a3b5SAlistair Popple 2193b756a3b5SAlistair Popple for (i = 0; i < npages; i++, start += PAGE_SIZE) { 2194b756a3b5SAlistair Popple if (!trylock_page(pages[i])) { 2195b756a3b5SAlistair Popple put_page(pages[i]); 2196b756a3b5SAlistair Popple pages[i] = NULL; 2197b756a3b5SAlistair Popple continue; 2198b756a3b5SAlistair Popple } 2199b756a3b5SAlistair Popple 2200b756a3b5SAlistair Popple if (!page_make_device_exclusive(pages[i], mm, start, owner)) { 2201b756a3b5SAlistair Popple unlock_page(pages[i]); 2202b756a3b5SAlistair Popple put_page(pages[i]); 2203b756a3b5SAlistair Popple pages[i] = NULL; 2204b756a3b5SAlistair Popple } 2205b756a3b5SAlistair Popple } 2206b756a3b5SAlistair Popple 2207b756a3b5SAlistair Popple return npages; 2208b756a3b5SAlistair Popple } 2209b756a3b5SAlistair Popple EXPORT_SYMBOL_GPL(make_device_exclusive_range); 2210b756a3b5SAlistair Popple #endif 2211b756a3b5SAlistair Popple 221201d8b20dSPeter Zijlstra void __put_anon_vma(struct anon_vma *anon_vma) 221376545066SRik van Riel { 221476545066SRik van Riel struct anon_vma *root = anon_vma->root; 221576545066SRik van Riel 2216624483f3SAndrey Ryabinin anon_vma_free(anon_vma); 221701d8b20dSPeter Zijlstra if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 221876545066SRik van Riel anon_vma_free(root); 221976545066SRik van Riel } 222076545066SRik van Riel 22210dd1c7bbSJoonsoo Kim static struct anon_vma *rmap_walk_anon_lock(struct page *page, 22220dd1c7bbSJoonsoo Kim struct rmap_walk_control *rwc) 2223faecd8ddSJoonsoo Kim { 2224faecd8ddSJoonsoo Kim struct anon_vma *anon_vma; 2225faecd8ddSJoonsoo Kim 22260dd1c7bbSJoonsoo Kim if (rwc->anon_lock) 22270dd1c7bbSJoonsoo Kim return rwc->anon_lock(page); 22280dd1c7bbSJoonsoo Kim 2229faecd8ddSJoonsoo Kim /* 2230faecd8ddSJoonsoo Kim * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read() 2231faecd8ddSJoonsoo Kim * because that depends on page_mapped(); but not all its usages 2232c1e8d7c6SMichel Lespinasse * are holding mmap_lock. 
Users without mmap_lock are required to 2233faecd8ddSJoonsoo Kim * take a reference count to prevent the anon_vma disappearing 2234faecd8ddSJoonsoo Kim */ 2235faecd8ddSJoonsoo Kim anon_vma = page_anon_vma(page); 2236faecd8ddSJoonsoo Kim if (!anon_vma) 2237faecd8ddSJoonsoo Kim return NULL; 2238faecd8ddSJoonsoo Kim 2239faecd8ddSJoonsoo Kim anon_vma_lock_read(anon_vma); 2240faecd8ddSJoonsoo Kim return anon_vma; 2241faecd8ddSJoonsoo Kim } 2242faecd8ddSJoonsoo Kim 2243e9995ef9SHugh Dickins /* 2244e8351ac9SJoonsoo Kim * rmap_walk_anon - do something to anonymous page using the object-based 2245e8351ac9SJoonsoo Kim * rmap method 2246e8351ac9SJoonsoo Kim * @page: the page to be handled 2247e8351ac9SJoonsoo Kim * @rwc: control variable according to each walk type 2248e8351ac9SJoonsoo Kim * 2249e8351ac9SJoonsoo Kim * Find all the mappings of a page using the mapping pointer and the vma chains 2250e8351ac9SJoonsoo Kim * contained in the anon_vma struct it points to. 2251e8351ac9SJoonsoo Kim * 2252cd62734cSAlistair Popple * When called from page_mlock(), the mmap_lock of the mm containing the vma 2253e8351ac9SJoonsoo Kim * where the page was found will be held for write. So, we won't recheck 2254e8351ac9SJoonsoo Kim * vm_flags for that VMA. That should be OK, because that vma shouldn't be 2255e8351ac9SJoonsoo Kim * LOCKED. 2256e9995ef9SHugh Dickins */ 22571df631aeSMinchan Kim static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, 2258b9773199SKirill A. Shutemov bool locked) 2259e9995ef9SHugh Dickins { 2260e9995ef9SHugh Dickins struct anon_vma *anon_vma; 2261a8fa41adSKirill A. Shutemov pgoff_t pgoff_start, pgoff_end; 22625beb4930SRik van Riel struct anon_vma_chain *avc; 2263e9995ef9SHugh Dickins 2264b9773199SKirill A. Shutemov if (locked) { 2265b9773199SKirill A. Shutemov anon_vma = page_anon_vma(page); 2266b9773199SKirill A. Shutemov /* anon_vma disappear under us? */ 2267b9773199SKirill A. Shutemov VM_BUG_ON_PAGE(!anon_vma, page); 2268b9773199SKirill A. Shutemov } else { 22690dd1c7bbSJoonsoo Kim anon_vma = rmap_walk_anon_lock(page, rwc); 2270b9773199SKirill A. Shutemov } 2271e9995ef9SHugh Dickins if (!anon_vma) 22721df631aeSMinchan Kim return; 2273faecd8ddSJoonsoo Kim 2274a8fa41adSKirill A. Shutemov pgoff_start = page_to_pgoff(page); 22756c357848SMatthew Wilcox (Oracle) pgoff_end = pgoff_start + thp_nr_pages(page) - 1; 2276a8fa41adSKirill A. Shutemov anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, 2277a8fa41adSKirill A. Shutemov pgoff_start, pgoff_end) { 22785beb4930SRik van Riel struct vm_area_struct *vma = avc->vma; 2279e9995ef9SHugh Dickins unsigned long address = vma_address(page, vma); 22800dd1c7bbSJoonsoo Kim 2281494334e4SHugh Dickins VM_BUG_ON_VMA(address == -EFAULT, vma); 2282ad12695fSAndrea Arcangeli cond_resched(); 2283ad12695fSAndrea Arcangeli 22840dd1c7bbSJoonsoo Kim if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 22850dd1c7bbSJoonsoo Kim continue; 22860dd1c7bbSJoonsoo Kim 2287e4b82222SMinchan Kim if (!rwc->rmap_one(page, vma, address, rwc->arg)) 2288e9995ef9SHugh Dickins break; 22890dd1c7bbSJoonsoo Kim if (rwc->done && rwc->done(page)) 22900dd1c7bbSJoonsoo Kim break; 2291e9995ef9SHugh Dickins } 2292b9773199SKirill A. Shutemov 2293b9773199SKirill A. 
Shutemov if (!locked) 22944fc3f1d6SIngo Molnar anon_vma_unlock_read(anon_vma); 2295e9995ef9SHugh Dickins } 2296e9995ef9SHugh Dickins 2297e8351ac9SJoonsoo Kim /* 2298e8351ac9SJoonsoo Kim * rmap_walk_file - do something to file page using the object-based rmap method 2299e8351ac9SJoonsoo Kim * @page: the page to be handled 2300e8351ac9SJoonsoo Kim * @rwc: control variable according to each walk type 2301e8351ac9SJoonsoo Kim * 2302e8351ac9SJoonsoo Kim * Find all the mappings of a page using the mapping pointer and the vma chains 2303e8351ac9SJoonsoo Kim * contained in the address_space struct it points to. 2304e8351ac9SJoonsoo Kim * 2305cd62734cSAlistair Popple * When called from page_mlock(), the mmap_lock of the mm containing the vma 2306e8351ac9SJoonsoo Kim * where the page was found will be held for write. So, we won't recheck 2307e8351ac9SJoonsoo Kim * vm_flags for that VMA. That should be OK, because that vma shouldn't be 2308e8351ac9SJoonsoo Kim * LOCKED. 2309e8351ac9SJoonsoo Kim */ 23101df631aeSMinchan Kim static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, 2311b9773199SKirill A. Shutemov bool locked) 2312e9995ef9SHugh Dickins { 2313b9773199SKirill A. Shutemov struct address_space *mapping = page_mapping(page); 2314a8fa41adSKirill A. Shutemov pgoff_t pgoff_start, pgoff_end; 2315e9995ef9SHugh Dickins struct vm_area_struct *vma; 2316e9995ef9SHugh Dickins 23179f32624bSJoonsoo Kim /* 23189f32624bSJoonsoo Kim * The page lock not only makes sure that page->mapping cannot 23199f32624bSJoonsoo Kim * suddenly be NULLified by truncation, it makes sure that the 23209f32624bSJoonsoo Kim * structure at mapping cannot be freed and reused yet, 2321c8c06efaSDavidlohr Bueso * so we can safely take mapping->i_mmap_rwsem. 23229f32624bSJoonsoo Kim */ 232381d1b09cSSasha Levin VM_BUG_ON_PAGE(!PageLocked(page), page); 23249f32624bSJoonsoo Kim 2325e9995ef9SHugh Dickins if (!mapping) 23261df631aeSMinchan Kim return; 23273dec0ba0SDavidlohr Bueso 2328a8fa41adSKirill A. Shutemov pgoff_start = page_to_pgoff(page); 23296c357848SMatthew Wilcox (Oracle) pgoff_end = pgoff_start + thp_nr_pages(page) - 1; 2330b9773199SKirill A. Shutemov if (!locked) 23313dec0ba0SDavidlohr Bueso i_mmap_lock_read(mapping); 2332a8fa41adSKirill A. Shutemov vma_interval_tree_foreach(vma, &mapping->i_mmap, 2333a8fa41adSKirill A. Shutemov pgoff_start, pgoff_end) { 2334e9995ef9SHugh Dickins unsigned long address = vma_address(page, vma); 23350dd1c7bbSJoonsoo Kim 2336494334e4SHugh Dickins VM_BUG_ON_VMA(address == -EFAULT, vma); 2337ad12695fSAndrea Arcangeli cond_resched(); 2338ad12695fSAndrea Arcangeli 23390dd1c7bbSJoonsoo Kim if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 23400dd1c7bbSJoonsoo Kim continue; 23410dd1c7bbSJoonsoo Kim 2342e4b82222SMinchan Kim if (!rwc->rmap_one(page, vma, address, rwc->arg)) 23430dd1c7bbSJoonsoo Kim goto done; 23440dd1c7bbSJoonsoo Kim if (rwc->done && rwc->done(page)) 23450dd1c7bbSJoonsoo Kim goto done; 2346e9995ef9SHugh Dickins } 23470dd1c7bbSJoonsoo Kim 23480dd1c7bbSJoonsoo Kim done: 2349b9773199SKirill A. 
Shutemov if (!locked) 23503dec0ba0SDavidlohr Bueso i_mmap_unlock_read(mapping); 2351e9995ef9SHugh Dickins } 2352e9995ef9SHugh Dickins 23531df631aeSMinchan Kim void rmap_walk(struct page *page, struct rmap_walk_control *rwc) 2354e9995ef9SHugh Dickins { 2355e9995ef9SHugh Dickins if (unlikely(PageKsm(page))) 23561df631aeSMinchan Kim rmap_walk_ksm(page, rwc); 2357e9995ef9SHugh Dickins else if (PageAnon(page)) 23581df631aeSMinchan Kim rmap_walk_anon(page, rwc, false); 2359e9995ef9SHugh Dickins else 23601df631aeSMinchan Kim rmap_walk_file(page, rwc, false); 2361b9773199SKirill A. Shutemov } 2362b9773199SKirill A. Shutemov 2363b9773199SKirill A. Shutemov /* Like rmap_walk, but caller holds relevant rmap lock */ 23641df631aeSMinchan Kim void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc) 2365b9773199SKirill A. Shutemov { 2366b9773199SKirill A. Shutemov /* no ksm support for now */ 2367b9773199SKirill A. Shutemov VM_BUG_ON_PAGE(PageKsm(page), page); 2368b9773199SKirill A. Shutemov if (PageAnon(page)) 23691df631aeSMinchan Kim rmap_walk_anon(page, rwc, true); 2370b9773199SKirill A. Shutemov else 23711df631aeSMinchan Kim rmap_walk_file(page, rwc, true); 2372e9995ef9SHugh Dickins } 23730fe6e20bSNaoya Horiguchi 2374e3390f67SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE 23750fe6e20bSNaoya Horiguchi /* 2376451b9514SKirill Tkhai * The following two functions are for anonymous (private mapped) hugepages. 23770fe6e20bSNaoya Horiguchi * Unlike common anonymous pages, anonymous hugepages have no accounting code 23780fe6e20bSNaoya Horiguchi * and no lru code, because we handle hugepages differently from common pages. 23790fe6e20bSNaoya Horiguchi */ 23800fe6e20bSNaoya Horiguchi void hugepage_add_anon_rmap(struct page *page, 23810fe6e20bSNaoya Horiguchi struct vm_area_struct *vma, unsigned long address) 23820fe6e20bSNaoya Horiguchi { 23830fe6e20bSNaoya Horiguchi struct anon_vma *anon_vma = vma->anon_vma; 23840fe6e20bSNaoya Horiguchi int first; 2385a850ea30SNaoya Horiguchi 2386a850ea30SNaoya Horiguchi BUG_ON(!PageLocked(page)); 23870fe6e20bSNaoya Horiguchi BUG_ON(!anon_vma); 23885dbe0af4SHugh Dickins /* address might be in next vma when migration races vma_adjust */ 238953f9263bSKirill A. Shutemov first = atomic_inc_and_test(compound_mapcount_ptr(page)); 23900fe6e20bSNaoya Horiguchi if (first) 2391451b9514SKirill Tkhai __page_set_anon_rmap(page, vma, address, 0); 23920fe6e20bSNaoya Horiguchi } 23930fe6e20bSNaoya Horiguchi 23940fe6e20bSNaoya Horiguchi void hugepage_add_new_anon_rmap(struct page *page, 23950fe6e20bSNaoya Horiguchi struct vm_area_struct *vma, unsigned long address) 23960fe6e20bSNaoya Horiguchi { 23970fe6e20bSNaoya Horiguchi BUG_ON(address < vma->vm_start || address >= vma->vm_end); 239853f9263bSKirill A. Shutemov atomic_set(compound_mapcount_ptr(page), 0); 239947e29d32SJohn Hubbard if (hpage_pincount_available(page)) 240047e29d32SJohn Hubbard atomic_set(compound_pincount_ptr(page), 0); 240147e29d32SJohn Hubbard 2402451b9514SKirill Tkhai __page_set_anon_rmap(page, vma, address, 1); 24030fe6e20bSNaoya Horiguchi } 2404e3390f67SNaoya Horiguchi #endif /* CONFIG_HUGETLB_PAGE */ 2405
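/*
 * Illustrative sketch only: a minimal example of the driver-side usage pattern
 * described in the make_device_exclusive_range() comment above, not code from
 * mm/rmap.c or any real driver. struct my_device, my_device_map_page_atomic()
 * and notifier_lock are invented placeholders for whatever a real driver (e.g.
 * a GPU implementing on-device atomics) provides; such a driver would normally
 * also register an mmu_interval_notifier so it can detect invalidation and
 * retry the whole sequence.
 */
static int my_driver_make_atomic(struct my_device *dev, struct mm_struct *mm,
				 unsigned long addr)
{
	struct page *page = NULL;
	long ret;

	/* The GUP call inside make_device_exclusive_range() needs mmap_lock. */
	mmap_read_lock(mm);
	ret = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
					  &page, dev);
	mmap_read_unlock(mm);
	if (ret <= 0 || !page)
		return -EBUSY;	/* no page, or it could not be made exclusive */

	/*
	 * Program the device mapping under a device-specific lock which the
	 * driver's MMU notifier callback also takes, so the exclusive entry
	 * cannot be revoked while it is being programmed.
	 */
	mutex_lock(&dev->notifier_lock);
	my_device_map_page_atomic(dev, addr, page);	/* hypothetical helper */
	mutex_unlock(&dev->notifier_lock);

	/*
	 * Dropping the page lock and reference re-arms revocation: the next
	 * CPU fault on this address restores the original pte, calling the
	 * MMU notifiers so the driver can tear down its mapping first.
	 */
	unlock_page(page);
	put_page(page);
	return 0;
}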