/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_rwsem	(while writing or truncating, not reading or faulting)
 *   mm->mmap_lock
 *     mapping->invalidate_lock (in filemap_fault)
 *       page->flags PG_locked (lock_page)   * (see hugetlbfs below)
 *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 *           mapping->i_mmap_rwsem
 *             hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *             anon_vma->rwsem
 *               mm->page_table_lock or pte_lock
 *                 swap_lock (in swap_duplicate, swap_info_get)
 *                   mmlist_lock (in mmput, drain_mmlist and others)
 *                   mapping->private_lock (in block_dirty_folio)
 *                     folio_lock_memcg move_lock (in block_dirty_folio)
 *                       i_pages lock (widely used)
 *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
 *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                     sb_lock (within inode_lock in fs/fs-writeback.c)
 *                     i_pages lock (widely used, in set_page_dirty,
 *                               in arch-dependent flush_dcache_mmap_lock,
 *                               within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 *
 * * hugetlbfs PageHuge() pages take locks in this order:
 *         mapping->i_mmap_rwsem
 *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *             page->flags PG_locked (lock_page)
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>
#include <trace/events/migrate.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->degree = 1;	/* Reference for first vma */
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against folio_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
	 *
	 * folio_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
 * and that may actually touch the rwsem even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_lock held for reading.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		/* vma reference or self-parent link for new root */
		anon_vma->degree++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
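/*
 * For reference: the common-case fast path lives in the inline wrapper in
 * include/linux/rmap.h, which (roughly, at the time of writing) only calls
 * into __anon_vma_prepare() when the vma has no anon_vma yet:
 *
 *	static inline int anon_vma_prepare(struct vm_area_struct *vma)
 *	{
 *		if (likely(vma->anon_vma))
 *			return 0;
 *
 *		return __anon_vma_prepare(vma);
 *	}
 *
 * so the locking described above is only paid on the first anonymous fault
 * in a vma.
 */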
/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * anon_vma_clone() is called by __vma_adjust(), __split_vma(), copy_vma() and
 * anon_vma_fork(). The first three want an exact copy of src, while the last
 * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
 * endless growth of anon_vma. Since dst->anon_vma is set to NULL before call,
 * we can identify this case by checking (!dst->anon_vma && src->anon_vma).
 *
 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
 * and reuse an existing anon_vma which has no vmas and only one child
 * anon_vma. This prevents degradation of the anon_vma hierarchy to an endless
 * linear chain in the case of a constantly forking task. On the other hand,
 * an anon_vma with more than one child isn't reused even if there was no
 * alive vma, thus the rmap walker has a good chance of avoiding scanning the
 * whole hierarchy when it searches where the page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse existing anon_vma if its degree is lower than two,
		 * which means it has no vma and only one anon_vma child.
		 *
		 * Do not choose the parent anon_vma, otherwise the first child
		 * will always reuse it. The root anon_vma is never reused:
		 * it has a self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && src->anon_vma &&
		    anon_vma != src->anon_vma && anon_vma->degree < 2)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->degree++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
	 * decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}
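/*
 * Illustrative scenario (not kernel code): why the "degree < 2" test keeps a
 * fork bomb from building an endless chain. Suppose process A forks B and B
 * forks C, after which all of B's vmas using B's own anon_vma go away. That
 * anon_vma now has no vma and a single child (C's), i.e. degree == 1, so a
 * later fork can reuse it instead of hanging yet another level below it. An
 * anon_vma with two or more children is deliberately not reused, which keeps
 * the tree bushy and rmap walks short.
 */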
/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->degree++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}
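/*
 * For context, a hedged sketch of the caller side: on fork, dup_mmap() in
 * kernel/fork.c walks the parent's vma list and calls anon_vma_fork() for
 * each copied vma, roughly:
 *
 *	for each vma mpnt in the parent mm {
 *		tmp = duplicate of mpnt for the child;
 *		if (anon_vma_fork(tmp, mpnt))
 *			goto fail;	// unwind and return -ENOMEM
 *		...
 *	}
 *
 * so every child vma ends up linked both to its own anon_vma and, via
 * anon_vma_chain entries, to all of its ancestors' anon_vmas.
 */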
void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->degree--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma) {
		vma->anon_vma->degree--;

		/*
		 * vma would still be needed after unlink, and the anon_vma
		 * will be prepared again when a fault is handled.
		 */
		vma->anon_vma = NULL;
	}
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->degree);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a refcount increased anon_vma
 * that might have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}
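/*
 * Typical (sketched) caller pattern, e.g. in page migration: take the
 * reference here, do work that may sleep, then drop it explicitly:
 *
 *	anon_vma = page_get_anon_vma(page);
 *	if (anon_vma) {
 *		... walk or lock the anon_vma as needed ...
 *		put_anon_vma(anon_vma);
 *	}
 *
 * The pinned anon_vma may be stale, so callers must re-check page_mapped()
 * under the appropriate lock before trusting any rmap they find through it.
 */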
/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the folio is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!folio_mapped(folio)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}
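/*
 * Sketch of the expected pairing (as used by the rmap walk for anonymous
 * folios): a successful folio_lock_anon_vma_read() returns with the root
 * rwsem read-locked, and must be matched by page_unlock_anon_vma_read():
 *
 *	anon_vma = folio_lock_anon_vma_read(folio);
 *	if (!anon_vma)
 *		return;		// no longer anonymous or no longer mapped
 *	... iterate the interval tree under the read lock ...
 *	page_unlock_anon_vma_read(anon_vma);
 */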
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

/*
 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
 */
#define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
#define TLB_FLUSH_BATCH_PENDING_MASK	\
	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
#define TLB_FLUSH_BATCH_PENDING_LARGE	\
	(TLB_FLUSH_BATCH_PENDING_MASK / 2)

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	int batch, nbatch;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure the compiler does not re-order the setting of
	 * tlb_flush_batched before the PTE is cleared.
	 */
	barrier();
	batch = atomic_read(&mm->tlb_flush_batched);
retry:
	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
		/*
		 * Prevent `pending' from catching up with `flushed' because of
		 * overflow. Reset `pending' and `flushed' to be 1 and 0 if
		 * `pending' becomes large.
		 */
		nbatch = atomic_cmpxchg(&mm->tlb_flush_batched, batch, 1);
		if (nbatch != batch) {
			batch = nbatch;
			goto retry;
		}
	} else {
		atomic_inc(&mm->tlb_flush_batched);
	}

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer and batch the flush */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	int batch = atomic_read(&mm->tlb_flush_batched);
	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;

	if (pending != flushed) {
		flush_tlb_mm(mm);
		/*
		 * If new TLB flushing becomes pending during flushing, leave
		 * mm->tlb_flush_batched as is, to avoid losing flushing.
		 */
		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
	}
}
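/*
 * Worked example of the generation check above (illustrative only): if
 * reclaim has deferred three batches and only one has been flushed, then
 * mm->tlb_flush_batched holds (1 << 16) | 3, so pending == 3 and
 * flushed == 1, and flush_tlb_batched_pending() issues flush_tlb_mm()
 * before the caller modifies the page tables. Afterwards the cmpxchg stores
 * 3 | (3 << 16), marking all three pending generations as flushed; if a new
 * unmap raced in and bumped the counter, the cmpxchg fails and the state is
 * deliberately left alone so the new pending generation is not lost.
 */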
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct folio *folio = page_folio(page);
	if (folio_test_anon(folio)) {
		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (!vma->vm_file) {
		return -EFAULT;
	} else if (vma->vm_file->f_mapping != folio->mapping) {
		return -EFAULT;
	}

	return vma_address(page, vma);
}

pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(),
	 * set_pmd_at() without holding the anon_vma lock for write. So when
	 * looking for a genuine pmde (in which to find a pte), test present
	 * and !THP together.
	 */
	pmde = *pmd;
	barrier();
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}

struct folio_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: folio_referenced_arg will be passed
 */
static bool folio_referenced_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address, void *arg)
{
	struct folio_referenced_arg *pra = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if ((vma->vm_flags & VM_LOCKED) &&
		    (!folio_test_large(folio) || !pvmw.pte)) {
			/* Restore the mlock which got missed */
			mlock_vma_folio(folio, vma, !pvmw.pte);
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false; /* To break the loop */
		}

		if (pvmw.pte) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the folio has been used in another mapping,
				 * we will catch it; if this other mapping is
				 * already gone, the unmap path will have set
				 * the referenced flag or activated the folio.
				 */
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped folio? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		folio_clear_idle(folio);
	if (folio_test_clear_young(folio))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
	}

	if (!pra->mapcount)
		return false; /* To break the loop */

	return true;
}

static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct folio_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}
/**
 * folio_referenced() - Test if the folio was referenced.
 * @folio: The folio to test.
 * @is_locked: Caller holds lock on the folio.
 * @memcg: target memory cgroup
 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
 *
 * Quick test_and_clear_referenced for all mappings of a folio.
 *
 * Return: The number of mappings which referenced the folio.
 */
int folio_referenced(struct folio *folio, int is_locked,
		     struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	int we_locked = 0;
	struct folio_referenced_arg pra = {
		.mapcount = folio_mapcount(folio),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = folio_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = folio_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!pra.mapcount)
		return 0;

	if (!folio_raw_mapping(folio))
		return 0;

	if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
		we_locked = folio_trylock(folio);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups.
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_folio_referenced_vma;
	}

	rmap_walk(folio, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		folio_unlock(folio);

	return pra.referenced;
}
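/*
 * Hypothetical sketch (not part of this file): folio_referenced() above is
 * the canonical rmap_walk() client. A minimal custom walker follows the same
 * shape -- a per-walk argument struct, an rmap_one callback returning false
 * to stop early, and an anon_lock hook:
 *
 *	struct count_arg { int vmas; };
 *
 *	static bool count_one(struct folio *folio, struct vm_area_struct *vma,
 *			unsigned long address, void *arg)
 *	{
 *		struct count_arg *ca = arg;
 *
 *		ca->vmas++;
 *		return true;	// keep walking
 *	}
 *
 *	// caller, with the folio locked:
 *	struct count_arg ca = { 0 };
 *	struct rmap_walk_control rwc = {
 *		.rmap_one = count_one,
 *		.arg = &ca,
 *		.anon_lock = folio_lock_anon_vma_read,
 *	};
 *	rmap_walk(folio, &rwc);
 *
 * count_arg, count_one and ca are invented names for illustration only.
 */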
static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
{
	int cleaned = 0;
	struct vm_area_struct *vma = pvmw->vma;
	struct mmu_notifier_range range;
	unsigned long address = pvmw->address;

	/*
	 * We have to assume the worst case, i.e. pmd, for invalidation.
	 * Note that the folio can not be freed from this function.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
				0, vma, vma->vm_mm, address,
				vma_address_end(pvmw));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(pvmw)) {
		int ret = 0;

		address = pvmw->address;
		if (pvmw->pte) {
			pte_t entry;
			pte_t *pte = pvmw->pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			pmd_t *pmd = pvmw->pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_range(vma, address,
					  address + HPAGE_PMD_SIZE);
			entry = pmdp_invalidate(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped folio? */
			WARN_ON_ONCE(1);
#endif
		}

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (ret)
			cleaned++;
	}

	mmu_notifier_invalidate_range_end(&range);

	return cleaned;
}

static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
	int *cleaned = arg;

	*cleaned += page_vma_mkclean_one(&pvmw);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int folio_mkclean(struct folio *folio)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!folio_test_locked(folio));

	if (!folio_mapped(folio))
		return 0;

	mapping = folio_mapping(folio);
	if (!mapping)
		return 0;

	rmap_walk(folio, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(folio_mkclean);
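/*
 * For context (hedged sketch of a caller): the writeback path uses
 * folio_mkclean() when transferring dirty state from PTEs to the folio,
 * roughly:
 *
 *	if (folio_mkclean(folio))
 *		folio_mark_dirty(folio);
 *
 * so a write through a still-writable PTE after the dirty flag was cleared
 * re-dirties the folio rather than being lost.
 */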
/**
 * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of
 *                     [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff)
 *                     within the @vma of shared mappings. And since clean PTEs
 *                     should also be readonly, write protects them too.
 * @pfn: start pfn.
 * @nr_pages: number of physically contiguous pages starting with @pfn.
 * @pgoff: page offset that the @pfn mapped with.
 * @vma: vma that @pfn mapped within.
 *
 * Returns the number of cleaned PTEs (including PMDs).
 */
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
		      struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn		= pfn,
		.nr_pages	= nr_pages,
		.pgoff		= pgoff,
		.vma		= vma,
		.flags		= PVMW_SYNC,
	};

	if (invalid_mkclean_vma(vma, NULL))
		return 0;

	pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma);
	VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);

	return page_vma_mkclean_one(&pvmw);
}

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	struct page *subpage = page;

	page = compound_head(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg folio_referenced()'s
	 * folio_test_anon()) will not see one without the other.
	 */
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
	SetPageAnonExclusive(subpage);
}
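/*
 * Hedged caller-side note: the write-protect fault path calls
 * page_move_anon_rmap() when it decides it can reuse an anonymous page
 * instead of copying it (roughly, in do_wp_page() once the page is known
 * to be mapped exclusively), so that later rmap walks stay within this
 * process's own anon_vma.
 */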
11124e1c1975SAndi Kleen * @address: User virtual address of the mapping 1113e8a03febSRik van Riel * @exclusive: the page is exclusively owned by the current process 11141da177e4SLinus Torvalds */ 11159617d95eSNick Piggin static void __page_set_anon_rmap(struct page *page, 1116e8a03febSRik van Riel struct vm_area_struct *vma, unsigned long address, int exclusive) 11171da177e4SLinus Torvalds { 1118e8a03febSRik van Riel struct anon_vma *anon_vma = vma->anon_vma; 11192822c1aaSNick Piggin 1120e8a03febSRik van Riel BUG_ON(!anon_vma); 1121ea90002bSLinus Torvalds 11224e1c1975SAndi Kleen if (PageAnon(page)) 11236c287605SDavid Hildenbrand goto out; 11244e1c1975SAndi Kleen 1125ea90002bSLinus Torvalds /* 1126e8a03febSRik van Riel * If the page isn't exclusively mapped into this vma, 1127e8a03febSRik van Riel * we must use the _oldest_ possible anon_vma for the 1128e8a03febSRik van Riel * page mapping! 1129ea90002bSLinus Torvalds */ 11304e1c1975SAndi Kleen if (!exclusive) 1131288468c3SAndrea Arcangeli anon_vma = anon_vma->root; 1132ea90002bSLinus Torvalds 113316f5e707SAlex Shi /* 113416f5e707SAlex Shi * page_idle does a lockless/optimistic rmap scan on page->mapping. 113516f5e707SAlex Shi * Make sure the compiler doesn't split the stores of anon_vma and 113616f5e707SAlex Shi * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code 113716f5e707SAlex Shi * could mistake the mapping for a struct address_space and crash. 113816f5e707SAlex Shi */ 11391da177e4SLinus Torvalds anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 114016f5e707SAlex Shi WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); 11414d7670e0SNick Piggin page->index = linear_page_index(vma, address); 11426c287605SDavid Hildenbrand out: 11436c287605SDavid Hildenbrand if (exclusive) 11446c287605SDavid Hildenbrand SetPageAnonExclusive(page); 11451da177e4SLinus Torvalds } 11469617d95eSNick Piggin 11479617d95eSNick Piggin /** 114843d8eac4SRandy Dunlap * __page_check_anon_rmap - sanity check anonymous rmap addition 1149c97a9e10SNick Piggin * @page: the page to add the mapping to 1150c97a9e10SNick Piggin * @vma: the vm area in which the mapping is added 1151c97a9e10SNick Piggin * @address: the user virtual address mapped 1152c97a9e10SNick Piggin */ 1153c97a9e10SNick Piggin static void __page_check_anon_rmap(struct page *page, 1154c97a9e10SNick Piggin struct vm_area_struct *vma, unsigned long address) 1155c97a9e10SNick Piggin { 1156e05b3453SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 1157c97a9e10SNick Piggin /* 1158c97a9e10SNick Piggin * The page's anon-rmap details (mapping and index) are guaranteed to 1159c97a9e10SNick Piggin * be set up correctly at this point. 1160c97a9e10SNick Piggin * 1161c97a9e10SNick Piggin * We have exclusion against page_add_anon_rmap because the caller 116290aaca85SMiaohe Lin * always holds the page locked. 1163c97a9e10SNick Piggin * 1164c97a9e10SNick Piggin * We have exclusion against page_add_new_anon_rmap because those pages 1165c97a9e10SNick Piggin * are initially only visible via the pagetables, and the pte is locked 1166c97a9e10SNick Piggin * over the call to page_add_new_anon_rmap. 
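	 *
	 * (Restating what the checks below enforce: page->mapping must
	 * already point at an anon_vma sharing vma->anon_vma's root, and
	 * page->index must equal linear_page_index(vma, address).)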
1167c97a9e10SNick Piggin */ 1168e05b3453SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, 1169e05b3453SMatthew Wilcox (Oracle) folio); 117030c46382SYang Shi VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), 117130c46382SYang Shi page); 1172c97a9e10SNick Piggin } 1173c97a9e10SNick Piggin 1174c97a9e10SNick Piggin /** 11759617d95eSNick Piggin * page_add_anon_rmap - add pte mapping to an anonymous page 11769617d95eSNick Piggin * @page: the page to add the mapping to 11779617d95eSNick Piggin * @vma: the vm area in which the mapping is added 11789617d95eSNick Piggin * @address: the user virtual address mapped 1179f1e2db12SDavid Hildenbrand * @flags: the rmap flags 11809617d95eSNick Piggin * 11815ad64688SHugh Dickins * The caller needs to hold the pte lock, and the page must be locked in 118280e14822SHugh Dickins * the anon_vma case: to serialize mapping/index checking after setting, 118380e14822SHugh Dickins * and to ensure that PageAnon is not being upgraded racily to PageKsm 118480e14822SHugh Dickins * (but PageKsm is never downgraded to PageAnon). 11859617d95eSNick Piggin */ 11869617d95eSNick Piggin void page_add_anon_rmap(struct page *page, 118714f9135dSDavid Hildenbrand struct vm_area_struct *vma, unsigned long address, rmap_t flags) 1188ad8c2ee8SRik van Riel { 1189d281ee61SKirill A. Shutemov bool compound = flags & RMAP_COMPOUND; 119053f9263bSKirill A. Shutemov bool first; 119153f9263bSKirill A. Shutemov 1192be5d0a74SJohannes Weiner if (unlikely(PageKsm(page))) 1193be5d0a74SJohannes Weiner lock_page_memcg(page); 1194be5d0a74SJohannes Weiner else 1195be5d0a74SJohannes Weiner VM_BUG_ON_PAGE(!PageLocked(page), page); 1196be5d0a74SJohannes Weiner 119753f9263bSKirill A. Shutemov if (compound) { 119853f9263bSKirill A. Shutemov atomic_t *mapcount; 1199e9b61f19SKirill A. Shutemov VM_BUG_ON_PAGE(!PageLocked(page), page); 120053f9263bSKirill A. Shutemov VM_BUG_ON_PAGE(!PageTransHuge(page), page); 120153f9263bSKirill A. Shutemov mapcount = compound_mapcount_ptr(page); 120253f9263bSKirill A. Shutemov first = atomic_inc_and_test(mapcount); 120353f9263bSKirill A. Shutemov } else { 120453f9263bSKirill A. Shutemov first = atomic_inc_and_test(&page->_mapcount); 120553f9263bSKirill A. Shutemov } 12066c287605SDavid Hildenbrand VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); 12076c287605SDavid Hildenbrand VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); 120853f9263bSKirill A. Shutemov 120953f9263bSKirill A. Shutemov if (first) { 12106c357848SMatthew Wilcox (Oracle) int nr = compound ? thp_nr_pages(page) : 1; 1211bea04b07SJianyu Zhan /* 1212bea04b07SJianyu Zhan * We use the irq-unsafe __{inc|mod}_zone_page_stat because 1213bea04b07SJianyu Zhan * these counters are not modified in interrupt context, and 1214bea04b07SJianyu Zhan * pte lock (a spinlock) is held, which implies preemption 1215bea04b07SJianyu Zhan * disabled. 1216bea04b07SJianyu Zhan */ 121765c45377SKirill A. Shutemov if (compound) 121869473e5dSMuchun Song __mod_lruvec_page_state(page, NR_ANON_THPS, nr); 1219be5d0a74SJohannes Weiner __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); 122079134171SAndrea Arcangeli } 12215ad64688SHugh Dickins 1222cea86fe2SHugh Dickins if (unlikely(PageKsm(page))) 1223be5d0a74SJohannes Weiner unlock_page_memcg(page); 122453f9263bSKirill A. Shutemov 12255dbe0af4SHugh Dickins /* address might be in next vma when migration races vma_adjust */ 1226cea86fe2SHugh Dickins else if (first) 1227d281ee61SKirill A.
Shutemov __page_set_anon_rmap(page, vma, address, 122814f9135dSDavid Hildenbrand !!(flags & RMAP_EXCLUSIVE)); 122969029cd5SKAMEZAWA Hiroyuki else 1230c97a9e10SNick Piggin __page_check_anon_rmap(page, vma, address); 1231cea86fe2SHugh Dickins 1232cea86fe2SHugh Dickins mlock_vma_page(page, vma, compound); 12331da177e4SLinus Torvalds } 12341da177e4SLinus Torvalds 123543d8eac4SRandy Dunlap /** 123640f2bbf7SDavid Hildenbrand * page_add_new_anon_rmap - add mapping to a new anonymous page 12379617d95eSNick Piggin * @page: the page to add the mapping to 12389617d95eSNick Piggin * @vma: the vm area in which the mapping is added 12399617d95eSNick Piggin * @address: the user virtual address mapped 124040f2bbf7SDavid Hildenbrand * 124140f2bbf7SDavid Hildenbrand * If it's a compound page, it is accounted as a compound page. As the page 124240f2bbf7SDavid Hildenbrand * is new, it's assumed to be mapped exclusively by a single process. 12439617d95eSNick Piggin * 12449617d95eSNick Piggin * Same as page_add_anon_rmap but must only be called on *new* pages. 12459617d95eSNick Piggin * This means the inc-and-test can be bypassed. 1246c97a9e10SNick Piggin * Page does not have to be locked. 12479617d95eSNick Piggin */ 12489617d95eSNick Piggin void page_add_new_anon_rmap(struct page *page, 124940f2bbf7SDavid Hildenbrand struct vm_area_struct *vma, unsigned long address) 12509617d95eSNick Piggin { 125140f2bbf7SDavid Hildenbrand const bool compound = PageCompound(page); 12526c357848SMatthew Wilcox (Oracle) int nr = compound ? thp_nr_pages(page) : 1; 1253d281ee61SKirill A. Shutemov 125481d1b09cSSasha Levin VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); 1255fa9949daSHugh Dickins __SetPageSwapBacked(page); 1256d281ee61SKirill A. Shutemov if (compound) { 1257d281ee61SKirill A. Shutemov VM_BUG_ON_PAGE(!PageTransHuge(page), page); 125853f9263bSKirill A. Shutemov /* increment count (starts at -1) */ 125953f9263bSKirill A. Shutemov atomic_set(compound_mapcount_ptr(page), 0); 126047e29d32SJohn Hubbard atomic_set(compound_pincount_ptr(page), 0); 126147e29d32SJohn Hubbard 126269473e5dSMuchun Song __mod_lruvec_page_state(page, NR_ANON_THPS, nr); 126353f9263bSKirill A. Shutemov } else { 126453f9263bSKirill A. Shutemov /* increment count (starts at -1) */ 126553f9263bSKirill A. Shutemov atomic_set(&page->_mapcount, 0); 1266d281ee61SKirill A. Shutemov } 1267be5d0a74SJohannes Weiner __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); 1268e8a03febSRik van Riel __page_set_anon_rmap(page, vma, address, 1); 12699617d95eSNick Piggin } 12709617d95eSNick Piggin 12711da177e4SLinus Torvalds /** 12721da177e4SLinus Torvalds * page_add_file_rmap - add pte mapping to a file page 12731da177e4SLinus Torvalds * @page: the page to add the mapping to 1274cea86fe2SHugh Dickins * @vma: the vm area in which the mapping is added 1275e8b098fcSMike Rapoport * @compound: charge the page as compound or small page 12761da177e4SLinus Torvalds * 1277b8072f09SHugh Dickins * The caller needs to hold the pte lock. 12781da177e4SLinus Torvalds */ 1279cea86fe2SHugh Dickins void page_add_file_rmap(struct page *page, 1280cea86fe2SHugh Dickins struct vm_area_struct *vma, bool compound) 12811da177e4SLinus Torvalds { 12825d543f13SHugh Dickins int i, nr = 0; 1283dd78feddSKirill A. Shutemov 1284dd78feddSKirill A. Shutemov VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); 128562cccb8cSJohannes Weiner lock_page_memcg(page); 1286dd78feddSKirill A.
Shutemov if (compound && PageTransHuge(page)) { 1287a1528e21SMuchun Song int nr_pages = thp_nr_pages(page); 1288a1528e21SMuchun Song 12895d543f13SHugh Dickins for (i = 0; i < nr_pages; i++) { 1290dd78feddSKirill A. Shutemov if (atomic_inc_and_test(&page[i]._mapcount)) 1291dd78feddSKirill A. Shutemov nr++; 1292d69b042fSBalbir Singh } 1293dd78feddSKirill A. Shutemov if (!atomic_inc_and_test(compound_mapcount_ptr(page))) 1294dd78feddSKirill A. Shutemov goto out; 1295bd55b0c2SHugh Dickins 1296bd55b0c2SHugh Dickins /* 1297bd55b0c2SHugh Dickins * It is racy to ClearPageDoubleMap in page_remove_file_rmap(); 1298bd55b0c2SHugh Dickins * but page lock is held by all page_add_file_rmap() compound 1299bd55b0c2SHugh Dickins * callers, and SetPageDoubleMap below warns if !PageLocked: 1300bd55b0c2SHugh Dickins * so here is a place that DoubleMap can be safely cleared. 1301bd55b0c2SHugh Dickins */ 1302bd55b0c2SHugh Dickins VM_WARN_ON_ONCE(!PageLocked(page)); 1303bd55b0c2SHugh Dickins if (nr == nr_pages && PageDoubleMap(page)) 1304bd55b0c2SHugh Dickins ClearPageDoubleMap(page); 1305bd55b0c2SHugh Dickins 130699cb0dbdSSong Liu if (PageSwapBacked(page)) 1307a1528e21SMuchun Song __mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED, 1308a1528e21SMuchun Song nr_pages); 130999cb0dbdSSong Liu else 1310380780e7SMuchun Song __mod_lruvec_page_state(page, NR_FILE_PMDMAPPED, 1311380780e7SMuchun Song nr_pages); 1312dd78feddSKirill A. Shutemov } else { 1313c8efc390SKirill A. Shutemov if (PageTransCompound(page) && page_mapping(page)) { 1314c8efc390SKirill A. Shutemov VM_WARN_ON_ONCE(!PageLocked(page)); 1315cea86fe2SHugh Dickins SetPageDoubleMap(compound_head(page)); 13169a73f61bSKirill A. Shutemov } 13175d543f13SHugh Dickins if (atomic_inc_and_test(&page->_mapcount)) 13185d543f13SHugh Dickins nr++; 1319dd78feddSKirill A. Shutemov } 1320dd78feddSKirill A. Shutemov out: 13215d543f13SHugh Dickins if (nr) 13225d543f13SHugh Dickins __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr); 132362cccb8cSJohannes Weiner unlock_page_memcg(page); 1324cea86fe2SHugh Dickins 1325cea86fe2SHugh Dickins mlock_vma_page(page, vma, compound); 13261da177e4SLinus Torvalds } 13271da177e4SLinus Torvalds 1328dd78feddSKirill A. Shutemov static void page_remove_file_rmap(struct page *page, bool compound) 13298186eb6aSJohannes Weiner { 13305d543f13SHugh Dickins int i, nr = 0; 1331dd78feddSKirill A. Shutemov 133257dea93aSSteve Capper VM_BUG_ON_PAGE(compound && !PageHead(page), page); 13338186eb6aSJohannes Weiner 133453f9263bSKirill A. Shutemov /* Hugepages are not counted in NR_FILE_MAPPED for now. */ 133553f9263bSKirill A. Shutemov if (unlikely(PageHuge(page))) { 133653f9263bSKirill A. Shutemov /* hugetlb pages are always mapped with pmds */ 133753f9263bSKirill A. Shutemov atomic_dec(compound_mapcount_ptr(page)); 1338be5d0a74SJohannes Weiner return; 133953f9263bSKirill A. Shutemov } 134053f9263bSKirill A. Shutemov 13418186eb6aSJohannes Weiner /* page still mapped by someone else? */ 1342dd78feddSKirill A. Shutemov if (compound && PageTransHuge(page)) { 1343a1528e21SMuchun Song int nr_pages = thp_nr_pages(page); 1344a1528e21SMuchun Song 13455d543f13SHugh Dickins for (i = 0; i < nr_pages; i++) { 1346dd78feddSKirill A. Shutemov if (atomic_add_negative(-1, &page[i]._mapcount)) 1347dd78feddSKirill A. Shutemov nr++; 1348dd78feddSKirill A. Shutemov } 1349dd78feddSKirill A. 
Shutemov if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) 13505d543f13SHugh Dickins goto out; 135199cb0dbdSSong Liu if (PageSwapBacked(page)) 1352a1528e21SMuchun Song __mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED, 1353a1528e21SMuchun Song -nr_pages); 135499cb0dbdSSong Liu else 1355380780e7SMuchun Song __mod_lruvec_page_state(page, NR_FILE_PMDMAPPED, 1356380780e7SMuchun Song -nr_pages); 1357dd78feddSKirill A. Shutemov } else { 13585d543f13SHugh Dickins if (atomic_add_negative(-1, &page->_mapcount)) 13595d543f13SHugh Dickins nr++; 1360dd78feddSKirill A. Shutemov } 13615d543f13SHugh Dickins out: 13625d543f13SHugh Dickins if (nr) 136300f3ca2cSJohannes Weiner __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr); 13648186eb6aSJohannes Weiner } 13658186eb6aSJohannes Weiner 136653f9263bSKirill A. Shutemov static void page_remove_anon_compound_rmap(struct page *page) 136753f9263bSKirill A. Shutemov { 136853f9263bSKirill A. Shutemov int i, nr; 136953f9263bSKirill A. Shutemov 137053f9263bSKirill A. Shutemov if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) 137153f9263bSKirill A. Shutemov return; 137253f9263bSKirill A. Shutemov 137353f9263bSKirill A. Shutemov /* Hugepages are not counted in NR_ANON_PAGES for now. */ 137453f9263bSKirill A. Shutemov if (unlikely(PageHuge(page))) 137553f9263bSKirill A. Shutemov return; 137653f9263bSKirill A. Shutemov 137753f9263bSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 137853f9263bSKirill A. Shutemov return; 137953f9263bSKirill A. Shutemov 138069473e5dSMuchun Song __mod_lruvec_page_state(page, NR_ANON_THPS, -thp_nr_pages(page)); 138153f9263bSKirill A. Shutemov 138253f9263bSKirill A. Shutemov if (TestClearPageDoubleMap(page)) { 138353f9263bSKirill A. Shutemov /* 138453f9263bSKirill A. Shutemov * Subpages can be mapped with PTEs too. Check how many of 1385f1fe80d4SKirill A. Shutemov * them are still mapped. 138653f9263bSKirill A. Shutemov */ 13875eaf35abSMatthew Wilcox (Oracle) for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { 138853f9263bSKirill A. Shutemov if (atomic_add_negative(-1, &page[i]._mapcount)) 138953f9263bSKirill A. Shutemov nr++; 139053f9263bSKirill A. Shutemov } 1391f1fe80d4SKirill A. Shutemov 1392f1fe80d4SKirill A. Shutemov /* 1393f1fe80d4SKirill A. Shutemov * Queue the page for deferred split if at least one small 1394f1fe80d4SKirill A. Shutemov * page of the compound page is unmapped, but at least one 1395f1fe80d4SKirill A. Shutemov * small page is still mapped. 1396f1fe80d4SKirill A. Shutemov */ 13975eaf35abSMatthew Wilcox (Oracle) if (nr && nr < thp_nr_pages(page)) 1398f1fe80d4SKirill A. Shutemov deferred_split_huge_page(page); 139953f9263bSKirill A. Shutemov } else { 14005eaf35abSMatthew Wilcox (Oracle) nr = thp_nr_pages(page); 140153f9263bSKirill A. Shutemov } 140253f9263bSKirill A. Shutemov 1403f1fe80d4SKirill A. Shutemov if (nr) 1404be5d0a74SJohannes Weiner __mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr); 140553f9263bSKirill A. Shutemov } 140653f9263bSKirill A. Shutemov 14071da177e4SLinus Torvalds /** 14081da177e4SLinus Torvalds * page_remove_rmap - take down pte mapping from a page 14091da177e4SLinus Torvalds * @page: page to remove mapping from 1410cea86fe2SHugh Dickins * @vma: the vm area from which the mapping is removed 1411d281ee61SKirill A. Shutemov * @compound: uncharge the page as compound or small page 14121da177e4SLinus Torvalds * 1413b8072f09SHugh Dickins * The caller needs to hold the pte lock. 
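 *
 * A hedged caller sketch (illustrative only; it mirrors the unmap paths in
 * this file, where page_vma_mapped_walk() takes the pte lock and "subpage"
 * is the page actually mapped by the pte):
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
 *		...
 *		page_remove_rmap(subpage, vma, false);
 *	}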
14141da177e4SLinus Torvalds */ 1415cea86fe2SHugh Dickins void page_remove_rmap(struct page *page, 1416cea86fe2SHugh Dickins struct vm_area_struct *vma, bool compound) 14171da177e4SLinus Torvalds { 1418be5d0a74SJohannes Weiner lock_page_memcg(page); 141989c06bd5SKAMEZAWA Hiroyuki 1420be5d0a74SJohannes Weiner if (!PageAnon(page)) { 1421be5d0a74SJohannes Weiner page_remove_file_rmap(page, compound); 1422be5d0a74SJohannes Weiner goto out; 1423be5d0a74SJohannes Weiner } 1424be5d0a74SJohannes Weiner 1425be5d0a74SJohannes Weiner if (compound) { 1426be5d0a74SJohannes Weiner page_remove_anon_compound_rmap(page); 1427be5d0a74SJohannes Weiner goto out; 1428be5d0a74SJohannes Weiner } 142953f9263bSKirill A. Shutemov 1430b904dcfeSKOSAKI Motohiro /* page still mapped by someone else? */ 1431b904dcfeSKOSAKI Motohiro if (!atomic_add_negative(-1, &page->_mapcount)) 1432be5d0a74SJohannes Weiner goto out; 14338186eb6aSJohannes Weiner 14341da177e4SLinus Torvalds /* 1435bea04b07SJianyu Zhan * We use the irq-unsafe __{inc|mod}_zone_page_stat because 1436bea04b07SJianyu Zhan * these counters are not modified in interrupt context, and 1437bea04b07SJianyu Zhan * pte lock (a spinlock) is held, which implies preemption disabled. 14380fe6e20bSNaoya Horiguchi */ 1439be5d0a74SJohannes Weiner __dec_lruvec_page_state(page, NR_ANON_MAPPED); 14408186eb6aSJohannes Weiner 14419a982250SKirill A. Shutemov if (PageTransCompound(page)) 14429a982250SKirill A. Shutemov deferred_split_huge_page(compound_head(page)); 14439a982250SKirill A. Shutemov 144416f8c5b2SHugh Dickins /* 14451da177e4SLinus Torvalds * It would be tidy to reset the PageAnon mapping here, 14461da177e4SLinus Torvalds * but that might overwrite a racing page_add_anon_rmap 14471da177e4SLinus Torvalds * which increments mapcount after us but sets mapping 14482d4894b5SMel Gorman * before us: so leave the reset to free_unref_page, 14491da177e4SLinus Torvalds * and remember that it's only reliable while mapped. 14501da177e4SLinus Torvalds * Leaving it set also helps swapoff to reinstate ptes 14511da177e4SLinus Torvalds * faster for those pages still in swapcache. 14521da177e4SLinus Torvalds */ 1453be5d0a74SJohannes Weiner out: 1454be5d0a74SJohannes Weiner unlock_page_memcg(page); 1455cea86fe2SHugh Dickins 1456cea86fe2SHugh Dickins munlock_vma_page(page, vma, compound); 14571da177e4SLinus Torvalds } 14581da177e4SLinus Torvalds 14591da177e4SLinus Torvalds /* 146052629506SJoonsoo Kim * @arg: enum ttu_flags will be passed to this argument 14611da177e4SLinus Torvalds */ 14622f031c6fSMatthew Wilcox (Oracle) static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, 146352629506SJoonsoo Kim unsigned long address, void *arg) 14641da177e4SLinus Torvalds { 14651da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 1466869f7ee6SMatthew Wilcox (Oracle) DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 14671da177e4SLinus Torvalds pte_t pteval; 1468c7ab0d2fSKirill A. Shutemov struct page *subpage; 14696c287605SDavid Hildenbrand bool anon_exclusive, ret = true; 1470ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 14714708f318SPalmer Dabbelt enum ttu_flags flags = (enum ttu_flags)(long)arg; 14721da177e4SLinus Torvalds 1473732ed558SHugh Dickins /* 1474732ed558SHugh Dickins * When racing against e.g.
zap_pte_range() on another cpu, 1475732ed558SHugh Dickins * in between its ptep_get_and_clear_full() and page_remove_rmap(), 14761fb08ac6SYang Shi * try_to_unmap() may return before page_mapped() has become false, 1477732ed558SHugh Dickins * if page table locking is skipped: use TTU_SYNC to wait for that. 1478732ed558SHugh Dickins */ 1479732ed558SHugh Dickins if (flags & TTU_SYNC) 1480732ed558SHugh Dickins pvmw.flags = PVMW_SYNC; 1481732ed558SHugh Dickins 1482a98a2f0cSAlistair Popple if (flags & TTU_SPLIT_HUGE_PMD) 1483af28a988SMatthew Wilcox (Oracle) split_huge_pmd_address(vma, address, false, folio); 1484fec89c10SKirill A. Shutemov 1485369ea824SJérôme Glisse /* 1486017b1660SMike Kravetz * For THP, we have to assume the worst case, i.e. pmd, for invalidation. 1487017b1660SMike Kravetz * For hugetlb, it could be much worse if we need to do pud 1488017b1660SMike Kravetz * invalidation in the case of pmd sharing. 1489017b1660SMike Kravetz * 1490869f7ee6SMatthew Wilcox (Oracle) * Note that the folio cannot be freed in this function, as the caller 1491869f7ee6SMatthew Wilcox (Oracle) * of try_to_unmap() must hold a reference on the folio. 1492369ea824SJérôme Glisse */ 14932aff7a47SMatthew Wilcox (Oracle) range.end = vma_address_end(&pvmw); 14947269f999SJérôme Glisse mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 1495494334e4SHugh Dickins address, range.end); 1496869f7ee6SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio)) { 1497017b1660SMike Kravetz /* 1498017b1660SMike Kravetz * If sharing is possible, start and end will be adjusted 1499017b1660SMike Kravetz * accordingly. 1500017b1660SMike Kravetz */ 1501ac46d4f3SJérôme Glisse adjust_range_if_pmd_sharing_possible(vma, &range.start, 1502ac46d4f3SJérôme Glisse &range.end); 1503017b1660SMike Kravetz } 1504ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 1505369ea824SJérôme Glisse 1506c7ab0d2fSKirill A. Shutemov while (page_vma_mapped_walk(&pvmw)) { 1507cea86fe2SHugh Dickins /* Unexpected PMD-mapped THP? */ 1508869f7ee6SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!pvmw.pte, folio); 1509cea86fe2SHugh Dickins 15101da177e4SLinus Torvalds /* 1511869f7ee6SMatthew Wilcox (Oracle) * If the folio is in an mlock()d vma, we must not swap it out. 15121da177e4SLinus Torvalds */ 1513efdb6720SHugh Dickins if (!(flags & TTU_IGNORE_MLOCK) && 1514efdb6720SHugh Dickins (vma->vm_flags & VM_LOCKED)) { 1515cea86fe2SHugh Dickins /* Restore the mlock which got missed */ 1516869f7ee6SMatthew Wilcox (Oracle) mlock_vma_folio(folio, vma, false); 1517c7ab0d2fSKirill A. Shutemov page_vma_mapped_walk_done(&pvmw); 1518efdb6720SHugh Dickins ret = false; 1519c7ab0d2fSKirill A. Shutemov break; 1520b87537d9SHugh Dickins } 1521c7ab0d2fSKirill A. Shutemov 1522869f7ee6SMatthew Wilcox (Oracle) subpage = folio_page(folio, 1523869f7ee6SMatthew Wilcox (Oracle) pte_pfn(*pvmw.pte) - folio_pfn(folio)); 1524785373b4SLinus Torvalds address = pvmw.address; 15256c287605SDavid Hildenbrand anon_exclusive = folio_test_anon(folio) && 15266c287605SDavid Hildenbrand PageAnonExclusive(subpage); 1527785373b4SLinus Torvalds 1528869f7ee6SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) { 1529c0d0381aSMike Kravetz /* 1530c0d0381aSMike Kravetz * To call huge_pmd_unshare, i_mmap_rwsem must be 1531c0d0381aSMike Kravetz * held in write mode. Caller needs to explicitly 1532c0d0381aSMike Kravetz * do this outside rmap routines.
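			 *
			 * A hedged caller-side sketch (illustrative only,
			 * loosely modeled on the memory-failure path, where
			 * "mapping" is the file's address_space):
			 *
			 *	i_mmap_lock_write(mapping);
			 *	try_to_unmap(folio, flags | TTU_RMAP_LOCKED);
			 *	i_mmap_unlock_write(mapping);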
1533c0d0381aSMike Kravetz */ 1534c0d0381aSMike Kravetz VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 153534ae204fSMike Kravetz if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) { 1536017b1660SMike Kravetz /* 1537017b1660SMike Kravetz * huge_pmd_unshare unmapped an entire PMD 1538017b1660SMike Kravetz * page. There is no way of knowing exactly 1539017b1660SMike Kravetz * which PMDs may be cached for this mm, so 1540017b1660SMike Kravetz * we must flush them all. start/end were 1541017b1660SMike Kravetz * already adjusted above to cover this range. 1542017b1660SMike Kravetz */ 1543ac46d4f3SJérôme Glisse flush_cache_range(vma, range.start, range.end); 1544ac46d4f3SJérôme Glisse flush_tlb_range(vma, range.start, range.end); 1545ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range(mm, range.start, 1546ac46d4f3SJérôme Glisse range.end); 1547017b1660SMike Kravetz 1548017b1660SMike Kravetz /* 1549017b1660SMike Kravetz * The ref count of the PMD page was dropped 1550017b1660SMike Kravetz * which is part of the way map counting 1551017b1660SMike Kravetz * is done for shared PMDs. Return 'true' 1552017b1660SMike Kravetz * here. When there is no other sharing, 1553017b1660SMike Kravetz * huge_pmd_unshare returns false and we will 1554017b1660SMike Kravetz * unmap the actual page and drop map count 1555017b1660SMike Kravetz * to zero. 1556017b1660SMike Kravetz */ 1557017b1660SMike Kravetz page_vma_mapped_walk_done(&pvmw); 1558017b1660SMike Kravetz break; 1559017b1660SMike Kravetz } 1560017b1660SMike Kravetz } 15618346242aSKirill A. Shutemov 15626c287605SDavid Hildenbrand /* 15636c287605SDavid Hildenbrand * Nuke the page table entry. When having to clear 15646c287605SDavid Hildenbrand * PageAnonExclusive(), we always have to flush. 15656c287605SDavid Hildenbrand */ 1566785373b4SLinus Torvalds flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 15676c287605SDavid Hildenbrand if (should_defer_flush(mm, flags) && !anon_exclusive) { 156872b252aeSMel Gorman /* 1569c7ab0d2fSKirill A. Shutemov * We clear the PTE but do not flush so potentially 1570869f7ee6SMatthew Wilcox (Oracle) * a remote CPU could still be writing to the folio. 1571c7ab0d2fSKirill A. Shutemov * If the entry was previously clean then the 1572c7ab0d2fSKirill A. Shutemov * architecture must guarantee that a clear->dirty 1573c7ab0d2fSKirill A. Shutemov * transition on a cached TLB entry is written through 1574c7ab0d2fSKirill A. Shutemov * and traps if the PTE is unmapped. 157572b252aeSMel Gorman */ 1576785373b4SLinus Torvalds pteval = ptep_get_and_clear(mm, address, pvmw.pte); 157772b252aeSMel Gorman 1578c7ab0d2fSKirill A. Shutemov set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); 157972b252aeSMel Gorman } else { 1580785373b4SLinus Torvalds pteval = ptep_clear_flush(vma, address, pvmw.pte); 158172b252aeSMel Gorman } 15821da177e4SLinus Torvalds 1583869f7ee6SMatthew Wilcox (Oracle) /* Set the dirty flag on the folio now the pte is gone. 
*/ 15841da177e4SLinus Torvalds if (pte_dirty(pteval)) 1585869f7ee6SMatthew Wilcox (Oracle) folio_mark_dirty(folio); 15861da177e4SLinus Torvalds 1587365e9c87SHugh Dickins /* Update high watermark before we lower rss */ 1588365e9c87SHugh Dickins update_hiwater_rss(mm); 1589365e9c87SHugh Dickins 1590da358d5cSMatthew Wilcox (Oracle) if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) { 15915fd27b8eSPunit Agrawal pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 1592869f7ee6SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio)) { 1593869f7ee6SMatthew Wilcox (Oracle) hugetlb_count_sub(folio_nr_pages(folio), mm); 1594785373b4SLinus Torvalds set_huge_swap_pte_at(mm, address, 15955fd27b8eSPunit Agrawal pvmw.pte, pteval, 15965fd27b8eSPunit Agrawal vma_mmu_pagesize(vma)); 15975d317b2bSNaoya Horiguchi } else { 1598869f7ee6SMatthew Wilcox (Oracle) dec_mm_counter(mm, mm_counter(&folio->page)); 1599785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, pteval); 16005f24ae58SNaoya Horiguchi } 1601c7ab0d2fSKirill A. Shutemov 1602bce73e48SChristian Borntraeger } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 160345961722SKonstantin Weitz /* 160445961722SKonstantin Weitz * The guest indicated that the page content is of no 160545961722SKonstantin Weitz * interest anymore. Simply discard the pte, vmscan 160645961722SKonstantin Weitz * will take care of the rest. 1607bce73e48SChristian Borntraeger * A future reference will then fault in a new zero 1608bce73e48SChristian Borntraeger * page. When userfaultfd is active, we must not drop 1609bce73e48SChristian Borntraeger * this page though, as its main user (postcopy 1610bce73e48SChristian Borntraeger * migration) will not expect userfaults on already 1611bce73e48SChristian Borntraeger * copied pages. 161245961722SKonstantin Weitz */ 1613869f7ee6SMatthew Wilcox (Oracle) dec_mm_counter(mm, mm_counter(&folio->page)); 16140f10851eSJérôme Glisse /* We have to invalidate as we cleared the pte */ 16150f10851eSJérôme Glisse mmu_notifier_invalidate_range(mm, address, 16160f10851eSJérôme Glisse address + PAGE_SIZE); 1617869f7ee6SMatthew Wilcox (Oracle) } else if (folio_test_anon(folio)) { 1618c7ab0d2fSKirill A. Shutemov swp_entry_t entry = { .val = page_private(subpage) }; 1619179ef71cSCyrill Gorcunov pte_t swp_pte; 16201da177e4SLinus Torvalds /* 16211da177e4SLinus Torvalds * Store the swap location in the pte. 16221da177e4SLinus Torvalds * See handle_pte_fault() ... 
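		 *
		 * (A hedged summary of the steps below: duplicate the swap
		 * entry's reference with swap_duplicate(), move the rss
		 * accounting from MM_ANONPAGES to MM_SWAPENTS, then encode
		 * the entry plus the exclusive, soft-dirty and uffd-wp bits
		 * into a swap pte and install it with set_pte_at().)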
16231da177e4SLinus Torvalds */ 1624869f7ee6SMatthew Wilcox (Oracle) if (unlikely(folio_test_swapbacked(folio) != 1625869f7ee6SMatthew Wilcox (Oracle) folio_test_swapcache(folio))) { 1626eb94a878SMinchan Kim WARN_ON_ONCE(1); 162783612a94SMinchan Kim ret = false; 1628369ea824SJérôme Glisse /* We have to invalidate as we cleared the pte */ 16290f10851eSJérôme Glisse mmu_notifier_invalidate_range(mm, address, 16300f10851eSJérôme Glisse address + PAGE_SIZE); 1631eb94a878SMinchan Kim page_vma_mapped_walk_done(&pvmw); 1632eb94a878SMinchan Kim break; 1633eb94a878SMinchan Kim } 1634854e9ed0SMinchan Kim 1635802a3a92SShaohua Li /* MADV_FREE page check */ 1636869f7ee6SMatthew Wilcox (Oracle) if (!folio_test_swapbacked(folio)) { 16376c8e2a25SMauricio Faria de Oliveira int ref_count, map_count; 16386c8e2a25SMauricio Faria de Oliveira 16396c8e2a25SMauricio Faria de Oliveira /* 16406c8e2a25SMauricio Faria de Oliveira * Synchronize with gup_pte_range(): 16416c8e2a25SMauricio Faria de Oliveira * - clear PTE; barrier; read refcount 16426c8e2a25SMauricio Faria de Oliveira * - inc refcount; barrier; read PTE 16436c8e2a25SMauricio Faria de Oliveira */ 16446c8e2a25SMauricio Faria de Oliveira smp_mb(); 16456c8e2a25SMauricio Faria de Oliveira 16466c8e2a25SMauricio Faria de Oliveira ref_count = folio_ref_count(folio); 16476c8e2a25SMauricio Faria de Oliveira map_count = folio_mapcount(folio); 16486c8e2a25SMauricio Faria de Oliveira 16496c8e2a25SMauricio Faria de Oliveira /* 16506c8e2a25SMauricio Faria de Oliveira * Order reads for page refcount and dirty flag 16516c8e2a25SMauricio Faria de Oliveira * (see comments in __remove_mapping()). 16526c8e2a25SMauricio Faria de Oliveira */ 16536c8e2a25SMauricio Faria de Oliveira smp_rmb(); 16546c8e2a25SMauricio Faria de Oliveira 16556c8e2a25SMauricio Faria de Oliveira /* 16566c8e2a25SMauricio Faria de Oliveira * The only page refs must be one from isolation 16576c8e2a25SMauricio Faria de Oliveira * plus the rmap(s) (dropped by discard:). 16586c8e2a25SMauricio Faria de Oliveira */ 16596c8e2a25SMauricio Faria de Oliveira if (ref_count == 1 + map_count && 16606c8e2a25SMauricio Faria de Oliveira !folio_test_dirty(folio)) { 16610f10851eSJérôme Glisse /* Invalidate as we cleared the pte */ 16620f10851eSJérôme Glisse mmu_notifier_invalidate_range(mm, 16630f10851eSJérôme Glisse address, address + PAGE_SIZE); 1664854e9ed0SMinchan Kim dec_mm_counter(mm, MM_ANONPAGES); 1665854e9ed0SMinchan Kim goto discard; 1666854e9ed0SMinchan Kim } 1667854e9ed0SMinchan Kim 1668802a3a92SShaohua Li /* 1669869f7ee6SMatthew Wilcox (Oracle) * If the folio was redirtied, it cannot be 1670802a3a92SShaohua Li * discarded. Remap the page to page table. 1671802a3a92SShaohua Li */ 1672785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, pteval); 1673869f7ee6SMatthew Wilcox (Oracle) folio_set_swapbacked(folio); 1674e4b82222SMinchan Kim ret = false; 1675802a3a92SShaohua Li page_vma_mapped_walk_done(&pvmw); 1676802a3a92SShaohua Li break; 1677802a3a92SShaohua Li } 1678802a3a92SShaohua Li 1679570a335bSHugh Dickins if (swap_duplicate(entry) < 0) { 1680785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, pteval); 1681e4b82222SMinchan Kim ret = false; 1682c7ab0d2fSKirill A. Shutemov page_vma_mapped_walk_done(&pvmw); 1683c7ab0d2fSKirill A. 
Shutemov break; 1684570a335bSHugh Dickins } 1685ca827d55SKhalid Aziz if (arch_unmap_one(mm, vma, address, pteval) < 0) { 1686322842eaSDavid Hildenbrand swap_free(entry); 1687ca827d55SKhalid Aziz set_pte_at(mm, address, pvmw.pte, pteval); 1688ca827d55SKhalid Aziz ret = false; 1689ca827d55SKhalid Aziz page_vma_mapped_walk_done(&pvmw); 1690ca827d55SKhalid Aziz break; 1691ca827d55SKhalid Aziz } 16926c287605SDavid Hildenbrand if (anon_exclusive && 16936c287605SDavid Hildenbrand page_try_share_anon_rmap(subpage)) { 16946c287605SDavid Hildenbrand swap_free(entry); 16956c287605SDavid Hildenbrand set_pte_at(mm, address, pvmw.pte, pteval); 16966c287605SDavid Hildenbrand ret = false; 16976c287605SDavid Hildenbrand page_vma_mapped_walk_done(&pvmw); 16986c287605SDavid Hildenbrand break; 16996c287605SDavid Hildenbrand } 17006c287605SDavid Hildenbrand /* 1701*1493a191SDavid Hildenbrand * Note: We *don't* remember if the page was mapped 1702*1493a191SDavid Hildenbrand * exclusively in the swap pte if the architecture 1703*1493a191SDavid Hildenbrand * doesn't support __HAVE_ARCH_PTE_SWP_EXCLUSIVE. In 1704*1493a191SDavid Hildenbrand * that case, swapin code has to re-determine that 1705*1493a191SDavid Hildenbrand * manually and might detect the page as possibly 1706*1493a191SDavid Hildenbrand * shared, for example, if there are other references on 1707*1493a191SDavid Hildenbrand * the page or if the page is under writeback. We made 1708*1493a191SDavid Hildenbrand * sure that there are no GUP pins on the page that 1709*1493a191SDavid Hildenbrand * would rely on it, so for GUP pins this is fine. 17106c287605SDavid Hildenbrand */ 17111da177e4SLinus Torvalds if (list_empty(&mm->mmlist)) { 17121da177e4SLinus Torvalds spin_lock(&mmlist_lock); 1713f412ac08SHugh Dickins if (list_empty(&mm->mmlist)) 17141da177e4SLinus Torvalds list_add(&mm->mmlist, &init_mm.mmlist); 17151da177e4SLinus Torvalds spin_unlock(&mmlist_lock); 17161da177e4SLinus Torvalds } 1717d559db08SKAMEZAWA Hiroyuki dec_mm_counter(mm, MM_ANONPAGES); 1718b084d435SKAMEZAWA Hiroyuki inc_mm_counter(mm, MM_SWAPENTS); 1719179ef71cSCyrill Gorcunov swp_pte = swp_entry_to_pte(entry); 1720*1493a191SDavid Hildenbrand if (anon_exclusive) 1721*1493a191SDavid Hildenbrand swp_pte = pte_swp_mkexclusive(swp_pte); 1722179ef71cSCyrill Gorcunov if (pte_soft_dirty(pteval)) 1723179ef71cSCyrill Gorcunov swp_pte = pte_swp_mksoft_dirty(swp_pte); 1724f45ec5ffSPeter Xu if (pte_uffd_wp(pteval)) 1725f45ec5ffSPeter Xu swp_pte = pte_swp_mkuffd_wp(swp_pte); 1726785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, swp_pte); 17270f10851eSJérôme Glisse /* Invalidate as we cleared the pte */ 1728369ea824SJérôme Glisse mmu_notifier_invalidate_range(mm, address, 1729369ea824SJérôme Glisse address + PAGE_SIZE); 17300f10851eSJérôme Glisse } else { 17310f10851eSJérôme Glisse /* 1732869f7ee6SMatthew Wilcox (Oracle) * This is a locked file-backed folio, 1733869f7ee6SMatthew Wilcox (Oracle) * so it cannot be removed from the page 1734869f7ee6SMatthew Wilcox (Oracle) * cache and replaced by a new folio before 1735869f7ee6SMatthew Wilcox (Oracle) * mmu_notifier_invalidate_range_end, so no 1736869f7ee6SMatthew Wilcox (Oracle) * concurrent thread might update its page table 1737869f7ee6SMatthew Wilcox (Oracle) * to point at a new folio while a device is 1738869f7ee6SMatthew Wilcox (Oracle) * still using this folio. 
17390f10851eSJérôme Glisse * 1740ad56b738SMike Rapoport * See Documentation/vm/mmu_notifier.rst 17410f10851eSJérôme Glisse */ 1742869f7ee6SMatthew Wilcox (Oracle) dec_mm_counter(mm, mm_counter_file(&folio->page)); 17430f10851eSJérôme Glisse } 17440f10851eSJérôme Glisse discard: 17450f10851eSJérôme Glisse /* 17460f10851eSJérôme Glisse * No need to call mmu_notifier_invalidate_range(); it has been 17470f10851eSJérôme Glisse * done above for all cases requiring it to happen under page 17480f10851eSJérôme Glisse * table lock before mmu_notifier_invalidate_range_end() 17490f10851eSJérôme Glisse * 1750ad56b738SMike Rapoport * See Documentation/vm/mmu_notifier.rst 17510f10851eSJérôme Glisse */ 1752869f7ee6SMatthew Wilcox (Oracle) page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); 1753b7435507SHugh Dickins if (vma->vm_flags & VM_LOCKED) 1754adb11e78SSebastian Andrzej Siewior mlock_page_drain_local(); 1755869f7ee6SMatthew Wilcox (Oracle) folio_put(folio); 1756c7ab0d2fSKirill A. Shutemov } 1757369ea824SJérôme Glisse 1758ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range); 1759369ea824SJérôme Glisse 1760caed0f48SKOSAKI Motohiro return ret; 17611da177e4SLinus Torvalds } 17621da177e4SLinus Torvalds 176352629506SJoonsoo Kim static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) 176452629506SJoonsoo Kim { 1765222100eeSAnshuman Khandual return vma_is_temporary_stack(vma); 176652629506SJoonsoo Kim } 176752629506SJoonsoo Kim 17682f031c6fSMatthew Wilcox (Oracle) static int page_not_mapped(struct folio *folio) 176952629506SJoonsoo Kim { 17702f031c6fSMatthew Wilcox (Oracle) return !folio_mapped(folio); 17712a52bcbcSKirill A. Shutemov } 177252629506SJoonsoo Kim 17731da177e4SLinus Torvalds /** 1774869f7ee6SMatthew Wilcox (Oracle) * try_to_unmap - Try to remove all page table mappings to a folio. 1775869f7ee6SMatthew Wilcox (Oracle) * @folio: The folio to unmap. 177614fa31b8SAndi Kleen * @flags: action and flags 17771da177e4SLinus Torvalds * 17781da177e4SLinus Torvalds * Tries to remove all the page table entries which are mapping this 1779869f7ee6SMatthew Wilcox (Oracle) * folio. It is the caller's responsibility to check if the folio is 1780869f7ee6SMatthew Wilcox (Oracle) * still mapped if needed (use TTU_SYNC to prevent accounting races). 17811da177e4SLinus Torvalds * 1782869f7ee6SMatthew Wilcox (Oracle) * Context: Caller must hold the folio lock. 17831da177e4SLinus Torvalds */ 1784869f7ee6SMatthew Wilcox (Oracle) void try_to_unmap(struct folio *folio, enum ttu_flags flags) 17851da177e4SLinus Torvalds { 178652629506SJoonsoo Kim struct rmap_walk_control rwc = { 178752629506SJoonsoo Kim .rmap_one = try_to_unmap_one, 1788802a3a92SShaohua Li .arg = (void *)flags, 1789b7e188ecSMiaohe Lin .done = page_not_mapped, 17902f031c6fSMatthew Wilcox (Oracle) .anon_lock = folio_lock_anon_vma_read, 179152629506SJoonsoo Kim }; 17921da177e4SLinus Torvalds 1793a98a2f0cSAlistair Popple if (flags & TTU_RMAP_LOCKED) 17942f031c6fSMatthew Wilcox (Oracle) rmap_walk_locked(folio, &rwc); 1795a98a2f0cSAlistair Popple else 17962f031c6fSMatthew Wilcox (Oracle) rmap_walk(folio, &rwc); 1797a98a2f0cSAlistair Popple } 1798a98a2f0cSAlistair Popple 1799a98a2f0cSAlistair Popple /* 1800a98a2f0cSAlistair Popple * @arg: enum ttu_flags will be passed to this argument. 1801a98a2f0cSAlistair Popple * 1802a98a2f0cSAlistair Popple * If TTU_SPLIT_HUGE_PMD is specified, any PMD mappings will be split into PTEs 180364b586d1SHugh Dickins * containing migration entries.
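 *
 * A hedged sketch of the per-pte replacement performed below (simplified;
 * the real code also preserves the writable, soft-dirty, uffd-wp and
 * anon-exclusive state):
 *
 *	entry = make_readable_migration_entry(page_to_pfn(subpage));
 *	set_pte_at(mm, address, pvmw.pte, swp_entry_to_pte(entry));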
1804a98a2f0cSAlistair Popple */ 18052f031c6fSMatthew Wilcox (Oracle) static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, 1806a98a2f0cSAlistair Popple unsigned long address, void *arg) 1807a98a2f0cSAlistair Popple { 1808a98a2f0cSAlistair Popple struct mm_struct *mm = vma->vm_mm; 18094b8554c5SMatthew Wilcox (Oracle) DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 1810a98a2f0cSAlistair Popple pte_t pteval; 1811a98a2f0cSAlistair Popple struct page *subpage; 18126c287605SDavid Hildenbrand bool anon_exclusive, ret = true; 1813a98a2f0cSAlistair Popple struct mmu_notifier_range range; 1814a98a2f0cSAlistair Popple enum ttu_flags flags = (enum ttu_flags)(long)arg; 1815a98a2f0cSAlistair Popple 1816a98a2f0cSAlistair Popple /* 1817a98a2f0cSAlistair Popple * When racing against e.g. zap_pte_range() on another cpu, 1818a98a2f0cSAlistair Popple * in between its ptep_get_and_clear_full() and page_remove_rmap(), 1819a98a2f0cSAlistair Popple * try_to_migrate() may return before page_mapped() has become false, 1820a98a2f0cSAlistair Popple * if page table locking is skipped: use TTU_SYNC to wait for that. 1821a98a2f0cSAlistair Popple */ 1822a98a2f0cSAlistair Popple if (flags & TTU_SYNC) 1823a98a2f0cSAlistair Popple pvmw.flags = PVMW_SYNC; 1824a98a2f0cSAlistair Popple 1825a98a2f0cSAlistair Popple /* 1826a98a2f0cSAlistair Popple * unmap_page() in mm/huge_memory.c is the only user of migration with 1827a98a2f0cSAlistair Popple * TTU_SPLIT_HUGE_PMD and it wants to freeze. 1828a98a2f0cSAlistair Popple */ 1829a98a2f0cSAlistair Popple if (flags & TTU_SPLIT_HUGE_PMD) 1830af28a988SMatthew Wilcox (Oracle) split_huge_pmd_address(vma, address, true, folio); 1831a98a2f0cSAlistair Popple 1832a98a2f0cSAlistair Popple /* 1833a98a2f0cSAlistair Popple * For THP, we have to assume the worst case, i.e. pmd, for invalidation. 1834a98a2f0cSAlistair Popple * For hugetlb, it could be much worse if we need to do pud 1835a98a2f0cSAlistair Popple * invalidation in the case of pmd sharing. 1836a98a2f0cSAlistair Popple * 1837a98a2f0cSAlistair Popple * Note that the page cannot be freed in this function, as the caller 1838a98a2f0cSAlistair Popple * of try_to_unmap() must hold a reference on the page. 1839a98a2f0cSAlistair Popple */ 18402aff7a47SMatthew Wilcox (Oracle) range.end = vma_address_end(&pvmw); 1841a98a2f0cSAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 1842a98a2f0cSAlistair Popple address, range.end); 18434b8554c5SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio)) { 1844a98a2f0cSAlistair Popple /* 1845a98a2f0cSAlistair Popple * If sharing is possible, start and end will be adjusted 1846a98a2f0cSAlistair Popple * accordingly.
1847a98a2f0cSAlistair Popple */ 1848a98a2f0cSAlistair Popple adjust_range_if_pmd_sharing_possible(vma, &range.start, 1849a98a2f0cSAlistair Popple &range.end); 1850a98a2f0cSAlistair Popple } 1851a98a2f0cSAlistair Popple mmu_notifier_invalidate_range_start(&range); 1852a98a2f0cSAlistair Popple 1853a98a2f0cSAlistair Popple while (page_vma_mapped_walk(&pvmw)) { 1854a98a2f0cSAlistair Popple #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 1855a98a2f0cSAlistair Popple /* PMD-mapped THP migration entry */ 1856a98a2f0cSAlistair Popple if (!pvmw.pte) { 18574b8554c5SMatthew Wilcox (Oracle) subpage = folio_page(folio, 18584b8554c5SMatthew Wilcox (Oracle) pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); 18594b8554c5SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || 18604b8554c5SMatthew Wilcox (Oracle) !folio_test_pmd_mappable(folio), folio); 1861a98a2f0cSAlistair Popple 18627f5abe60SDavid Hildenbrand if (set_pmd_migration_entry(&pvmw, subpage)) { 18637f5abe60SDavid Hildenbrand ret = false; 18647f5abe60SDavid Hildenbrand page_vma_mapped_walk_done(&pvmw); 18657f5abe60SDavid Hildenbrand break; 18667f5abe60SDavid Hildenbrand } 1867a98a2f0cSAlistair Popple continue; 1868a98a2f0cSAlistair Popple } 1869a98a2f0cSAlistair Popple #endif 1870a98a2f0cSAlistair Popple 1871a98a2f0cSAlistair Popple /* Unexpected PMD-mapped THP? */ 18724b8554c5SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!pvmw.pte, folio); 1873a98a2f0cSAlistair Popple 18744b8554c5SMatthew Wilcox (Oracle) subpage = folio_page(folio, 18754b8554c5SMatthew Wilcox (Oracle) pte_pfn(*pvmw.pte) - folio_pfn(folio)); 1876a98a2f0cSAlistair Popple address = pvmw.address; 18776c287605SDavid Hildenbrand anon_exclusive = folio_test_anon(folio) && 18786c287605SDavid Hildenbrand PageAnonExclusive(subpage); 1879a98a2f0cSAlistair Popple 18804b8554c5SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) { 1881a98a2f0cSAlistair Popple /* 1882a98a2f0cSAlistair Popple * To call huge_pmd_unshare, i_mmap_rwsem must be 1883a98a2f0cSAlistair Popple * held in write mode. Caller needs to explicitly 1884a98a2f0cSAlistair Popple * do this outside rmap routines. 1885a98a2f0cSAlistair Popple */ 1886a98a2f0cSAlistair Popple VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 1887a98a2f0cSAlistair Popple if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) { 1888a98a2f0cSAlistair Popple /* 1889a98a2f0cSAlistair Popple * huge_pmd_unshare unmapped an entire PMD 1890a98a2f0cSAlistair Popple * page. There is no way of knowing exactly 1891a98a2f0cSAlistair Popple * which PMDs may be cached for this mm, so 1892a98a2f0cSAlistair Popple * we must flush them all. start/end were 1893a98a2f0cSAlistair Popple * already adjusted above to cover this range. 1894a98a2f0cSAlistair Popple */ 1895a98a2f0cSAlistair Popple flush_cache_range(vma, range.start, range.end); 1896a98a2f0cSAlistair Popple flush_tlb_range(vma, range.start, range.end); 1897a98a2f0cSAlistair Popple mmu_notifier_invalidate_range(mm, range.start, 1898a98a2f0cSAlistair Popple range.end); 1899a98a2f0cSAlistair Popple 1900a98a2f0cSAlistair Popple /* 1901a98a2f0cSAlistair Popple * The ref count of the PMD page was dropped 1902a98a2f0cSAlistair Popple * which is part of the way map counting 1903a98a2f0cSAlistair Popple * is done for shared PMDs. Return 'true' 1904a98a2f0cSAlistair Popple * here. When there is no other sharing, 1905a98a2f0cSAlistair Popple * huge_pmd_unshare returns false and we will 1906a98a2f0cSAlistair Popple * unmap the actual page and drop map count 1907a98a2f0cSAlistair Popple * to zero. 
1908a98a2f0cSAlistair Popple */ 1909a98a2f0cSAlistair Popple page_vma_mapped_walk_done(&pvmw); 1910a98a2f0cSAlistair Popple break; 1911a98a2f0cSAlistair Popple } 1912a98a2f0cSAlistair Popple } 1913a98a2f0cSAlistair Popple 1914a98a2f0cSAlistair Popple /* Nuke the page table entry. */ 1915a98a2f0cSAlistair Popple flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 1916a98a2f0cSAlistair Popple pteval = ptep_clear_flush(vma, address, pvmw.pte); 1917a98a2f0cSAlistair Popple 19184b8554c5SMatthew Wilcox (Oracle) /* Set the dirty flag on the folio now the pte is gone. */ 1919a98a2f0cSAlistair Popple if (pte_dirty(pteval)) 19204b8554c5SMatthew Wilcox (Oracle) folio_mark_dirty(folio); 1921a98a2f0cSAlistair Popple 1922a98a2f0cSAlistair Popple /* Update high watermark before we lower rss */ 1923a98a2f0cSAlistair Popple update_hiwater_rss(mm); 1924a98a2f0cSAlistair Popple 19254b8554c5SMatthew Wilcox (Oracle) if (folio_is_zone_device(folio)) { 19264b8554c5SMatthew Wilcox (Oracle) unsigned long pfn = folio_pfn(folio); 1927a98a2f0cSAlistair Popple swp_entry_t entry; 1928a98a2f0cSAlistair Popple pte_t swp_pte; 1929a98a2f0cSAlistair Popple 19306c287605SDavid Hildenbrand if (anon_exclusive) 19316c287605SDavid Hildenbrand BUG_ON(page_try_share_anon_rmap(subpage)); 19326c287605SDavid Hildenbrand 1933a98a2f0cSAlistair Popple /* 1934a98a2f0cSAlistair Popple * Store the pfn of the page in a special migration 1935a98a2f0cSAlistair Popple * pte. do_swap_page() will wait until the migration 1936a98a2f0cSAlistair Popple * pte is removed and then restart fault handling. 1937a98a2f0cSAlistair Popple */ 19383d88705cSAlistair Popple entry = pte_to_swp_entry(pteval); 19393d88705cSAlistair Popple if (is_writable_device_private_entry(entry)) 19403d88705cSAlistair Popple entry = make_writable_migration_entry(pfn); 19416c287605SDavid Hildenbrand else if (anon_exclusive) 19426c287605SDavid Hildenbrand entry = make_readable_exclusive_migration_entry(pfn); 19433d88705cSAlistair Popple else 19443d88705cSAlistair Popple entry = make_readable_migration_entry(pfn); 1945a98a2f0cSAlistair Popple swp_pte = swp_entry_to_pte(entry); 1946a98a2f0cSAlistair Popple 1947a98a2f0cSAlistair Popple /* 1948a98a2f0cSAlistair Popple * pteval maps a zone device page and is therefore 1949a98a2f0cSAlistair Popple * a swap pte. 1950a98a2f0cSAlistair Popple */ 1951a98a2f0cSAlistair Popple if (pte_swp_soft_dirty(pteval)) 1952a98a2f0cSAlistair Popple swp_pte = pte_swp_mksoft_dirty(swp_pte); 1953a98a2f0cSAlistair Popple if (pte_swp_uffd_wp(pteval)) 1954a98a2f0cSAlistair Popple swp_pte = pte_swp_mkuffd_wp(swp_pte); 1955a98a2f0cSAlistair Popple set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); 19564cc79b33SAnshuman Khandual trace_set_migration_pte(pvmw.address, pte_val(swp_pte), 19574cc79b33SAnshuman Khandual compound_order(&folio->page)); 1958a98a2f0cSAlistair Popple /* 1959a98a2f0cSAlistair Popple * No need to invalidate here; it will synchronize 1960a98a2f0cSAlistair Popple * against the special swap migration pte. 1961a98a2f0cSAlistair Popple * 1962a98a2f0cSAlistair Popple * The assignment to subpage above was computed from a 1963a98a2f0cSAlistair Popple * swap PTE which results in an invalid pointer. 1964a98a2f0cSAlistair Popple * Since only PAGE_SIZE pages can currently be 1965a98a2f0cSAlistair Popple * migrated, just set it to page. This will need to be 1966a98a2f0cSAlistair Popple * changed when hugepage migrations to device private 1967a98a2f0cSAlistair Popple * memory are supported.
1968a98a2f0cSAlistair Popple */ 19694b8554c5SMatthew Wilcox (Oracle) subpage = &folio->page; 1970da358d5cSMatthew Wilcox (Oracle) } else if (PageHWPoison(subpage)) { 1971a98a2f0cSAlistair Popple pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 19724b8554c5SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio)) { 19734b8554c5SMatthew Wilcox (Oracle) hugetlb_count_sub(folio_nr_pages(folio), mm); 1974a98a2f0cSAlistair Popple set_huge_swap_pte_at(mm, address, 1975a98a2f0cSAlistair Popple pvmw.pte, pteval, 1976a98a2f0cSAlistair Popple vma_mmu_pagesize(vma)); 1977a98a2f0cSAlistair Popple } else { 19784b8554c5SMatthew Wilcox (Oracle) dec_mm_counter(mm, mm_counter(&folio->page)); 1979a98a2f0cSAlistair Popple set_pte_at(mm, address, pvmw.pte, pteval); 1980a98a2f0cSAlistair Popple } 1981a98a2f0cSAlistair Popple 1982a98a2f0cSAlistair Popple } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 1983a98a2f0cSAlistair Popple /* 1984a98a2f0cSAlistair Popple * The guest indicated that the page content is of no 1985a98a2f0cSAlistair Popple * interest anymore. Simply discard the pte, vmscan 1986a98a2f0cSAlistair Popple * will take care of the rest. 1987a98a2f0cSAlistair Popple * A future reference will then fault in a new zero 1988a98a2f0cSAlistair Popple * page. When userfaultfd is active, we must not drop 1989a98a2f0cSAlistair Popple * this page though, as its main user (postcopy 1990a98a2f0cSAlistair Popple * migration) will not expect userfaults on already 1991a98a2f0cSAlistair Popple * copied pages. 1992a98a2f0cSAlistair Popple */ 19934b8554c5SMatthew Wilcox (Oracle) dec_mm_counter(mm, mm_counter(&folio->page)); 1994a98a2f0cSAlistair Popple /* We have to invalidate as we cleared the pte */ 1995a98a2f0cSAlistair Popple mmu_notifier_invalidate_range(mm, address, 1996a98a2f0cSAlistair Popple address + PAGE_SIZE); 1997a98a2f0cSAlistair Popple } else { 1998a98a2f0cSAlistair Popple swp_entry_t entry; 1999a98a2f0cSAlistair Popple pte_t swp_pte; 2000a98a2f0cSAlistair Popple 2001a98a2f0cSAlistair Popple if (arch_unmap_one(mm, vma, address, pteval) < 0) { 2002a98a2f0cSAlistair Popple set_pte_at(mm, address, pvmw.pte, pteval); 2003a98a2f0cSAlistair Popple ret = false; 2004a98a2f0cSAlistair Popple page_vma_mapped_walk_done(&pvmw); 2005a98a2f0cSAlistair Popple break; 2006a98a2f0cSAlistair Popple } 20076c287605SDavid Hildenbrand VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && 20086c287605SDavid Hildenbrand !anon_exclusive, subpage); 20096c287605SDavid Hildenbrand if (anon_exclusive && 20106c287605SDavid Hildenbrand page_try_share_anon_rmap(subpage)) { 20116c287605SDavid Hildenbrand set_pte_at(mm, address, pvmw.pte, pteval); 20126c287605SDavid Hildenbrand ret = false; 20136c287605SDavid Hildenbrand page_vma_mapped_walk_done(&pvmw); 20146c287605SDavid Hildenbrand break; 20156c287605SDavid Hildenbrand } 2016a98a2f0cSAlistair Popple 2017a98a2f0cSAlistair Popple /* 2018a98a2f0cSAlistair Popple * Store the pfn of the page in a special migration 2019a98a2f0cSAlistair Popple * pte. do_swap_page() will wait until the migration 2020a98a2f0cSAlistair Popple * pte is removed and then restart fault handling. 
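			 *
			 * (A readable *exclusive* migration entry additionally
			 * records that the mapping was PageAnonExclusive, so
			 * that state can be re-established when the migration
			 * entry is removed; see the anon_exclusive case below.)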
2021a98a2f0cSAlistair Popple */ 2022a98a2f0cSAlistair Popple if (pte_write(pteval)) 2023a98a2f0cSAlistair Popple entry = make_writable_migration_entry( 2024a98a2f0cSAlistair Popple page_to_pfn(subpage)); 20256c287605SDavid Hildenbrand else if (anon_exclusive) 20266c287605SDavid Hildenbrand entry = make_readable_exclusive_migration_entry( 20276c287605SDavid Hildenbrand page_to_pfn(subpage)); 2028a98a2f0cSAlistair Popple else 2029a98a2f0cSAlistair Popple entry = make_readable_migration_entry( 2030a98a2f0cSAlistair Popple page_to_pfn(subpage)); 2031a98a2f0cSAlistair Popple 2032a98a2f0cSAlistair Popple swp_pte = swp_entry_to_pte(entry); 2033a98a2f0cSAlistair Popple if (pte_soft_dirty(pteval)) 2034a98a2f0cSAlistair Popple swp_pte = pte_swp_mksoft_dirty(swp_pte); 2035a98a2f0cSAlistair Popple if (pte_uffd_wp(pteval)) 2036a98a2f0cSAlistair Popple swp_pte = pte_swp_mkuffd_wp(swp_pte); 2037a98a2f0cSAlistair Popple set_pte_at(mm, address, pvmw.pte, swp_pte); 20384cc79b33SAnshuman Khandual trace_set_migration_pte(address, pte_val(swp_pte), 20394cc79b33SAnshuman Khandual compound_order(&folio->page)); 2040a98a2f0cSAlistair Popple /* 2041a98a2f0cSAlistair Popple * No need to invalidate here; it will synchronize 2042a98a2f0cSAlistair Popple * against the special swap migration pte. 2043a98a2f0cSAlistair Popple */ 2044a98a2f0cSAlistair Popple } 2045a98a2f0cSAlistair Popple 2046a98a2f0cSAlistair Popple /* 2047a98a2f0cSAlistair Popple * No need to call mmu_notifier_invalidate_range(); it has been 2048a98a2f0cSAlistair Popple * done above for all cases requiring it to happen under page 2049a98a2f0cSAlistair Popple * table lock before mmu_notifier_invalidate_range_end() 2050a98a2f0cSAlistair Popple * 2051a98a2f0cSAlistair Popple * See Documentation/vm/mmu_notifier.rst 2052a98a2f0cSAlistair Popple */ 20534b8554c5SMatthew Wilcox (Oracle) page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); 2054b7435507SHugh Dickins if (vma->vm_flags & VM_LOCKED) 2055adb11e78SSebastian Andrzej Siewior mlock_page_drain_local(); 20564b8554c5SMatthew Wilcox (Oracle) folio_put(folio); 2057a98a2f0cSAlistair Popple } 2058a98a2f0cSAlistair Popple 2059a98a2f0cSAlistair Popple mmu_notifier_invalidate_range_end(&range); 2060a98a2f0cSAlistair Popple 2061a98a2f0cSAlistair Popple return ret; 2062a98a2f0cSAlistair Popple } 2063a98a2f0cSAlistair Popple 2064a98a2f0cSAlistair Popple /** 2065a98a2f0cSAlistair Popple * try_to_migrate - try to replace all page table mappings with swap entries 20664b8554c5SMatthew Wilcox (Oracle) * @folio: the folio to replace page table entries for 2067a98a2f0cSAlistair Popple * @flags: action and flags 2068a98a2f0cSAlistair Popple * 20694b8554c5SMatthew Wilcox (Oracle) * Tries to remove all the page table entries which are mapping this folio and 20704b8554c5SMatthew Wilcox (Oracle) * replace them with special swap entries. Caller must hold the folio lock.
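 *
 * A hedged usage sketch (illustrative only, loosely modeled on the
 * migration core; "src" and "dst" stand for the locked source folio and
 * its allocated destination):
 *
 *	folio_lock(src);
 *	try_to_migrate(src, 0);
 *	if (!folio_mapped(src)) {
 *		... copy src to dst ...
 *		remove_migration_ptes(src, dst, false);
 *	}
 *	folio_unlock(src);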
2071a98a2f0cSAlistair Popple */ 20724b8554c5SMatthew Wilcox (Oracle) void try_to_migrate(struct folio *folio, enum ttu_flags flags) 2073a98a2f0cSAlistair Popple { 2074a98a2f0cSAlistair Popple struct rmap_walk_control rwc = { 2075a98a2f0cSAlistair Popple .rmap_one = try_to_migrate_one, 2076a98a2f0cSAlistair Popple .arg = (void *)flags, 2077a98a2f0cSAlistair Popple .done = page_not_mapped, 20782f031c6fSMatthew Wilcox (Oracle) .anon_lock = folio_lock_anon_vma_read, 2079a98a2f0cSAlistair Popple }; 2080a98a2f0cSAlistair Popple 2081a98a2f0cSAlistair Popple /* 2082a98a2f0cSAlistair Popple * Migration always ignores mlock and only supports TTU_RMAP_LOCKED and 2083a98a2f0cSAlistair Popple * TTU_SPLIT_HUGE_PMD and TTU_SYNC flags. 2084a98a2f0cSAlistair Popple */ 2085a98a2f0cSAlistair Popple if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | 2086a98a2f0cSAlistair Popple TTU_SYNC))) 2087a98a2f0cSAlistair Popple return; 2088a98a2f0cSAlistair Popple 20894b8554c5SMatthew Wilcox (Oracle) if (folio_is_zone_device(folio) && !folio_is_device_private(folio)) 20906c855fceSHugh Dickins return; 20916c855fceSHugh Dickins 209252629506SJoonsoo Kim /* 209352629506SJoonsoo Kim * During exec, a temporary VMA is set up and later moved. 209452629506SJoonsoo Kim * The VMA is moved under the anon_vma lock but not the 209552629506SJoonsoo Kim * page tables, leading to a race where migration cannot 209652629506SJoonsoo Kim * find the migration ptes. Rather than increasing the 209752629506SJoonsoo Kim * locking requirements of exec(), migration skips 209852629506SJoonsoo Kim * temporary VMAs until after exec() completes. 209952629506SJoonsoo Kim */ 21004b8554c5SMatthew Wilcox (Oracle) if (!folio_test_ksm(folio) && folio_test_anon(folio)) 210152629506SJoonsoo Kim rwc.invalid_vma = invalid_migration_vma; 210252629506SJoonsoo Kim 21032a52bcbcSKirill A. Shutemov if (flags & TTU_RMAP_LOCKED) 21042f031c6fSMatthew Wilcox (Oracle) rmap_walk_locked(folio, &rwc); 21052a52bcbcSKirill A.
Shutemov else
21062f031c6fSMatthew Wilcox (Oracle) rmap_walk(folio, &rwc);
2107b291f000SNick Piggin }
2108e9995ef9SHugh Dickins
2109b756a3b5SAlistair Popple #ifdef CONFIG_DEVICE_PRIVATE
2110b756a3b5SAlistair Popple struct make_exclusive_args {
2111b756a3b5SAlistair Popple struct mm_struct *mm;
2112b756a3b5SAlistair Popple unsigned long address;
2113b756a3b5SAlistair Popple void *owner;
2114b756a3b5SAlistair Popple bool valid;
2115b756a3b5SAlistair Popple };
2116b756a3b5SAlistair Popple
21172f031c6fSMatthew Wilcox (Oracle) static bool page_make_device_exclusive_one(struct folio *folio,
2118b756a3b5SAlistair Popple struct vm_area_struct *vma, unsigned long address, void *priv)
2119b756a3b5SAlistair Popple {
2120b756a3b5SAlistair Popple struct mm_struct *mm = vma->vm_mm;
21210d251485SMatthew Wilcox (Oracle) DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
2122b756a3b5SAlistair Popple struct make_exclusive_args *args = priv;
2123b756a3b5SAlistair Popple pte_t pteval;
2124b756a3b5SAlistair Popple struct page *subpage;
2125b756a3b5SAlistair Popple bool ret = true;
2126b756a3b5SAlistair Popple struct mmu_notifier_range range;
2127b756a3b5SAlistair Popple swp_entry_t entry;
2128b756a3b5SAlistair Popple pte_t swp_pte;
2129b756a3b5SAlistair Popple
2130b756a3b5SAlistair Popple mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
2131b756a3b5SAlistair Popple vma->vm_mm, address, min(vma->vm_end,
21320d251485SMatthew Wilcox (Oracle) address + folio_size(folio)),
21330d251485SMatthew Wilcox (Oracle) args->owner);
2134b756a3b5SAlistair Popple mmu_notifier_invalidate_range_start(&range);
2135b756a3b5SAlistair Popple
2136b756a3b5SAlistair Popple while (page_vma_mapped_walk(&pvmw)) {
2137b756a3b5SAlistair Popple /* Unexpected PMD-mapped THP? */
21380d251485SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2139b756a3b5SAlistair Popple
2140b756a3b5SAlistair Popple if (!pte_present(*pvmw.pte)) {
2141b756a3b5SAlistair Popple ret = false;
2142b756a3b5SAlistair Popple page_vma_mapped_walk_done(&pvmw);
2143b756a3b5SAlistair Popple break;
2144b756a3b5SAlistair Popple }
2145b756a3b5SAlistair Popple
21460d251485SMatthew Wilcox (Oracle) subpage = folio_page(folio,
21470d251485SMatthew Wilcox (Oracle) pte_pfn(*pvmw.pte) - folio_pfn(folio));
2148b756a3b5SAlistair Popple address = pvmw.address;
2149b756a3b5SAlistair Popple
2150b756a3b5SAlistair Popple /* Nuke the page table entry. */
2151b756a3b5SAlistair Popple flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
2152b756a3b5SAlistair Popple pteval = ptep_clear_flush(vma, address, pvmw.pte);
2153b756a3b5SAlistair Popple
21540d251485SMatthew Wilcox (Oracle) /* Set the dirty flag on the folio now the pte is gone. */
2155b756a3b5SAlistair Popple if (pte_dirty(pteval))
21560d251485SMatthew Wilcox (Oracle) folio_mark_dirty(folio);
2157b756a3b5SAlistair Popple
2158b756a3b5SAlistair Popple /*
2159b756a3b5SAlistair Popple * Check that our target page is still mapped at the expected
2160b756a3b5SAlistair Popple * address.
2161b756a3b5SAlistair Popple */
2162b756a3b5SAlistair Popple if (args->mm == mm && args->address == address &&
2163b756a3b5SAlistair Popple pte_write(pteval))
2164b756a3b5SAlistair Popple args->valid = true;
2165b756a3b5SAlistair Popple
2166b756a3b5SAlistair Popple /*
2167b756a3b5SAlistair Popple * Store the pfn of the page in a special device exclusive
2168b756a3b5SAlistair Popple * swap entry. On CPU access, do_swap_page() will restore the
2169b756a3b5SAlistair Popple * original mapping via remove_device_exclusive_entry().
2170b756a3b5SAlistair Popple */
2171b756a3b5SAlistair Popple if (pte_write(pteval))
2172b756a3b5SAlistair Popple entry = make_writable_device_exclusive_entry(
2173b756a3b5SAlistair Popple page_to_pfn(subpage));
2174b756a3b5SAlistair Popple else
2175b756a3b5SAlistair Popple entry = make_readable_device_exclusive_entry(
2176b756a3b5SAlistair Popple page_to_pfn(subpage));
2177b756a3b5SAlistair Popple swp_pte = swp_entry_to_pte(entry);
2178b756a3b5SAlistair Popple if (pte_soft_dirty(pteval))
2179b756a3b5SAlistair Popple swp_pte = pte_swp_mksoft_dirty(swp_pte);
2180b756a3b5SAlistair Popple if (pte_uffd_wp(pteval))
2181b756a3b5SAlistair Popple swp_pte = pte_swp_mkuffd_wp(swp_pte);
2182b756a3b5SAlistair Popple
2183b756a3b5SAlistair Popple set_pte_at(mm, address, pvmw.pte, swp_pte);
2184b756a3b5SAlistair Popple
2185b756a3b5SAlistair Popple /*
2186b756a3b5SAlistair Popple * There is a reference on the page for the swap entry which has
2187b756a3b5SAlistair Popple * been removed, so we shouldn't take another.
2188b756a3b5SAlistair Popple */
2189cea86fe2SHugh Dickins page_remove_rmap(subpage, vma, false);
2190b756a3b5SAlistair Popple }
2191b756a3b5SAlistair Popple
2192b756a3b5SAlistair Popple mmu_notifier_invalidate_range_end(&range);
2193b756a3b5SAlistair Popple
2194b756a3b5SAlistair Popple return ret;
2195b756a3b5SAlistair Popple }
2196b756a3b5SAlistair Popple
2197b756a3b5SAlistair Popple /**
21980d251485SMatthew Wilcox (Oracle) * folio_make_device_exclusive - Mark the folio exclusively owned by a device.
21990d251485SMatthew Wilcox (Oracle) * @folio: The folio to replace page table entries for.
22000d251485SMatthew Wilcox (Oracle) * @mm: The mm_struct where the folio is expected to be mapped.
22010d251485SMatthew Wilcox (Oracle) * @address: Address where the folio is expected to be mapped.
2202b756a3b5SAlistair Popple * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks
2203b756a3b5SAlistair Popple *
22040d251485SMatthew Wilcox (Oracle) * Tries to remove all the page table entries which are mapping this
22050d251485SMatthew Wilcox (Oracle) * folio and replace them with special device exclusive swap entries to
22060d251485SMatthew Wilcox (Oracle) * grant a device exclusive access to the folio.
2207b756a3b5SAlistair Popple *
22080d251485SMatthew Wilcox (Oracle) * Context: Caller must hold the folio lock.
22090d251485SMatthew Wilcox (Oracle) * Return: false if the folio is still mapped, or if it could not be unmapped
2210b756a3b5SAlistair Popple * from the expected address. Otherwise returns true (success).
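 *
 * A minimal calling sketch (illustrative; it mirrors the loop in
 * make_device_exclusive_range() below, this helper's only caller):
 *
 *	folio = page_folio(page);
 *	if (folio_trylock(folio)) {
 *		if (folio_make_device_exclusive(folio, mm, addr, owner))
 *			...every pte now holds a device exclusive entry...
 *		folio_unlock(folio);
 *	}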
2211b756a3b5SAlistair Popple */
22120d251485SMatthew Wilcox (Oracle) static bool folio_make_device_exclusive(struct folio *folio,
22130d251485SMatthew Wilcox (Oracle) struct mm_struct *mm, unsigned long address, void *owner)
2214b756a3b5SAlistair Popple {
2215b756a3b5SAlistair Popple struct make_exclusive_args args = {
2216b756a3b5SAlistair Popple .mm = mm,
2217b756a3b5SAlistair Popple .address = address,
2218b756a3b5SAlistair Popple .owner = owner,
2219b756a3b5SAlistair Popple .valid = false,
2220b756a3b5SAlistair Popple };
2221b756a3b5SAlistair Popple struct rmap_walk_control rwc = {
2222b756a3b5SAlistair Popple .rmap_one = page_make_device_exclusive_one,
2223b756a3b5SAlistair Popple .done = page_not_mapped,
22242f031c6fSMatthew Wilcox (Oracle) .anon_lock = folio_lock_anon_vma_read,
2225b756a3b5SAlistair Popple .arg = &args,
2226b756a3b5SAlistair Popple };
2227b756a3b5SAlistair Popple
2228b756a3b5SAlistair Popple /*
22290d251485SMatthew Wilcox (Oracle) * Restrict to anonymous folios for now to avoid potential writeback
22300d251485SMatthew Wilcox (Oracle) * issues.
2231b756a3b5SAlistair Popple */
22320d251485SMatthew Wilcox (Oracle) if (!folio_test_anon(folio))
2233b756a3b5SAlistair Popple return false;
2234b756a3b5SAlistair Popple
22352f031c6fSMatthew Wilcox (Oracle) rmap_walk(folio, &rwc);
2236b756a3b5SAlistair Popple
22370d251485SMatthew Wilcox (Oracle) return args.valid && !folio_mapcount(folio);
2238b756a3b5SAlistair Popple }
2239b756a3b5SAlistair Popple
2240b756a3b5SAlistair Popple /**
2241b756a3b5SAlistair Popple * make_device_exclusive_range() - Mark a range for exclusive use by a device
2242b756a3b5SAlistair Popple * @mm: mm_struct of associated target process
2243b756a3b5SAlistair Popple * @start: start of the region to mark for exclusive device access
2244b756a3b5SAlistair Popple * @end: end address of region
2245b756a3b5SAlistair Popple * @pages: returns the pages which were successfully marked for exclusive access
2246b756a3b5SAlistair Popple * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
2247b756a3b5SAlistair Popple *
2248b756a3b5SAlistair Popple * Returns: number of pages found in the range by GUP. A page is marked for
2249b756a3b5SAlistair Popple * exclusive access only if the page pointer is non-NULL.
2250b756a3b5SAlistair Popple *
2251b756a3b5SAlistair Popple * This function finds ptes mapping page(s) in the given address range, locks
2252b756a3b5SAlistair Popple * them and replaces mappings with special swap entries preventing userspace CPU
2253b756a3b5SAlistair Popple * access. On fault these entries are replaced with the original mapping after
2254b756a3b5SAlistair Popple * calling MMU notifiers.
2255b756a3b5SAlistair Popple *
2256b756a3b5SAlistair Popple * A driver using this to program access from a device must use an mmu notifier
2257b756a3b5SAlistair Popple * critical section to hold a device-specific lock during programming. Once
2258b756a3b5SAlistair Popple * programming is complete it should drop the page lock and reference, after
2259b756a3b5SAlistair Popple * which point CPU access to the page will revoke the exclusive access.
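 *
 * A hedged sketch of that pattern (driver_lock and driver_map_page are
 * hypothetical driver-side names, not kernel APIs; the driver's
 * MMU_NOTIFY_EXCLUSIVE callback is assumed to take driver_lock too, and
 * NULL entries in pages[] are pages that could not be made exclusive):
 *
 *	mutex_lock(&driver_lock);
 *	npages = make_device_exclusive_range(mm, start, end, pages, owner);
 *	for (i = 0; i < npages; i++) {
 *		if (!pages[i])
 *			continue;
 *		driver_map_page(pages[i]);
 *		unlock_page(pages[i]);
 *		put_page(pages[i]);
 *	}
 *	mutex_unlock(&driver_lock);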
2260b756a3b5SAlistair Popple */ 2261b756a3b5SAlistair Popple int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, 2262b756a3b5SAlistair Popple unsigned long end, struct page **pages, 2263b756a3b5SAlistair Popple void *owner) 2264b756a3b5SAlistair Popple { 2265b756a3b5SAlistair Popple long npages = (end - start) >> PAGE_SHIFT; 2266b756a3b5SAlistair Popple long i; 2267b756a3b5SAlistair Popple 2268b756a3b5SAlistair Popple npages = get_user_pages_remote(mm, start, npages, 2269b756a3b5SAlistair Popple FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, 2270b756a3b5SAlistair Popple pages, NULL, NULL); 2271b756a3b5SAlistair Popple if (npages < 0) 2272b756a3b5SAlistair Popple return npages; 2273b756a3b5SAlistair Popple 2274b756a3b5SAlistair Popple for (i = 0; i < npages; i++, start += PAGE_SIZE) { 22750d251485SMatthew Wilcox (Oracle) struct folio *folio = page_folio(pages[i]); 22760d251485SMatthew Wilcox (Oracle) if (PageTail(pages[i]) || !folio_trylock(folio)) { 22770d251485SMatthew Wilcox (Oracle) folio_put(folio); 2278b756a3b5SAlistair Popple pages[i] = NULL; 2279b756a3b5SAlistair Popple continue; 2280b756a3b5SAlistair Popple } 2281b756a3b5SAlistair Popple 22820d251485SMatthew Wilcox (Oracle) if (!folio_make_device_exclusive(folio, mm, start, owner)) { 22830d251485SMatthew Wilcox (Oracle) folio_unlock(folio); 22840d251485SMatthew Wilcox (Oracle) folio_put(folio); 2285b756a3b5SAlistair Popple pages[i] = NULL; 2286b756a3b5SAlistair Popple } 2287b756a3b5SAlistair Popple } 2288b756a3b5SAlistair Popple 2289b756a3b5SAlistair Popple return npages; 2290b756a3b5SAlistair Popple } 2291b756a3b5SAlistair Popple EXPORT_SYMBOL_GPL(make_device_exclusive_range); 2292b756a3b5SAlistair Popple #endif 2293b756a3b5SAlistair Popple 229401d8b20dSPeter Zijlstra void __put_anon_vma(struct anon_vma *anon_vma) 229576545066SRik van Riel { 229676545066SRik van Riel struct anon_vma *root = anon_vma->root; 229776545066SRik van Riel 2298624483f3SAndrey Ryabinin anon_vma_free(anon_vma); 229901d8b20dSPeter Zijlstra if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 230076545066SRik van Riel anon_vma_free(root); 230176545066SRik van Riel } 230276545066SRik van Riel 23032f031c6fSMatthew Wilcox (Oracle) static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, 230484fbbe21SMatthew Wilcox (Oracle) const struct rmap_walk_control *rwc) 2305faecd8ddSJoonsoo Kim { 2306faecd8ddSJoonsoo Kim struct anon_vma *anon_vma; 2307faecd8ddSJoonsoo Kim 23080dd1c7bbSJoonsoo Kim if (rwc->anon_lock) 23092f031c6fSMatthew Wilcox (Oracle) return rwc->anon_lock(folio); 23100dd1c7bbSJoonsoo Kim 2311faecd8ddSJoonsoo Kim /* 23122f031c6fSMatthew Wilcox (Oracle) * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() 2313faecd8ddSJoonsoo Kim * because that depends on page_mapped(); but not all its usages 2314c1e8d7c6SMichel Lespinasse * are holding mmap_lock. 
Users without mmap_lock are required to
2315faecd8ddSJoonsoo Kim * take a reference to prevent the anon_vma from disappearing.
2316faecd8ddSJoonsoo Kim */
2317e05b3453SMatthew Wilcox (Oracle) anon_vma = folio_anon_vma(folio);
2318faecd8ddSJoonsoo Kim if (!anon_vma)
2319faecd8ddSJoonsoo Kim return NULL;
2320faecd8ddSJoonsoo Kim
2321faecd8ddSJoonsoo Kim anon_vma_lock_read(anon_vma);
2322faecd8ddSJoonsoo Kim return anon_vma;
2323faecd8ddSJoonsoo Kim }
2324faecd8ddSJoonsoo Kim
2325e9995ef9SHugh Dickins /*
2326e8351ac9SJoonsoo Kim * rmap_walk_anon - do something to an anonymous folio using the object-based
2327e8351ac9SJoonsoo Kim * rmap method
2328e8351ac9SJoonsoo Kim * @folio: the folio to be handled
2329e8351ac9SJoonsoo Kim * @rwc: control variable according to each walk type
2330e8351ac9SJoonsoo Kim *
2331e8351ac9SJoonsoo Kim * Find all the mappings of the folio using the mapping pointer and the vma
2332e8351ac9SJoonsoo Kim * chains contained in the anon_vma struct it points to.
2333e9995ef9SHugh Dickins */
233484fbbe21SMatthew Wilcox (Oracle) static void rmap_walk_anon(struct folio *folio,
233584fbbe21SMatthew Wilcox (Oracle) const struct rmap_walk_control *rwc, bool locked)
2336e9995ef9SHugh Dickins {
2337e9995ef9SHugh Dickins struct anon_vma *anon_vma;
2338a8fa41adSKirill A. Shutemov pgoff_t pgoff_start, pgoff_end;
23395beb4930SRik van Riel struct anon_vma_chain *avc;
2340e9995ef9SHugh Dickins
2341b9773199SKirill A. Shutemov if (locked) {
2342e05b3453SMatthew Wilcox (Oracle) anon_vma = folio_anon_vma(folio);
2343b9773199SKirill A. Shutemov /* anon_vma disappear under us? */
2344e05b3453SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!anon_vma, folio);
2345b9773199SKirill A. Shutemov } else {
23462f031c6fSMatthew Wilcox (Oracle) anon_vma = rmap_walk_anon_lock(folio, rwc);
2347b9773199SKirill A. Shutemov }
2348e9995ef9SHugh Dickins if (!anon_vma)
23491df631aeSMinchan Kim return;
2350faecd8ddSJoonsoo Kim
23512f031c6fSMatthew Wilcox (Oracle) pgoff_start = folio_pgoff(folio);
23522f031c6fSMatthew Wilcox (Oracle) pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2353a8fa41adSKirill A. Shutemov anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
2354a8fa41adSKirill A. Shutemov pgoff_start, pgoff_end) {
23555beb4930SRik van Riel struct vm_area_struct *vma = avc->vma;
23562f031c6fSMatthew Wilcox (Oracle) unsigned long address = vma_address(&folio->page, vma);
23570dd1c7bbSJoonsoo Kim
2358494334e4SHugh Dickins VM_BUG_ON_VMA(address == -EFAULT, vma);
2359ad12695fSAndrea Arcangeli cond_resched();
2360ad12695fSAndrea Arcangeli
23610dd1c7bbSJoonsoo Kim if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
23620dd1c7bbSJoonsoo Kim continue;
23630dd1c7bbSJoonsoo Kim
23642f031c6fSMatthew Wilcox (Oracle) if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2365e9995ef9SHugh Dickins break;
23662f031c6fSMatthew Wilcox (Oracle) if (rwc->done && rwc->done(folio))
23670dd1c7bbSJoonsoo Kim break;
2368e9995ef9SHugh Dickins }
2369b9773199SKirill A. Shutemov
2370b9773199SKirill A.
Shutemov if (!locked)
23714fc3f1d6SIngo Molnar anon_vma_unlock_read(anon_vma);
2372e9995ef9SHugh Dickins }
2373e9995ef9SHugh Dickins
2374e8351ac9SJoonsoo Kim /*
2375e8351ac9SJoonsoo Kim * rmap_walk_file - do something to a file-backed folio using the object-based
2376e8351ac9SJoonsoo Kim * rmap method
2377e8351ac9SJoonsoo Kim * @folio: the folio to be handled
2378e8351ac9SJoonsoo Kim * @rwc: control variable according to each walk type
2379e8351ac9SJoonsoo Kim *
2380e8351ac9SJoonsoo Kim * Find all the mappings of the folio using the mapping pointer and the vma
2381e8351ac9SJoonsoo Kim * chains contained in the address_space struct it points to.
2382e8351ac9SJoonsoo Kim */
238384fbbe21SMatthew Wilcox (Oracle) static void rmap_walk_file(struct folio *folio,
238384fbbe21SMatthew Wilcox (Oracle) const struct rmap_walk_control *rwc, bool locked)
2384e9995ef9SHugh Dickins {
23852f031c6fSMatthew Wilcox (Oracle) struct address_space *mapping = folio_mapping(folio);
2386a8fa41adSKirill A. Shutemov pgoff_t pgoff_start, pgoff_end;
2387e9995ef9SHugh Dickins struct vm_area_struct *vma;
2388e9995ef9SHugh Dickins
23899f32624bSJoonsoo Kim /*
23909f32624bSJoonsoo Kim * The page lock not only makes sure that page->mapping cannot
23919f32624bSJoonsoo Kim * suddenly be NULLified by truncation, it also makes sure that the
23929f32624bSJoonsoo Kim * structure at mapping cannot be freed and reused yet,
2393c8c06efaSDavidlohr Bueso * so we can safely take mapping->i_mmap_rwsem.
23949f32624bSJoonsoo Kim */
23952f031c6fSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
23969f32624bSJoonsoo Kim
2397e9995ef9SHugh Dickins if (!mapping)
23981df631aeSMinchan Kim return;
23993dec0ba0SDavidlohr Bueso
24002f031c6fSMatthew Wilcox (Oracle) pgoff_start = folio_pgoff(folio);
24012f031c6fSMatthew Wilcox (Oracle) pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2402b9773199SKirill A. Shutemov if (!locked)
24033dec0ba0SDavidlohr Bueso i_mmap_lock_read(mapping);
2404a8fa41adSKirill A. Shutemov vma_interval_tree_foreach(vma, &mapping->i_mmap,
2405a8fa41adSKirill A. Shutemov pgoff_start, pgoff_end) {
24062f031c6fSMatthew Wilcox (Oracle) unsigned long address = vma_address(&folio->page, vma);
24070dd1c7bbSJoonsoo Kim
2408494334e4SHugh Dickins VM_BUG_ON_VMA(address == -EFAULT, vma);
2409ad12695fSAndrea Arcangeli cond_resched();
2410ad12695fSAndrea Arcangeli
24110dd1c7bbSJoonsoo Kim if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
24120dd1c7bbSJoonsoo Kim continue;
24130dd1c7bbSJoonsoo Kim
24142f031c6fSMatthew Wilcox (Oracle) if (!rwc->rmap_one(folio, vma, address, rwc->arg))
24150dd1c7bbSJoonsoo Kim goto done;
24162f031c6fSMatthew Wilcox (Oracle) if (rwc->done && rwc->done(folio))
24170dd1c7bbSJoonsoo Kim goto done;
2418e9995ef9SHugh Dickins }
24190dd1c7bbSJoonsoo Kim
24200dd1c7bbSJoonsoo Kim done:
2421b9773199SKirill A. Shutemov if (!locked)
24223dec0ba0SDavidlohr Bueso i_mmap_unlock_read(mapping);
2423e9995ef9SHugh Dickins }
2424e9995ef9SHugh Dickins
242584fbbe21SMatthew Wilcox (Oracle) void rmap_walk(struct folio *folio, const struct rmap_walk_control *rwc)
2426e9995ef9SHugh Dickins {
24272f031c6fSMatthew Wilcox (Oracle) if (unlikely(folio_test_ksm(folio)))
24282f031c6fSMatthew Wilcox (Oracle) rmap_walk_ksm(folio, rwc);
24292f031c6fSMatthew Wilcox (Oracle) else if (folio_test_anon(folio))
24302f031c6fSMatthew Wilcox (Oracle) rmap_walk_anon(folio, rwc, false);
2431e9995ef9SHugh Dickins else
24322f031c6fSMatthew Wilcox (Oracle) rmap_walk_file(folio, rwc, false);
2433b9773199SKirill A. Shutemov }
2434b9773199SKirill A.
Shutemov 2435b9773199SKirill A. Shutemov /* Like rmap_walk, but caller holds relevant rmap lock */ 243684fbbe21SMatthew Wilcox (Oracle) void rmap_walk_locked(struct folio *folio, const struct rmap_walk_control *rwc) 2437b9773199SKirill A. Shutemov { 2438b9773199SKirill A. Shutemov /* no ksm support for now */ 24392f031c6fSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); 24402f031c6fSMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 24412f031c6fSMatthew Wilcox (Oracle) rmap_walk_anon(folio, rwc, true); 2442b9773199SKirill A. Shutemov else 24432f031c6fSMatthew Wilcox (Oracle) rmap_walk_file(folio, rwc, true); 2444e9995ef9SHugh Dickins } 24450fe6e20bSNaoya Horiguchi 2446e3390f67SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE 24470fe6e20bSNaoya Horiguchi /* 2448451b9514SKirill Tkhai * The following two functions are for anonymous (private mapped) hugepages. 24490fe6e20bSNaoya Horiguchi * Unlike common anonymous pages, anonymous hugepages have no accounting code 24500fe6e20bSNaoya Horiguchi * and no lru code, because we handle hugepages differently from common pages. 245128c5209dSDavid Hildenbrand * 245228c5209dSDavid Hildenbrand * RMAP_COMPOUND is ignored. 24530fe6e20bSNaoya Horiguchi */ 245428c5209dSDavid Hildenbrand void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, 245528c5209dSDavid Hildenbrand unsigned long address, rmap_t flags) 24560fe6e20bSNaoya Horiguchi { 24570fe6e20bSNaoya Horiguchi struct anon_vma *anon_vma = vma->anon_vma; 24580fe6e20bSNaoya Horiguchi int first; 2459a850ea30SNaoya Horiguchi 2460a850ea30SNaoya Horiguchi BUG_ON(!PageLocked(page)); 24610fe6e20bSNaoya Horiguchi BUG_ON(!anon_vma); 24625dbe0af4SHugh Dickins /* address might be in next vma when migration races vma_adjust */ 246353f9263bSKirill A. Shutemov first = atomic_inc_and_test(compound_mapcount_ptr(page)); 24646c287605SDavid Hildenbrand VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); 24656c287605SDavid Hildenbrand VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); 24660fe6e20bSNaoya Horiguchi if (first) 246728c5209dSDavid Hildenbrand __page_set_anon_rmap(page, vma, address, 246828c5209dSDavid Hildenbrand !!(flags & RMAP_EXCLUSIVE)); 24690fe6e20bSNaoya Horiguchi } 24700fe6e20bSNaoya Horiguchi 24710fe6e20bSNaoya Horiguchi void hugepage_add_new_anon_rmap(struct page *page, 24720fe6e20bSNaoya Horiguchi struct vm_area_struct *vma, unsigned long address) 24730fe6e20bSNaoya Horiguchi { 24740fe6e20bSNaoya Horiguchi BUG_ON(address < vma->vm_start || address >= vma->vm_end); 247553f9263bSKirill A. Shutemov atomic_set(compound_mapcount_ptr(page), 0); 247647e29d32SJohn Hubbard atomic_set(compound_pincount_ptr(page), 0); 247747e29d32SJohn Hubbard 2478451b9514SKirill Tkhai __page_set_anon_rmap(page, vma, address, 1); 24790fe6e20bSNaoya Horiguchi } 2480e3390f67SNaoya Horiguchi #endif /* CONFIG_HUGETLB_PAGE */ 2481
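/*
 * A hedged sketch of driving the rmap walk above with a custom control, in
 * the style of try_to_migrate(); my_rmap_one is a hypothetical callback, not
 * something defined in this file, and the folio is assumed to be locked:
 *
 *	static bool my_rmap_one(struct folio *folio, struct vm_area_struct *vma,
 *				unsigned long address, void *arg)
 *	{
 *		...inspect or modify the mapping at address; return false to
 *		   stop the walk early, true to keep walking...
 *	}
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one = my_rmap_one,
 *		.anon_lock = folio_lock_anon_vma_read,
 *	};
 *	rmap_walk(folio, &rwc);
 */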