/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_rwsem	(while writing or truncating, not reading or faulting)
 *   mm->mmap_lock
 *     mapping->invalidate_lock (in filemap_fault)
 *       page->flags PG_locked (lock_page)
 *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
 *           vma_start_write
 *             mapping->i_mmap_rwsem
 *               anon_vma->rwsem
 *                 mm->page_table_lock or pte_lock
 *                   swap_lock (in swap_duplicate, swap_info_get)
 *                     mmlist_lock (in mmput, drain_mmlist and others)
 *                     mapping->private_lock (in block_dirty_folio)
 *                       folio_lock_memcg move_lock (in block_dirty_folio)
 *                         i_pages lock (widely used)
 *                           lruvec->lru_lock (in folio_lruvec_lock_irq)
 *                     inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                     bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                       sb_lock (within inode_lock in fs/fs-writeback.c)
 *                       i_pages lock (widely used, in set_page_dirty,
 *                                 in arch-dependent flush_dcache_mmap_lock,
 *                                 within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 *
 * hugetlbfs PageHuge() take locks in this order:
 *   hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *     vma_lock (hugetlb specific lock for pmd_sharing)
 *       mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
 *         page->flags PG_locked (lock_page)
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>
#include <trace/events/migrate.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->num_children = 0;
		anon_vma->num_active_vmas = 0;
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against folio_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
	 *
	 * folio_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not, we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
 * and that may actually touch the rwsem even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_lock held for reading.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		anon_vma->num_children++; /* self-parent link for new root */
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		anon_vma->num_active_vmas++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

out_enomem_free_avc:
	anon_vma_chain_free(avc);
out_enomem:
	return -ENOMEM;
}

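/*
 * For callers, the usual entry point is the anon_vma_prepare() wrapper in
 * <linux/rmap.h>, which handles the common "already have one" case inline
 * and only falls back to __anon_vma_prepare() when vma->anon_vma is still
 * NULL. Roughly (illustrative sketch; see the header for the real thing):
 *
 *	static inline int anon_vma_prepare(struct vm_area_struct *vma)
 *	{
 *		if (likely(vma->anon_vma))
 *			return 0;
 *
 *		return __anon_vma_prepare(vma);
 *	}
 */
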
/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(),
 * copy_vma() and anon_vma_fork(). The first four want an exact copy of src,
 * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to
 * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before
 * the call, we can identify this case by checking (!dst->anon_vma &&
 * src->anon_vma).
 *
 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
 * and reuse an existing anon_vma which has no vmas and only one child anon_vma.
 * This prevents degradation of the anon_vma hierarchy to an endless linear
 * chain in the case of a constantly forking task. On the other hand, an
 * anon_vma with more than one child isn't reused even if there is no live vma,
 * so the rmap walker has a good chance of avoiding scanning the whole hierarchy
 * when it searches where the page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse existing anon_vma if it has no vma and only one
		 * anon_vma child.
		 *
		 * Root anon_vma is never reused:
		 * it has self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && src->anon_vma &&
		    anon_vma->num_children < 2 &&
		    anon_vma->num_active_vmas == 0)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->num_active_vmas++;
	unlock_anon_vma_root(root);
	return 0;

enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its num_active_vmas can
	 * be incorrectly decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

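/*
 * Concrete scenario (illustrative): when a task forks, each of the child's
 * VMAs is first cloned onto the parent's anon_vmas above, so rmap can still
 * find the pages the child shares with the parent until they are COWed.
 * anon_vma_fork() below then adds a child-private anon_vma for pages COWed
 * after the fork. Repeated forking therefore grows a tree of anon_vmas,
 * which the reuse check above keeps from degenerating into an endless chain.
 */
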
/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	anon_vma->num_active_vmas++;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->num_children++;
	anon_vma_unlock_write(anon_vma);

	return 0;

out_error_free_anon_vma:
	put_anon_vma(anon_vma);
out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->num_children--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma) {
		vma->anon_vma->num_active_vmas--;

		/*
		 * The vma is still needed after the unlink; its anon_vma will
		 * be prepared again when a fault is handled.
		 */
		vma->anon_vma = NULL;
	}
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->num_children);
		VM_WARN_ON(anon_vma->num_active_vmas);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap(),
 * the best this function can do is return a refcount-increased anon_vma
 * that might have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 *
 * NOTE: the caller should normally hold folio lock when calling this. If
 * not, the caller needs to double check the anon_vma didn't change after
 * taking the anon_vma lock for either read or write (UFFDIO_MOVE can modify it
 * concurrently without folio lock protection). See folio_lock_anon_vma_read()
 * which has already covered that, and comment above remap_pages().
 */
struct anon_vma *folio_get_anon_vma(struct folio *folio)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this folio is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to folio_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with folio_get_anon_vma() and then block on the mutex
 * in the !rwc->try_lock case.
 */
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
					  struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

retry:
	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * folio_move_anon_rmap() might have changed the anon_vma as we
		 * might not hold the folio lock here.
		 */
		if (unlikely((unsigned long)READ_ONCE(folio->mapping) !=
			     anon_mapping)) {
			up_read(&root_anon_vma->rwsem);
			rcu_read_unlock();
			goto retry;
		}

		/*
		 * If the folio is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!folio_mapped(folio)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	if (rwc && rwc->try_lock) {
		anon_vma = NULL;
		rwc->contended = true;
		goto out;
	}

	/* trylock failed, we have to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	/*
	 * folio_move_anon_rmap() might have changed the anon_vma as we might
	 * not hold the folio lock here.
	 */
	if (unlikely((unsigned long)READ_ONCE(folio->mapping) !=
		     anon_mapping)) {
		anon_vma_unlock_read(anon_vma);
		put_anon_vma(anon_vma);
		anon_vma = NULL;
		goto retry;
	}

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

/*
 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
 */
#define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
#define TLB_FLUSH_BATCH_PENDING_MASK	\
	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
#define TLB_FLUSH_BATCH_PENDING_LARGE	\
	(TLB_FLUSH_BATCH_PENDING_MASK / 2)

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
				      unsigned long uaddr)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	int batch;
	bool writable = pte_dirty(pteval);

	if (!pte_accessible(mm, pteval))
		return;

	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure compiler does not re-order the setting of tlb_flush_batched
	 * before the PTE is cleared.
	 */
	barrier();
	batch = atomic_read(&mm->tlb_flush_batched);
retry:
	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
		/*
		 * Prevent `pending' from catching up with `flushed' because of
		 * overflow. Reset `pending' and `flushed' to be 1 and 0 if
		 * `pending' becomes large.
		 */
		if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1))
			goto retry;
	} else {
		atomic_inc(&mm->tlb_flush_batched);
	}

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}
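
/*
 * Worked example of the pending/flushed encoding (illustrative numbers):
 * with TLB_FLUSH_BATCH_FLUSHED_SHIFT == 16, a tlb_flush_batched value of
 * 0x00030005 means 5 pending and 3 flushed generations. Since they differ,
 * flush_tlb_batched_pending() below flushes and then tries to store
 * 5 | (5 << 16) == 0x00050005, i.e. pending == flushed again, while
 * set_tlb_ubc_flush_pending() above simply increments the pending count
 * (resetting it to 1 once it grows past TLB_FLUSH_BATCH_PENDING_LARGE).
 */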

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	return arch_tlbbatch_should_defer(mm);
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	int batch = atomic_read(&mm->tlb_flush_batched);
	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;

	if (pending != flushed) {
		arch_flush_tlb_batched_pending(mm);
		/*
		 * If a new TLB flush became pending while we were flushing,
		 * leave mm->tlb_flush_batched as is, to avoid losing that flush.
		 */
		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
				      unsigned long uaddr)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct folio *folio = page_folio(page);
	if (folio_test_anon(folio)) {
		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (!vma->vm_file) {
		return -EFAULT;
	} else if (vma->vm_file->f_mapping != folio->mapping) {
		return -EFAULT;
	}

	return vma_address(page, vma);
}

/*
 * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
 * NULL if it doesn't exist.  No guarantees / checks on what the pmd_t*
 * represents.
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
out:
	return pmd;
}

struct folio_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};

/*
 * arg: folio_referenced_arg will be passed
 */
static bool folio_referenced_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address, void *arg)
{
	struct folio_referenced_arg *pra = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	int referenced = 0;
	unsigned long start = address, ptes = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if (vma->vm_flags & VM_LOCKED) {
			if (!folio_test_large(folio) || !pvmw.pte) {
				/* Restore the mlock which got missed */
				mlock_vma_folio(folio, vma);
				page_vma_mapped_walk_done(&pvmw);
				pra->vm_flags |= VM_LOCKED;
				return false;	/* To break the loop */
			}
			/*
			 * A large folio that is fully mapped to the VMA
			 * will be handled after the pvmw loop.
			 *
			 * A large folio that crosses VMA boundaries is
			 * expected to be picked up by page reclaim, but
			 * references to its pages inside the VM_LOCKED
			 * vma should be skipped: page reclaim should only
			 * count references to pages outside the VM_LOCKED
			 * range.
			 */
			ptes++;
			pra->mapcount--;
			continue;
		}

		if (pvmw.pte) {
			if (lru_gen_enabled() &&
			    pte_young(ptep_get(pvmw.pte))) {
				lru_gen_look_around(&pvmw);
				referenced++;
			}

			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte))
				referenced++;
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped folio? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if ((vma->vm_flags & VM_LOCKED) &&
			folio_test_large(folio) &&
			folio_within_vma(folio, vma)) {
		unsigned long s_align, e_align;

		s_align = ALIGN_DOWN(start, PMD_SIZE);
		e_align = ALIGN_DOWN(start + folio_size(folio) - 1, PMD_SIZE);

		/* folio doesn't cross page table boundary and fully mapped */
		if ((s_align == e_align) && (ptes == folio_nr_pages(folio))) {
			/* Restore the mlock which got missed */
			mlock_vma_folio(folio, vma);
			pra->vm_flags |= VM_LOCKED;
			return false;	/* To break the loop */
		}
	}

	if (referenced)
		folio_clear_idle(folio);
	if (folio_test_clear_young(folio))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
	}

	if (!pra->mapcount)
		return false; /* To break the loop */

	return true;
}

static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct folio_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	/*
	 * Ignore references from this mapping if it has no recency. If the
	 * folio has been used in another mapping, we will catch it; if this
	 * other mapping is already gone, the unmap path will have set the
	 * referenced flag or activated the folio in zap_pte_range().
	 */
	if (!vma_has_recency(vma))
		return true;

	/*
	 * If we are reclaiming on behalf of a cgroup, skip counting on behalf
	 * of references from different cgroups.
	 */
	if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * folio_referenced() - Test if the folio was referenced.
 * @folio: The folio to test.
 * @is_locked: Caller holds lock on the folio.
 * @memcg: target memory cgroup
 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
 *
 * Quick test_and_clear_referenced for all mappings of a folio.
 *
 * Return: The number of mappings which referenced the folio. Return -1 if
 * the function bailed out due to rmap lock contention.
 */
int folio_referenced(struct folio *folio, int is_locked,
		     struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	int we_locked = 0;
	struct folio_referenced_arg pra = {
		.mapcount = folio_mapcount(folio),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = folio_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = folio_lock_anon_vma_read,
		.try_lock = true,
		.invalid_vma = invalid_folio_referenced_vma,
	};

	*vm_flags = 0;
	if (!pra.mapcount)
		return 0;

	if (!folio_raw_mapping(folio))
		return 0;

	if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
		we_locked = folio_trylock(folio);
		if (!we_locked)
			return 1;
	}

	rmap_walk(folio, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		folio_unlock(folio);

	return rwc.contended ? -1 : pra.referenced;
}

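/*
 * Illustrative caller pattern for the try_lock convention above (a sketch,
 * not taken from any particular caller): because rwc.try_lock is set,
 * folio_referenced() does not sleep on a contended rmap lock but reports
 * the contention instead:
 *
 *	unsigned long vm_flags;
 *	int refs = folio_referenced(folio, 1, memcg, &vm_flags);
 *
 *	if (refs == -1)
 *		// rmap lock contended: caller decides, e.g. assume referenced
 *	else if (refs)
 *		// referenced; vm_flags accumulates the referencing VMAs' flags
 */
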
static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
{
	int cleaned = 0;
	struct vm_area_struct *vma = pvmw->vma;
	struct mmu_notifier_range range;
	unsigned long address = pvmw->address;

	/*
	 * We have to assume the worst case, i.e. pmd, for invalidation. Note
	 * that the folio cannot be freed from this function.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0,
				vma->vm_mm, address, vma_address_end(pvmw));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(pvmw)) {
		int ret = 0;

		address = pvmw->address;
		if (pvmw->pte) {
			pte_t *pte = pvmw->pte;
			pte_t entry = ptep_get(pte);

			if (!pte_dirty(entry) && !pte_write(entry))
				continue;

			flush_cache_page(vma, address, pte_pfn(entry));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			pmd_t *pmd = pvmw->pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_range(vma, address,
					  address + HPAGE_PMD_SIZE);
			entry = pmdp_invalidate(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped folio? */
			WARN_ON_ONCE(1);
#endif
		}

		if (ret)
			cleaned++;
	}

	mmu_notifier_invalidate_range_end(&range);

	return cleaned;
}

static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
	int *cleaned = arg;

	*cleaned += page_vma_mkclean_one(&pvmw);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int folio_mkclean(struct folio *folio)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!folio_test_locked(folio));

	if (!folio_mapped(folio))
		return 0;

	mapping = folio_mapping(folio);
	if (!mapping)
		return 0;

Kim 10992f031c6fSMatthew Wilcox (Oracle) rmap_walk(folio, &rwc); 11009853a407SJoonsoo Kim 11019853a407SJoonsoo Kim return cleaned; 1102d08b3851SPeter Zijlstra } 1103d9c08e22SMatthew Wilcox (Oracle) EXPORT_SYMBOL_GPL(folio_mkclean); 1104d08b3851SPeter Zijlstra 11051da177e4SLinus Torvalds /** 11066a8e0596SMuchun Song * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped within the range 11076a8e0596SMuchun Song * [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff) 11086a8e0596SMuchun Song * within the @vma of shared mappings. Since clean PTEs 11096a8e0596SMuchun Song * should also be read-only, write-protects them too. 11106a8e0596SMuchun Song * @pfn: start pfn. 11116a8e0596SMuchun Song * @nr_pages: number of physically contiguous pages starting with @pfn. 11126a8e0596SMuchun Song * @pgoff: page offset that @pfn is mapped at. 11136a8e0596SMuchun Song * @vma: vma that @pfn is mapped within. 11146a8e0596SMuchun Song * 11156a8e0596SMuchun Song * Returns the number of cleaned PTEs (including PMDs). 11166a8e0596SMuchun Song */ 11176a8e0596SMuchun Song int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, 11186a8e0596SMuchun Song struct vm_area_struct *vma) 11196a8e0596SMuchun Song { 11206a8e0596SMuchun Song struct page_vma_mapped_walk pvmw = { 11216a8e0596SMuchun Song .pfn = pfn, 11226a8e0596SMuchun Song .nr_pages = nr_pages, 11236a8e0596SMuchun Song .pgoff = pgoff, 11246a8e0596SMuchun Song .vma = vma, 11256a8e0596SMuchun Song .flags = PVMW_SYNC, 11266a8e0596SMuchun Song }; 11276a8e0596SMuchun Song 11286a8e0596SMuchun Song if (invalid_mkclean_vma(vma, NULL)) 11296a8e0596SMuchun Song return 0; 11306a8e0596SMuchun Song 11316a8e0596SMuchun Song pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma); 11326a8e0596SMuchun Song VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); 11336a8e0596SMuchun Song 11346a8e0596SMuchun Song return page_vma_mkclean_one(&pvmw); 11356a8e0596SMuchun Song } 11366a8e0596SMuchun Song 1137b14224fbSMatthew Wilcox (Oracle) int folio_total_mapcount(struct folio *folio) 11389bd3155eSHugh Dickins { 1139b14224fbSMatthew Wilcox (Oracle) int mapcount = folio_entire_mapcount(folio); 1140b14224fbSMatthew Wilcox (Oracle) int nr_pages; 1141cb67f428SHugh Dickins int i; 1142cb67f428SHugh Dickins 1143b14224fbSMatthew Wilcox (Oracle) /* In the common case, avoid the loop when no pages mapped by PTE */ 1144eec20426SMatthew Wilcox (Oracle) if (folio_nr_pages_mapped(folio) == 0) 1145be5ef2d9SHugh Dickins return mapcount; 1146be5ef2d9SHugh Dickins /* 1147b14224fbSMatthew Wilcox (Oracle) * Add all the PTE mappings of those pages mapped by PTE. 1148b14224fbSMatthew Wilcox (Oracle) * Limit the loop to folio_nr_pages_mapped()? 1149be5ef2d9SHugh Dickins * Perhaps: given all the raciness, that may be a good or a bad idea.
1150be5ef2d9SHugh Dickins */ 1151b14224fbSMatthew Wilcox (Oracle) nr_pages = folio_nr_pages(folio); 1152b14224fbSMatthew Wilcox (Oracle) for (i = 0; i < nr_pages; i++) 1153b14224fbSMatthew Wilcox (Oracle) mapcount += atomic_read(&folio_page(folio, i)->_mapcount); 1154be5ef2d9SHugh Dickins 1155be5ef2d9SHugh Dickins /* But each of those _mapcounts was based on -1 */ 1156b14224fbSMatthew Wilcox (Oracle) mapcount += nr_pages; 1157be5ef2d9SHugh Dickins return mapcount; 1158cb67f428SHugh Dickins } 1159cb67f428SHugh Dickins 11606a8e0596SMuchun Song /** 116106968625SDavid Hildenbrand * folio_move_anon_rmap - move a folio to our anon_vma 116206968625SDavid Hildenbrand * @folio: The folio to move to our anon_vma 116306968625SDavid Hildenbrand * @vma: The vma the folio belongs to 1164c44b6743SRik van Riel * 116506968625SDavid Hildenbrand * When a folio belongs exclusively to one process after a COW event, 116606968625SDavid Hildenbrand * that folio can be moved into the anon_vma that belongs to just that 116706968625SDavid Hildenbrand * process, so the rmap code will not search the parent or sibling processes. 1168c44b6743SRik van Riel */ 116906968625SDavid Hildenbrand void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma) 1170c44b6743SRik van Riel { 1171595af4c9SMatthew Wilcox (Oracle) void *anon_vma = vma->anon_vma; 1172c44b6743SRik van Riel 1173595af4c9SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 117481d1b09cSSasha Levin VM_BUG_ON_VMA(!anon_vma, vma); 1175c44b6743SRik van Riel 1176595af4c9SMatthew Wilcox (Oracle) anon_vma += PAGE_MAPPING_ANON; 1177414e2fb8SVladimir Davydov /* 1178414e2fb8SVladimir Davydov * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written 1179b3ac0413SMatthew Wilcox (Oracle) * simultaneously, so a concurrent reader (eg folio_referenced()'s 1180b3ac0413SMatthew Wilcox (Oracle) * folio_test_anon()) will not see one without the other. 1181414e2fb8SVladimir Davydov */ 1182595af4c9SMatthew Wilcox (Oracle) WRITE_ONCE(folio->mapping, anon_vma); 1183c44b6743SRik van Riel } 1184c44b6743SRik van Riel 1185c44b6743SRik van Riel /** 1186c66db8c0SDavid Hildenbrand * __folio_set_anon - set up a new anonymous rmap for a folio 1187c66db8c0SDavid Hildenbrand * @folio: The folio to set up the new anonymous rmap for. 1188c66db8c0SDavid Hildenbrand * @vma: VM area to add the folio to. 11894e1c1975SAndi Kleen * @address: User virtual address of the mapping 1190c66db8c0SDavid Hildenbrand * @exclusive: Whether the folio is exclusive to the process. 11911da177e4SLinus Torvalds */ 1192c66db8c0SDavid Hildenbrand static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma, 1193c66db8c0SDavid Hildenbrand unsigned long address, bool exclusive) 11941da177e4SLinus Torvalds { 1195e8a03febSRik van Riel struct anon_vma *anon_vma = vma->anon_vma; 11962822c1aaSNick Piggin 1197e8a03febSRik van Riel BUG_ON(!anon_vma); 1198ea90002bSLinus Torvalds 1199ea90002bSLinus Torvalds /* 1200c66db8c0SDavid Hildenbrand * If the folio isn't exclusive to this vma, we must use the _oldest_ 1201c66db8c0SDavid Hildenbrand * possible anon_vma for the folio mapping! 1202ea90002bSLinus Torvalds */ 12034e1c1975SAndi Kleen if (!exclusive) 1204288468c3SAndrea Arcangeli anon_vma = anon_vma->root; 1205ea90002bSLinus Torvalds 120616f5e707SAlex Shi /* 12075b4bd90fSMatthew Wilcox (Oracle) * page_idle does a lockless/optimistic rmap scan on folio->mapping. 
120816f5e707SAlex Shi * Make sure the compiler doesn't split the stores of anon_vma and 120916f5e707SAlex Shi * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code 121016f5e707SAlex Shi * could mistake the mapping for a struct address_space and crash. 121116f5e707SAlex Shi */ 12121da177e4SLinus Torvalds anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 12135b4bd90fSMatthew Wilcox (Oracle) WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); 12145b4bd90fSMatthew Wilcox (Oracle) folio->index = linear_page_index(vma, address); 12151da177e4SLinus Torvalds } 12169617d95eSNick Piggin 12179617d95eSNick Piggin /** 121843d8eac4SRandy Dunlap * __page_check_anon_rmap - sanity check anonymous rmap addition 1219dba438bdSMatthew Wilcox (Oracle) * @folio: The folio containing @page. 1220dba438bdSMatthew Wilcox (Oracle) * @page: the page to check the mapping of 1221c97a9e10SNick Piggin * @vma: the vm area in which the mapping is added 1222c97a9e10SNick Piggin * @address: the user virtual address mapped 1223c97a9e10SNick Piggin */ 1224dba438bdSMatthew Wilcox (Oracle) static void __page_check_anon_rmap(struct folio *folio, struct page *page, 1225c97a9e10SNick Piggin struct vm_area_struct *vma, unsigned long address) 1226c97a9e10SNick Piggin { 1227c97a9e10SNick Piggin /* 1228c97a9e10SNick Piggin * The page's anon-rmap details (mapping and index) are guaranteed to 1229c97a9e10SNick Piggin * be set up correctly at this point. 1230c97a9e10SNick Piggin * 1231c97a9e10SNick Piggin * We have exclusion against page_add_anon_rmap because the caller 123290aaca85SMiaohe Lin * always holds the page locked. 1233c97a9e10SNick Piggin * 1234cb9089baSMatthew Wilcox (Oracle) * We have exclusion against folio_add_new_anon_rmap because those pages 1235c97a9e10SNick Piggin * are initially only visible via the pagetables, and the pte is locked 1236cb9089baSMatthew Wilcox (Oracle) * over the call to folio_add_new_anon_rmap. 1237c97a9e10SNick Piggin */ 1238e05b3453SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, 1239e05b3453SMatthew Wilcox (Oracle) folio); 124030c46382SYang Shi VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), 124130c46382SYang Shi page); 1242c97a9e10SNick Piggin } 1243c97a9e10SNick Piggin 1244c97a9e10SNick Piggin /** 12459617d95eSNick Piggin * page_add_anon_rmap - add pte mapping to an anonymous page 12469617d95eSNick Piggin * @page: the page to add the mapping to 12479617d95eSNick Piggin * @vma: the vm area in which the mapping is added 12489617d95eSNick Piggin * @address: the user virtual address mapped 1249f1e2db12SDavid Hildenbrand * @flags: the rmap flags 12509617d95eSNick Piggin * 12515ad64688SHugh Dickins * The caller needs to hold the pte lock, and the page must be locked in 125280e14822SHugh Dickins * the anon_vma case: to serialize mapping,index checking after setting, 125380e14822SHugh Dickins * and to ensure that PageAnon is not being upgraded racily to PageKsm 125480e14822SHugh Dickins * (but PageKsm is never downgraded to PageAnon). 12559617d95eSNick Piggin */ 1256ee0800c2SMatthew Wilcox (Oracle) void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, 1257ee0800c2SMatthew Wilcox (Oracle) unsigned long address, rmap_t flags) 1258ad8c2ee8SRik van Riel { 1259ee0800c2SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 1260ee0800c2SMatthew Wilcox (Oracle) atomic_t *mapped = &folio->_nr_pages_mapped; 12619bd3155eSHugh Dickins int nr = 0, nr_pmdmapped = 0; 1262d281ee61SKirill A. 
Shutemov bool compound = flags & RMAP_COMPOUND; 1263132b180fSDavid Hildenbrand bool first; 126453f9263bSKirill A. Shutemov 1265*a4ea1864SDavid Hildenbrand VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); 1266*a4ea1864SDavid Hildenbrand 1267be5ef2d9SHugh Dickins /* Is page being mapped by PTE? Is this its first map to be added? */ 1268be5ef2d9SHugh Dickins if (likely(!compound)) { 1269d8dd5e97SHugh Dickins first = atomic_inc_and_test(&page->_mapcount); 1270d8dd5e97SHugh Dickins nr = first; 1271ee0800c2SMatthew Wilcox (Oracle) if (first && folio_test_large(folio)) { 12724b51634cSHugh Dickins nr = atomic_inc_return_relaxed(mapped); 12736287b7daSHugh Dickins nr = (nr < COMPOUND_MAPPED); 127453f9263bSKirill A. Shutemov } 1275ee0800c2SMatthew Wilcox (Oracle) } else if (folio_test_pmd_mappable(folio)) { 1276be5ef2d9SHugh Dickins /* That test is redundant: it's for safety or to optimize out */ 1277be5ef2d9SHugh Dickins 1278ee0800c2SMatthew Wilcox (Oracle) first = atomic_inc_and_test(&folio->_entire_mapcount); 1279be5ef2d9SHugh Dickins if (first) { 12804b51634cSHugh Dickins nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); 12816287b7daSHugh Dickins if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { 1282ee0800c2SMatthew Wilcox (Oracle) nr_pmdmapped = folio_nr_pages(folio); 1283eec20426SMatthew Wilcox (Oracle) nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); 12846287b7daSHugh Dickins /* Raced ahead of a remove and another add? */ 12856287b7daSHugh Dickins if (unlikely(nr < 0)) 12866287b7daSHugh Dickins nr = 0; 12876287b7daSHugh Dickins } else { 12886287b7daSHugh Dickins /* Raced ahead of a remove of COMPOUND_MAPPED */ 12896287b7daSHugh Dickins nr = 0; 12906287b7daSHugh Dickins } 1291be5ef2d9SHugh Dickins } 1292be5ef2d9SHugh Dickins } 1293cb67f428SHugh Dickins 12949bd3155eSHugh Dickins if (nr_pmdmapped) 1295ee0800c2SMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped); 12969bd3155eSHugh Dickins if (nr) 1297ee0800c2SMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); 12985ad64688SHugh Dickins 1299c5c54003SDavid Hildenbrand if (unlikely(!folio_test_anon(folio))) { 1300c5c54003SDavid Hildenbrand VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); 1301a1f34ee1SDavid Hildenbrand /* 1302a1f34ee1SDavid Hildenbrand * For a PTE-mapped large folio, we only know that the single 1303a1f34ee1SDavid Hildenbrand * PTE is exclusive. Further, __folio_set_anon() might not get 1304a1f34ee1SDavid Hildenbrand * folio->index right when not given the address of the head 1305a1f34ee1SDavid Hildenbrand * page. 1306a1f34ee1SDavid Hildenbrand */ 1307a1f34ee1SDavid Hildenbrand VM_WARN_ON_FOLIO(folio_test_large(folio) && !compound, folio); 1308c66db8c0SDavid Hildenbrand __folio_set_anon(folio, vma, address, 130914f9135dSDavid Hildenbrand !!(flags & RMAP_EXCLUSIVE)); 1310c5c54003SDavid Hildenbrand } else if (likely(!folio_test_ksm(folio))) { 1311dba438bdSMatthew Wilcox (Oracle) __page_check_anon_rmap(folio, page, vma, address); 1312c7c3dec1SJohannes Weiner } 1313c66db8c0SDavid Hildenbrand if (flags & RMAP_EXCLUSIVE) 1314c66db8c0SDavid Hildenbrand SetPageAnonExclusive(page); 1315132b180fSDavid Hildenbrand /* While PTE-mapping a THP we have a PMD and a PTE mapping. 
*/ 1316132b180fSDavid Hildenbrand VM_WARN_ON_FOLIO((atomic_read(&page->_mapcount) > 0 || 1317132b180fSDavid Hildenbrand (folio_test_large(folio) && folio_entire_mapcount(folio) > 1)) && 1318132b180fSDavid Hildenbrand PageAnonExclusive(page), folio); 1319cea86fe2SHugh Dickins 13201acbc3f9SYin Fengwei /* 13211acbc3f9SYin Fengwei * For large folio, only mlock it if it's fully mapped to VMA. It's 13221acbc3f9SYin Fengwei * not easy to check whether the large folio is fully mapped to VMA 13231acbc3f9SYin Fengwei * here. Only mlock normal 4K folio and leave page reclaim to handle 13241acbc3f9SYin Fengwei * large folio. 13251acbc3f9SYin Fengwei */ 13261acbc3f9SYin Fengwei if (!folio_test_large(folio)) 13271acbc3f9SYin Fengwei mlock_vma_folio(folio, vma); 13281da177e4SLinus Torvalds } 13291da177e4SLinus Torvalds 133043d8eac4SRandy Dunlap /** 13314d510f3dSMatthew Wilcox (Oracle) * folio_add_new_anon_rmap - Add mapping to a new anonymous folio. 13324d510f3dSMatthew Wilcox (Oracle) * @folio: The folio to add the mapping to. 13339617d95eSNick Piggin * @vma: the vm area in which the mapping is added 13349617d95eSNick Piggin * @address: the user virtual address mapped 133540f2bbf7SDavid Hildenbrand * 13364d510f3dSMatthew Wilcox (Oracle) * Like page_add_anon_rmap() but must only be called on *new* folios. 13379617d95eSNick Piggin * This means the inc-and-test can be bypassed. 13384d510f3dSMatthew Wilcox (Oracle) * The folio does not have to be locked. 13394d510f3dSMatthew Wilcox (Oracle) * 1340372cbd4dSRyan Roberts * If the folio is pmd-mappable, it is accounted as a THP. As the folio 13414d510f3dSMatthew Wilcox (Oracle) * is new, it's assumed to be mapped exclusively by a single process. 13429617d95eSNick Piggin */ 13434d510f3dSMatthew Wilcox (Oracle) void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, 13444d510f3dSMatthew Wilcox (Oracle) unsigned long address) 13459617d95eSNick Piggin { 1346372cbd4dSRyan Roberts int nr = folio_nr_pages(folio); 1347d281ee61SKirill A. Shutemov 1348*a4ea1864SDavid Hildenbrand VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); 1349372cbd4dSRyan Roberts VM_BUG_ON_VMA(address < vma->vm_start || 1350372cbd4dSRyan Roberts address + (nr << PAGE_SHIFT) > vma->vm_end, vma); 13514d510f3dSMatthew Wilcox (Oracle) __folio_set_swapbacked(folio); 1352372cbd4dSRyan Roberts __folio_set_anon(folio, vma, address, true); 1353d8dd5e97SHugh Dickins 1354372cbd4dSRyan Roberts if (likely(!folio_test_large(folio))) { 1355d8dd5e97SHugh Dickins /* increment count (starts at -1) */ 13564d510f3dSMatthew Wilcox (Oracle) atomic_set(&folio->_mapcount, 0); 1357372cbd4dSRyan Roberts SetPageAnonExclusive(&folio->page); 1358372cbd4dSRyan Roberts } else if (!folio_test_pmd_mappable(folio)) { 1359372cbd4dSRyan Roberts int i; 1360372cbd4dSRyan Roberts 1361372cbd4dSRyan Roberts for (i = 0; i < nr; i++) { 1362372cbd4dSRyan Roberts struct page *page = folio_page(folio, i); 1363372cbd4dSRyan Roberts 1364372cbd4dSRyan Roberts /* increment count (starts at -1) */ 1365372cbd4dSRyan Roberts atomic_set(&page->_mapcount, 0); 1366372cbd4dSRyan Roberts SetPageAnonExclusive(page); 1367372cbd4dSRyan Roberts } 1368372cbd4dSRyan Roberts 1369372cbd4dSRyan Roberts atomic_set(&folio->_nr_pages_mapped, nr); 1370d8dd5e97SHugh Dickins } else { 137153f9263bSKirill A. 
Shutemov /* increment count (starts at -1) */ 13724d510f3dSMatthew Wilcox (Oracle) atomic_set(&folio->_entire_mapcount, 0); 13734d510f3dSMatthew Wilcox (Oracle) atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED); 1374372cbd4dSRyan Roberts SetPageAnonExclusive(&folio->page); 13754d510f3dSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr); 1376d281ee61SKirill A. Shutemov } 1377d8dd5e97SHugh Dickins 13784d510f3dSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); 13799617d95eSNick Piggin } 13809617d95eSNick Piggin 13811da177e4SLinus Torvalds /** 138286f35f69SYin Fengwei * folio_add_file_rmap_range - add pte mapping to page range of a folio 138386f35f69SYin Fengwei * @folio: The folio to add the mapping to 138486f35f69SYin Fengwei * @page: The first page to add 138586f35f69SYin Fengwei * @nr_pages: The number of pages which will be mapped 1386cea86fe2SHugh Dickins * @vma: the vm area in which the mapping is added 1387e8b098fcSMike Rapoport * @compound: charge the page as compound or small page 13881da177e4SLinus Torvalds * 138986f35f69SYin Fengwei * The page range of folio is defined by [first_page, first_page + nr_pages) 139086f35f69SYin Fengwei * 1391b8072f09SHugh Dickins * The caller needs to hold the pte lock. 13921da177e4SLinus Torvalds */ 139386f35f69SYin Fengwei void folio_add_file_rmap_range(struct folio *folio, struct page *page, 139486f35f69SYin Fengwei unsigned int nr_pages, struct vm_area_struct *vma, 1395eb01a2adSMatthew Wilcox (Oracle) bool compound) 13961da177e4SLinus Torvalds { 1397eb01a2adSMatthew Wilcox (Oracle) atomic_t *mapped = &folio->_nr_pages_mapped; 139886f35f69SYin Fengwei unsigned int nr_pmdmapped = 0, first; 139986f35f69SYin Fengwei int nr = 0; 1400dd78feddSKirill A. Shutemov 140144887f39SDavid Hildenbrand VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); 140286f35f69SYin Fengwei VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio); 14039bd3155eSHugh Dickins 1404be5ef2d9SHugh Dickins /* Is page being mapped by PTE? Is this its first map to be added? */ 1405be5ef2d9SHugh Dickins if (likely(!compound)) { 140686f35f69SYin Fengwei do { 1407d8dd5e97SHugh Dickins first = atomic_inc_and_test(&page->_mapcount); 1408eb01a2adSMatthew Wilcox (Oracle) if (first && folio_test_large(folio)) { 140986f35f69SYin Fengwei first = atomic_inc_return_relaxed(mapped); 141086f35f69SYin Fengwei first = (first < COMPOUND_MAPPED); 14119a73f61bSKirill A. Shutemov } 141286f35f69SYin Fengwei 141386f35f69SYin Fengwei if (first) 141486f35f69SYin Fengwei nr++; 141586f35f69SYin Fengwei } while (page++, --nr_pages > 0); 1416eb01a2adSMatthew Wilcox (Oracle) } else if (folio_test_pmd_mappable(folio)) { 1417be5ef2d9SHugh Dickins /* That test is redundant: it's for safety or to optimize out */ 1418be5ef2d9SHugh Dickins 1419eb01a2adSMatthew Wilcox (Oracle) first = atomic_inc_and_test(&folio->_entire_mapcount); 1420be5ef2d9SHugh Dickins if (first) { 14214b51634cSHugh Dickins nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); 14226287b7daSHugh Dickins if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { 1423eb01a2adSMatthew Wilcox (Oracle) nr_pmdmapped = folio_nr_pages(folio); 1424eec20426SMatthew Wilcox (Oracle) nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); 14256287b7daSHugh Dickins /* Raced ahead of a remove and another add? 
*/ 14266287b7daSHugh Dickins if (unlikely(nr < 0)) 14276287b7daSHugh Dickins nr = 0; 14286287b7daSHugh Dickins } else { 14296287b7daSHugh Dickins /* Raced ahead of a remove of COMPOUND_MAPPED */ 14306287b7daSHugh Dickins nr = 0; 14316287b7daSHugh Dickins } 1432be5ef2d9SHugh Dickins } 1433be5ef2d9SHugh Dickins } 14349bd3155eSHugh Dickins 14359bd3155eSHugh Dickins if (nr_pmdmapped) 1436eb01a2adSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ? 14379bd3155eSHugh Dickins NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped); 14385d543f13SHugh Dickins if (nr) 1439eb01a2adSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr); 1440cea86fe2SHugh Dickins 14411acbc3f9SYin Fengwei /* See comments in page_add_anon_rmap() */ 14421acbc3f9SYin Fengwei if (!folio_test_large(folio)) 14431acbc3f9SYin Fengwei mlock_vma_folio(folio, vma); 14441da177e4SLinus Torvalds } 14451da177e4SLinus Torvalds 14461da177e4SLinus Torvalds /** 144786f35f69SYin Fengwei * page_add_file_rmap - add pte mapping to a file page 144886f35f69SYin Fengwei * @page: the page to add the mapping to 144986f35f69SYin Fengwei * @vma: the vm area in which the mapping is added 145086f35f69SYin Fengwei * @compound: charge the page as compound or small page 145186f35f69SYin Fengwei * 145286f35f69SYin Fengwei * The caller needs to hold the pte lock. 145386f35f69SYin Fengwei */ 145486f35f69SYin Fengwei void page_add_file_rmap(struct page *page, struct vm_area_struct *vma, 145586f35f69SYin Fengwei bool compound) 145686f35f69SYin Fengwei { 145786f35f69SYin Fengwei struct folio *folio = page_folio(page); 145886f35f69SYin Fengwei unsigned int nr_pages; 145986f35f69SYin Fengwei 146086f35f69SYin Fengwei VM_WARN_ON_ONCE_PAGE(compound && !PageTransHuge(page), page); 146186f35f69SYin Fengwei 146286f35f69SYin Fengwei if (likely(!compound)) 146386f35f69SYin Fengwei nr_pages = 1; 146486f35f69SYin Fengwei else 146586f35f69SYin Fengwei nr_pages = folio_nr_pages(folio); 146686f35f69SYin Fengwei 146786f35f69SYin Fengwei folio_add_file_rmap_range(folio, page, nr_pages, vma, compound); 146886f35f69SYin Fengwei } 146986f35f69SYin Fengwei 147086f35f69SYin Fengwei /** 14711da177e4SLinus Torvalds * page_remove_rmap - take down pte mapping from a page 14721da177e4SLinus Torvalds * @page: page to remove mapping from 1473cea86fe2SHugh Dickins * @vma: the vm area from which the mapping is removed 1474d281ee61SKirill A. Shutemov * @compound: uncharge the page as compound or small page 14751da177e4SLinus Torvalds * 1476b8072f09SHugh Dickins * The caller needs to hold the pte lock. 14771da177e4SLinus Torvalds */ 147862beb906SMatthew Wilcox (Oracle) void page_remove_rmap(struct page *page, struct vm_area_struct *vma, 147962beb906SMatthew Wilcox (Oracle) bool compound) 14801da177e4SLinus Torvalds { 148162beb906SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 148262beb906SMatthew Wilcox (Oracle) atomic_t *mapped = &folio->_nr_pages_mapped; 14839bd3155eSHugh Dickins int nr = 0, nr_pmdmapped = 0; 14849bd3155eSHugh Dickins bool last; 148562beb906SMatthew Wilcox (Oracle) enum node_stat_item idx; 14869bd3155eSHugh Dickins 1487e135826bSDavid Hildenbrand VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); 14889bd3155eSHugh Dickins VM_BUG_ON_PAGE(compound && !PageHead(page), page); 14899bd3155eSHugh Dickins 1490be5ef2d9SHugh Dickins /* Is page being unmapped by PTE? Is this its last map to be removed? 
*/ 1491be5ef2d9SHugh Dickins if (likely(!compound)) { 1492d8dd5e97SHugh Dickins last = atomic_add_negative(-1, &page->_mapcount); 1493d8dd5e97SHugh Dickins nr = last; 149462beb906SMatthew Wilcox (Oracle) if (last && folio_test_large(folio)) { 14954b51634cSHugh Dickins nr = atomic_dec_return_relaxed(mapped); 14966287b7daSHugh Dickins nr = (nr < COMPOUND_MAPPED); 1497cb67f428SHugh Dickins } 149862beb906SMatthew Wilcox (Oracle) } else if (folio_test_pmd_mappable(folio)) { 1499be5ef2d9SHugh Dickins /* That test is redundant: it's for safety or to optimize out */ 1500be5ef2d9SHugh Dickins 150162beb906SMatthew Wilcox (Oracle) last = atomic_add_negative(-1, &folio->_entire_mapcount); 1502be5ef2d9SHugh Dickins if (last) { 15034b51634cSHugh Dickins nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped); 15046287b7daSHugh Dickins if (likely(nr < COMPOUND_MAPPED)) { 150562beb906SMatthew Wilcox (Oracle) nr_pmdmapped = folio_nr_pages(folio); 1506eec20426SMatthew Wilcox (Oracle) nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); 15076287b7daSHugh Dickins /* Raced ahead of another remove and an add? */ 15086287b7daSHugh Dickins if (unlikely(nr < 0)) 15096287b7daSHugh Dickins nr = 0; 15106287b7daSHugh Dickins } else { 15116287b7daSHugh Dickins /* An add of COMPOUND_MAPPED raced ahead */ 15126287b7daSHugh Dickins nr = 0; 15136287b7daSHugh Dickins } 1514be5ef2d9SHugh Dickins } 1515be5ef2d9SHugh Dickins } 1516cb67f428SHugh Dickins 15179bd3155eSHugh Dickins if (nr_pmdmapped) { 151862beb906SMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 151962beb906SMatthew Wilcox (Oracle) idx = NR_ANON_THPS; 152062beb906SMatthew Wilcox (Oracle) else if (folio_test_swapbacked(folio)) 152162beb906SMatthew Wilcox (Oracle) idx = NR_SHMEM_PMDMAPPED; 152262beb906SMatthew Wilcox (Oracle) else 152362beb906SMatthew Wilcox (Oracle) idx = NR_FILE_PMDMAPPED; 152462beb906SMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped); 15259bd3155eSHugh Dickins } 15269bd3155eSHugh Dickins if (nr) { 152762beb906SMatthew Wilcox (Oracle) idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED; 152862beb906SMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, idx, -nr); 152962beb906SMatthew Wilcox (Oracle) 15309bd3155eSHugh Dickins /* 15317dc7c5efSRyan Roberts * Queue anon large folio for deferred split if at least one 153262beb906SMatthew Wilcox (Oracle) * page of the folio is unmapped and at least one page 153362beb906SMatthew Wilcox (Oracle) * is still mapped. 15349bd3155eSHugh Dickins */ 15357dc7c5efSRyan Roberts if (folio_test_large(folio) && folio_test_anon(folio)) 15369bd3155eSHugh Dickins if (!compound || nr < nr_pmdmapped) 1537f158ed61SMatthew Wilcox (Oracle) deferred_split_folio(folio); 15389bd3155eSHugh Dickins } 15399a982250SKirill A. Shutemov 154016f8c5b2SHugh Dickins /* 1541672aa27dSMatthew Wilcox (Oracle) * It would be tidy to reset folio_test_anon mapping when fully 1542672aa27dSMatthew Wilcox (Oracle) * unmapped, but that might overwrite a racing page_add_anon_rmap 1543672aa27dSMatthew Wilcox (Oracle) * which increments mapcount after us but sets mapping before us: 1544672aa27dSMatthew Wilcox (Oracle) * so leave the reset to free_pages_prepare, and remember that 1545672aa27dSMatthew Wilcox (Oracle) * it's only reliable while mapped. 
15461da177e4SLinus Torvalds */ 15479bd3155eSHugh Dickins 15481acbc3f9SYin Fengwei munlock_vma_folio(folio, vma); 15491da177e4SLinus Torvalds } 15501da177e4SLinus Torvalds 15511da177e4SLinus Torvalds /* 155252629506SJoonsoo Kim * @arg: enum ttu_flags will be passed to this argument 15531da177e4SLinus Torvalds */ 15542f031c6fSMatthew Wilcox (Oracle) static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, 155552629506SJoonsoo Kim unsigned long address, void *arg) 15561da177e4SLinus Torvalds { 15571da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 1558869f7ee6SMatthew Wilcox (Oracle) DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 15591da177e4SLinus Torvalds pte_t pteval; 1560c7ab0d2fSKirill A. Shutemov struct page *subpage; 15616c287605SDavid Hildenbrand bool anon_exclusive, ret = true; 1562ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 15634708f318SPalmer Dabbelt enum ttu_flags flags = (enum ttu_flags)(long)arg; 1564c33c7948SRyan Roberts unsigned long pfn; 1565935d4f0cSRyan Roberts unsigned long hsz = 0; 15661da177e4SLinus Torvalds 1567732ed558SHugh Dickins /* 1568732ed558SHugh Dickins * When racing against e.g. zap_pte_range() on another cpu, 1569732ed558SHugh Dickins * in between its ptep_get_and_clear_full() and page_remove_rmap(), 15701fb08ac6SYang Shi * try_to_unmap() may return before page_mapped() has become false, 1571732ed558SHugh Dickins * if page table locking is skipped: use TTU_SYNC to wait for that. 1572732ed558SHugh Dickins */ 1573732ed558SHugh Dickins if (flags & TTU_SYNC) 1574732ed558SHugh Dickins pvmw.flags = PVMW_SYNC; 1575732ed558SHugh Dickins 1576a98a2f0cSAlistair Popple if (flags & TTU_SPLIT_HUGE_PMD) 1577af28a988SMatthew Wilcox (Oracle) split_huge_pmd_address(vma, address, false, folio); 1578fec89c10SKirill A. Shutemov 1579369ea824SJérôme Glisse /* 1580017b1660SMike Kravetz * For THP, we have to assume the worse case ie pmd for invalidation. 1581017b1660SMike Kravetz * For hugetlb, it could be much worse if we need to do pud 1582017b1660SMike Kravetz * invalidation in the case of pmd sharing. 1583017b1660SMike Kravetz * 1584869f7ee6SMatthew Wilcox (Oracle) * Note that the folio can not be freed in this function as call of 1585869f7ee6SMatthew Wilcox (Oracle) * try_to_unmap() must hold a reference on the folio. 1586369ea824SJérôme Glisse */ 15872aff7a47SMatthew Wilcox (Oracle) range.end = vma_address_end(&pvmw); 15887d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 1589494334e4SHugh Dickins address, range.end); 1590869f7ee6SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio)) { 1591017b1660SMike Kravetz /* 1592017b1660SMike Kravetz * If sharing is possible, start and end will be adjusted 1593017b1660SMike Kravetz * accordingly. 1594017b1660SMike Kravetz */ 1595ac46d4f3SJérôme Glisse adjust_range_if_pmd_sharing_possible(vma, &range.start, 1596ac46d4f3SJérôme Glisse &range.end); 1597935d4f0cSRyan Roberts 1598935d4f0cSRyan Roberts /* We need the huge page size for set_huge_pte_at() */ 1599935d4f0cSRyan Roberts hsz = huge_page_size(hstate_vma(vma)); 1600017b1660SMike Kravetz } 1601ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 1602369ea824SJérôme Glisse 1603c7ab0d2fSKirill A. Shutemov while (page_vma_mapped_walk(&pvmw)) { 1604cea86fe2SHugh Dickins /* Unexpected PMD-mapped THP? 
*/ 1605869f7ee6SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!pvmw.pte, folio); 1606cea86fe2SHugh Dickins 16071da177e4SLinus Torvalds /* 1608869f7ee6SMatthew Wilcox (Oracle) * If the folio is in an mlock()d vma, we must not swap it out. 16091da177e4SLinus Torvalds */ 1610efdb6720SHugh Dickins if (!(flags & TTU_IGNORE_MLOCK) && 1611efdb6720SHugh Dickins (vma->vm_flags & VM_LOCKED)) { 1612cea86fe2SHugh Dickins /* Restore the mlock which got missed */ 16131acbc3f9SYin Fengwei if (!folio_test_large(folio)) 16141acbc3f9SYin Fengwei mlock_vma_folio(folio, vma); 1615c7ab0d2fSKirill A. Shutemov page_vma_mapped_walk_done(&pvmw); 1616efdb6720SHugh Dickins ret = false; 1617c7ab0d2fSKirill A. Shutemov break; 1618b87537d9SHugh Dickins } 1619c7ab0d2fSKirill A. Shutemov 1620c33c7948SRyan Roberts pfn = pte_pfn(ptep_get(pvmw.pte)); 1621c33c7948SRyan Roberts subpage = folio_page(folio, pfn - folio_pfn(folio)); 1622785373b4SLinus Torvalds address = pvmw.address; 16236c287605SDavid Hildenbrand anon_exclusive = folio_test_anon(folio) && 16246c287605SDavid Hildenbrand PageAnonExclusive(subpage); 1625785373b4SLinus Torvalds 1626dfc7ab57SBaolin Wang if (folio_test_hugetlb(folio)) { 16270506c31dSBaolin Wang bool anon = folio_test_anon(folio); 16280506c31dSBaolin Wang 1629017b1660SMike Kravetz /* 1630a00a8759SBaolin Wang * The try_to_unmap() is only passed a hugetlb page 1631a00a8759SBaolin Wang * in the case where the hugetlb page is poisoned. 1632a00a8759SBaolin Wang */ 1633a00a8759SBaolin Wang VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage); 1634a00a8759SBaolin Wang /* 163554205e9cSBaolin Wang * huge_pmd_unshare may unmap an entire PMD page. 163654205e9cSBaolin Wang * There is no way of knowing exactly which PMDs may 163754205e9cSBaolin Wang * be cached for this mm, so we must flush them all. 163854205e9cSBaolin Wang * start/end were already adjusted above to cover this 163954205e9cSBaolin Wang * range. 1640017b1660SMike Kravetz */ 1641ac46d4f3SJérôme Glisse flush_cache_range(vma, range.start, range.end); 164254205e9cSBaolin Wang 1643dfc7ab57SBaolin Wang /* 1644dfc7ab57SBaolin Wang * To call huge_pmd_unshare, i_mmap_rwsem must be 1645dfc7ab57SBaolin Wang * held in write mode. Caller needs to explicitly 1646dfc7ab57SBaolin Wang * do this outside rmap routines. 164740549ba8SMike Kravetz * 164840549ba8SMike Kravetz * We also must hold hugetlb vma_lock in write mode. 164940549ba8SMike Kravetz * Lock order dictates acquiring vma_lock BEFORE 165040549ba8SMike Kravetz * i_mmap_rwsem. We can only try lock here and fail 165140549ba8SMike Kravetz * if unsuccessful. 1652dfc7ab57SBaolin Wang */ 165340549ba8SMike Kravetz if (!anon) { 165440549ba8SMike Kravetz VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 165540549ba8SMike Kravetz if (!hugetlb_vma_trylock_write(vma)) { 165640549ba8SMike Kravetz page_vma_mapped_walk_done(&pvmw); 165740549ba8SMike Kravetz ret = false; 165840549ba8SMike Kravetz break; 165940549ba8SMike Kravetz } 166040549ba8SMike Kravetz if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { 166140549ba8SMike Kravetz hugetlb_vma_unlock_write(vma); 166240549ba8SMike Kravetz flush_tlb_range(vma, 166340549ba8SMike Kravetz range.start, range.end); 1664017b1660SMike Kravetz /* 166540549ba8SMike Kravetz * The ref count of the PMD page was 166640549ba8SMike Kravetz * dropped which is part of the way map 166740549ba8SMike Kravetz * counting is done for shared PMDs. 166840549ba8SMike Kravetz * Return 'true' here. 
When there is 166940549ba8SMike Kravetz * no other sharing, huge_pmd_unshare 167040549ba8SMike Kravetz * returns false and we will unmap the 167140549ba8SMike Kravetz * actual page and drop map count 1672017b1660SMike Kravetz * to zero. 1673017b1660SMike Kravetz */ 1674017b1660SMike Kravetz page_vma_mapped_walk_done(&pvmw); 1675017b1660SMike Kravetz break; 1676017b1660SMike Kravetz } 167740549ba8SMike Kravetz hugetlb_vma_unlock_write(vma); 167840549ba8SMike Kravetz } 1679a00a8759SBaolin Wang pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); 168054205e9cSBaolin Wang } else { 1681c33c7948SRyan Roberts flush_cache_page(vma, address, pfn); 1682088b8aa5SDavid Hildenbrand /* Nuke the page table entry. */ 1683088b8aa5SDavid Hildenbrand if (should_defer_flush(mm, flags)) { 168472b252aeSMel Gorman /* 1685c7ab0d2fSKirill A. Shutemov * We clear the PTE but do not flush so potentially 1686869f7ee6SMatthew Wilcox (Oracle) * a remote CPU could still be writing to the folio. 1687c7ab0d2fSKirill A. Shutemov * If the entry was previously clean then the 1688c7ab0d2fSKirill A. Shutemov * architecture must guarantee that a clear->dirty 1689c7ab0d2fSKirill A. Shutemov * transition on a cached TLB entry is written through 1690c7ab0d2fSKirill A. Shutemov * and traps if the PTE is unmapped. 169172b252aeSMel Gorman */ 1692785373b4SLinus Torvalds pteval = ptep_get_and_clear(mm, address, pvmw.pte); 169372b252aeSMel Gorman 1694f73419bbSBarry Song set_tlb_ubc_flush_pending(mm, pteval, address); 169572b252aeSMel Gorman } else { 1696785373b4SLinus Torvalds pteval = ptep_clear_flush(vma, address, pvmw.pte); 169772b252aeSMel Gorman } 1698a00a8759SBaolin Wang } 16991da177e4SLinus Torvalds 1700999dad82SPeter Xu /* 1701999dad82SPeter Xu * Now the pte is cleared. If this pte was uffd-wp armed, 1702999dad82SPeter Xu * we may want to replace a none pte with a marker pte if 1703999dad82SPeter Xu * it's file-backed, so we don't lose the tracking info. 1704999dad82SPeter Xu */ 1705999dad82SPeter Xu pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); 1706999dad82SPeter Xu 1707869f7ee6SMatthew Wilcox (Oracle) /* Set the dirty flag on the folio now the pte is gone. */ 17081da177e4SLinus Torvalds if (pte_dirty(pteval)) 1709869f7ee6SMatthew Wilcox (Oracle) folio_mark_dirty(folio); 17101da177e4SLinus Torvalds 1711365e9c87SHugh Dickins /* Update high watermark before we lower rss */ 1712365e9c87SHugh Dickins update_hiwater_rss(mm); 1713365e9c87SHugh Dickins 17146da6b1d4SNaoya Horiguchi if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) { 17155fd27b8eSPunit Agrawal pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 1716869f7ee6SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio)) { 1717869f7ee6SMatthew Wilcox (Oracle) hugetlb_count_sub(folio_nr_pages(folio), mm); 1718935d4f0cSRyan Roberts set_huge_pte_at(mm, address, pvmw.pte, pteval, 1719935d4f0cSRyan Roberts hsz); 17205d317b2bSNaoya Horiguchi } else { 1721869f7ee6SMatthew Wilcox (Oracle) dec_mm_counter(mm, mm_counter(&folio->page)); 1722785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, pteval); 17235f24ae58SNaoya Horiguchi } 1724c7ab0d2fSKirill A. Shutemov 1725bce73e48SChristian Borntraeger } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 172645961722SKonstantin Weitz /* 172745961722SKonstantin Weitz * The guest indicated that the page content is of no 172845961722SKonstantin Weitz * interest anymore. Simply discard the pte, vmscan 172945961722SKonstantin Weitz * will take care of the rest. 
1730bce73e48SChristian Borntraeger * A future reference will then fault in a new zero 1731bce73e48SChristian Borntraeger * page. When userfaultfd is active, we must not drop 1732bce73e48SChristian Borntraeger * this page though, as its main user (postcopy 1733bce73e48SChristian Borntraeger * migration) will not expect userfaults on already 1734bce73e48SChristian Borntraeger * copied pages. 173545961722SKonstantin Weitz */ 1736869f7ee6SMatthew Wilcox (Oracle) dec_mm_counter(mm, mm_counter(&folio->page)); 1737869f7ee6SMatthew Wilcox (Oracle) } else if (folio_test_anon(folio)) { 1738cfeed8ffSDavid Hildenbrand swp_entry_t entry = page_swap_entry(subpage); 1739179ef71cSCyrill Gorcunov pte_t swp_pte; 17401da177e4SLinus Torvalds /* 17411da177e4SLinus Torvalds * Store the swap location in the pte. 17421da177e4SLinus Torvalds * See handle_pte_fault() ... 17431da177e4SLinus Torvalds */ 1744869f7ee6SMatthew Wilcox (Oracle) if (unlikely(folio_test_swapbacked(folio) != 1745869f7ee6SMatthew Wilcox (Oracle) folio_test_swapcache(folio))) { 1746eb94a878SMinchan Kim WARN_ON_ONCE(1); 174783612a94SMinchan Kim ret = false; 1748eb94a878SMinchan Kim page_vma_mapped_walk_done(&pvmw); 1749eb94a878SMinchan Kim break; 1750eb94a878SMinchan Kim } 1751854e9ed0SMinchan Kim 1752802a3a92SShaohua Li /* MADV_FREE page check */ 1753869f7ee6SMatthew Wilcox (Oracle) if (!folio_test_swapbacked(folio)) { 17546c8e2a25SMauricio Faria de Oliveira int ref_count, map_count; 17556c8e2a25SMauricio Faria de Oliveira 17566c8e2a25SMauricio Faria de Oliveira /* 17576c8e2a25SMauricio Faria de Oliveira * Synchronize with gup_pte_range(): 17586c8e2a25SMauricio Faria de Oliveira * - clear PTE; barrier; read refcount 17596c8e2a25SMauricio Faria de Oliveira * - inc refcount; barrier; read PTE 17606c8e2a25SMauricio Faria de Oliveira */ 17616c8e2a25SMauricio Faria de Oliveira smp_mb(); 17626c8e2a25SMauricio Faria de Oliveira 17636c8e2a25SMauricio Faria de Oliveira ref_count = folio_ref_count(folio); 17646c8e2a25SMauricio Faria de Oliveira map_count = folio_mapcount(folio); 17656c8e2a25SMauricio Faria de Oliveira 17666c8e2a25SMauricio Faria de Oliveira /* 17676c8e2a25SMauricio Faria de Oliveira * Order reads for page refcount and dirty flag 17686c8e2a25SMauricio Faria de Oliveira * (see comments in __remove_mapping()). 17696c8e2a25SMauricio Faria de Oliveira */ 17706c8e2a25SMauricio Faria de Oliveira smp_rmb(); 17716c8e2a25SMauricio Faria de Oliveira 17726c8e2a25SMauricio Faria de Oliveira /* 17736c8e2a25SMauricio Faria de Oliveira * The only page refs must be one from isolation 17746c8e2a25SMauricio Faria de Oliveira * plus the rmap(s) (dropped by discard:). 17756c8e2a25SMauricio Faria de Oliveira */ 17766c8e2a25SMauricio Faria de Oliveira if (ref_count == 1 + map_count && 17776c8e2a25SMauricio Faria de Oliveira !folio_test_dirty(folio)) { 1778854e9ed0SMinchan Kim dec_mm_counter(mm, MM_ANONPAGES); 1779854e9ed0SMinchan Kim goto discard; 1780854e9ed0SMinchan Kim } 1781854e9ed0SMinchan Kim 1782802a3a92SShaohua Li /* 1783869f7ee6SMatthew Wilcox (Oracle) * If the folio was redirtied, it cannot be 1784802a3a92SShaohua Li * discarded. Remap the page to page table. 
1785802a3a92SShaohua Li */ 1786785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, pteval); 1787869f7ee6SMatthew Wilcox (Oracle) folio_set_swapbacked(folio); 1788e4b82222SMinchan Kim ret = false; 1789802a3a92SShaohua Li page_vma_mapped_walk_done(&pvmw); 1790802a3a92SShaohua Li break; 1791802a3a92SShaohua Li } 1792802a3a92SShaohua Li 1793570a335bSHugh Dickins if (swap_duplicate(entry) < 0) { 1794785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, pteval); 1795e4b82222SMinchan Kim ret = false; 1796c7ab0d2fSKirill A. Shutemov page_vma_mapped_walk_done(&pvmw); 1797c7ab0d2fSKirill A. Shutemov break; 1798570a335bSHugh Dickins } 1799ca827d55SKhalid Aziz if (arch_unmap_one(mm, vma, address, pteval) < 0) { 1800322842eaSDavid Hildenbrand swap_free(entry); 1801ca827d55SKhalid Aziz set_pte_at(mm, address, pvmw.pte, pteval); 1802ca827d55SKhalid Aziz ret = false; 1803ca827d55SKhalid Aziz page_vma_mapped_walk_done(&pvmw); 1804ca827d55SKhalid Aziz break; 1805ca827d55SKhalid Aziz } 1806088b8aa5SDavid Hildenbrand 1807088b8aa5SDavid Hildenbrand /* See page_try_share_anon_rmap(): clear PTE first. */ 18086c287605SDavid Hildenbrand if (anon_exclusive && 18096c287605SDavid Hildenbrand page_try_share_anon_rmap(subpage)) { 18106c287605SDavid Hildenbrand swap_free(entry); 18116c287605SDavid Hildenbrand set_pte_at(mm, address, pvmw.pte, pteval); 18126c287605SDavid Hildenbrand ret = false; 18136c287605SDavid Hildenbrand page_vma_mapped_walk_done(&pvmw); 18146c287605SDavid Hildenbrand break; 18156c287605SDavid Hildenbrand } 18161da177e4SLinus Torvalds if (list_empty(&mm->mmlist)) { 18171da177e4SLinus Torvalds spin_lock(&mmlist_lock); 1818f412ac08SHugh Dickins if (list_empty(&mm->mmlist)) 18191da177e4SLinus Torvalds list_add(&mm->mmlist, &init_mm.mmlist); 18201da177e4SLinus Torvalds spin_unlock(&mmlist_lock); 18211da177e4SLinus Torvalds } 1822d559db08SKAMEZAWA Hiroyuki dec_mm_counter(mm, MM_ANONPAGES); 1823b084d435SKAMEZAWA Hiroyuki inc_mm_counter(mm, MM_SWAPENTS); 1824179ef71cSCyrill Gorcunov swp_pte = swp_entry_to_pte(entry); 18251493a191SDavid Hildenbrand if (anon_exclusive) 18261493a191SDavid Hildenbrand swp_pte = pte_swp_mkexclusive(swp_pte); 1827179ef71cSCyrill Gorcunov if (pte_soft_dirty(pteval)) 1828179ef71cSCyrill Gorcunov swp_pte = pte_swp_mksoft_dirty(swp_pte); 1829f45ec5ffSPeter Xu if (pte_uffd_wp(pteval)) 1830f45ec5ffSPeter Xu swp_pte = pte_swp_mkuffd_wp(swp_pte); 1831785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, swp_pte); 18320f10851eSJérôme Glisse } else { 18330f10851eSJérôme Glisse /* 1834869f7ee6SMatthew Wilcox (Oracle) * This is a locked file-backed folio, 1835869f7ee6SMatthew Wilcox (Oracle) * so it cannot be removed from the page 1836869f7ee6SMatthew Wilcox (Oracle) * cache and replaced by a new folio before 1837869f7ee6SMatthew Wilcox (Oracle) * mmu_notifier_invalidate_range_end, so no 1838869f7ee6SMatthew Wilcox (Oracle) * concurrent thread might update its page table 1839869f7ee6SMatthew Wilcox (Oracle) * to point at a new folio while a device is 1840869f7ee6SMatthew Wilcox (Oracle) * still using this folio. 
18410f10851eSJérôme Glisse * 1842ee65728eSMike Rapoport * See Documentation/mm/mmu_notifier.rst 18430f10851eSJérôme Glisse */ 1844869f7ee6SMatthew Wilcox (Oracle) dec_mm_counter(mm, mm_counter_file(&folio->page)); 18450f10851eSJérôme Glisse } 18460f10851eSJérôme Glisse discard: 1847e135826bSDavid Hildenbrand if (unlikely(folio_test_hugetlb(folio))) 1848e135826bSDavid Hildenbrand hugetlb_remove_rmap(folio); 1849e135826bSDavid Hildenbrand else 1850e135826bSDavid Hildenbrand page_remove_rmap(subpage, vma, false); 1851b7435507SHugh Dickins if (vma->vm_flags & VM_LOCKED) 185296f97c43SLorenzo Stoakes mlock_drain_local(); 1853869f7ee6SMatthew Wilcox (Oracle) folio_put(folio); 1854c7ab0d2fSKirill A. Shutemov } 1855369ea824SJérôme Glisse 1856ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range); 1857369ea824SJérôme Glisse 1858caed0f48SKOSAKI Motohiro return ret; 18591da177e4SLinus Torvalds } 18601da177e4SLinus Torvalds 186152629506SJoonsoo Kim static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) 186252629506SJoonsoo Kim { 1863222100eeSAnshuman Khandual return vma_is_temporary_stack(vma); 186452629506SJoonsoo Kim } 186552629506SJoonsoo Kim 1866f3ad032cSKefeng Wang static int folio_not_mapped(struct folio *folio) 186752629506SJoonsoo Kim { 18682f031c6fSMatthew Wilcox (Oracle) return !folio_mapped(folio); 18692a52bcbcSKirill A. Shutemov } 187052629506SJoonsoo Kim 18711da177e4SLinus Torvalds /** 1872869f7ee6SMatthew Wilcox (Oracle) * try_to_unmap - Try to remove all page table mappings to a folio. 1873869f7ee6SMatthew Wilcox (Oracle) * @folio: The folio to unmap. 187414fa31b8SAndi Kleen * @flags: action and flags 18751da177e4SLinus Torvalds * 18761da177e4SLinus Torvalds * Tries to remove all the page table entries which are mapping this 1877869f7ee6SMatthew Wilcox (Oracle) * folio. It is the caller's responsibility to check if the folio is 1878869f7ee6SMatthew Wilcox (Oracle) * still mapped if needed (use TTU_SYNC to prevent accounting races). 18791da177e4SLinus Torvalds * 1880869f7ee6SMatthew Wilcox (Oracle) * Context: Caller must hold the folio lock. 18811da177e4SLinus Torvalds */ 1882869f7ee6SMatthew Wilcox (Oracle) void try_to_unmap(struct folio *folio, enum ttu_flags flags) 18831da177e4SLinus Torvalds { 188452629506SJoonsoo Kim struct rmap_walk_control rwc = { 188552629506SJoonsoo Kim .rmap_one = try_to_unmap_one, 1886802a3a92SShaohua Li .arg = (void *)flags, 1887f3ad032cSKefeng Wang .done = folio_not_mapped, 18882f031c6fSMatthew Wilcox (Oracle) .anon_lock = folio_lock_anon_vma_read, 188952629506SJoonsoo Kim }; 18901da177e4SLinus Torvalds 1891a98a2f0cSAlistair Popple if (flags & TTU_RMAP_LOCKED) 18922f031c6fSMatthew Wilcox (Oracle) rmap_walk_locked(folio, &rwc); 1893a98a2f0cSAlistair Popple else 18942f031c6fSMatthew Wilcox (Oracle) rmap_walk(folio, &rwc); 1895a98a2f0cSAlistair Popple } 1896a98a2f0cSAlistair Popple 1897a98a2f0cSAlistair Popple /* 1898a98a2f0cSAlistair Popple * @arg: enum ttu_flags will be passed to this argument. 1899a98a2f0cSAlistair Popple * 1900a98a2f0cSAlistair Popple * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs 190164b586d1SHugh Dickins * containing migration entries. 
1902a98a2f0cSAlistair Popple */ 19032f031c6fSMatthew Wilcox (Oracle) static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, 1904a98a2f0cSAlistair Popple unsigned long address, void *arg) 1905a98a2f0cSAlistair Popple { 1906a98a2f0cSAlistair Popple struct mm_struct *mm = vma->vm_mm; 19074b8554c5SMatthew Wilcox (Oracle) DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 1908a98a2f0cSAlistair Popple pte_t pteval; 1909a98a2f0cSAlistair Popple struct page *subpage; 19106c287605SDavid Hildenbrand bool anon_exclusive, ret = true; 1911a98a2f0cSAlistair Popple struct mmu_notifier_range range; 1912a98a2f0cSAlistair Popple enum ttu_flags flags = (enum ttu_flags)(long)arg; 1913c33c7948SRyan Roberts unsigned long pfn; 1914935d4f0cSRyan Roberts unsigned long hsz = 0; 1915a98a2f0cSAlistair Popple 1916a98a2f0cSAlistair Popple /* 1917a98a2f0cSAlistair Popple * When racing against e.g. zap_pte_range() on another cpu, 1918a98a2f0cSAlistair Popple * in between its ptep_get_and_clear_full() and page_remove_rmap(), 1919a98a2f0cSAlistair Popple * try_to_migrate() may return before page_mapped() has become false, 1920a98a2f0cSAlistair Popple * if page table locking is skipped: use TTU_SYNC to wait for that. 1921a98a2f0cSAlistair Popple */ 1922a98a2f0cSAlistair Popple if (flags & TTU_SYNC) 1923a98a2f0cSAlistair Popple pvmw.flags = PVMW_SYNC; 1924a98a2f0cSAlistair Popple 1925a98a2f0cSAlistair Popple /* 1926a98a2f0cSAlistair Popple * unmap_page() in mm/huge_memory.c is the only user of migration with 1927a98a2f0cSAlistair Popple * TTU_SPLIT_HUGE_PMD and it wants to freeze. 1928a98a2f0cSAlistair Popple */ 1929a98a2f0cSAlistair Popple if (flags & TTU_SPLIT_HUGE_PMD) 1930af28a988SMatthew Wilcox (Oracle) split_huge_pmd_address(vma, address, true, folio); 1931a98a2f0cSAlistair Popple 1932a98a2f0cSAlistair Popple /* 1933a98a2f0cSAlistair Popple * For THP, we have to assume the worst case, i.e. pmd for invalidation. 1934a98a2f0cSAlistair Popple * For hugetlb, it could be much worse if we need to do pud 1935a98a2f0cSAlistair Popple * invalidation in the case of pmd sharing. 1936a98a2f0cSAlistair Popple * 1937a98a2f0cSAlistair Popple * Note that the page cannot be freed in this function as the call of 1938a98a2f0cSAlistair Popple * try_to_migrate() must hold a reference on the page. 1939a98a2f0cSAlistair Popple */ 19402aff7a47SMatthew Wilcox (Oracle) range.end = vma_address_end(&pvmw); 19417d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 1942a98a2f0cSAlistair Popple address, range.end); 19434b8554c5SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio)) { 1944a98a2f0cSAlistair Popple /* 1945a98a2f0cSAlistair Popple * If sharing is possible, start and end will be adjusted 1946a98a2f0cSAlistair Popple * accordingly.
1947a98a2f0cSAlistair Popple */ 1948a98a2f0cSAlistair Popple adjust_range_if_pmd_sharing_possible(vma, &range.start, 1949a98a2f0cSAlistair Popple &range.end); 1950935d4f0cSRyan Roberts 1951935d4f0cSRyan Roberts /* We need the huge page size for set_huge_pte_at() */ 1952935d4f0cSRyan Roberts hsz = huge_page_size(hstate_vma(vma)); 1953a98a2f0cSAlistair Popple } 1954a98a2f0cSAlistair Popple mmu_notifier_invalidate_range_start(&range); 1955a98a2f0cSAlistair Popple 1956a98a2f0cSAlistair Popple while (page_vma_mapped_walk(&pvmw)) { 1957a98a2f0cSAlistair Popple #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 1958a98a2f0cSAlistair Popple /* PMD-mapped THP migration entry */ 1959a98a2f0cSAlistair Popple if (!pvmw.pte) { 19604b8554c5SMatthew Wilcox (Oracle) subpage = folio_page(folio, 19614b8554c5SMatthew Wilcox (Oracle) pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); 19624b8554c5SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || 19634b8554c5SMatthew Wilcox (Oracle) !folio_test_pmd_mappable(folio), folio); 1964a98a2f0cSAlistair Popple 19657f5abe60SDavid Hildenbrand if (set_pmd_migration_entry(&pvmw, subpage)) { 19667f5abe60SDavid Hildenbrand ret = false; 19677f5abe60SDavid Hildenbrand page_vma_mapped_walk_done(&pvmw); 19687f5abe60SDavid Hildenbrand break; 19697f5abe60SDavid Hildenbrand } 1970a98a2f0cSAlistair Popple continue; 1971a98a2f0cSAlistair Popple } 1972a98a2f0cSAlistair Popple #endif 1973a98a2f0cSAlistair Popple 1974a98a2f0cSAlistair Popple /* Unexpected PMD-mapped THP? */ 19754b8554c5SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!pvmw.pte, folio); 1976a98a2f0cSAlistair Popple 1977c33c7948SRyan Roberts pfn = pte_pfn(ptep_get(pvmw.pte)); 1978c33c7948SRyan Roberts 19791118234eSDavid Hildenbrand if (folio_is_zone_device(folio)) { 19801118234eSDavid Hildenbrand /* 19811118234eSDavid Hildenbrand * Our PTE is a non-present device exclusive entry and 19821118234eSDavid Hildenbrand * calculating the subpage as for the common case would 19831118234eSDavid Hildenbrand * result in an invalid pointer. 19841118234eSDavid Hildenbrand * 19851118234eSDavid Hildenbrand * Since only PAGE_SIZE pages can currently be 19861118234eSDavid Hildenbrand * migrated, just set it to page. This will need to be 19871118234eSDavid Hildenbrand * changed when hugepage migrations to device private 19881118234eSDavid Hildenbrand * memory are supported. 19891118234eSDavid Hildenbrand */ 19901118234eSDavid Hildenbrand VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); 19911118234eSDavid Hildenbrand subpage = &folio->page; 19921118234eSDavid Hildenbrand } else { 1993c33c7948SRyan Roberts subpage = folio_page(folio, pfn - folio_pfn(folio)); 19941118234eSDavid Hildenbrand } 1995a98a2f0cSAlistair Popple address = pvmw.address; 19966c287605SDavid Hildenbrand anon_exclusive = folio_test_anon(folio) && 19976c287605SDavid Hildenbrand PageAnonExclusive(subpage); 1998a98a2f0cSAlistair Popple 1999dfc7ab57SBaolin Wang if (folio_test_hugetlb(folio)) { 20000506c31dSBaolin Wang bool anon = folio_test_anon(folio); 20010506c31dSBaolin Wang 2002a98a2f0cSAlistair Popple /* 200354205e9cSBaolin Wang * huge_pmd_unshare may unmap an entire PMD page. 200454205e9cSBaolin Wang * There is no way of knowing exactly which PMDs may 200554205e9cSBaolin Wang * be cached for this mm, so we must flush them all. 200654205e9cSBaolin Wang * start/end were already adjusted above to cover this 200754205e9cSBaolin Wang * range. 
2008a98a2f0cSAlistair Popple */ 2009a98a2f0cSAlistair Popple flush_cache_range(vma, range.start, range.end); 201054205e9cSBaolin Wang 2011dfc7ab57SBaolin Wang /* 2012dfc7ab57SBaolin Wang * To call huge_pmd_unshare, i_mmap_rwsem must be 2013dfc7ab57SBaolin Wang * held in write mode. Caller needs to explicitly 2014dfc7ab57SBaolin Wang * do this outside rmap routines. 201540549ba8SMike Kravetz * 201640549ba8SMike Kravetz * We also must hold hugetlb vma_lock in write mode. 201740549ba8SMike Kravetz * Lock order dictates acquiring vma_lock BEFORE 201840549ba8SMike Kravetz * i_mmap_rwsem. We can only try lock here and 201940549ba8SMike Kravetz * fail if unsuccessful. 2020dfc7ab57SBaolin Wang */ 202140549ba8SMike Kravetz if (!anon) { 202240549ba8SMike Kravetz VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 202340549ba8SMike Kravetz if (!hugetlb_vma_trylock_write(vma)) { 202440549ba8SMike Kravetz page_vma_mapped_walk_done(&pvmw); 202540549ba8SMike Kravetz ret = false; 202640549ba8SMike Kravetz break; 202740549ba8SMike Kravetz } 202840549ba8SMike Kravetz if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { 202940549ba8SMike Kravetz hugetlb_vma_unlock_write(vma); 203040549ba8SMike Kravetz flush_tlb_range(vma, 203140549ba8SMike Kravetz range.start, range.end); 2032a98a2f0cSAlistair Popple 2033a98a2f0cSAlistair Popple /* 203440549ba8SMike Kravetz * The ref count of the PMD page was 203540549ba8SMike Kravetz * dropped which is part of the way map 203640549ba8SMike Kravetz * counting is done for shared PMDs. 203740549ba8SMike Kravetz * Return 'true' here. When there is 203840549ba8SMike Kravetz * no other sharing, huge_pmd_unshare 203940549ba8SMike Kravetz * returns false and we will unmap the 204040549ba8SMike Kravetz * actual page and drop map count 2041a98a2f0cSAlistair Popple * to zero. 2042a98a2f0cSAlistair Popple */ 2043a98a2f0cSAlistair Popple page_vma_mapped_walk_done(&pvmw); 2044a98a2f0cSAlistair Popple break; 2045a98a2f0cSAlistair Popple } 204640549ba8SMike Kravetz hugetlb_vma_unlock_write(vma); 204740549ba8SMike Kravetz } 20485d4af619SBaolin Wang /* Nuke the hugetlb page table entry */ 20495d4af619SBaolin Wang pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); 205054205e9cSBaolin Wang } else { 2051c33c7948SRyan Roberts flush_cache_page(vma, address, pfn); 2052a98a2f0cSAlistair Popple /* Nuke the page table entry. */ 20537e12beb8SHuang Ying if (should_defer_flush(mm, flags)) { 20547e12beb8SHuang Ying /* 20557e12beb8SHuang Ying * We clear the PTE but do not flush so potentially 20567e12beb8SHuang Ying * a remote CPU could still be writing to the folio. 20577e12beb8SHuang Ying * If the entry was previously clean then the 20587e12beb8SHuang Ying * architecture must guarantee that a clear->dirty 20597e12beb8SHuang Ying * transition on a cached TLB entry is written through 20607e12beb8SHuang Ying * and traps if the PTE is unmapped. 20617e12beb8SHuang Ying */ 20627e12beb8SHuang Ying pteval = ptep_get_and_clear(mm, address, pvmw.pte); 20637e12beb8SHuang Ying 2064f73419bbSBarry Song set_tlb_ubc_flush_pending(mm, pteval, address); 20657e12beb8SHuang Ying } else { 2066a98a2f0cSAlistair Popple pteval = ptep_clear_flush(vma, address, pvmw.pte); 20675d4af619SBaolin Wang } 20687e12beb8SHuang Ying } 2069a98a2f0cSAlistair Popple 20704b8554c5SMatthew Wilcox (Oracle) /* Set the dirty flag on the folio now the pte is gone. 
*/ 2071a98a2f0cSAlistair Popple if (pte_dirty(pteval)) 20724b8554c5SMatthew Wilcox (Oracle) folio_mark_dirty(folio); 2073a98a2f0cSAlistair Popple 2074a98a2f0cSAlistair Popple /* Update high watermark before we lower rss */ 2075a98a2f0cSAlistair Popple update_hiwater_rss(mm); 2076a98a2f0cSAlistair Popple 2077f25cbb7aSAlex Sierra if (folio_is_device_private(folio)) { 20784b8554c5SMatthew Wilcox (Oracle) unsigned long pfn = folio_pfn(folio); 2079a98a2f0cSAlistair Popple swp_entry_t entry; 2080a98a2f0cSAlistair Popple pte_t swp_pte; 2081a98a2f0cSAlistair Popple 20826c287605SDavid Hildenbrand if (anon_exclusive) 20836c287605SDavid Hildenbrand BUG_ON(page_try_share_anon_rmap(subpage)); 20846c287605SDavid Hildenbrand 2085a98a2f0cSAlistair Popple /* 2086a98a2f0cSAlistair Popple * Store the pfn of the page in a special migration 2087a98a2f0cSAlistair Popple * pte. do_swap_page() will wait until the migration 2088a98a2f0cSAlistair Popple * pte is removed and then restart fault handling. 2089a98a2f0cSAlistair Popple */ 20903d88705cSAlistair Popple entry = pte_to_swp_entry(pteval); 20913d88705cSAlistair Popple if (is_writable_device_private_entry(entry)) 20923d88705cSAlistair Popple entry = make_writable_migration_entry(pfn); 20936c287605SDavid Hildenbrand else if (anon_exclusive) 20946c287605SDavid Hildenbrand entry = make_readable_exclusive_migration_entry(pfn); 20953d88705cSAlistair Popple else 20963d88705cSAlistair Popple entry = make_readable_migration_entry(pfn); 2097a98a2f0cSAlistair Popple swp_pte = swp_entry_to_pte(entry); 2098a98a2f0cSAlistair Popple 2099a98a2f0cSAlistair Popple /* 2100a98a2f0cSAlistair Popple * pteval maps a zone device page and is therefore 2101a98a2f0cSAlistair Popple * a swap pte. 2102a98a2f0cSAlistair Popple */ 2103a98a2f0cSAlistair Popple if (pte_swp_soft_dirty(pteval)) 2104a98a2f0cSAlistair Popple swp_pte = pte_swp_mksoft_dirty(swp_pte); 2105a98a2f0cSAlistair Popple if (pte_swp_uffd_wp(pteval)) 2106a98a2f0cSAlistair Popple swp_pte = pte_swp_mkuffd_wp(swp_pte); 2107a98a2f0cSAlistair Popple set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); 21084cc79b33SAnshuman Khandual trace_set_migration_pte(pvmw.address, pte_val(swp_pte), 21094cc79b33SAnshuman Khandual compound_order(&folio->page)); 2110a98a2f0cSAlistair Popple /* 2111a98a2f0cSAlistair Popple * No need to invalidate here; it will synchronize 2112a98a2f0cSAlistair Popple * against the special swap migration pte. 2113a98a2f0cSAlistair Popple */ 2114da358d5cSMatthew Wilcox (Oracle) } else if (PageHWPoison(subpage)) { 2115a98a2f0cSAlistair Popple pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 21164b8554c5SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio)) { 21174b8554c5SMatthew Wilcox (Oracle) hugetlb_count_sub(folio_nr_pages(folio), mm); 2118935d4f0cSRyan Roberts set_huge_pte_at(mm, address, pvmw.pte, pteval, 2119935d4f0cSRyan Roberts hsz); 2120a98a2f0cSAlistair Popple } else { 21214b8554c5SMatthew Wilcox (Oracle) dec_mm_counter(mm, mm_counter(&folio->page)); 2122a98a2f0cSAlistair Popple set_pte_at(mm, address, pvmw.pte, pteval); 2123a98a2f0cSAlistair Popple } 2124a98a2f0cSAlistair Popple 2125a98a2f0cSAlistair Popple } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 2126a98a2f0cSAlistair Popple /* 2127a98a2f0cSAlistair Popple * The guest indicated that the page content is of no 2128a98a2f0cSAlistair Popple * interest anymore. Simply discard the pte, vmscan 2129a98a2f0cSAlistair Popple * will take care of the rest.
2130a98a2f0cSAlistair Popple * A future reference will then fault in a new zero 2131a98a2f0cSAlistair Popple * page. When userfaultfd is active, we must not drop 2132a98a2f0cSAlistair Popple * this page though, as its main user (postcopy 2133a98a2f0cSAlistair Popple * migration) will not expect userfaults on already 2134a98a2f0cSAlistair Popple * copied pages. 2135a98a2f0cSAlistair Popple */ 21364b8554c5SMatthew Wilcox (Oracle) dec_mm_counter(mm, mm_counter(&folio->page)); 2137a98a2f0cSAlistair Popple } else { 2138a98a2f0cSAlistair Popple swp_entry_t entry; 2139a98a2f0cSAlistair Popple pte_t swp_pte; 2140a98a2f0cSAlistair Popple 2141a98a2f0cSAlistair Popple if (arch_unmap_one(mm, vma, address, pteval) < 0) { 21425d4af619SBaolin Wang if (folio_test_hugetlb(folio)) 2143935d4f0cSRyan Roberts set_huge_pte_at(mm, address, pvmw.pte, 2144935d4f0cSRyan Roberts pteval, hsz); 21455d4af619SBaolin Wang else 2146a98a2f0cSAlistair Popple set_pte_at(mm, address, pvmw.pte, pteval); 2147a98a2f0cSAlistair Popple ret = false; 2148a98a2f0cSAlistair Popple page_vma_mapped_walk_done(&pvmw); 2149a98a2f0cSAlistair Popple break; 2150a98a2f0cSAlistair Popple } 21516c287605SDavid Hildenbrand VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && 21526c287605SDavid Hildenbrand !anon_exclusive, subpage); 2153088b8aa5SDavid Hildenbrand 2154088b8aa5SDavid Hildenbrand /* See page_try_share_anon_rmap(): clear PTE first. */ 21550c2ec32bSDavid Hildenbrand if (folio_test_hugetlb(folio)) { 21566c287605SDavid Hildenbrand if (anon_exclusive && 21570c2ec32bSDavid Hildenbrand hugetlb_try_share_anon_rmap(folio)) { 2158935d4f0cSRyan Roberts set_huge_pte_at(mm, address, pvmw.pte, 2159935d4f0cSRyan Roberts pteval, hsz); 21600c2ec32bSDavid Hildenbrand ret = false; 21610c2ec32bSDavid Hildenbrand page_vma_mapped_walk_done(&pvmw); 21620c2ec32bSDavid Hildenbrand break; 21630c2ec32bSDavid Hildenbrand } 21640c2ec32bSDavid Hildenbrand } else if (anon_exclusive && 21650c2ec32bSDavid Hildenbrand page_try_share_anon_rmap(subpage)) { 21666c287605SDavid Hildenbrand set_pte_at(mm, address, pvmw.pte, pteval); 21676c287605SDavid Hildenbrand ret = false; 21686c287605SDavid Hildenbrand page_vma_mapped_walk_done(&pvmw); 21696c287605SDavid Hildenbrand break; 21706c287605SDavid Hildenbrand } 2171a98a2f0cSAlistair Popple 2172a98a2f0cSAlistair Popple /* 2173a98a2f0cSAlistair Popple * Store the pfn of the page in a special migration 2174a98a2f0cSAlistair Popple * pte. do_swap_page() will wait until the migration 2175a98a2f0cSAlistair Popple * pte is removed and then restart fault handling. 
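 *
 * The flavour of migration entry built below records what the restore
 * side (remove_migration_pte()) needs: a writable entry preserves write
 * permission, a readable-exclusive entry remembers that the page was
 * anon-exclusive, and a plain readable entry carries neither.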
2176a98a2f0cSAlistair Popple */ 2177a98a2f0cSAlistair Popple if (pte_write(pteval)) 2178a98a2f0cSAlistair Popple entry = make_writable_migration_entry( 2179a98a2f0cSAlistair Popple page_to_pfn(subpage)); 21806c287605SDavid Hildenbrand else if (anon_exclusive) 21816c287605SDavid Hildenbrand entry = make_readable_exclusive_migration_entry( 21826c287605SDavid Hildenbrand page_to_pfn(subpage)); 2183a98a2f0cSAlistair Popple else 2184a98a2f0cSAlistair Popple entry = make_readable_migration_entry( 2185a98a2f0cSAlistair Popple page_to_pfn(subpage)); 21862e346877SPeter Xu if (pte_young(pteval)) 21872e346877SPeter Xu entry = make_migration_entry_young(entry); 21882e346877SPeter Xu if (pte_dirty(pteval)) 21892e346877SPeter Xu entry = make_migration_entry_dirty(entry); 2190a98a2f0cSAlistair Popple swp_pte = swp_entry_to_pte(entry); 2191a98a2f0cSAlistair Popple if (pte_soft_dirty(pteval)) 2192a98a2f0cSAlistair Popple swp_pte = pte_swp_mksoft_dirty(swp_pte); 2193a98a2f0cSAlistair Popple if (pte_uffd_wp(pteval)) 2194a98a2f0cSAlistair Popple swp_pte = pte_swp_mkuffd_wp(swp_pte); 21955d4af619SBaolin Wang if (folio_test_hugetlb(folio)) 2196935d4f0cSRyan Roberts set_huge_pte_at(mm, address, pvmw.pte, swp_pte, 2197935d4f0cSRyan Roberts hsz); 21985d4af619SBaolin Wang else 2199a98a2f0cSAlistair Popple set_pte_at(mm, address, pvmw.pte, swp_pte); 22004cc79b33SAnshuman Khandual trace_set_migration_pte(address, pte_val(swp_pte), 22014cc79b33SAnshuman Khandual compound_order(&folio->page)); 2202a98a2f0cSAlistair Popple /* 2203a98a2f0cSAlistair Popple * No need to invalidate here, it will synchronize 2204a98a2f0cSAlistair Popple * against the special swap migration pte. 2205a98a2f0cSAlistair Popple */ 2206a98a2f0cSAlistair Popple } 2207a98a2f0cSAlistair Popple 2208e135826bSDavid Hildenbrand if (unlikely(folio_test_hugetlb(folio))) 2209e135826bSDavid Hildenbrand hugetlb_remove_rmap(folio); 2210e135826bSDavid Hildenbrand else 2211e135826bSDavid Hildenbrand page_remove_rmap(subpage, vma, false); 2212b7435507SHugh Dickins if (vma->vm_flags & VM_LOCKED) 221396f97c43SLorenzo Stoakes mlock_drain_local(); 22144b8554c5SMatthew Wilcox (Oracle) folio_put(folio); 2215a98a2f0cSAlistair Popple } 2216a98a2f0cSAlistair Popple 2217a98a2f0cSAlistair Popple mmu_notifier_invalidate_range_end(&range); 2218a98a2f0cSAlistair Popple 2219a98a2f0cSAlistair Popple return ret; 2220a98a2f0cSAlistair Popple } 2221a98a2f0cSAlistair Popple 2222a98a2f0cSAlistair Popple /** 2223a98a2f0cSAlistair Popple * try_to_migrate - try to replace all page table mappings with swap entries 22244b8554c5SMatthew Wilcox (Oracle) * @folio: the folio to replace page table entries for 2225a98a2f0cSAlistair Popple * @flags: action and flags 2226a98a2f0cSAlistair Popple * 22274b8554c5SMatthew Wilcox (Oracle) * Tries to remove all the page table entries which are mapping this folio and 22284b8554c5SMatthew Wilcox (Oracle) * replace them with special swap entries. Caller must hold the folio lock.
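 *
 * A rough sketch of the usual pairing, simplified from the migration path
 * (the real callers in mm/migrate.c handle many more cases):
 *
 *	try_to_migrate(src, TTU_BATCH_FLUSH);	// install migration entries
 *	... copy src into a newly allocated dst folio ...
 *	remove_migration_ptes(src, dst, false);	// make the ptes point at dst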
2229a98a2f0cSAlistair Popple */ 22304b8554c5SMatthew Wilcox (Oracle) void try_to_migrate(struct folio *folio, enum ttu_flags flags) 2231a98a2f0cSAlistair Popple { 2232a98a2f0cSAlistair Popple struct rmap_walk_control rwc = { 2233a98a2f0cSAlistair Popple .rmap_one = try_to_migrate_one, 2234a98a2f0cSAlistair Popple .arg = (void *)flags, 2235f3ad032cSKefeng Wang .done = folio_not_mapped, 22362f031c6fSMatthew Wilcox (Oracle) .anon_lock = folio_lock_anon_vma_read, 2237a98a2f0cSAlistair Popple }; 2238a98a2f0cSAlistair Popple 2239a98a2f0cSAlistair Popple /* 2240a98a2f0cSAlistair Popple * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED, 22417e12beb8SHuang Ying * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags. 2242a98a2f0cSAlistair Popple */ 2243a98a2f0cSAlistair Popple if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | 22447e12beb8SHuang Ying TTU_SYNC | TTU_BATCH_FLUSH))) 2245a98a2f0cSAlistair Popple return; 2246a98a2f0cSAlistair Popple 2247f25cbb7aSAlex Sierra if (folio_is_zone_device(folio) && 2248f25cbb7aSAlex Sierra (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) 22496c855fceSHugh Dickins return; 22506c855fceSHugh Dickins 225152629506SJoonsoo Kim /* 225252629506SJoonsoo Kim * During exec, a temporary VMA is set up and later moved. 225352629506SJoonsoo Kim * The VMA is moved under the anon_vma lock but not the 225452629506SJoonsoo Kim * page tables, leading to a race where migration cannot 225552629506SJoonsoo Kim * find the migration ptes. Rather than increasing the 225652629506SJoonsoo Kim * locking requirements of exec(), migration skips 225752629506SJoonsoo Kim * temporary VMAs until after exec() completes. 225852629506SJoonsoo Kim */ 22594b8554c5SMatthew Wilcox (Oracle) if (!folio_test_ksm(folio) && folio_test_anon(folio)) 226052629506SJoonsoo Kim rwc.invalid_vma = invalid_migration_vma; 226152629506SJoonsoo Kim 22622a52bcbcSKirill A. Shutemov if (flags & TTU_RMAP_LOCKED) 22632f031c6fSMatthew Wilcox (Oracle) rmap_walk_locked(folio, &rwc); 22642a52bcbcSKirill A.
Shutemov else 22652f031c6fSMatthew Wilcox (Oracle) rmap_walk(folio, &rwc); 2266b291f000SNick Piggin } 2267e9995ef9SHugh Dickins 2268b756a3b5SAlistair Popple #ifdef CONFIG_DEVICE_PRIVATE 2269b756a3b5SAlistair Popple struct make_exclusive_args { 2270b756a3b5SAlistair Popple struct mm_struct *mm; 2271b756a3b5SAlistair Popple unsigned long address; 2272b756a3b5SAlistair Popple void *owner; 2273b756a3b5SAlistair Popple bool valid; 2274b756a3b5SAlistair Popple }; 2275b756a3b5SAlistair Popple 22762f031c6fSMatthew Wilcox (Oracle) static bool page_make_device_exclusive_one(struct folio *folio, 2277b756a3b5SAlistair Popple struct vm_area_struct *vma, unsigned long address, void *priv) 2278b756a3b5SAlistair Popple { 2279b756a3b5SAlistair Popple struct mm_struct *mm = vma->vm_mm; 22800d251485SMatthew Wilcox (Oracle) DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 2281b756a3b5SAlistair Popple struct make_exclusive_args *args = priv; 2282b756a3b5SAlistair Popple pte_t pteval; 2283b756a3b5SAlistair Popple struct page *subpage; 2284b756a3b5SAlistair Popple bool ret = true; 2285b756a3b5SAlistair Popple struct mmu_notifier_range range; 2286b756a3b5SAlistair Popple swp_entry_t entry; 2287b756a3b5SAlistair Popple pte_t swp_pte; 2288c33c7948SRyan Roberts pte_t ptent; 2289b756a3b5SAlistair Popple 22907d4a8be0SAlistair Popple mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, 2291b756a3b5SAlistair Popple vma->vm_mm, address, min(vma->vm_end, 22920d251485SMatthew Wilcox (Oracle) address + folio_size(folio)), 22930d251485SMatthew Wilcox (Oracle) args->owner); 2294b756a3b5SAlistair Popple mmu_notifier_invalidate_range_start(&range); 2295b756a3b5SAlistair Popple 2296b756a3b5SAlistair Popple while (page_vma_mapped_walk(&pvmw)) { 2297b756a3b5SAlistair Popple /* Unexpected PMD-mapped THP? */ 22980d251485SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!pvmw.pte, folio); 2299b756a3b5SAlistair Popple 2300c33c7948SRyan Roberts ptent = ptep_get(pvmw.pte); 2301c33c7948SRyan Roberts if (!pte_present(ptent)) { 2302b756a3b5SAlistair Popple ret = false; 2303b756a3b5SAlistair Popple page_vma_mapped_walk_done(&pvmw); 2304b756a3b5SAlistair Popple break; 2305b756a3b5SAlistair Popple } 2306b756a3b5SAlistair Popple 23070d251485SMatthew Wilcox (Oracle) subpage = folio_page(folio, 2308c33c7948SRyan Roberts pte_pfn(ptent) - folio_pfn(folio)); 2309b756a3b5SAlistair Popple address = pvmw.address; 2310b756a3b5SAlistair Popple 2311b756a3b5SAlistair Popple /* Nuke the page table entry. */ 2312c33c7948SRyan Roberts flush_cache_page(vma, address, pte_pfn(ptent)); 2313b756a3b5SAlistair Popple pteval = ptep_clear_flush(vma, address, pvmw.pte); 2314b756a3b5SAlistair Popple 23150d251485SMatthew Wilcox (Oracle) /* Set the dirty flag on the folio now the pte is gone. */ 2316b756a3b5SAlistair Popple if (pte_dirty(pteval)) 23170d251485SMatthew Wilcox (Oracle) folio_mark_dirty(folio); 2318b756a3b5SAlistair Popple 2319b756a3b5SAlistair Popple /* 2320b756a3b5SAlistair Popple * Check that our target page is still mapped at the expected 2321b756a3b5SAlistair Popple * address. 2322b756a3b5SAlistair Popple */ 2323b756a3b5SAlistair Popple if (args->mm == mm && args->address == address && 2324b756a3b5SAlistair Popple pte_write(pteval)) 2325b756a3b5SAlistair Popple args->valid = true; 2326b756a3b5SAlistair Popple 2327b756a3b5SAlistair Popple /* 2328b756a3b5SAlistair Popple * Store the pfn of the page in a special migration 2329b756a3b5SAlistair Popple * pte. 
do_swap_page() will wait until the migration 2330b756a3b5SAlistair Popple * pte is removed and then restart fault handling. 2331b756a3b5SAlistair Popple */ 2332b756a3b5SAlistair Popple if (pte_write(pteval)) 2333b756a3b5SAlistair Popple entry = make_writable_device_exclusive_entry( 2334b756a3b5SAlistair Popple page_to_pfn(subpage)); 2335b756a3b5SAlistair Popple else 2336b756a3b5SAlistair Popple entry = make_readable_device_exclusive_entry( 2337b756a3b5SAlistair Popple page_to_pfn(subpage)); 2338b756a3b5SAlistair Popple swp_pte = swp_entry_to_pte(entry); 2339b756a3b5SAlistair Popple if (pte_soft_dirty(pteval)) 2340b756a3b5SAlistair Popple swp_pte = pte_swp_mksoft_dirty(swp_pte); 2341b756a3b5SAlistair Popple if (pte_uffd_wp(pteval)) 2342b756a3b5SAlistair Popple swp_pte = pte_swp_mkuffd_wp(swp_pte); 2343b756a3b5SAlistair Popple 2344b756a3b5SAlistair Popple set_pte_at(mm, address, pvmw.pte, swp_pte); 2345b756a3b5SAlistair Popple 2346b756a3b5SAlistair Popple /* 2347b756a3b5SAlistair Popple * There is a reference on the page for the swap entry which has 2348b756a3b5SAlistair Popple * been removed, so shouldn't take another. 2349b756a3b5SAlistair Popple */ 2350cea86fe2SHugh Dickins page_remove_rmap(subpage, vma, false); 2351b756a3b5SAlistair Popple } 2352b756a3b5SAlistair Popple 2353b756a3b5SAlistair Popple mmu_notifier_invalidate_range_end(&range); 2354b756a3b5SAlistair Popple 2355b756a3b5SAlistair Popple return ret; 2356b756a3b5SAlistair Popple } 2357b756a3b5SAlistair Popple 2358b756a3b5SAlistair Popple /** 23590d251485SMatthew Wilcox (Oracle) * folio_make_device_exclusive - Mark the folio exclusively owned by a device. 23600d251485SMatthew Wilcox (Oracle) * @folio: The folio to replace page table entries for. 23610d251485SMatthew Wilcox (Oracle) * @mm: The mm_struct where the folio is expected to be mapped. 23620d251485SMatthew Wilcox (Oracle) * @address: Address where the folio is expected to be mapped. 2363b756a3b5SAlistair Popple * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks 2364b756a3b5SAlistair Popple * 23650d251485SMatthew Wilcox (Oracle) * Tries to remove all the page table entries which are mapping this 23660d251485SMatthew Wilcox (Oracle) * folio and replace them with special device exclusive swap entries to 23670d251485SMatthew Wilcox (Oracle) * grant a device exclusive access to the folio. 2368b756a3b5SAlistair Popple * 23690d251485SMatthew Wilcox (Oracle) * Context: Caller must hold the folio lock. 23700d251485SMatthew Wilcox (Oracle) * Return: false if the page is still mapped, or if it could not be unmapped 2371b756a3b5SAlistair Popple * from the expected address. Otherwise returns true (success). 
2372b756a3b5SAlistair Popple */ 23730d251485SMatthew Wilcox (Oracle) static bool folio_make_device_exclusive(struct folio *folio, 23740d251485SMatthew Wilcox (Oracle) struct mm_struct *mm, unsigned long address, void *owner) 2375b756a3b5SAlistair Popple { 2376b756a3b5SAlistair Popple struct make_exclusive_args args = { 2377b756a3b5SAlistair Popple .mm = mm, 2378b756a3b5SAlistair Popple .address = address, 2379b756a3b5SAlistair Popple .owner = owner, 2380b756a3b5SAlistair Popple .valid = false, 2381b756a3b5SAlistair Popple }; 2382b756a3b5SAlistair Popple struct rmap_walk_control rwc = { 2383b756a3b5SAlistair Popple .rmap_one = page_make_device_exclusive_one, 2384f3ad032cSKefeng Wang .done = folio_not_mapped, 23852f031c6fSMatthew Wilcox (Oracle) .anon_lock = folio_lock_anon_vma_read, 2386b756a3b5SAlistair Popple .arg = &args, 2387b756a3b5SAlistair Popple }; 2388b756a3b5SAlistair Popple 2389b756a3b5SAlistair Popple /* 23900d251485SMatthew Wilcox (Oracle) * Restrict to anonymous folios for now to avoid potential writeback 23910d251485SMatthew Wilcox (Oracle) * issues. 2392b756a3b5SAlistair Popple */ 23930d251485SMatthew Wilcox (Oracle) if (!folio_test_anon(folio)) 2394b756a3b5SAlistair Popple return false; 2395b756a3b5SAlistair Popple 23962f031c6fSMatthew Wilcox (Oracle) rmap_walk(folio, &rwc); 2397b756a3b5SAlistair Popple 23980d251485SMatthew Wilcox (Oracle) return args.valid && !folio_mapcount(folio); 2399b756a3b5SAlistair Popple } 2400b756a3b5SAlistair Popple 2401b756a3b5SAlistair Popple /** 2402b756a3b5SAlistair Popple * make_device_exclusive_range() - Mark a range for exclusive use by a device 2403dd062302SAdrian Huang * @mm: mm_struct of associated target process 2404b756a3b5SAlistair Popple * @start: start of the region to mark for exclusive device access 2405b756a3b5SAlistair Popple * @end: end address of region 2406b756a3b5SAlistair Popple * @pages: returns the pages which were successfully marked for exclusive access 2407b756a3b5SAlistair Popple * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering 2408b756a3b5SAlistair Popple * 2409b756a3b5SAlistair Popple * Returns: number of pages found in the range by GUP. A page is marked for 2410b756a3b5SAlistair Popple * exclusive access only if the page pointer is non-NULL. 2411b756a3b5SAlistair Popple * 2412b756a3b5SAlistair Popple * This function finds ptes mapping page(s) to the given address range, locks 2413b756a3b5SAlistair Popple * them and replaces mappings with special swap entries preventing userspace CPU 2414b756a3b5SAlistair Popple * access. On fault these entries are replaced with the original mapping after 2415b756a3b5SAlistair Popple * calling MMU notifiers. 2416b756a3b5SAlistair Popple * 2417b756a3b5SAlistair Popple * A driver using this to program access from a device must use a mmu notifier 2418b756a3b5SAlistair Popple * critical section to hold a device specific lock during programming. Once 2419b756a3b5SAlistair Popple * programming is complete it should drop the page lock and reference after 2420b756a3b5SAlistair Popple * which point CPU access to the page will revoke the exclusive access. 
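 *
 * A minimal usage sketch, loosely modelled on in-tree users such as
 * lib/test_hmm.c (error handling and the device programming itself are
 * elided; the owner cookie is whatever pointer the driver also checks in
 * its MMU_NOTIFY_EXCLUSIVE notifier):
 *
 *	mmap_read_lock(mm);
 *	npages = make_device_exclusive_range(mm, start, start + PAGE_SIZE,
 *					     pages, owner_cookie);
 *	mmap_read_unlock(mm);
 *	if (npages == 1 && pages[0]) {
 *		// program the device while the folio lock and reference are held
 *		unlock_page(pages[0]);
 *		put_page(pages[0]);
 *	}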
2421b756a3b5SAlistair Popple */ 2422b756a3b5SAlistair Popple int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, 2423b756a3b5SAlistair Popple unsigned long end, struct page **pages, 2424b756a3b5SAlistair Popple void *owner) 2425b756a3b5SAlistair Popple { 2426b756a3b5SAlistair Popple long npages = (end - start) >> PAGE_SHIFT; 2427b756a3b5SAlistair Popple long i; 2428b756a3b5SAlistair Popple 2429b756a3b5SAlistair Popple npages = get_user_pages_remote(mm, start, npages, 2430b756a3b5SAlistair Popple FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, 2431ca5e8632SLorenzo Stoakes pages, NULL); 2432b756a3b5SAlistair Popple if (npages < 0) 2433b756a3b5SAlistair Popple return npages; 2434b756a3b5SAlistair Popple 2435b756a3b5SAlistair Popple for (i = 0; i < npages; i++, start += PAGE_SIZE) { 24360d251485SMatthew Wilcox (Oracle) struct folio *folio = page_folio(pages[i]); 24370d251485SMatthew Wilcox (Oracle) if (PageTail(pages[i]) || !folio_trylock(folio)) { 24380d251485SMatthew Wilcox (Oracle) folio_put(folio); 2439b756a3b5SAlistair Popple pages[i] = NULL; 2440b756a3b5SAlistair Popple continue; 2441b756a3b5SAlistair Popple } 2442b756a3b5SAlistair Popple 24430d251485SMatthew Wilcox (Oracle) if (!folio_make_device_exclusive(folio, mm, start, owner)) { 24440d251485SMatthew Wilcox (Oracle) folio_unlock(folio); 24450d251485SMatthew Wilcox (Oracle) folio_put(folio); 2446b756a3b5SAlistair Popple pages[i] = NULL; 2447b756a3b5SAlistair Popple } 2448b756a3b5SAlistair Popple } 2449b756a3b5SAlistair Popple 2450b756a3b5SAlistair Popple return npages; 2451b756a3b5SAlistair Popple } 2452b756a3b5SAlistair Popple EXPORT_SYMBOL_GPL(make_device_exclusive_range); 2453b756a3b5SAlistair Popple #endif 2454b756a3b5SAlistair Popple 245501d8b20dSPeter Zijlstra void __put_anon_vma(struct anon_vma *anon_vma) 245676545066SRik van Riel { 245776545066SRik van Riel struct anon_vma *root = anon_vma->root; 245876545066SRik van Riel 2459624483f3SAndrey Ryabinin anon_vma_free(anon_vma); 246001d8b20dSPeter Zijlstra if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 246176545066SRik van Riel anon_vma_free(root); 246276545066SRik van Riel } 246376545066SRik van Riel 24642f031c6fSMatthew Wilcox (Oracle) static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, 24656d4675e6SMinchan Kim struct rmap_walk_control *rwc) 2466faecd8ddSJoonsoo Kim { 2467faecd8ddSJoonsoo Kim struct anon_vma *anon_vma; 2468faecd8ddSJoonsoo Kim 24690dd1c7bbSJoonsoo Kim if (rwc->anon_lock) 24706d4675e6SMinchan Kim return rwc->anon_lock(folio, rwc); 24710dd1c7bbSJoonsoo Kim 2472faecd8ddSJoonsoo Kim /* 24732f031c6fSMatthew Wilcox (Oracle) * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() 2474faecd8ddSJoonsoo Kim * because that depends on page_mapped(); but not all its usages 2475c1e8d7c6SMichel Lespinasse * are holding mmap_lock. 
Users without mmap_lock are required to 2476faecd8ddSJoonsoo Kim * take a reference count to prevent the anon_vma disappearing 2477faecd8ddSJoonsoo Kim */ 2478e05b3453SMatthew Wilcox (Oracle) anon_vma = folio_anon_vma(folio); 2479faecd8ddSJoonsoo Kim if (!anon_vma) 2480faecd8ddSJoonsoo Kim return NULL; 2481faecd8ddSJoonsoo Kim 24826d4675e6SMinchan Kim if (anon_vma_trylock_read(anon_vma)) 24836d4675e6SMinchan Kim goto out; 24846d4675e6SMinchan Kim 24856d4675e6SMinchan Kim if (rwc->try_lock) { 24866d4675e6SMinchan Kim anon_vma = NULL; 24876d4675e6SMinchan Kim rwc->contended = true; 24886d4675e6SMinchan Kim goto out; 24896d4675e6SMinchan Kim } 24906d4675e6SMinchan Kim 2491faecd8ddSJoonsoo Kim anon_vma_lock_read(anon_vma); 24926d4675e6SMinchan Kim out: 2493faecd8ddSJoonsoo Kim return anon_vma; 2494faecd8ddSJoonsoo Kim } 2495faecd8ddSJoonsoo Kim 2496e9995ef9SHugh Dickins /* 2497e8351ac9SJoonsoo Kim * rmap_walk_anon - do something to anonymous page using the object-based 2498e8351ac9SJoonsoo Kim * rmap method 249989be82b4SKemeng Shi * @folio: the folio to be handled 2500e8351ac9SJoonsoo Kim * @rwc: control variable according to each walk type 250189be82b4SKemeng Shi * @locked: caller holds relevant rmap lock 2502e8351ac9SJoonsoo Kim * 250389be82b4SKemeng Shi * Find all the mappings of a folio using the mapping pointer and the vma 250489be82b4SKemeng Shi * chains contained in the anon_vma struct it points to. 2505e9995ef9SHugh Dickins */ 250684fbbe21SMatthew Wilcox (Oracle) static void rmap_walk_anon(struct folio *folio, 25076d4675e6SMinchan Kim struct rmap_walk_control *rwc, bool locked) 2508e9995ef9SHugh Dickins { 2509e9995ef9SHugh Dickins struct anon_vma *anon_vma; 2510a8fa41adSKirill A. Shutemov pgoff_t pgoff_start, pgoff_end; 25115beb4930SRik van Riel struct anon_vma_chain *avc; 2512e9995ef9SHugh Dickins 2513b9773199SKirill A. Shutemov if (locked) { 2514e05b3453SMatthew Wilcox (Oracle) anon_vma = folio_anon_vma(folio); 2515b9773199SKirill A. Shutemov /* anon_vma disappear under us? */ 2516e05b3453SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!anon_vma, folio); 2517b9773199SKirill A. Shutemov } else { 25182f031c6fSMatthew Wilcox (Oracle) anon_vma = rmap_walk_anon_lock(folio, rwc); 2519b9773199SKirill A. Shutemov } 2520e9995ef9SHugh Dickins if (!anon_vma) 25211df631aeSMinchan Kim return; 2522faecd8ddSJoonsoo Kim 25232f031c6fSMatthew Wilcox (Oracle) pgoff_start = folio_pgoff(folio); 25242f031c6fSMatthew Wilcox (Oracle) pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; 2525a8fa41adSKirill A. Shutemov anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, 2526a8fa41adSKirill A. Shutemov pgoff_start, pgoff_end) { 25275beb4930SRik van Riel struct vm_area_struct *vma = avc->vma; 25282f031c6fSMatthew Wilcox (Oracle) unsigned long address = vma_address(&folio->page, vma); 25290dd1c7bbSJoonsoo Kim 2530494334e4SHugh Dickins VM_BUG_ON_VMA(address == -EFAULT, vma); 2531ad12695fSAndrea Arcangeli cond_resched(); 2532ad12695fSAndrea Arcangeli 25330dd1c7bbSJoonsoo Kim if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 25340dd1c7bbSJoonsoo Kim continue; 25350dd1c7bbSJoonsoo Kim 25362f031c6fSMatthew Wilcox (Oracle) if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 2537e9995ef9SHugh Dickins break; 25382f031c6fSMatthew Wilcox (Oracle) if (rwc->done && rwc->done(folio)) 25390dd1c7bbSJoonsoo Kim break; 2540e9995ef9SHugh Dickins } 2541b9773199SKirill A. Shutemov 2542b9773199SKirill A. 
Shutemov if (!locked) 25434fc3f1d6SIngo Molnar anon_vma_unlock_read(anon_vma); 2544e9995ef9SHugh Dickins } 2545e9995ef9SHugh Dickins 2546e8351ac9SJoonsoo Kim /* 2547e8351ac9SJoonsoo Kim * rmap_walk_file - do something to file page using the object-based rmap method 254889be82b4SKemeng Shi * @folio: the folio to be handled 2549e8351ac9SJoonsoo Kim * @rwc: control variable according to each walk type 255089be82b4SKemeng Shi * @locked: caller holds relevant rmap lock 2551e8351ac9SJoonsoo Kim * 255289be82b4SKemeng Shi * Find all the mappings of a folio using the mapping pointer and the vma chains 2553e8351ac9SJoonsoo Kim * contained in the address_space struct it points to. 2554e8351ac9SJoonsoo Kim */ 255584fbbe21SMatthew Wilcox (Oracle) static void rmap_walk_file(struct folio *folio, 25566d4675e6SMinchan Kim struct rmap_walk_control *rwc, bool locked) 2557e9995ef9SHugh Dickins { 25582f031c6fSMatthew Wilcox (Oracle) struct address_space *mapping = folio_mapping(folio); 2559a8fa41adSKirill A. Shutemov pgoff_t pgoff_start, pgoff_end; 2560e9995ef9SHugh Dickins struct vm_area_struct *vma; 2561e9995ef9SHugh Dickins 25629f32624bSJoonsoo Kim /* 25639f32624bSJoonsoo Kim * The page lock not only makes sure that page->mapping cannot 25649f32624bSJoonsoo Kim * suddenly be NULLified by truncation, it makes sure that the 25659f32624bSJoonsoo Kim * structure at mapping cannot be freed and reused yet, 2566c8c06efaSDavidlohr Bueso * so we can safely take mapping->i_mmap_rwsem. 25679f32624bSJoonsoo Kim */ 25682f031c6fSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 25699f32624bSJoonsoo Kim 2570e9995ef9SHugh Dickins if (!mapping) 25711df631aeSMinchan Kim return; 25723dec0ba0SDavidlohr Bueso 25732f031c6fSMatthew Wilcox (Oracle) pgoff_start = folio_pgoff(folio); 25742f031c6fSMatthew Wilcox (Oracle) pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; 25756d4675e6SMinchan Kim if (!locked) { 25766d4675e6SMinchan Kim if (i_mmap_trylock_read(mapping)) 25776d4675e6SMinchan Kim goto lookup; 25786d4675e6SMinchan Kim 25796d4675e6SMinchan Kim if (rwc->try_lock) { 25806d4675e6SMinchan Kim rwc->contended = true; 25816d4675e6SMinchan Kim return; 25826d4675e6SMinchan Kim } 25836d4675e6SMinchan Kim 25843dec0ba0SDavidlohr Bueso i_mmap_lock_read(mapping); 25856d4675e6SMinchan Kim } 25866d4675e6SMinchan Kim lookup: 2587a8fa41adSKirill A. Shutemov vma_interval_tree_foreach(vma, &mapping->i_mmap, 2588a8fa41adSKirill A. Shutemov pgoff_start, pgoff_end) { 25892f031c6fSMatthew Wilcox (Oracle) unsigned long address = vma_address(&folio->page, vma); 25900dd1c7bbSJoonsoo Kim 2591494334e4SHugh Dickins VM_BUG_ON_VMA(address == -EFAULT, vma); 2592ad12695fSAndrea Arcangeli cond_resched(); 2593ad12695fSAndrea Arcangeli 25940dd1c7bbSJoonsoo Kim if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 25950dd1c7bbSJoonsoo Kim continue; 25960dd1c7bbSJoonsoo Kim 25972f031c6fSMatthew Wilcox (Oracle) if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 25980dd1c7bbSJoonsoo Kim goto done; 25992f031c6fSMatthew Wilcox (Oracle) if (rwc->done && rwc->done(folio)) 26000dd1c7bbSJoonsoo Kim goto done; 2601e9995ef9SHugh Dickins } 26020dd1c7bbSJoonsoo Kim 26030dd1c7bbSJoonsoo Kim done: 2604b9773199SKirill A. 
Shutemov if (!locked) 26053dec0ba0SDavidlohr Bueso i_mmap_unlock_read(mapping); 2606e9995ef9SHugh Dickins } 2607e9995ef9SHugh Dickins 26086d4675e6SMinchan Kim void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) 2609e9995ef9SHugh Dickins { 26102f031c6fSMatthew Wilcox (Oracle) if (unlikely(folio_test_ksm(folio))) 26112f031c6fSMatthew Wilcox (Oracle) rmap_walk_ksm(folio, rwc); 26122f031c6fSMatthew Wilcox (Oracle) else if (folio_test_anon(folio)) 26132f031c6fSMatthew Wilcox (Oracle) rmap_walk_anon(folio, rwc, false); 2614e9995ef9SHugh Dickins else 26152f031c6fSMatthew Wilcox (Oracle) rmap_walk_file(folio, rwc, false); 2616b9773199SKirill A. Shutemov } 2617b9773199SKirill A. Shutemov 2618b9773199SKirill A. Shutemov /* Like rmap_walk, but caller holds relevant rmap lock */ 26196d4675e6SMinchan Kim void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) 2620b9773199SKirill A. Shutemov { 2621b9773199SKirill A. Shutemov /* no ksm support for now */ 26222f031c6fSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); 26232f031c6fSMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 26242f031c6fSMatthew Wilcox (Oracle) rmap_walk_anon(folio, rwc, true); 2625b9773199SKirill A. Shutemov else 26262f031c6fSMatthew Wilcox (Oracle) rmap_walk_file(folio, rwc, true); 2627e9995ef9SHugh Dickins } 26280fe6e20bSNaoya Horiguchi 2629e3390f67SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE 26300fe6e20bSNaoya Horiguchi /* 2631451b9514SKirill Tkhai * The following two functions are for anonymous (private mapped) hugepages. 26320fe6e20bSNaoya Horiguchi * Unlike common anonymous pages, anonymous hugepages have no accounting code 26330fe6e20bSNaoya Horiguchi * and no lru code, because we handle hugepages differently from common pages. 263428c5209dSDavid Hildenbrand * 263528c5209dSDavid Hildenbrand * RMAP_COMPOUND is ignored. 
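 *
 * As a rough orientation (exact call sites vary between kernel versions):
 * hugetlb_add_anon_rmap() is used when an already-known anonymous hugetlb
 * folio is mapped back in, e.g. when a migration entry is restored, while
 * hugetlb_add_new_anon_rmap() is used by the hugetlb fault paths that
 * install a freshly allocated anonymous folio.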
26360fe6e20bSNaoya Horiguchi */ 26379d5fafd5SDavid Hildenbrand void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, 263828c5209dSDavid Hildenbrand unsigned long address, rmap_t flags) 26390fe6e20bSNaoya Horiguchi { 2640*a4ea1864SDavid Hildenbrand VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); 2641c5c54003SDavid Hildenbrand VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); 2642c5c54003SDavid Hildenbrand 2643132b180fSDavid Hildenbrand atomic_inc(&folio->_entire_mapcount); 2644c66db8c0SDavid Hildenbrand if (flags & RMAP_EXCLUSIVE) 264509c55050SDavid Hildenbrand SetPageAnonExclusive(&folio->page); 2646132b180fSDavid Hildenbrand VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 && 264709c55050SDavid Hildenbrand PageAnonExclusive(&folio->page), folio); 26480fe6e20bSNaoya Horiguchi } 26490fe6e20bSNaoya Horiguchi 26509d5fafd5SDavid Hildenbrand void hugetlb_add_new_anon_rmap(struct folio *folio, 26510fe6e20bSNaoya Horiguchi struct vm_area_struct *vma, unsigned long address) 26520fe6e20bSNaoya Horiguchi { 2653*a4ea1864SDavid Hildenbrand VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); 2654*a4ea1864SDavid Hildenbrand 26550fe6e20bSNaoya Horiguchi BUG_ON(address < vma->vm_start || address >= vma->vm_end); 2656cb67f428SHugh Dickins /* increment count (starts at -1) */ 2657db4e5dbdSMatthew Wilcox (Oracle) atomic_set(&folio->_entire_mapcount, 0); 2658db4e5dbdSMatthew Wilcox (Oracle) folio_clear_hugetlb_restore_reserve(folio); 2659c66db8c0SDavid Hildenbrand __folio_set_anon(folio, vma, address, true); 2660c66db8c0SDavid Hildenbrand SetPageAnonExclusive(&folio->page); 26610fe6e20bSNaoya Horiguchi } 2662e3390f67SNaoya Horiguchi #endif /* CONFIG_HUGETLB_PAGE */ 2663
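/*
 * Illustrative sketch (hypothetical code, not called anywhere in the kernel):
 * a minimal rmap_walk() user supplies an rmap_one callback, which is invoked
 * for each VMA that may map the folio; returning false stops the walk early.
 *
 *	static bool my_rmap_one(struct folio *folio, struct vm_area_struct *vma,
 *				unsigned long address, void *arg)
 *	{
 *		unsigned long *nr_vmas = arg;
 *
 *		(*nr_vmas)++;		// e.g. count mapping VMAs
 *		return true;		// keep walking
 *	}
 *
 *	static void my_count_mappings(struct folio *folio)
 *	{
 *		unsigned long nr_vmas = 0;
 *		struct rmap_walk_control rwc = {
 *			.rmap_one = my_rmap_one,
 *			.arg = &nr_vmas,
 *		};
 *
 *		folio_lock(folio);
 *		rmap_walk(folio, &rwc);
 *		folio_unlock(folio);
 *	}
 */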