/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_rwsem	(while writing or truncating, not reading or faulting)
 *   mm->mmap_lock
 *     mapping->invalidate_lock (in filemap_fault)
 *       page->flags PG_locked (lock_page)
 *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
 *           mapping->i_mmap_rwsem
 *             anon_vma->rwsem
 *               mm->page_table_lock or pte_lock
 *                 swap_lock (in swap_duplicate, swap_info_get)
 *                   mmlist_lock (in mmput, drain_mmlist and others)
 *                   mapping->private_lock (in block_dirty_folio)
 *                     folio_lock_memcg move_lock (in block_dirty_folio)
 *                       i_pages lock (widely used)
 *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
 *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                     sb_lock (within inode_lock in fs/fs-writeback.c)
 *                     i_pages lock (widely used, in set_page_dirty,
 *                       in arch-dependent flush_dcache_mmap_lock,
 *                       within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 *
 * hugetlbfs PageHuge() pages take locks in this order:
 *   hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *     vma_lock (hugetlb specific lock for pmd_sharing)
 *       mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
 *         page->flags PG_locked (lock_page)
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>
#include <trace/events/migrate.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->num_children = 0;
		anon_vma->num_active_vmas = 0;
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against folio_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
	 *
	 * folio_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
 * and that may actually touch the rwsem even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_lock held for reading.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		anon_vma->num_children++; /* self-parent link for new root */
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		anon_vma->num_active_vmas++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
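/*
 * Illustrative sketch (not part of this file): a typical caller makes sure
 * the anon_vma exists before installing the first anonymous PTE, roughly as
 * the anonymous-fault path does.  The helper names below follow the usual
 * fault-handler pattern and are only meant as an example:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
 *	...
 *	folio_add_new_anon_rmap(folio, vma, vmf->address);
 *
 * anon_vma_prepare() is the inline fast path that only calls
 * __anon_vma_prepare() when vma->anon_vma is still NULL.
 */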
/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(),
 * copy_vma() and anon_vma_fork(). The first four want an exact copy of src,
 * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to
 * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before
 * the call, we can identify this case by checking (!dst->anon_vma &&
 * src->anon_vma).
 *
 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
 * and reuse an existing anon_vma which has no vmas and only one child anon_vma.
 * This prevents degradation of the anon_vma hierarchy to an endless linear
 * chain in case of a constantly forking task. On the other hand, an anon_vma
 * with more than one child isn't reused even if there was no alive vma, thus
 * the rmap walker has a good chance of avoiding scanning the whole hierarchy
 * when it searches where the page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse existing anon_vma if it has no vma and only one
		 * anon_vma child.
		 *
		 * Root anon_vma is never reused:
		 * it has self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && src->anon_vma &&
		    anon_vma->num_children < 2 &&
		    anon_vma->num_active_vmas == 0)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->num_active_vmas++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its num_active_vmas can
	 * be incorrectly decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	anon_vma->num_active_vmas++;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->num_children++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}
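/*
 * Illustrative sketch (not part of this file): anon_vma_fork() is what the
 * fork path uses while duplicating the mm.  Roughly, for each copied VMA,
 * dup_mmap() does something along the lines of:
 *
 *	tmp = vm_area_dup(mpnt);
 *	...
 *	if (anon_vma_fork(tmp, mpnt))
 *		goto fail_nomem_anon_vma_fork;
 *
 * The exact control flow lives in kernel/fork.c and may differ between
 * versions; this is only meant to show where the function is used.
 */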
void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->num_children--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma) {
		vma->anon_vma->num_active_vmas--;

		/*
		 * vma would still be needed after unlink, and anon_vma will be
		 * prepared when handling a fault.
		 */
		vma->anon_vma = NULL;
	}
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->num_children);
		VM_WARN_ON(anon_vma->num_active_vmas);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a refcount-increased anon_vma
 * that might have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */
struct anon_vma *folio_get_anon_vma(struct folio *folio)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this folio is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to folio_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with folio_get_anon_vma() and then block on the mutex
 * in the !rwc->try_lock case.
 */
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
					  struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the folio is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!folio_mapped(folio)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	if (rwc && rwc->try_lock) {
		anon_vma = NULL;
		rwc->contended = true;
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}
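/*
 * Illustrative sketch (not part of this file): shrink_folio_list() in
 * mm/vmscan.c is the main consumer of this batching.  It unmaps with
 * TTU_BATCH_FLUSH set and only issues the real flush before I/O or at the
 * end of the batch, roughly:
 *
 *	try_to_unmap(folio, TTU_BATCH_FLUSH);
 *	...
 *	if (folio_test_dirty(folio))
 *		try_to_unmap_flush_dirty();	// before starting writeback
 *	...
 *	try_to_unmap_flush();			// once per batch of folios
 *
 * The exact call sites vary between kernel versions; this only shows the
 * intent of deferring the IPIs.
 */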
/*
 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
 */
#define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
#define TLB_FLUSH_BATCH_PENDING_MASK	\
	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
#define TLB_FLUSH_BATCH_PENDING_LARGE	\
	(TLB_FLUSH_BATCH_PENDING_MASK / 2)

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	int batch, nbatch;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure compiler does not re-order the setting of tlb_flush_batched
	 * before the PTE is cleared.
	 */
	barrier();
	batch = atomic_read(&mm->tlb_flush_batched);
retry:
	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
		/*
		 * Prevent `pending' from catching up with `flushed' because of
		 * overflow.  Reset `pending' and `flushed' to be 1 and 0 if
		 * `pending' becomes large.
		 */
		nbatch = atomic_cmpxchg(&mm->tlb_flush_batched, batch, 1);
		if (nbatch != batch) {
			batch = nbatch;
			goto retry;
		}
	} else {
		atomic_inc(&mm->tlb_flush_batched);
	}

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer the flush */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	int batch = atomic_read(&mm->tlb_flush_batched);
	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;

	if (pending != flushed) {
		flush_tlb_mm(mm);
		/*
		 * If the new TLB flushing is pending during flushing, leave
		 * mm->tlb_flush_batched as is, to avoid losing flushing.
		 */
		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
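/*
 * Worked example of the generation encoding used above (illustrative only):
 * suppose one batched unmap was flushed at generation 1 and two more
 * set_tlb_ubc_flush_pending() calls have happened since.  Then
 * mm->tlb_flush_batched holds (1 << TLB_FLUSH_BATCH_FLUSHED_SHIFT) | 3,
 * i.e. pending = 3 and flushed = 1.  Because pending != flushed, the next
 * flush_tlb_batched_pending() issues flush_tlb_mm() and then stores
 * (3 << TLB_FLUSH_BATCH_FLUSHED_SHIFT) | 3, marking generation 3 as flushed.
 */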
/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct folio *folio = page_folio(page);
	if (folio_test_anon(folio)) {
		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (!vma->vm_file) {
		return -EFAULT;
	} else if (vma->vm_file->f_mapping != folio->mapping) {
		return -EFAULT;
	}

	return vma_address(page, vma);
}

/*
 * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
 * NULL if it doesn't exist.  No guarantees / checks on what the pmd_t*
 * represents.
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
out:
	return pmd;
}
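/*
 * Illustrative sketch (not part of this file): callers such as ksm or
 * khugepaged typically use mm_find_pmd() to locate the pmd and only then map
 * and lock the pte they are interested in, roughly:
 *
 *	pmd_t *pmd = mm_find_pmd(mm, addr);
 *	spinlock_t *ptl;
 *	pte_t *pte;
 *
 *	if (!pmd)
 *		goto out;
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	... inspect or modify *pte ...
 *	pte_unmap_unlock(pte, ptl);
 *
 * The real callers add further checks (e.g. for pmd_trans_huge()); this only
 * shows the intended lookup pattern.
 */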
struct folio_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: folio_referenced_arg will be passed
 */
static bool folio_referenced_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address, void *arg)
{
	struct folio_referenced_arg *pra = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if ((vma->vm_flags & VM_LOCKED) &&
		    (!folio_test_large(folio) || !pvmw.pte)) {
			/* Restore the mlock which got missed */
			mlock_vma_folio(folio, vma, !pvmw.pte);
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false; /* To break the loop */
		}

		if (pvmw.pte) {
			if (lru_gen_enabled() && pte_young(*pvmw.pte)) {
				lru_gen_look_around(&pvmw);
				referenced++;
			}

			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte))
				referenced++;
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped folio? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		folio_clear_idle(folio);
	if (folio_test_clear_young(folio))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
	}

	if (!pra->mapcount)
		return false; /* To break the loop */

	return true;
}

static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct folio_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	/*
	 * Ignore references from this mapping if it has no recency. If the
	 * folio has been used in another mapping, we will catch it; if this
	 * other mapping is already gone, the unmap path will have set the
	 * referenced flag or activated the folio in zap_pte_range().
	 */
	if (!vma_has_recency(vma))
		return true;

	/*
	 * If we are reclaiming on behalf of a cgroup, skip counting on behalf
	 * of references from different cgroups.
	 */
	if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * folio_referenced() - Test if the folio was referenced.
 * @folio: The folio to test.
 * @is_locked: Caller holds lock on the folio.
 * @memcg: target memory cgroup
 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
 *
 * Quick test_and_clear_referenced for all mappings of a folio.
 *
 * Return: The number of mappings which referenced the folio. Return -1 if
 * the function bailed out due to rmap lock contention.
 */
int folio_referenced(struct folio *folio, int is_locked,
		     struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	int we_locked = 0;
	struct folio_referenced_arg pra = {
		.mapcount = folio_mapcount(folio),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = folio_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = folio_lock_anon_vma_read,
		.try_lock = true,
		.invalid_vma = invalid_folio_referenced_vma,
	};

	*vm_flags = 0;
	if (!pra.mapcount)
		return 0;

	if (!folio_raw_mapping(folio))
		return 0;

	if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
		we_locked = folio_trylock(folio);
		if (!we_locked)
			return 1;
	}

	rmap_walk(folio, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		folio_unlock(folio);

	return rwc.contended ? -1 : pra.referenced;
}
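/*
 * Illustrative sketch (not part of this file): page reclaim is the main user
 * of folio_referenced().  In mm/vmscan.c, folio_check_references() does
 * roughly the following to decide whether a folio is still hot; the exact
 * policy around the return value differs between kernel versions:
 *
 *	unsigned long vm_flags;
 *	int referenced_ptes;
 *
 *	referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
 *					   &vm_flags);
 *	if (referenced_ptes == -1)
 *		return FOLIOREF_KEEP;		// rmap lock contention
 *	if (vm_flags & VM_LOCKED)
 *		return FOLIOREF_ACTIVATE;
 *
 * FOLIOREF_* and sc->target_mem_cgroup belong to vmscan, not to this file.
 */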
static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
{
	int cleaned = 0;
	struct vm_area_struct *vma = pvmw->vma;
	struct mmu_notifier_range range;
	unsigned long address = pvmw->address;

	/*
	 * We have to assume the worst case, i.e. pmd for invalidation. Note
	 * that the folio cannot be freed from this function.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0,
				vma->vm_mm, address, vma_address_end(pvmw));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(pvmw)) {
		int ret = 0;

		address = pvmw->address;
		if (pvmw->pte) {
			pte_t entry;
			pte_t *pte = pvmw->pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			pmd_t *pmd = pvmw->pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_range(vma, address,
					  address + HPAGE_PMD_SIZE);
			entry = pmdp_invalidate(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped folio? */
			WARN_ON_ONCE(1);
#endif
		}

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/mm/mmu_notifier.rst
		 */
		if (ret)
			cleaned++;
	}

	mmu_notifier_invalidate_range_end(&range);

	return cleaned;
}

static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
	int *cleaned = arg;

	*cleaned += page_vma_mkclean_one(&pvmw);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int folio_mkclean(struct folio *folio)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!folio_test_locked(folio));

	if (!folio_mapped(folio))
		return 0;

	mapping = folio_mapping(folio);
	if (!mapping)
		return 0;

	rmap_walk(folio, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(folio_mkclean);
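/*
 * Illustrative sketch (not part of this file): the writeback path uses
 * folio_mkclean() when a dirty folio is about to be written out, so that any
 * process still holding a writable PTE takes a fresh fault (and re-dirties
 * the folio) after the I/O.  folio_clear_dirty_for_io() in mm/page-writeback.c
 * does, roughly:
 *
 *	if (folio_mkclean(folio))
 *		folio_mark_dirty(folio);
 *
 * i.e. if any PTE was still dirty, transfer that dirtiness back to the folio
 * before deciding whether it needs writeback.  The surrounding logic is more
 * involved; this only shows where the function slots in.
 */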
/**
 * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of
 *                     [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff)
 *                     within the @vma of shared mappings. And since clean PTEs
 *                     should also be readonly, write protects them too.
 * @pfn: start pfn.
 * @nr_pages: number of physically contiguous pages starting with @pfn.
 * @pgoff: page offset that the @pfn mapped with.
 * @vma: vma that @pfn mapped within.
 *
 * Returns the number of cleaned PTEs (including PMDs).
 */
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
		      struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn		= pfn,
		.nr_pages	= nr_pages,
		.pgoff		= pgoff,
		.vma		= vma,
		.flags		= PVMW_SYNC,
	};

	if (invalid_mkclean_vma(vma, NULL))
		return 0;

	pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma);
	VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);

	return page_vma_mkclean_one(&pvmw);
}

int folio_total_mapcount(struct folio *folio)
{
	int mapcount = folio_entire_mapcount(folio);
	int nr_pages;
	int i;

	/* In the common case, avoid the loop when no pages mapped by PTE */
	if (folio_nr_pages_mapped(folio) == 0)
		return mapcount;
	/*
	 * Add all the PTE mappings of those pages mapped by PTE.
	 * Limit the loop to folio_nr_pages_mapped()?
	 * Perhaps: given all the raciness, that may be a good or a bad idea.
	 */
	nr_pages = folio_nr_pages(folio);
	for (i = 0; i < nr_pages; i++)
		mapcount += atomic_read(&folio_page(folio, i)->_mapcount);

	/* But each of those _mapcounts was based on -1 */
	mapcount += nr_pages;
	return mapcount;
}
1114c44b6743SRik van Riel */ 11155a49973dSHugh Dickins void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) 1116c44b6743SRik van Riel { 1117595af4c9SMatthew Wilcox (Oracle) void *anon_vma = vma->anon_vma; 1118595af4c9SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 1119c44b6743SRik van Riel 1120595af4c9SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 112181d1b09cSSasha Levin VM_BUG_ON_VMA(!anon_vma, vma); 1122c44b6743SRik van Riel 1123595af4c9SMatthew Wilcox (Oracle) anon_vma += PAGE_MAPPING_ANON; 1124414e2fb8SVladimir Davydov /* 1125414e2fb8SVladimir Davydov * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written 1126b3ac0413SMatthew Wilcox (Oracle) * simultaneously, so a concurrent reader (eg folio_referenced()'s 1127b3ac0413SMatthew Wilcox (Oracle) * folio_test_anon()) will not see one without the other. 1128414e2fb8SVladimir Davydov */ 1129595af4c9SMatthew Wilcox (Oracle) WRITE_ONCE(folio->mapping, anon_vma); 1130595af4c9SMatthew Wilcox (Oracle) SetPageAnonExclusive(page); 1131c44b6743SRik van Riel } 1132c44b6743SRik van Riel 1133c44b6743SRik van Riel /** 113443d8eac4SRandy Dunlap * __page_set_anon_rmap - set up new anonymous rmap 11355b4bd90fSMatthew Wilcox (Oracle) * @folio: Folio which contains page. 11365b4bd90fSMatthew Wilcox (Oracle) * @page: Page to add to rmap. 11374e1c1975SAndi Kleen * @vma: VM area to add page to. 11384e1c1975SAndi Kleen * @address: User virtual address of the mapping 1139e8a03febSRik van Riel * @exclusive: the page is exclusively owned by the current process 11401da177e4SLinus Torvalds */ 11415b4bd90fSMatthew Wilcox (Oracle) static void __page_set_anon_rmap(struct folio *folio, struct page *page, 1142e8a03febSRik van Riel struct vm_area_struct *vma, unsigned long address, int exclusive) 11431da177e4SLinus Torvalds { 1144e8a03febSRik van Riel struct anon_vma *anon_vma = vma->anon_vma; 11452822c1aaSNick Piggin 1146e8a03febSRik van Riel BUG_ON(!anon_vma); 1147ea90002bSLinus Torvalds 11485b4bd90fSMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 11496c287605SDavid Hildenbrand goto out; 11504e1c1975SAndi Kleen 1151ea90002bSLinus Torvalds /* 1152e8a03febSRik van Riel * If the page isn't exclusively mapped into this vma, 1153e8a03febSRik van Riel * we must use the _oldest_ possible anon_vma for the 1154e8a03febSRik van Riel * page mapping! 1155ea90002bSLinus Torvalds */ 11564e1c1975SAndi Kleen if (!exclusive) 1157288468c3SAndrea Arcangeli anon_vma = anon_vma->root; 1158ea90002bSLinus Torvalds 115916f5e707SAlex Shi /* 11605b4bd90fSMatthew Wilcox (Oracle) * page_idle does a lockless/optimistic rmap scan on folio->mapping. 116116f5e707SAlex Shi * Make sure the compiler doesn't split the stores of anon_vma and 116216f5e707SAlex Shi * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code 116316f5e707SAlex Shi * could mistake the mapping for a struct address_space and crash. 
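 *
 * Illustrative reader side (editor's addition, not part of the original
 * comment): a lockless scanner only dereferences the pointer after
 * checking the type bit, so both must become visible together:
 *
 *	mapping = READ_ONCE(folio->mapping);
 *	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
 *		anon_vma = (void *)((unsigned long)mapping &
 *				    ~PAGE_MAPPING_FLAGS);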
116416f5e707SAlex Shi */ 11651da177e4SLinus Torvalds anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 11665b4bd90fSMatthew Wilcox (Oracle) WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); 11675b4bd90fSMatthew Wilcox (Oracle) folio->index = linear_page_index(vma, address); 11686c287605SDavid Hildenbrand out: 11696c287605SDavid Hildenbrand if (exclusive) 11706c287605SDavid Hildenbrand SetPageAnonExclusive(page); 11711da177e4SLinus Torvalds } 11729617d95eSNick Piggin 11739617d95eSNick Piggin /** 117443d8eac4SRandy Dunlap * __page_check_anon_rmap - sanity check anonymous rmap addition 1175c97a9e10SNick Piggin * @page: the page to add the mapping to 1176c97a9e10SNick Piggin * @vma: the vm area in which the mapping is added 1177c97a9e10SNick Piggin * @address: the user virtual address mapped 1178c97a9e10SNick Piggin */ 1179c97a9e10SNick Piggin static void __page_check_anon_rmap(struct page *page, 1180c97a9e10SNick Piggin struct vm_area_struct *vma, unsigned long address) 1181c97a9e10SNick Piggin { 1182e05b3453SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 1183c97a9e10SNick Piggin /* 1184c97a9e10SNick Piggin * The page's anon-rmap details (mapping and index) are guaranteed to 1185c97a9e10SNick Piggin * be set up correctly at this point. 1186c97a9e10SNick Piggin * 1187c97a9e10SNick Piggin * We have exclusion against page_add_anon_rmap because the caller 118890aaca85SMiaohe Lin * always holds the page locked. 1189c97a9e10SNick Piggin * 1190c97a9e10SNick Piggin * We have exclusion against page_add_new_anon_rmap because those pages 1191c97a9e10SNick Piggin * are initially only visible via the pagetables, and the pte is locked 1192c97a9e10SNick Piggin * over the call to page_add_new_anon_rmap. 1193c97a9e10SNick Piggin */ 1194e05b3453SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, 1195e05b3453SMatthew Wilcox (Oracle) folio); 119630c46382SYang Shi VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), 119730c46382SYang Shi page); 1198c97a9e10SNick Piggin } 1199c97a9e10SNick Piggin 1200c97a9e10SNick Piggin /** 12019617d95eSNick Piggin * page_add_anon_rmap - add pte mapping to an anonymous page 12029617d95eSNick Piggin * @page: the page to add the mapping to 12039617d95eSNick Piggin * @vma: the vm area in which the mapping is added 12049617d95eSNick Piggin * @address: the user virtual address mapped 1205f1e2db12SDavid Hildenbrand * @flags: the rmap flags 12069617d95eSNick Piggin * 12075ad64688SHugh Dickins * The caller needs to hold the pte lock, and the page must be locked in 120880e14822SHugh Dickins * the anon_vma case: to serialize mapping,index checking after setting, 120980e14822SHugh Dickins * and to ensure that PageAnon is not being upgraded racily to PageKsm 121080e14822SHugh Dickins * (but PageKsm is never downgraded to PageAnon). 12119617d95eSNick Piggin */ 1212ee0800c2SMatthew Wilcox (Oracle) void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, 1213ee0800c2SMatthew Wilcox (Oracle) unsigned long address, rmap_t flags) 1214ad8c2ee8SRik van Riel { 1215ee0800c2SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 1216ee0800c2SMatthew Wilcox (Oracle) atomic_t *mapped = &folio->_nr_pages_mapped; 12179bd3155eSHugh Dickins int nr = 0, nr_pmdmapped = 0; 1218d281ee61SKirill A. Shutemov bool compound = flags & RMAP_COMPOUND; 1219be5ef2d9SHugh Dickins bool first = true; 122053f9263bSKirill A. Shutemov 1221be5ef2d9SHugh Dickins /* Is page being mapped by PTE? 
Is this its first map to be added? */ 1222be5ef2d9SHugh Dickins if (likely(!compound)) { 1223d8dd5e97SHugh Dickins first = atomic_inc_and_test(&page->_mapcount); 1224d8dd5e97SHugh Dickins nr = first; 1225ee0800c2SMatthew Wilcox (Oracle) if (first && folio_test_large(folio)) { 12264b51634cSHugh Dickins nr = atomic_inc_return_relaxed(mapped); 12276287b7daSHugh Dickins nr = (nr < COMPOUND_MAPPED); 122853f9263bSKirill A. Shutemov } 1229ee0800c2SMatthew Wilcox (Oracle) } else if (folio_test_pmd_mappable(folio)) { 1230be5ef2d9SHugh Dickins /* That test is redundant: it's for safety or to optimize out */ 1231be5ef2d9SHugh Dickins 1232ee0800c2SMatthew Wilcox (Oracle) first = atomic_inc_and_test(&folio->_entire_mapcount); 1233be5ef2d9SHugh Dickins if (first) { 12344b51634cSHugh Dickins nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); 12356287b7daSHugh Dickins if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { 1236ee0800c2SMatthew Wilcox (Oracle) nr_pmdmapped = folio_nr_pages(folio); 1237eec20426SMatthew Wilcox (Oracle) nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); 12386287b7daSHugh Dickins /* Raced ahead of a remove and another add? */ 12396287b7daSHugh Dickins if (unlikely(nr < 0)) 12406287b7daSHugh Dickins nr = 0; 12416287b7daSHugh Dickins } else { 12426287b7daSHugh Dickins /* Raced ahead of a remove of COMPOUND_MAPPED */ 12436287b7daSHugh Dickins nr = 0; 12446287b7daSHugh Dickins } 1245be5ef2d9SHugh Dickins } 1246be5ef2d9SHugh Dickins } 1247cb67f428SHugh Dickins 12486c287605SDavid Hildenbrand VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); 12496c287605SDavid Hildenbrand VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); 125053f9263bSKirill A. Shutemov 12519bd3155eSHugh Dickins if (nr_pmdmapped) 1252ee0800c2SMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped); 12539bd3155eSHugh Dickins if (nr) 1254ee0800c2SMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); 12555ad64688SHugh Dickins 1256ee0800c2SMatthew Wilcox (Oracle) if (likely(!folio_test_ksm(folio))) { 12570503ea8fSLiam R. Howlett /* address might be in next vma when migration races vma_merge */ 1258c7c3dec1SJohannes Weiner if (first) 12595b4bd90fSMatthew Wilcox (Oracle) __page_set_anon_rmap(folio, page, vma, address, 126014f9135dSDavid Hildenbrand !!(flags & RMAP_EXCLUSIVE)); 126169029cd5SKAMEZAWA Hiroyuki else 1262c97a9e10SNick Piggin __page_check_anon_rmap(page, vma, address); 1263c7c3dec1SJohannes Weiner } 1264cea86fe2SHugh Dickins 12657efecffbSMatthew Wilcox (Oracle) mlock_vma_folio(folio, vma, compound); 12661da177e4SLinus Torvalds } 12671da177e4SLinus Torvalds 126843d8eac4SRandy Dunlap /** 12694d510f3dSMatthew Wilcox (Oracle) * folio_add_new_anon_rmap - Add mapping to a new anonymous folio. 12704d510f3dSMatthew Wilcox (Oracle) * @folio: The folio to add the mapping to. 12719617d95eSNick Piggin * @vma: the vm area in which the mapping is added 12729617d95eSNick Piggin * @address: the user virtual address mapped 127340f2bbf7SDavid Hildenbrand * 12744d510f3dSMatthew Wilcox (Oracle) * Like page_add_anon_rmap() but must only be called on *new* folios. 12759617d95eSNick Piggin * This means the inc-and-test can be bypassed. 12764d510f3dSMatthew Wilcox (Oracle) * The folio does not have to be locked. 12774d510f3dSMatthew Wilcox (Oracle) * 12784d510f3dSMatthew Wilcox (Oracle) * If the folio is large, it is accounted as a THP. As the folio 12794d510f3dSMatthew Wilcox (Oracle) * is new, it's assumed to be mapped exclusively by a single process. 
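 *
 * Illustrative usage (editor's addition, not part of the original text),
 * loosely modelled on an anonymous fault path; error handling, counter
 * updates and pte locking are omitted:
 *
 *	folio = vma_alloc_zeroed_movable_folio(vma, addr);
 *	if (folio) {
 *		__folio_mark_uptodate(folio);
 *		folio_add_new_anon_rmap(folio, vma, addr);
 *		folio_add_lru_vma(folio, vma);
 *		set_pte_at(mm, addr, pte,
 *			   mk_pte(&folio->page, vma->vm_page_prot));
 *	}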
12809617d95eSNick Piggin */ 12814d510f3dSMatthew Wilcox (Oracle) void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, 12824d510f3dSMatthew Wilcox (Oracle) unsigned long address) 12839617d95eSNick Piggin { 1284d8dd5e97SHugh Dickins int nr; 1285d281ee61SKirill A. Shutemov 128681d1b09cSSasha Levin VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); 12874d510f3dSMatthew Wilcox (Oracle) __folio_set_swapbacked(folio); 1288d8dd5e97SHugh Dickins 12894d510f3dSMatthew Wilcox (Oracle) if (likely(!folio_test_pmd_mappable(folio))) { 1290d8dd5e97SHugh Dickins /* increment count (starts at -1) */ 12914d510f3dSMatthew Wilcox (Oracle) atomic_set(&folio->_mapcount, 0); 1292d8dd5e97SHugh Dickins nr = 1; 1293d8dd5e97SHugh Dickins } else { 129453f9263bSKirill A. Shutemov /* increment count (starts at -1) */ 12954d510f3dSMatthew Wilcox (Oracle) atomic_set(&folio->_entire_mapcount, 0); 12964d510f3dSMatthew Wilcox (Oracle) atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED); 12974d510f3dSMatthew Wilcox (Oracle) nr = folio_nr_pages(folio); 12984d510f3dSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr); 1299d281ee61SKirill A. Shutemov } 1300d8dd5e97SHugh Dickins 13014d510f3dSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); 13025b4bd90fSMatthew Wilcox (Oracle) __page_set_anon_rmap(folio, &folio->page, vma, address, 1); 13039617d95eSNick Piggin } 13049617d95eSNick Piggin 13051da177e4SLinus Torvalds /** 13061da177e4SLinus Torvalds * page_add_file_rmap - add pte mapping to a file page 13071da177e4SLinus Torvalds * @page: the page to add the mapping to 1308cea86fe2SHugh Dickins * @vma: the vm area in which the mapping is added 1309e8b098fcSMike Rapoport * @compound: charge the page as compound or small page 13101da177e4SLinus Torvalds * 1311b8072f09SHugh Dickins * The caller needs to hold the pte lock. 13121da177e4SLinus Torvalds */ 1313eb01a2adSMatthew Wilcox (Oracle) void page_add_file_rmap(struct page *page, struct vm_area_struct *vma, 1314eb01a2adSMatthew Wilcox (Oracle) bool compound) 13151da177e4SLinus Torvalds { 1316eb01a2adSMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 1317eb01a2adSMatthew Wilcox (Oracle) atomic_t *mapped = &folio->_nr_pages_mapped; 13189bd3155eSHugh Dickins int nr = 0, nr_pmdmapped = 0; 13199bd3155eSHugh Dickins bool first; 1320dd78feddSKirill A. Shutemov 1321dd78feddSKirill A. Shutemov VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); 13229bd3155eSHugh Dickins 1323be5ef2d9SHugh Dickins /* Is page being mapped by PTE? Is this its first map to be added? */ 1324be5ef2d9SHugh Dickins if (likely(!compound)) { 1325d8dd5e97SHugh Dickins first = atomic_inc_and_test(&page->_mapcount); 1326d8dd5e97SHugh Dickins nr = first; 1327eb01a2adSMatthew Wilcox (Oracle) if (first && folio_test_large(folio)) { 13284b51634cSHugh Dickins nr = atomic_inc_return_relaxed(mapped); 13296287b7daSHugh Dickins nr = (nr < COMPOUND_MAPPED); 13309a73f61bSKirill A. 
Shutemov } 1331eb01a2adSMatthew Wilcox (Oracle) } else if (folio_test_pmd_mappable(folio)) { 1332be5ef2d9SHugh Dickins /* That test is redundant: it's for safety or to optimize out */ 1333be5ef2d9SHugh Dickins 1334eb01a2adSMatthew Wilcox (Oracle) first = atomic_inc_and_test(&folio->_entire_mapcount); 1335be5ef2d9SHugh Dickins if (first) { 13364b51634cSHugh Dickins nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); 13376287b7daSHugh Dickins if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { 1338eb01a2adSMatthew Wilcox (Oracle) nr_pmdmapped = folio_nr_pages(folio); 1339eec20426SMatthew Wilcox (Oracle) nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); 13406287b7daSHugh Dickins /* Raced ahead of a remove and another add? */ 13416287b7daSHugh Dickins if (unlikely(nr < 0)) 13426287b7daSHugh Dickins nr = 0; 13436287b7daSHugh Dickins } else { 13446287b7daSHugh Dickins /* Raced ahead of a remove of COMPOUND_MAPPED */ 13456287b7daSHugh Dickins nr = 0; 13466287b7daSHugh Dickins } 1347be5ef2d9SHugh Dickins } 1348be5ef2d9SHugh Dickins } 13499bd3155eSHugh Dickins 13509bd3155eSHugh Dickins if (nr_pmdmapped) 1351eb01a2adSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ? 13529bd3155eSHugh Dickins NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped); 13535d543f13SHugh Dickins if (nr) 1354eb01a2adSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr); 1355cea86fe2SHugh Dickins 13567efecffbSMatthew Wilcox (Oracle) mlock_vma_folio(folio, vma, compound); 13571da177e4SLinus Torvalds } 13581da177e4SLinus Torvalds 13591da177e4SLinus Torvalds /** 13601da177e4SLinus Torvalds * page_remove_rmap - take down pte mapping from a page 13611da177e4SLinus Torvalds * @page: page to remove mapping from 1362cea86fe2SHugh Dickins * @vma: the vm area from which the mapping is removed 1363d281ee61SKirill A. Shutemov * @compound: uncharge the page as compound or small page 13641da177e4SLinus Torvalds * 1365b8072f09SHugh Dickins * The caller needs to hold the pte lock. 13661da177e4SLinus Torvalds */ 136762beb906SMatthew Wilcox (Oracle) void page_remove_rmap(struct page *page, struct vm_area_struct *vma, 136862beb906SMatthew Wilcox (Oracle) bool compound) 13691da177e4SLinus Torvalds { 137062beb906SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 137162beb906SMatthew Wilcox (Oracle) atomic_t *mapped = &folio->_nr_pages_mapped; 13729bd3155eSHugh Dickins int nr = 0, nr_pmdmapped = 0; 13739bd3155eSHugh Dickins bool last; 137462beb906SMatthew Wilcox (Oracle) enum node_stat_item idx; 13759bd3155eSHugh Dickins 13769bd3155eSHugh Dickins VM_BUG_ON_PAGE(compound && !PageHead(page), page); 13779bd3155eSHugh Dickins 13789bd3155eSHugh Dickins /* Hugetlb pages are not counted in NR_*MAPPED */ 137962beb906SMatthew Wilcox (Oracle) if (unlikely(folio_test_hugetlb(folio))) { 13809bd3155eSHugh Dickins /* hugetlb pages are always mapped with pmds */ 138162beb906SMatthew Wilcox (Oracle) atomic_dec(&folio->_entire_mapcount); 13829bd3155eSHugh Dickins return; 13839bd3155eSHugh Dickins } 1384cb67f428SHugh Dickins 1385be5ef2d9SHugh Dickins /* Is page being unmapped by PTE? Is this its last map to be removed? 
*/ 1386be5ef2d9SHugh Dickins if (likely(!compound)) { 1387d8dd5e97SHugh Dickins last = atomic_add_negative(-1, &page->_mapcount); 1388d8dd5e97SHugh Dickins nr = last; 138962beb906SMatthew Wilcox (Oracle) if (last && folio_test_large(folio)) { 13904b51634cSHugh Dickins nr = atomic_dec_return_relaxed(mapped); 13916287b7daSHugh Dickins nr = (nr < COMPOUND_MAPPED); 1392cb67f428SHugh Dickins } 139362beb906SMatthew Wilcox (Oracle) } else if (folio_test_pmd_mappable(folio)) { 1394be5ef2d9SHugh Dickins /* That test is redundant: it's for safety or to optimize out */ 1395be5ef2d9SHugh Dickins 139662beb906SMatthew Wilcox (Oracle) last = atomic_add_negative(-1, &folio->_entire_mapcount); 1397be5ef2d9SHugh Dickins if (last) { 13984b51634cSHugh Dickins nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped); 13996287b7daSHugh Dickins if (likely(nr < COMPOUND_MAPPED)) { 140062beb906SMatthew Wilcox (Oracle) nr_pmdmapped = folio_nr_pages(folio); 1401eec20426SMatthew Wilcox (Oracle) nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); 14026287b7daSHugh Dickins /* Raced ahead of another remove and an add? */ 14036287b7daSHugh Dickins if (unlikely(nr < 0)) 14046287b7daSHugh Dickins nr = 0; 14056287b7daSHugh Dickins } else { 14066287b7daSHugh Dickins /* An add of COMPOUND_MAPPED raced ahead */ 14076287b7daSHugh Dickins nr = 0; 14086287b7daSHugh Dickins } 1409be5ef2d9SHugh Dickins } 1410be5ef2d9SHugh Dickins } 1411cb67f428SHugh Dickins 14129bd3155eSHugh Dickins if (nr_pmdmapped) { 141362beb906SMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 141462beb906SMatthew Wilcox (Oracle) idx = NR_ANON_THPS; 141562beb906SMatthew Wilcox (Oracle) else if (folio_test_swapbacked(folio)) 141662beb906SMatthew Wilcox (Oracle) idx = NR_SHMEM_PMDMAPPED; 141762beb906SMatthew Wilcox (Oracle) else 141862beb906SMatthew Wilcox (Oracle) idx = NR_FILE_PMDMAPPED; 141962beb906SMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped); 14209bd3155eSHugh Dickins } 14219bd3155eSHugh Dickins if (nr) { 142262beb906SMatthew Wilcox (Oracle) idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED; 142362beb906SMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(folio, idx, -nr); 142462beb906SMatthew Wilcox (Oracle) 14259bd3155eSHugh Dickins /* 142662beb906SMatthew Wilcox (Oracle) * Queue anon THP for deferred split if at least one 142762beb906SMatthew Wilcox (Oracle) * page of the folio is unmapped and at least one page 142862beb906SMatthew Wilcox (Oracle) * is still mapped. 14299bd3155eSHugh Dickins */ 143062beb906SMatthew Wilcox (Oracle) if (folio_test_pmd_mappable(folio) && folio_test_anon(folio)) 14319bd3155eSHugh Dickins if (!compound || nr < nr_pmdmapped) 1432f158ed61SMatthew Wilcox (Oracle) deferred_split_folio(folio); 14339bd3155eSHugh Dickins } 14349a982250SKirill A. Shutemov 143516f8c5b2SHugh Dickins /* 1436672aa27dSMatthew Wilcox (Oracle) * It would be tidy to reset folio_test_anon mapping when fully 1437672aa27dSMatthew Wilcox (Oracle) * unmapped, but that might overwrite a racing page_add_anon_rmap 1438672aa27dSMatthew Wilcox (Oracle) * which increments mapcount after us but sets mapping before us: 1439672aa27dSMatthew Wilcox (Oracle) * so leave the reset to free_pages_prepare, and remember that 1440672aa27dSMatthew Wilcox (Oracle) * it's only reliable while mapped. 
14411da177e4SLinus Torvalds */ 14429bd3155eSHugh Dickins 1443672aa27dSMatthew Wilcox (Oracle) munlock_vma_folio(folio, vma, compound); 14441da177e4SLinus Torvalds } 14451da177e4SLinus Torvalds 14461da177e4SLinus Torvalds /* 144752629506SJoonsoo Kim * @arg: enum ttu_flags will be passed to this argument 14481da177e4SLinus Torvalds */ 14492f031c6fSMatthew Wilcox (Oracle) static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, 145052629506SJoonsoo Kim unsigned long address, void *arg) 14511da177e4SLinus Torvalds { 14521da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 1453869f7ee6SMatthew Wilcox (Oracle) DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 14541da177e4SLinus Torvalds pte_t pteval; 1455c7ab0d2fSKirill A. Shutemov struct page *subpage; 14566c287605SDavid Hildenbrand bool anon_exclusive, ret = true; 1457ac46d4f3SJérôme Glisse struct mmu_notifier_range range; 14584708f318SPalmer Dabbelt enum ttu_flags flags = (enum ttu_flags)(long)arg; 14591da177e4SLinus Torvalds 1460732ed558SHugh Dickins /* 1461732ed558SHugh Dickins * When racing against e.g. zap_pte_range() on another cpu, 1462732ed558SHugh Dickins * in between its ptep_get_and_clear_full() and page_remove_rmap(), 14631fb08ac6SYang Shi * try_to_unmap() may return before page_mapped() has become false, 1464732ed558SHugh Dickins * if page table locking is skipped: use TTU_SYNC to wait for that. 1465732ed558SHugh Dickins */ 1466732ed558SHugh Dickins if (flags & TTU_SYNC) 1467732ed558SHugh Dickins pvmw.flags = PVMW_SYNC; 1468732ed558SHugh Dickins 1469a98a2f0cSAlistair Popple if (flags & TTU_SPLIT_HUGE_PMD) 1470af28a988SMatthew Wilcox (Oracle) split_huge_pmd_address(vma, address, false, folio); 1471fec89c10SKirill A. Shutemov 1472369ea824SJérôme Glisse /* 1473017b1660SMike Kravetz * For THP, we have to assume the worse case ie pmd for invalidation. 1474017b1660SMike Kravetz * For hugetlb, it could be much worse if we need to do pud 1475017b1660SMike Kravetz * invalidation in the case of pmd sharing. 1476017b1660SMike Kravetz * 1477869f7ee6SMatthew Wilcox (Oracle) * Note that the folio can not be freed in this function as call of 1478869f7ee6SMatthew Wilcox (Oracle) * try_to_unmap() must hold a reference on the folio. 1479369ea824SJérôme Glisse */ 14802aff7a47SMatthew Wilcox (Oracle) range.end = vma_address_end(&pvmw); 14817d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 1482494334e4SHugh Dickins address, range.end); 1483869f7ee6SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio)) { 1484017b1660SMike Kravetz /* 1485017b1660SMike Kravetz * If sharing is possible, start and end will be adjusted 1486017b1660SMike Kravetz * accordingly. 1487017b1660SMike Kravetz */ 1488ac46d4f3SJérôme Glisse adjust_range_if_pmd_sharing_possible(vma, &range.start, 1489ac46d4f3SJérôme Glisse &range.end); 1490017b1660SMike Kravetz } 1491ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range); 1492369ea824SJérôme Glisse 1493c7ab0d2fSKirill A. Shutemov while (page_vma_mapped_walk(&pvmw)) { 1494cea86fe2SHugh Dickins /* Unexpected PMD-mapped THP? */ 1495869f7ee6SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!pvmw.pte, folio); 1496cea86fe2SHugh Dickins 14971da177e4SLinus Torvalds /* 1498869f7ee6SMatthew Wilcox (Oracle) * If the folio is in an mlock()d vma, we must not swap it out. 
14991da177e4SLinus Torvalds */ 1500efdb6720SHugh Dickins if (!(flags & TTU_IGNORE_MLOCK) && 1501efdb6720SHugh Dickins (vma->vm_flags & VM_LOCKED)) { 1502cea86fe2SHugh Dickins /* Restore the mlock which got missed */ 1503869f7ee6SMatthew Wilcox (Oracle) mlock_vma_folio(folio, vma, false); 1504c7ab0d2fSKirill A. Shutemov page_vma_mapped_walk_done(&pvmw); 1505efdb6720SHugh Dickins ret = false; 1506c7ab0d2fSKirill A. Shutemov break; 1507b87537d9SHugh Dickins } 1508c7ab0d2fSKirill A. Shutemov 1509869f7ee6SMatthew Wilcox (Oracle) subpage = folio_page(folio, 1510869f7ee6SMatthew Wilcox (Oracle) pte_pfn(*pvmw.pte) - folio_pfn(folio)); 1511785373b4SLinus Torvalds address = pvmw.address; 15126c287605SDavid Hildenbrand anon_exclusive = folio_test_anon(folio) && 15136c287605SDavid Hildenbrand PageAnonExclusive(subpage); 1514785373b4SLinus Torvalds 1515dfc7ab57SBaolin Wang if (folio_test_hugetlb(folio)) { 15160506c31dSBaolin Wang bool anon = folio_test_anon(folio); 15170506c31dSBaolin Wang 1518017b1660SMike Kravetz /* 1519a00a8759SBaolin Wang * The try_to_unmap() is only passed a hugetlb page 1520a00a8759SBaolin Wang * in the case where the hugetlb page is poisoned. 1521a00a8759SBaolin Wang */ 1522a00a8759SBaolin Wang VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage); 1523a00a8759SBaolin Wang /* 152454205e9cSBaolin Wang * huge_pmd_unshare may unmap an entire PMD page. 152554205e9cSBaolin Wang * There is no way of knowing exactly which PMDs may 152654205e9cSBaolin Wang * be cached for this mm, so we must flush them all. 152754205e9cSBaolin Wang * start/end were already adjusted above to cover this 152854205e9cSBaolin Wang * range. 1529017b1660SMike Kravetz */ 1530ac46d4f3SJérôme Glisse flush_cache_range(vma, range.start, range.end); 153154205e9cSBaolin Wang 1532dfc7ab57SBaolin Wang /* 1533dfc7ab57SBaolin Wang * To call huge_pmd_unshare, i_mmap_rwsem must be 1534dfc7ab57SBaolin Wang * held in write mode. Caller needs to explicitly 1535dfc7ab57SBaolin Wang * do this outside rmap routines. 153640549ba8SMike Kravetz * 153740549ba8SMike Kravetz * We also must hold hugetlb vma_lock in write mode. 153840549ba8SMike Kravetz * Lock order dictates acquiring vma_lock BEFORE 153940549ba8SMike Kravetz * i_mmap_rwsem. We can only try lock here and fail 154040549ba8SMike Kravetz * if unsuccessful. 1541dfc7ab57SBaolin Wang */ 154240549ba8SMike Kravetz if (!anon) { 154340549ba8SMike Kravetz VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 154440549ba8SMike Kravetz if (!hugetlb_vma_trylock_write(vma)) { 154540549ba8SMike Kravetz page_vma_mapped_walk_done(&pvmw); 154640549ba8SMike Kravetz ret = false; 154740549ba8SMike Kravetz break; 154840549ba8SMike Kravetz } 154940549ba8SMike Kravetz if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { 155040549ba8SMike Kravetz hugetlb_vma_unlock_write(vma); 155140549ba8SMike Kravetz flush_tlb_range(vma, 155240549ba8SMike Kravetz range.start, range.end); 155340549ba8SMike Kravetz mmu_notifier_invalidate_range(mm, 155440549ba8SMike Kravetz range.start, range.end); 1555017b1660SMike Kravetz /* 155640549ba8SMike Kravetz * The ref count of the PMD page was 155740549ba8SMike Kravetz * dropped which is part of the way map 155840549ba8SMike Kravetz * counting is done for shared PMDs. 155940549ba8SMike Kravetz * Return 'true' here. When there is 156040549ba8SMike Kravetz * no other sharing, huge_pmd_unshare 156140549ba8SMike Kravetz * returns false and we will unmap the 156240549ba8SMike Kravetz * actual page and drop map count 1563017b1660SMike Kravetz * to zero. 
1564017b1660SMike Kravetz */ 1565017b1660SMike Kravetz page_vma_mapped_walk_done(&pvmw); 1566017b1660SMike Kravetz break; 1567017b1660SMike Kravetz } 156840549ba8SMike Kravetz hugetlb_vma_unlock_write(vma); 156940549ba8SMike Kravetz } 1570a00a8759SBaolin Wang pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); 157154205e9cSBaolin Wang } else { 157254205e9cSBaolin Wang flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 1573088b8aa5SDavid Hildenbrand /* Nuke the page table entry. */ 1574088b8aa5SDavid Hildenbrand if (should_defer_flush(mm, flags)) { 157572b252aeSMel Gorman /* 1576c7ab0d2fSKirill A. Shutemov * We clear the PTE but do not flush so potentially 1577869f7ee6SMatthew Wilcox (Oracle) * a remote CPU could still be writing to the folio. 1578c7ab0d2fSKirill A. Shutemov * If the entry was previously clean then the 1579c7ab0d2fSKirill A. Shutemov * architecture must guarantee that a clear->dirty 1580c7ab0d2fSKirill A. Shutemov * transition on a cached TLB entry is written through 1581c7ab0d2fSKirill A. Shutemov * and traps if the PTE is unmapped. 158272b252aeSMel Gorman */ 1583785373b4SLinus Torvalds pteval = ptep_get_and_clear(mm, address, pvmw.pte); 158472b252aeSMel Gorman 1585c7ab0d2fSKirill A. Shutemov set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); 158672b252aeSMel Gorman } else { 1587785373b4SLinus Torvalds pteval = ptep_clear_flush(vma, address, pvmw.pte); 158872b252aeSMel Gorman } 1589a00a8759SBaolin Wang } 15901da177e4SLinus Torvalds 1591999dad82SPeter Xu /* 1592999dad82SPeter Xu * Now the pte is cleared. If this pte was uffd-wp armed, 1593999dad82SPeter Xu * we may want to replace a none pte with a marker pte if 1594999dad82SPeter Xu * it's file-backed, so we don't lose the tracking info. 1595999dad82SPeter Xu */ 1596999dad82SPeter Xu pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); 1597999dad82SPeter Xu 1598869f7ee6SMatthew Wilcox (Oracle) /* Set the dirty flag on the folio now the pte is gone. */ 15991da177e4SLinus Torvalds if (pte_dirty(pteval)) 1600869f7ee6SMatthew Wilcox (Oracle) folio_mark_dirty(folio); 16011da177e4SLinus Torvalds 1602365e9c87SHugh Dickins /* Update high watermark before we lower rss */ 1603365e9c87SHugh Dickins update_hiwater_rss(mm); 1604365e9c87SHugh Dickins 1605da358d5cSMatthew Wilcox (Oracle) if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) { 16065fd27b8eSPunit Agrawal pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 1607869f7ee6SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio)) { 1608869f7ee6SMatthew Wilcox (Oracle) hugetlb_count_sub(folio_nr_pages(folio), mm); 160918f39629SQi Zheng set_huge_pte_at(mm, address, pvmw.pte, pteval); 16105d317b2bSNaoya Horiguchi } else { 1611869f7ee6SMatthew Wilcox (Oracle) dec_mm_counter(mm, mm_counter(&folio->page)); 1612785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, pteval); 16135f24ae58SNaoya Horiguchi } 1614c7ab0d2fSKirill A. Shutemov 1615bce73e48SChristian Borntraeger } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 161645961722SKonstantin Weitz /* 161745961722SKonstantin Weitz * The guest indicated that the page content is of no 161845961722SKonstantin Weitz * interest anymore. Simply discard the pte, vmscan 161945961722SKonstantin Weitz * will take care of the rest. 1620bce73e48SChristian Borntraeger * A future reference will then fault in a new zero 1621bce73e48SChristian Borntraeger * page. 
When userfaultfd is active, we must not drop 1622bce73e48SChristian Borntraeger * this page though, as its main user (postcopy 1623bce73e48SChristian Borntraeger * migration) will not expect userfaults on already 1624bce73e48SChristian Borntraeger * copied pages. 162545961722SKonstantin Weitz */ 1626869f7ee6SMatthew Wilcox (Oracle) dec_mm_counter(mm, mm_counter(&folio->page)); 16270f10851eSJérôme Glisse /* We have to invalidate as we cleared the pte */ 16280f10851eSJérôme Glisse mmu_notifier_invalidate_range(mm, address, 16290f10851eSJérôme Glisse address + PAGE_SIZE); 1630869f7ee6SMatthew Wilcox (Oracle) } else if (folio_test_anon(folio)) { 1631c7ab0d2fSKirill A. Shutemov swp_entry_t entry = { .val = page_private(subpage) }; 1632179ef71cSCyrill Gorcunov pte_t swp_pte; 16331da177e4SLinus Torvalds /* 16341da177e4SLinus Torvalds * Store the swap location in the pte. 16351da177e4SLinus Torvalds * See handle_pte_fault() ... 16361da177e4SLinus Torvalds */ 1637869f7ee6SMatthew Wilcox (Oracle) if (unlikely(folio_test_swapbacked(folio) != 1638869f7ee6SMatthew Wilcox (Oracle) folio_test_swapcache(folio))) { 1639eb94a878SMinchan Kim WARN_ON_ONCE(1); 164083612a94SMinchan Kim ret = false; 1641369ea824SJérôme Glisse /* We have to invalidate as we cleared the pte */ 16420f10851eSJérôme Glisse mmu_notifier_invalidate_range(mm, address, 16430f10851eSJérôme Glisse address + PAGE_SIZE); 1644eb94a878SMinchan Kim page_vma_mapped_walk_done(&pvmw); 1645eb94a878SMinchan Kim break; 1646eb94a878SMinchan Kim } 1647854e9ed0SMinchan Kim 1648802a3a92SShaohua Li /* MADV_FREE page check */ 1649869f7ee6SMatthew Wilcox (Oracle) if (!folio_test_swapbacked(folio)) { 16506c8e2a25SMauricio Faria de Oliveira int ref_count, map_count; 16516c8e2a25SMauricio Faria de Oliveira 16526c8e2a25SMauricio Faria de Oliveira /* 16536c8e2a25SMauricio Faria de Oliveira * Synchronize with gup_pte_range(): 16546c8e2a25SMauricio Faria de Oliveira * - clear PTE; barrier; read refcount 16556c8e2a25SMauricio Faria de Oliveira * - inc refcount; barrier; read PTE 16566c8e2a25SMauricio Faria de Oliveira */ 16576c8e2a25SMauricio Faria de Oliveira smp_mb(); 16586c8e2a25SMauricio Faria de Oliveira 16596c8e2a25SMauricio Faria de Oliveira ref_count = folio_ref_count(folio); 16606c8e2a25SMauricio Faria de Oliveira map_count = folio_mapcount(folio); 16616c8e2a25SMauricio Faria de Oliveira 16626c8e2a25SMauricio Faria de Oliveira /* 16636c8e2a25SMauricio Faria de Oliveira * Order reads for page refcount and dirty flag 16646c8e2a25SMauricio Faria de Oliveira * (see comments in __remove_mapping()). 16656c8e2a25SMauricio Faria de Oliveira */ 16666c8e2a25SMauricio Faria de Oliveira smp_rmb(); 16676c8e2a25SMauricio Faria de Oliveira 16686c8e2a25SMauricio Faria de Oliveira /* 16696c8e2a25SMauricio Faria de Oliveira * The only page refs must be one from isolation 16706c8e2a25SMauricio Faria de Oliveira * plus the rmap(s) (dropped by discard:). 
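 *
 * Worked example (editor's addition, not part of the original comment):
 * a lazily freed folio mapped by a single PTE and isolated by reclaim
 * typically has ref_count == 2 (isolation plus one rmap) and
 * map_count == 1, so the "ref_count == 1 + map_count" test below passes;
 * any additional reference, such as one taken by a concurrent GUP,
 * makes the test fail and the page table entry is restored instead.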
16716c8e2a25SMauricio Faria de Oliveira */ 16726c8e2a25SMauricio Faria de Oliveira if (ref_count == 1 + map_count && 16736c8e2a25SMauricio Faria de Oliveira !folio_test_dirty(folio)) { 16740f10851eSJérôme Glisse /* Invalidate as we cleared the pte */ 16750f10851eSJérôme Glisse mmu_notifier_invalidate_range(mm, 16760f10851eSJérôme Glisse address, address + PAGE_SIZE); 1677854e9ed0SMinchan Kim dec_mm_counter(mm, MM_ANONPAGES); 1678854e9ed0SMinchan Kim goto discard; 1679854e9ed0SMinchan Kim } 1680854e9ed0SMinchan Kim 1681802a3a92SShaohua Li /* 1682869f7ee6SMatthew Wilcox (Oracle) * If the folio was redirtied, it cannot be 1683802a3a92SShaohua Li * discarded. Remap the page to page table. 1684802a3a92SShaohua Li */ 1685785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, pteval); 1686869f7ee6SMatthew Wilcox (Oracle) folio_set_swapbacked(folio); 1687e4b82222SMinchan Kim ret = false; 1688802a3a92SShaohua Li page_vma_mapped_walk_done(&pvmw); 1689802a3a92SShaohua Li break; 1690802a3a92SShaohua Li } 1691802a3a92SShaohua Li 1692570a335bSHugh Dickins if (swap_duplicate(entry) < 0) { 1693785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, pteval); 1694e4b82222SMinchan Kim ret = false; 1695c7ab0d2fSKirill A. Shutemov page_vma_mapped_walk_done(&pvmw); 1696c7ab0d2fSKirill A. Shutemov break; 1697570a335bSHugh Dickins } 1698ca827d55SKhalid Aziz if (arch_unmap_one(mm, vma, address, pteval) < 0) { 1699322842eaSDavid Hildenbrand swap_free(entry); 1700ca827d55SKhalid Aziz set_pte_at(mm, address, pvmw.pte, pteval); 1701ca827d55SKhalid Aziz ret = false; 1702ca827d55SKhalid Aziz page_vma_mapped_walk_done(&pvmw); 1703ca827d55SKhalid Aziz break; 1704ca827d55SKhalid Aziz } 1705088b8aa5SDavid Hildenbrand 1706088b8aa5SDavid Hildenbrand /* See page_try_share_anon_rmap(): clear PTE first. 
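 * Editor's note (not part of the original comment): clearing and flushing
 * the PTE before calling page_try_share_anon_rmap() stops GUP-fast from
 * taking a new pin through this mapping, so the pin check inside
 * page_try_share_anon_rmap() is reliable; if it still fails, the PTE is
 * restored below and the page stays anon-exclusive.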
*/ 17076c287605SDavid Hildenbrand if (anon_exclusive && 17086c287605SDavid Hildenbrand page_try_share_anon_rmap(subpage)) { 17096c287605SDavid Hildenbrand swap_free(entry); 17106c287605SDavid Hildenbrand set_pte_at(mm, address, pvmw.pte, pteval); 17116c287605SDavid Hildenbrand ret = false; 17126c287605SDavid Hildenbrand page_vma_mapped_walk_done(&pvmw); 17136c287605SDavid Hildenbrand break; 17146c287605SDavid Hildenbrand } 17151da177e4SLinus Torvalds if (list_empty(&mm->mmlist)) { 17161da177e4SLinus Torvalds spin_lock(&mmlist_lock); 1717f412ac08SHugh Dickins if (list_empty(&mm->mmlist)) 17181da177e4SLinus Torvalds list_add(&mm->mmlist, &init_mm.mmlist); 17191da177e4SLinus Torvalds spin_unlock(&mmlist_lock); 17201da177e4SLinus Torvalds } 1721d559db08SKAMEZAWA Hiroyuki dec_mm_counter(mm, MM_ANONPAGES); 1722b084d435SKAMEZAWA Hiroyuki inc_mm_counter(mm, MM_SWAPENTS); 1723179ef71cSCyrill Gorcunov swp_pte = swp_entry_to_pte(entry); 17241493a191SDavid Hildenbrand if (anon_exclusive) 17251493a191SDavid Hildenbrand swp_pte = pte_swp_mkexclusive(swp_pte); 1726179ef71cSCyrill Gorcunov if (pte_soft_dirty(pteval)) 1727179ef71cSCyrill Gorcunov swp_pte = pte_swp_mksoft_dirty(swp_pte); 1728f45ec5ffSPeter Xu if (pte_uffd_wp(pteval)) 1729f45ec5ffSPeter Xu swp_pte = pte_swp_mkuffd_wp(swp_pte); 1730785373b4SLinus Torvalds set_pte_at(mm, address, pvmw.pte, swp_pte); 17310f10851eSJérôme Glisse /* Invalidate as we cleared the pte */ 1732369ea824SJérôme Glisse mmu_notifier_invalidate_range(mm, address, 1733369ea824SJérôme Glisse address + PAGE_SIZE); 17340f10851eSJérôme Glisse } else { 17350f10851eSJérôme Glisse /* 1736869f7ee6SMatthew Wilcox (Oracle) * This is a locked file-backed folio, 1737869f7ee6SMatthew Wilcox (Oracle) * so it cannot be removed from the page 1738869f7ee6SMatthew Wilcox (Oracle) * cache and replaced by a new folio before 1739869f7ee6SMatthew Wilcox (Oracle) * mmu_notifier_invalidate_range_end, so no 1740869f7ee6SMatthew Wilcox (Oracle) * concurrent thread might update its page table 1741869f7ee6SMatthew Wilcox (Oracle) * to point at a new folio while a device is 1742869f7ee6SMatthew Wilcox (Oracle) * still using this folio. 17430f10851eSJérôme Glisse * 1744ee65728eSMike Rapoport * See Documentation/mm/mmu_notifier.rst 17450f10851eSJérôme Glisse */ 1746869f7ee6SMatthew Wilcox (Oracle) dec_mm_counter(mm, mm_counter_file(&folio->page)); 17470f10851eSJérôme Glisse } 17480f10851eSJérôme Glisse discard: 17490f10851eSJérôme Glisse /* 17500f10851eSJérôme Glisse * No need to call mmu_notifier_invalidate_range() it has be 17510f10851eSJérôme Glisse * done above for all cases requiring it to happen under page 17520f10851eSJérôme Glisse * table lock before mmu_notifier_invalidate_range_end() 17530f10851eSJérôme Glisse * 1754ee65728eSMike Rapoport * See Documentation/mm/mmu_notifier.rst 17550f10851eSJérôme Glisse */ 1756869f7ee6SMatthew Wilcox (Oracle) page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); 1757b7435507SHugh Dickins if (vma->vm_flags & VM_LOCKED) 175896f97c43SLorenzo Stoakes mlock_drain_local(); 1759869f7ee6SMatthew Wilcox (Oracle) folio_put(folio); 1760c7ab0d2fSKirill A. 
Shutemov } 1761369ea824SJérôme Glisse 1762ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range); 1763369ea824SJérôme Glisse 1764caed0f48SKOSAKI Motohiro return ret; 17651da177e4SLinus Torvalds } 17661da177e4SLinus Torvalds 176752629506SJoonsoo Kim static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) 176852629506SJoonsoo Kim { 1769222100eeSAnshuman Khandual return vma_is_temporary_stack(vma); 177052629506SJoonsoo Kim } 177152629506SJoonsoo Kim 1772f3ad032cSKefeng Wang static int folio_not_mapped(struct folio *folio) 177352629506SJoonsoo Kim { 17742f031c6fSMatthew Wilcox (Oracle) return !folio_mapped(folio); 17752a52bcbcSKirill A. Shutemov } 177652629506SJoonsoo Kim 17771da177e4SLinus Torvalds /** 1778869f7ee6SMatthew Wilcox (Oracle) * try_to_unmap - Try to remove all page table mappings to a folio. 1779869f7ee6SMatthew Wilcox (Oracle) * @folio: The folio to unmap. 178014fa31b8SAndi Kleen * @flags: action and flags 17811da177e4SLinus Torvalds * 17821da177e4SLinus Torvalds * Tries to remove all the page table entries which are mapping this 1783869f7ee6SMatthew Wilcox (Oracle) * folio. It is the caller's responsibility to check if the folio is 1784869f7ee6SMatthew Wilcox (Oracle) * still mapped if needed (use TTU_SYNC to prevent accounting races). 17851da177e4SLinus Torvalds * 1786869f7ee6SMatthew Wilcox (Oracle) * Context: Caller must hold the folio lock. 17871da177e4SLinus Torvalds */ 1788869f7ee6SMatthew Wilcox (Oracle) void try_to_unmap(struct folio *folio, enum ttu_flags flags) 17891da177e4SLinus Torvalds { 179052629506SJoonsoo Kim struct rmap_walk_control rwc = { 179152629506SJoonsoo Kim .rmap_one = try_to_unmap_one, 1792802a3a92SShaohua Li .arg = (void *)flags, 1793f3ad032cSKefeng Wang .done = folio_not_mapped, 17942f031c6fSMatthew Wilcox (Oracle) .anon_lock = folio_lock_anon_vma_read, 179552629506SJoonsoo Kim }; 17961da177e4SLinus Torvalds 1797a98a2f0cSAlistair Popple if (flags & TTU_RMAP_LOCKED) 17982f031c6fSMatthew Wilcox (Oracle) rmap_walk_locked(folio, &rwc); 1799a98a2f0cSAlistair Popple else 18002f031c6fSMatthew Wilcox (Oracle) rmap_walk(folio, &rwc); 1801a98a2f0cSAlistair Popple } 1802a98a2f0cSAlistair Popple 1803a98a2f0cSAlistair Popple /* 1804a98a2f0cSAlistair Popple * @arg: enum ttu_flags will be passed to this argument. 1805a98a2f0cSAlistair Popple * 1806a98a2f0cSAlistair Popple * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs 180764b586d1SHugh Dickins * containing migration entries. 1808a98a2f0cSAlistair Popple */ 18092f031c6fSMatthew Wilcox (Oracle) static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, 1810a98a2f0cSAlistair Popple unsigned long address, void *arg) 1811a98a2f0cSAlistair Popple { 1812a98a2f0cSAlistair Popple struct mm_struct *mm = vma->vm_mm; 18134b8554c5SMatthew Wilcox (Oracle) DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 1814a98a2f0cSAlistair Popple pte_t pteval; 1815a98a2f0cSAlistair Popple struct page *subpage; 18166c287605SDavid Hildenbrand bool anon_exclusive, ret = true; 1817a98a2f0cSAlistair Popple struct mmu_notifier_range range; 1818a98a2f0cSAlistair Popple enum ttu_flags flags = (enum ttu_flags)(long)arg; 1819a98a2f0cSAlistair Popple 1820a98a2f0cSAlistair Popple /* 1821a98a2f0cSAlistair Popple * When racing against e.g. 
zap_pte_range() on another cpu, 1822a98a2f0cSAlistair Popple * in between its ptep_get_and_clear_full() and page_remove_rmap(), 1823a98a2f0cSAlistair Popple * try_to_migrate() may return before page_mapped() has become false, 1824a98a2f0cSAlistair Popple * if page table locking is skipped: use TTU_SYNC to wait for that. 1825a98a2f0cSAlistair Popple */ 1826a98a2f0cSAlistair Popple if (flags & TTU_SYNC) 1827a98a2f0cSAlistair Popple pvmw.flags = PVMW_SYNC; 1828a98a2f0cSAlistair Popple 1829a98a2f0cSAlistair Popple /* 1830a98a2f0cSAlistair Popple * unmap_page() in mm/huge_memory.c is the only user of migration with 1831a98a2f0cSAlistair Popple * TTU_SPLIT_HUGE_PMD and it wants to freeze. 1832a98a2f0cSAlistair Popple */ 1833a98a2f0cSAlistair Popple if (flags & TTU_SPLIT_HUGE_PMD) 1834af28a988SMatthew Wilcox (Oracle) split_huge_pmd_address(vma, address, true, folio); 1835a98a2f0cSAlistair Popple 1836a98a2f0cSAlistair Popple /* 1837a98a2f0cSAlistair Popple * For THP, we have to assume the worse case ie pmd for invalidation. 1838a98a2f0cSAlistair Popple * For hugetlb, it could be much worse if we need to do pud 1839a98a2f0cSAlistair Popple * invalidation in the case of pmd sharing. 1840a98a2f0cSAlistair Popple * 1841a98a2f0cSAlistair Popple * Note that the page can not be free in this function as call of 1842a98a2f0cSAlistair Popple * try_to_unmap() must hold a reference on the page. 1843a98a2f0cSAlistair Popple */ 18442aff7a47SMatthew Wilcox (Oracle) range.end = vma_address_end(&pvmw); 18457d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 1846a98a2f0cSAlistair Popple address, range.end); 18474b8554c5SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio)) { 1848a98a2f0cSAlistair Popple /* 1849a98a2f0cSAlistair Popple * If sharing is possible, start and end will be adjusted 1850a98a2f0cSAlistair Popple * accordingly. 1851a98a2f0cSAlistair Popple */ 1852a98a2f0cSAlistair Popple adjust_range_if_pmd_sharing_possible(vma, &range.start, 1853a98a2f0cSAlistair Popple &range.end); 1854a98a2f0cSAlistair Popple } 1855a98a2f0cSAlistair Popple mmu_notifier_invalidate_range_start(&range); 1856a98a2f0cSAlistair Popple 1857a98a2f0cSAlistair Popple while (page_vma_mapped_walk(&pvmw)) { 1858a98a2f0cSAlistair Popple #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 1859a98a2f0cSAlistair Popple /* PMD-mapped THP migration entry */ 1860a98a2f0cSAlistair Popple if (!pvmw.pte) { 18614b8554c5SMatthew Wilcox (Oracle) subpage = folio_page(folio, 18624b8554c5SMatthew Wilcox (Oracle) pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); 18634b8554c5SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || 18644b8554c5SMatthew Wilcox (Oracle) !folio_test_pmd_mappable(folio), folio); 1865a98a2f0cSAlistair Popple 18667f5abe60SDavid Hildenbrand if (set_pmd_migration_entry(&pvmw, subpage)) { 18677f5abe60SDavid Hildenbrand ret = false; 18687f5abe60SDavid Hildenbrand page_vma_mapped_walk_done(&pvmw); 18697f5abe60SDavid Hildenbrand break; 18707f5abe60SDavid Hildenbrand } 1871a98a2f0cSAlistair Popple continue; 1872a98a2f0cSAlistair Popple } 1873a98a2f0cSAlistair Popple #endif 1874a98a2f0cSAlistair Popple 1875a98a2f0cSAlistair Popple /* Unexpected PMD-mapped THP? 
*/ 18764b8554c5SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!pvmw.pte, folio); 1877a98a2f0cSAlistair Popple 18781118234eSDavid Hildenbrand if (folio_is_zone_device(folio)) { 18791118234eSDavid Hildenbrand /* 18801118234eSDavid Hildenbrand * Our PTE is a non-present device exclusive entry and 18811118234eSDavid Hildenbrand * calculating the subpage as for the common case would 18821118234eSDavid Hildenbrand * result in an invalid pointer. 18831118234eSDavid Hildenbrand * 18841118234eSDavid Hildenbrand * Since only PAGE_SIZE pages can currently be 18851118234eSDavid Hildenbrand * migrated, just set it to page. This will need to be 18861118234eSDavid Hildenbrand * changed when hugepage migrations to device private 18871118234eSDavid Hildenbrand * memory are supported. 18881118234eSDavid Hildenbrand */ 18891118234eSDavid Hildenbrand VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); 18901118234eSDavid Hildenbrand subpage = &folio->page; 18911118234eSDavid Hildenbrand } else { 18924b8554c5SMatthew Wilcox (Oracle) subpage = folio_page(folio, 18934b8554c5SMatthew Wilcox (Oracle) pte_pfn(*pvmw.pte) - folio_pfn(folio)); 18941118234eSDavid Hildenbrand } 1895a98a2f0cSAlistair Popple address = pvmw.address; 18966c287605SDavid Hildenbrand anon_exclusive = folio_test_anon(folio) && 18976c287605SDavid Hildenbrand PageAnonExclusive(subpage); 1898a98a2f0cSAlistair Popple 1899dfc7ab57SBaolin Wang if (folio_test_hugetlb(folio)) { 19000506c31dSBaolin Wang bool anon = folio_test_anon(folio); 19010506c31dSBaolin Wang 1902a98a2f0cSAlistair Popple /* 190354205e9cSBaolin Wang * huge_pmd_unshare may unmap an entire PMD page. 190454205e9cSBaolin Wang * There is no way of knowing exactly which PMDs may 190554205e9cSBaolin Wang * be cached for this mm, so we must flush them all. 190654205e9cSBaolin Wang * start/end were already adjusted above to cover this 190754205e9cSBaolin Wang * range. 1908a98a2f0cSAlistair Popple */ 1909a98a2f0cSAlistair Popple flush_cache_range(vma, range.start, range.end); 191054205e9cSBaolin Wang 1911dfc7ab57SBaolin Wang /* 1912dfc7ab57SBaolin Wang * To call huge_pmd_unshare, i_mmap_rwsem must be 1913dfc7ab57SBaolin Wang * held in write mode. Caller needs to explicitly 1914dfc7ab57SBaolin Wang * do this outside rmap routines. 191540549ba8SMike Kravetz * 191640549ba8SMike Kravetz * We also must hold hugetlb vma_lock in write mode. 191740549ba8SMike Kravetz * Lock order dictates acquiring vma_lock BEFORE 191840549ba8SMike Kravetz * i_mmap_rwsem. We can only try lock here and 191940549ba8SMike Kravetz * fail if unsuccessful. 1920dfc7ab57SBaolin Wang */ 192140549ba8SMike Kravetz if (!anon) { 192240549ba8SMike Kravetz VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 192340549ba8SMike Kravetz if (!hugetlb_vma_trylock_write(vma)) { 192440549ba8SMike Kravetz page_vma_mapped_walk_done(&pvmw); 192540549ba8SMike Kravetz ret = false; 192640549ba8SMike Kravetz break; 192740549ba8SMike Kravetz } 192840549ba8SMike Kravetz if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { 192940549ba8SMike Kravetz hugetlb_vma_unlock_write(vma); 193040549ba8SMike Kravetz flush_tlb_range(vma, 193140549ba8SMike Kravetz range.start, range.end); 193240549ba8SMike Kravetz mmu_notifier_invalidate_range(mm, 193340549ba8SMike Kravetz range.start, range.end); 1934a98a2f0cSAlistair Popple 1935a98a2f0cSAlistair Popple /* 193640549ba8SMike Kravetz * The ref count of the PMD page was 193740549ba8SMike Kravetz * dropped which is part of the way map 193840549ba8SMike Kravetz * counting is done for shared PMDs. 
193940549ba8SMike Kravetz * Return 'true' here. When there is 194040549ba8SMike Kravetz * no other sharing, huge_pmd_unshare 194140549ba8SMike Kravetz * returns false and we will unmap the 194240549ba8SMike Kravetz * actual page and drop map count 1943a98a2f0cSAlistair Popple * to zero. 1944a98a2f0cSAlistair Popple */ 1945a98a2f0cSAlistair Popple page_vma_mapped_walk_done(&pvmw); 1946a98a2f0cSAlistair Popple break; 1947a98a2f0cSAlistair Popple } 194840549ba8SMike Kravetz hugetlb_vma_unlock_write(vma); 194940549ba8SMike Kravetz } 19505d4af619SBaolin Wang /* Nuke the hugetlb page table entry */ 19515d4af619SBaolin Wang pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); 195254205e9cSBaolin Wang } else { 195354205e9cSBaolin Wang flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 1954a98a2f0cSAlistair Popple /* Nuke the page table entry. */ 1955*7e12beb8SHuang Ying if (should_defer_flush(mm, flags)) { 1956*7e12beb8SHuang Ying /* 1957*7e12beb8SHuang Ying * We clear the PTE but do not flush so potentially 1958*7e12beb8SHuang Ying * a remote CPU could still be writing to the folio. 1959*7e12beb8SHuang Ying * If the entry was previously clean then the 1960*7e12beb8SHuang Ying * architecture must guarantee that a clear->dirty 1961*7e12beb8SHuang Ying * transition on a cached TLB entry is written through 1962*7e12beb8SHuang Ying * and traps if the PTE is unmapped. 1963*7e12beb8SHuang Ying */ 1964*7e12beb8SHuang Ying pteval = ptep_get_and_clear(mm, address, pvmw.pte); 1965*7e12beb8SHuang Ying 1966*7e12beb8SHuang Ying set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); 1967*7e12beb8SHuang Ying } else { 1968a98a2f0cSAlistair Popple pteval = ptep_clear_flush(vma, address, pvmw.pte); 19695d4af619SBaolin Wang } 1970*7e12beb8SHuang Ying } 1971a98a2f0cSAlistair Popple 19724b8554c5SMatthew Wilcox (Oracle) /* Set the dirty flag on the folio now the pte is gone. */ 1973a98a2f0cSAlistair Popple if (pte_dirty(pteval)) 19744b8554c5SMatthew Wilcox (Oracle) folio_mark_dirty(folio); 1975a98a2f0cSAlistair Popple 1976a98a2f0cSAlistair Popple /* Update high watermark before we lower rss */ 1977a98a2f0cSAlistair Popple update_hiwater_rss(mm); 1978a98a2f0cSAlistair Popple 1979f25cbb7aSAlex Sierra if (folio_is_device_private(folio)) { 19804b8554c5SMatthew Wilcox (Oracle) unsigned long pfn = folio_pfn(folio); 1981a98a2f0cSAlistair Popple swp_entry_t entry; 1982a98a2f0cSAlistair Popple pte_t swp_pte; 1983a98a2f0cSAlistair Popple 19846c287605SDavid Hildenbrand if (anon_exclusive) 19856c287605SDavid Hildenbrand BUG_ON(page_try_share_anon_rmap(subpage)); 19866c287605SDavid Hildenbrand 1987a98a2f0cSAlistair Popple /* 1988a98a2f0cSAlistair Popple * Store the pfn of the page in a special migration 1989a98a2f0cSAlistair Popple * pte. do_swap_page() will wait until the migration 1990a98a2f0cSAlistair Popple * pte is removed and then restart fault handling. 
1991a98a2f0cSAlistair Popple */ 19923d88705cSAlistair Popple entry = pte_to_swp_entry(pteval); 19933d88705cSAlistair Popple if (is_writable_device_private_entry(entry)) 19943d88705cSAlistair Popple entry = make_writable_migration_entry(pfn); 19956c287605SDavid Hildenbrand else if (anon_exclusive) 19966c287605SDavid Hildenbrand entry = make_readable_exclusive_migration_entry(pfn); 19973d88705cSAlistair Popple else 19983d88705cSAlistair Popple entry = make_readable_migration_entry(pfn); 1999a98a2f0cSAlistair Popple swp_pte = swp_entry_to_pte(entry); 2000a98a2f0cSAlistair Popple 2001a98a2f0cSAlistair Popple /* 2002a98a2f0cSAlistair Popple * pteval maps a zone device page and is therefore 2003a98a2f0cSAlistair Popple * a swap pte. 2004a98a2f0cSAlistair Popple */ 2005a98a2f0cSAlistair Popple if (pte_swp_soft_dirty(pteval)) 2006a98a2f0cSAlistair Popple swp_pte = pte_swp_mksoft_dirty(swp_pte); 2007a98a2f0cSAlistair Popple if (pte_swp_uffd_wp(pteval)) 2008a98a2f0cSAlistair Popple swp_pte = pte_swp_mkuffd_wp(swp_pte); 2009a98a2f0cSAlistair Popple set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); 20104cc79b33SAnshuman Khandual trace_set_migration_pte(pvmw.address, pte_val(swp_pte), 20114cc79b33SAnshuman Khandual compound_order(&folio->page)); 2012a98a2f0cSAlistair Popple /* 2013a98a2f0cSAlistair Popple * No need to invalidate here it will synchronize on 2014a98a2f0cSAlistair Popple * against the special swap migration pte. 2015a98a2f0cSAlistair Popple */ 2016da358d5cSMatthew Wilcox (Oracle) } else if (PageHWPoison(subpage)) { 2017a98a2f0cSAlistair Popple pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 20184b8554c5SMatthew Wilcox (Oracle) if (folio_test_hugetlb(folio)) { 20194b8554c5SMatthew Wilcox (Oracle) hugetlb_count_sub(folio_nr_pages(folio), mm); 202018f39629SQi Zheng set_huge_pte_at(mm, address, pvmw.pte, pteval); 2021a98a2f0cSAlistair Popple } else { 20224b8554c5SMatthew Wilcox (Oracle) dec_mm_counter(mm, mm_counter(&folio->page)); 2023a98a2f0cSAlistair Popple set_pte_at(mm, address, pvmw.pte, pteval); 2024a98a2f0cSAlistair Popple } 2025a98a2f0cSAlistair Popple 2026a98a2f0cSAlistair Popple } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 2027a98a2f0cSAlistair Popple /* 2028a98a2f0cSAlistair Popple * The guest indicated that the page content is of no 2029a98a2f0cSAlistair Popple * interest anymore. Simply discard the pte, vmscan 2030a98a2f0cSAlistair Popple * will take care of the rest. 2031a98a2f0cSAlistair Popple * A future reference will then fault in a new zero 2032a98a2f0cSAlistair Popple * page. When userfaultfd is active, we must not drop 2033a98a2f0cSAlistair Popple * this page though, as its main user (postcopy 2034a98a2f0cSAlistair Popple * migration) will not expect userfaults on already 2035a98a2f0cSAlistair Popple * copied pages. 
2036a98a2f0cSAlistair Popple */ 20374b8554c5SMatthew Wilcox (Oracle) dec_mm_counter(mm, mm_counter(&folio->page)); 2038a98a2f0cSAlistair Popple /* We have to invalidate as we cleared the pte */ 2039a98a2f0cSAlistair Popple mmu_notifier_invalidate_range(mm, address, 2040a98a2f0cSAlistair Popple address + PAGE_SIZE); 2041a98a2f0cSAlistair Popple } else { 2042a98a2f0cSAlistair Popple swp_entry_t entry; 2043a98a2f0cSAlistair Popple pte_t swp_pte; 2044a98a2f0cSAlistair Popple 2045a98a2f0cSAlistair Popple if (arch_unmap_one(mm, vma, address, pteval) < 0) { 20465d4af619SBaolin Wang if (folio_test_hugetlb(folio)) 20475d4af619SBaolin Wang set_huge_pte_at(mm, address, pvmw.pte, pteval); 20485d4af619SBaolin Wang else 2049a98a2f0cSAlistair Popple set_pte_at(mm, address, pvmw.pte, pteval); 2050a98a2f0cSAlistair Popple ret = false; 2051a98a2f0cSAlistair Popple page_vma_mapped_walk_done(&pvmw); 2052a98a2f0cSAlistair Popple break; 2053a98a2f0cSAlistair Popple } 20546c287605SDavid Hildenbrand VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && 20556c287605SDavid Hildenbrand !anon_exclusive, subpage); 2056088b8aa5SDavid Hildenbrand 2057088b8aa5SDavid Hildenbrand /* See page_try_share_anon_rmap(): clear PTE first. */ 20586c287605SDavid Hildenbrand if (anon_exclusive && 20596c287605SDavid Hildenbrand page_try_share_anon_rmap(subpage)) { 20605d4af619SBaolin Wang if (folio_test_hugetlb(folio)) 20615d4af619SBaolin Wang set_huge_pte_at(mm, address, pvmw.pte, pteval); 20625d4af619SBaolin Wang else 20636c287605SDavid Hildenbrand set_pte_at(mm, address, pvmw.pte, pteval); 20646c287605SDavid Hildenbrand ret = false; 20656c287605SDavid Hildenbrand page_vma_mapped_walk_done(&pvmw); 20666c287605SDavid Hildenbrand break; 20676c287605SDavid Hildenbrand } 2068a98a2f0cSAlistair Popple 2069a98a2f0cSAlistair Popple /* 2070a98a2f0cSAlistair Popple * Store the pfn of the page in a special migration 2071a98a2f0cSAlistair Popple * pte. do_swap_page() will wait until the migration 2072a98a2f0cSAlistair Popple * pte is removed and then restart fault handling. 
2073a98a2f0cSAlistair Popple */ 2074a98a2f0cSAlistair Popple if (pte_write(pteval)) 2075a98a2f0cSAlistair Popple entry = make_writable_migration_entry( 2076a98a2f0cSAlistair Popple page_to_pfn(subpage)); 20776c287605SDavid Hildenbrand else if (anon_exclusive) 20786c287605SDavid Hildenbrand entry = make_readable_exclusive_migration_entry( 20796c287605SDavid Hildenbrand page_to_pfn(subpage)); 2080a98a2f0cSAlistair Popple else 2081a98a2f0cSAlistair Popple entry = make_readable_migration_entry( 2082a98a2f0cSAlistair Popple page_to_pfn(subpage)); 20832e346877SPeter Xu if (pte_young(pteval)) 20842e346877SPeter Xu entry = make_migration_entry_young(entry); 20852e346877SPeter Xu if (pte_dirty(pteval)) 20862e346877SPeter Xu entry = make_migration_entry_dirty(entry); 2087a98a2f0cSAlistair Popple swp_pte = swp_entry_to_pte(entry); 2088a98a2f0cSAlistair Popple if (pte_soft_dirty(pteval)) 2089a98a2f0cSAlistair Popple swp_pte = pte_swp_mksoft_dirty(swp_pte); 2090a98a2f0cSAlistair Popple if (pte_uffd_wp(pteval)) 2091a98a2f0cSAlistair Popple swp_pte = pte_swp_mkuffd_wp(swp_pte); 20925d4af619SBaolin Wang if (folio_test_hugetlb(folio)) 209318f39629SQi Zheng set_huge_pte_at(mm, address, pvmw.pte, swp_pte); 20945d4af619SBaolin Wang else 2095a98a2f0cSAlistair Popple set_pte_at(mm, address, pvmw.pte, swp_pte); 20964cc79b33SAnshuman Khandual trace_set_migration_pte(address, pte_val(swp_pte), 20974cc79b33SAnshuman Khandual compound_order(&folio->page)); 2098a98a2f0cSAlistair Popple /* 2099a98a2f0cSAlistair Popple * No need to invalidate here it will synchronize on 2100a98a2f0cSAlistair Popple * against the special swap migration pte. 2101a98a2f0cSAlistair Popple */ 2102a98a2f0cSAlistair Popple } 2103a98a2f0cSAlistair Popple 2104a98a2f0cSAlistair Popple /* 2105a98a2f0cSAlistair Popple * No need to call mmu_notifier_invalidate_range() it has be 2106a98a2f0cSAlistair Popple * done above for all cases requiring it to happen under page 2107a98a2f0cSAlistair Popple * table lock before mmu_notifier_invalidate_range_end() 2108a98a2f0cSAlistair Popple * 2109ee65728eSMike Rapoport * See Documentation/mm/mmu_notifier.rst 2110a98a2f0cSAlistair Popple */ 21114b8554c5SMatthew Wilcox (Oracle) page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); 2112b7435507SHugh Dickins if (vma->vm_flags & VM_LOCKED) 211396f97c43SLorenzo Stoakes mlock_drain_local(); 21144b8554c5SMatthew Wilcox (Oracle) folio_put(folio); 2115a98a2f0cSAlistair Popple } 2116a98a2f0cSAlistair Popple 2117a98a2f0cSAlistair Popple mmu_notifier_invalidate_range_end(&range); 2118a98a2f0cSAlistair Popple 2119a98a2f0cSAlistair Popple return ret; 2120a98a2f0cSAlistair Popple } 2121a98a2f0cSAlistair Popple 2122a98a2f0cSAlistair Popple /** 2123a98a2f0cSAlistair Popple * try_to_migrate - try to replace all page table mappings with swap entries 21244b8554c5SMatthew Wilcox (Oracle) * @folio: the folio to replace page table entries for 2125a98a2f0cSAlistair Popple * @flags: action and flags 2126a98a2f0cSAlistair Popple * 21274b8554c5SMatthew Wilcox (Oracle) * Tries to remove all the page table entries which are mapping this folio and 21284b8554c5SMatthew Wilcox (Oracle) * replace them with special swap entries. Caller must hold the folio lock. 
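 *
 * Illustrative usage (editor's addition, not part of the original text),
 * roughly following the migration core:
 *
 *	folio_lock(src);
 *	try_to_migrate(src, 0);
 *	if (!folio_mapped(src)) {
 *		... copy src to dst and transfer folio state ...
 *		remove_migration_ptes(src, dst, false);
 *	}
 *	folio_unlock(src);
 *
 * While the migration entries are installed, racing faults wait in
 * do_swap_page() until the entry is removed, as described in
 * try_to_migrate_one() above.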
2129a98a2f0cSAlistair Popple */ 21304b8554c5SMatthew Wilcox (Oracle) void try_to_migrate(struct folio *folio, enum ttu_flags flags) 2131a98a2f0cSAlistair Popple { 2132a98a2f0cSAlistair Popple struct rmap_walk_control rwc = { 2133a98a2f0cSAlistair Popple .rmap_one = try_to_migrate_one, 2134a98a2f0cSAlistair Popple .arg = (void *)flags, 2135f3ad032cSKefeng Wang .done = folio_not_mapped, 21362f031c6fSMatthew Wilcox (Oracle) .anon_lock = folio_lock_anon_vma_read, 2137a98a2f0cSAlistair Popple }; 2138a98a2f0cSAlistair Popple 2139a98a2f0cSAlistair Popple /* 2140a98a2f0cSAlistair Popple * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED, 2141*7e12beb8SHuang Ying * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags. 2142a98a2f0cSAlistair Popple */ 2143a98a2f0cSAlistair Popple if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | 2144*7e12beb8SHuang Ying TTU_SYNC | TTU_BATCH_FLUSH))) 2145a98a2f0cSAlistair Popple return; 2146a98a2f0cSAlistair Popple 2147f25cbb7aSAlex Sierra if (folio_is_zone_device(folio) && 2148f25cbb7aSAlex Sierra (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) 21496c855fceSHugh Dickins return; 21506c855fceSHugh Dickins 215152629506SJoonsoo Kim /* 215252629506SJoonsoo Kim * During exec, a temporary VMA is set up and later moved. 215352629506SJoonsoo Kim * The VMA is moved under the anon_vma lock but not the 215452629506SJoonsoo Kim * page tables, leading to a race where migration cannot 215552629506SJoonsoo Kim * find the migration ptes. Rather than increasing the 215652629506SJoonsoo Kim * locking requirements of exec(), migration skips 215752629506SJoonsoo Kim * temporary VMAs until after exec() completes. 215852629506SJoonsoo Kim */ 21594b8554c5SMatthew Wilcox (Oracle) if (!folio_test_ksm(folio) && folio_test_anon(folio)) 216052629506SJoonsoo Kim rwc.invalid_vma = invalid_migration_vma; 216152629506SJoonsoo Kim 21622a52bcbcSKirill A. Shutemov if (flags & TTU_RMAP_LOCKED) 21632f031c6fSMatthew Wilcox (Oracle) rmap_walk_locked(folio, &rwc); 21642a52bcbcSKirill A.
Shutemov else 21652f031c6fSMatthew Wilcox (Oracle) rmap_walk(folio, &rwc); 2166b291f000SNick Piggin } 2167e9995ef9SHugh Dickins 2168b756a3b5SAlistair Popple #ifdef CONFIG_DEVICE_PRIVATE 2169b756a3b5SAlistair Popple struct make_exclusive_args { 2170b756a3b5SAlistair Popple struct mm_struct *mm; 2171b756a3b5SAlistair Popple unsigned long address; 2172b756a3b5SAlistair Popple void *owner; 2173b756a3b5SAlistair Popple bool valid; 2174b756a3b5SAlistair Popple }; 2175b756a3b5SAlistair Popple 21762f031c6fSMatthew Wilcox (Oracle) static bool page_make_device_exclusive_one(struct folio *folio, 2177b756a3b5SAlistair Popple struct vm_area_struct *vma, unsigned long address, void *priv) 2178b756a3b5SAlistair Popple { 2179b756a3b5SAlistair Popple struct mm_struct *mm = vma->vm_mm; 21800d251485SMatthew Wilcox (Oracle) DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 2181b756a3b5SAlistair Popple struct make_exclusive_args *args = priv; 2182b756a3b5SAlistair Popple pte_t pteval; 2183b756a3b5SAlistair Popple struct page *subpage; 2184b756a3b5SAlistair Popple bool ret = true; 2185b756a3b5SAlistair Popple struct mmu_notifier_range range; 2186b756a3b5SAlistair Popple swp_entry_t entry; 2187b756a3b5SAlistair Popple pte_t swp_pte; 2188b756a3b5SAlistair Popple 21897d4a8be0SAlistair Popple mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, 2190b756a3b5SAlistair Popple vma->vm_mm, address, min(vma->vm_end, 21910d251485SMatthew Wilcox (Oracle) address + folio_size(folio)), 21920d251485SMatthew Wilcox (Oracle) args->owner); 2193b756a3b5SAlistair Popple mmu_notifier_invalidate_range_start(&range); 2194b756a3b5SAlistair Popple 2195b756a3b5SAlistair Popple while (page_vma_mapped_walk(&pvmw)) { 2196b756a3b5SAlistair Popple /* Unexpected PMD-mapped THP? */ 21970d251485SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!pvmw.pte, folio); 2198b756a3b5SAlistair Popple 2199b756a3b5SAlistair Popple if (!pte_present(*pvmw.pte)) { 2200b756a3b5SAlistair Popple ret = false; 2201b756a3b5SAlistair Popple page_vma_mapped_walk_done(&pvmw); 2202b756a3b5SAlistair Popple break; 2203b756a3b5SAlistair Popple } 2204b756a3b5SAlistair Popple 22050d251485SMatthew Wilcox (Oracle) subpage = folio_page(folio, 22060d251485SMatthew Wilcox (Oracle) pte_pfn(*pvmw.pte) - folio_pfn(folio)); 2207b756a3b5SAlistair Popple address = pvmw.address; 2208b756a3b5SAlistair Popple 2209b756a3b5SAlistair Popple /* Nuke the page table entry. */ 2210b756a3b5SAlistair Popple flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 2211b756a3b5SAlistair Popple pteval = ptep_clear_flush(vma, address, pvmw.pte); 2212b756a3b5SAlistair Popple 22130d251485SMatthew Wilcox (Oracle) /* Set the dirty flag on the folio now the pte is gone. */ 2214b756a3b5SAlistair Popple if (pte_dirty(pteval)) 22150d251485SMatthew Wilcox (Oracle) folio_mark_dirty(folio); 2216b756a3b5SAlistair Popple 2217b756a3b5SAlistair Popple /* 2218b756a3b5SAlistair Popple * Check that our target page is still mapped at the expected 2219b756a3b5SAlistair Popple * address. 2220b756a3b5SAlistair Popple */ 2221b756a3b5SAlistair Popple if (args->mm == mm && args->address == address && 2222b756a3b5SAlistair Popple pte_write(pteval)) 2223b756a3b5SAlistair Popple args->valid = true; 2224b756a3b5SAlistair Popple 2225b756a3b5SAlistair Popple /* 2226b756a3b5SAlistair Popple * Store the pfn of the page in a special migration 2227b756a3b5SAlistair Popple * pte. do_swap_page() will wait until the migration 2228b756a3b5SAlistair Popple * pte is removed and then restart fault handling. 
2229b756a3b5SAlistair Popple */ 2230b756a3b5SAlistair Popple if (pte_write(pteval)) 2231b756a3b5SAlistair Popple entry = make_writable_device_exclusive_entry( 2232b756a3b5SAlistair Popple page_to_pfn(subpage)); 2233b756a3b5SAlistair Popple else 2234b756a3b5SAlistair Popple entry = make_readable_device_exclusive_entry( 2235b756a3b5SAlistair Popple page_to_pfn(subpage)); 2236b756a3b5SAlistair Popple swp_pte = swp_entry_to_pte(entry); 2237b756a3b5SAlistair Popple if (pte_soft_dirty(pteval)) 2238b756a3b5SAlistair Popple swp_pte = pte_swp_mksoft_dirty(swp_pte); 2239b756a3b5SAlistair Popple if (pte_uffd_wp(pteval)) 2240b756a3b5SAlistair Popple swp_pte = pte_swp_mkuffd_wp(swp_pte); 2241b756a3b5SAlistair Popple 2242b756a3b5SAlistair Popple set_pte_at(mm, address, pvmw.pte, swp_pte); 2243b756a3b5SAlistair Popple 2244b756a3b5SAlistair Popple /* 2245b756a3b5SAlistair Popple * There is a reference on the page for the swap entry which has 2246b756a3b5SAlistair Popple * been removed, so we shouldn't take another. 2247b756a3b5SAlistair Popple */ 2248cea86fe2SHugh Dickins page_remove_rmap(subpage, vma, false); 2249b756a3b5SAlistair Popple } 2250b756a3b5SAlistair Popple 2251b756a3b5SAlistair Popple mmu_notifier_invalidate_range_end(&range); 2252b756a3b5SAlistair Popple 2253b756a3b5SAlistair Popple return ret; 2254b756a3b5SAlistair Popple } 2255b756a3b5SAlistair Popple 2256b756a3b5SAlistair Popple /** 22570d251485SMatthew Wilcox (Oracle) * folio_make_device_exclusive - Mark the folio exclusively owned by a device. 22580d251485SMatthew Wilcox (Oracle) * @folio: The folio to replace page table entries for. 22590d251485SMatthew Wilcox (Oracle) * @mm: The mm_struct where the folio is expected to be mapped. 22600d251485SMatthew Wilcox (Oracle) * @address: Address where the folio is expected to be mapped. 2261b756a3b5SAlistair Popple * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks 2262b756a3b5SAlistair Popple * 22630d251485SMatthew Wilcox (Oracle) * Tries to remove all the page table entries which are mapping this 22640d251485SMatthew Wilcox (Oracle) * folio and replace them with special device exclusive swap entries to 22650d251485SMatthew Wilcox (Oracle) * grant a device exclusive access to the folio. 2266b756a3b5SAlistair Popple * 22670d251485SMatthew Wilcox (Oracle) * Context: Caller must hold the folio lock. 22680d251485SMatthew Wilcox (Oracle) * Return: false if the folio is still mapped, or if it could not be unmapped 2269b756a3b5SAlistair Popple * from the expected address. Otherwise returns true (success).
2270b756a3b5SAlistair Popple */ 22710d251485SMatthew Wilcox (Oracle) static bool folio_make_device_exclusive(struct folio *folio, 22720d251485SMatthew Wilcox (Oracle) struct mm_struct *mm, unsigned long address, void *owner) 2273b756a3b5SAlistair Popple { 2274b756a3b5SAlistair Popple struct make_exclusive_args args = { 2275b756a3b5SAlistair Popple .mm = mm, 2276b756a3b5SAlistair Popple .address = address, 2277b756a3b5SAlistair Popple .owner = owner, 2278b756a3b5SAlistair Popple .valid = false, 2279b756a3b5SAlistair Popple }; 2280b756a3b5SAlistair Popple struct rmap_walk_control rwc = { 2281b756a3b5SAlistair Popple .rmap_one = page_make_device_exclusive_one, 2282f3ad032cSKefeng Wang .done = folio_not_mapped, 22832f031c6fSMatthew Wilcox (Oracle) .anon_lock = folio_lock_anon_vma_read, 2284b756a3b5SAlistair Popple .arg = &args, 2285b756a3b5SAlistair Popple }; 2286b756a3b5SAlistair Popple 2287b756a3b5SAlistair Popple /* 22880d251485SMatthew Wilcox (Oracle) * Restrict to anonymous folios for now to avoid potential writeback 22890d251485SMatthew Wilcox (Oracle) * issues. 2290b756a3b5SAlistair Popple */ 22910d251485SMatthew Wilcox (Oracle) if (!folio_test_anon(folio)) 2292b756a3b5SAlistair Popple return false; 2293b756a3b5SAlistair Popple 22942f031c6fSMatthew Wilcox (Oracle) rmap_walk(folio, &rwc); 2295b756a3b5SAlistair Popple 22960d251485SMatthew Wilcox (Oracle) return args.valid && !folio_mapcount(folio); 2297b756a3b5SAlistair Popple } 2298b756a3b5SAlistair Popple 2299b756a3b5SAlistair Popple /** 2300b756a3b5SAlistair Popple * make_device_exclusive_range() - Mark a range for exclusive use by a device 2301dd062302SAdrian Huang * @mm: mm_struct of associated target process 2302b756a3b5SAlistair Popple * @start: start of the region to mark for exclusive device access 2303b756a3b5SAlistair Popple * @end: end address of region 2304b756a3b5SAlistair Popple * @pages: returns the pages which were successfully marked for exclusive access 2305b756a3b5SAlistair Popple * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering 2306b756a3b5SAlistair Popple * 2307b756a3b5SAlistair Popple * Returns: number of pages found in the range by GUP. A page is marked for 2308b756a3b5SAlistair Popple * exclusive access only if the page pointer is non-NULL. 2309b756a3b5SAlistair Popple * 2310b756a3b5SAlistair Popple * This function finds ptes mapping page(s) in the given address range, locks 2311b756a3b5SAlistair Popple * them and replaces the mappings with special swap entries preventing userspace CPU 2312b756a3b5SAlistair Popple * access. On fault these entries are replaced with the original mapping after 2313b756a3b5SAlistair Popple * calling MMU notifiers. 2314b756a3b5SAlistair Popple * 2315b756a3b5SAlistair Popple * A driver using this to program access from a device must use an mmu notifier 2316b756a3b5SAlistair Popple * critical section to hold a device specific lock during programming. Once 2317b756a3b5SAlistair Popple * programming is complete it should drop the page lock and reference, after 2318b756a3b5SAlistair Popple * which point CPU access to the page will revoke the exclusive access. (An illustrative driver-side sketch appears at the end of this file.)
2319b756a3b5SAlistair Popple */ 2320b756a3b5SAlistair Popple int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, 2321b756a3b5SAlistair Popple unsigned long end, struct page **pages, 2322b756a3b5SAlistair Popple void *owner) 2323b756a3b5SAlistair Popple { 2324b756a3b5SAlistair Popple long npages = (end - start) >> PAGE_SHIFT; 2325b756a3b5SAlistair Popple long i; 2326b756a3b5SAlistair Popple 2327b756a3b5SAlistair Popple npages = get_user_pages_remote(mm, start, npages, 2328b756a3b5SAlistair Popple FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, 2329b756a3b5SAlistair Popple pages, NULL, NULL); 2330b756a3b5SAlistair Popple if (npages < 0) 2331b756a3b5SAlistair Popple return npages; 2332b756a3b5SAlistair Popple 2333b756a3b5SAlistair Popple for (i = 0; i < npages; i++, start += PAGE_SIZE) { 23340d251485SMatthew Wilcox (Oracle) struct folio *folio = page_folio(pages[i]); 23350d251485SMatthew Wilcox (Oracle) if (PageTail(pages[i]) || !folio_trylock(folio)) { 23360d251485SMatthew Wilcox (Oracle) folio_put(folio); 2337b756a3b5SAlistair Popple pages[i] = NULL; 2338b756a3b5SAlistair Popple continue; 2339b756a3b5SAlistair Popple } 2340b756a3b5SAlistair Popple 23410d251485SMatthew Wilcox (Oracle) if (!folio_make_device_exclusive(folio, mm, start, owner)) { 23420d251485SMatthew Wilcox (Oracle) folio_unlock(folio); 23430d251485SMatthew Wilcox (Oracle) folio_put(folio); 2344b756a3b5SAlistair Popple pages[i] = NULL; 2345b756a3b5SAlistair Popple } 2346b756a3b5SAlistair Popple } 2347b756a3b5SAlistair Popple 2348b756a3b5SAlistair Popple return npages; 2349b756a3b5SAlistair Popple } 2350b756a3b5SAlistair Popple EXPORT_SYMBOL_GPL(make_device_exclusive_range); 2351b756a3b5SAlistair Popple #endif 2352b756a3b5SAlistair Popple 235301d8b20dSPeter Zijlstra void __put_anon_vma(struct anon_vma *anon_vma) 235476545066SRik van Riel { 235576545066SRik van Riel struct anon_vma *root = anon_vma->root; 235676545066SRik van Riel 2357624483f3SAndrey Ryabinin anon_vma_free(anon_vma); 235801d8b20dSPeter Zijlstra if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 235976545066SRik van Riel anon_vma_free(root); 236076545066SRik van Riel } 236176545066SRik van Riel 23622f031c6fSMatthew Wilcox (Oracle) static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, 23636d4675e6SMinchan Kim struct rmap_walk_control *rwc) 2364faecd8ddSJoonsoo Kim { 2365faecd8ddSJoonsoo Kim struct anon_vma *anon_vma; 2366faecd8ddSJoonsoo Kim 23670dd1c7bbSJoonsoo Kim if (rwc->anon_lock) 23686d4675e6SMinchan Kim return rwc->anon_lock(folio, rwc); 23690dd1c7bbSJoonsoo Kim 2370faecd8ddSJoonsoo Kim /* 23712f031c6fSMatthew Wilcox (Oracle) * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() 2372faecd8ddSJoonsoo Kim * because that depends on page_mapped(); but not all its usages 2373c1e8d7c6SMichel Lespinasse * are holding mmap_lock. 
Users without mmap_lock are required to 2374faecd8ddSJoonsoo Kim * take a reference count to prevent the anon_vma from disappearing. 2375faecd8ddSJoonsoo Kim */ 2376e05b3453SMatthew Wilcox (Oracle) anon_vma = folio_anon_vma(folio); 2377faecd8ddSJoonsoo Kim if (!anon_vma) 2378faecd8ddSJoonsoo Kim return NULL; 2379faecd8ddSJoonsoo Kim 23806d4675e6SMinchan Kim if (anon_vma_trylock_read(anon_vma)) 23816d4675e6SMinchan Kim goto out; 23826d4675e6SMinchan Kim 23836d4675e6SMinchan Kim if (rwc->try_lock) { 23846d4675e6SMinchan Kim anon_vma = NULL; 23856d4675e6SMinchan Kim rwc->contended = true; 23866d4675e6SMinchan Kim goto out; 23876d4675e6SMinchan Kim } 23886d4675e6SMinchan Kim 2389faecd8ddSJoonsoo Kim anon_vma_lock_read(anon_vma); 23906d4675e6SMinchan Kim out: 2391faecd8ddSJoonsoo Kim return anon_vma; 2392faecd8ddSJoonsoo Kim } 2393faecd8ddSJoonsoo Kim 2394e9995ef9SHugh Dickins /* 2395e8351ac9SJoonsoo Kim * rmap_walk_anon - do something to an anonymous folio using the anon_vma-based 2396e8351ac9SJoonsoo Kim * rmap method 2397e8351ac9SJoonsoo Kim * @folio: the folio to be handled 2398e8351ac9SJoonsoo Kim * @rwc: control variable according to each walk type 2399e8351ac9SJoonsoo Kim * 2400e8351ac9SJoonsoo Kim * Find all the mappings of a folio using the mapping pointer and the vma chains 2401e8351ac9SJoonsoo Kim * contained in the anon_vma struct it points to. 2402e9995ef9SHugh Dickins */ 240384fbbe21SMatthew Wilcox (Oracle) static void rmap_walk_anon(struct folio *folio, 24046d4675e6SMinchan Kim struct rmap_walk_control *rwc, bool locked) 2405e9995ef9SHugh Dickins { 2406e9995ef9SHugh Dickins struct anon_vma *anon_vma; 2407a8fa41adSKirill A. Shutemov pgoff_t pgoff_start, pgoff_end; 24085beb4930SRik van Riel struct anon_vma_chain *avc; 2409e9995ef9SHugh Dickins 2410b9773199SKirill A. Shutemov if (locked) { 2411e05b3453SMatthew Wilcox (Oracle) anon_vma = folio_anon_vma(folio); 2412b9773199SKirill A. Shutemov /* did the anon_vma disappear under us? */ 2413e05b3453SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!anon_vma, folio); 2414b9773199SKirill A. Shutemov } else { 24152f031c6fSMatthew Wilcox (Oracle) anon_vma = rmap_walk_anon_lock(folio, rwc); 2416b9773199SKirill A. Shutemov } 2417e9995ef9SHugh Dickins if (!anon_vma) 24181df631aeSMinchan Kim return; 2419faecd8ddSJoonsoo Kim 24202f031c6fSMatthew Wilcox (Oracle) pgoff_start = folio_pgoff(folio); 24212f031c6fSMatthew Wilcox (Oracle) pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; 2422a8fa41adSKirill A. Shutemov anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, 2423a8fa41adSKirill A. Shutemov pgoff_start, pgoff_end) { 24245beb4930SRik van Riel struct vm_area_struct *vma = avc->vma; 24252f031c6fSMatthew Wilcox (Oracle) unsigned long address = vma_address(&folio->page, vma); 24260dd1c7bbSJoonsoo Kim 2427494334e4SHugh Dickins VM_BUG_ON_VMA(address == -EFAULT, vma); 2428ad12695fSAndrea Arcangeli cond_resched(); 2429ad12695fSAndrea Arcangeli 24300dd1c7bbSJoonsoo Kim if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 24310dd1c7bbSJoonsoo Kim continue; 24320dd1c7bbSJoonsoo Kim 24332f031c6fSMatthew Wilcox (Oracle) if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 2434e9995ef9SHugh Dickins break; 24352f031c6fSMatthew Wilcox (Oracle) if (rwc->done && rwc->done(folio)) 24360dd1c7bbSJoonsoo Kim break; 2437e9995ef9SHugh Dickins } 2438b9773199SKirill A. Shutemov 2439b9773199SKirill A.
Shutemov if (!locked) 24404fc3f1d6SIngo Molnar anon_vma_unlock_read(anon_vma); 2441e9995ef9SHugh Dickins } 2442e9995ef9SHugh Dickins 2443e8351ac9SJoonsoo Kim /* 2444e8351ac9SJoonsoo Kim * rmap_walk_file - do something to a file folio using the object-based rmap method 2445e8351ac9SJoonsoo Kim * @folio: the folio to be handled 2446e8351ac9SJoonsoo Kim * @rwc: control variable according to each walk type 2447e8351ac9SJoonsoo Kim * 2448e8351ac9SJoonsoo Kim * Find all the mappings of a folio using the mapping pointer and the vma chains 2449e8351ac9SJoonsoo Kim * contained in the address_space struct it points to. 2450e8351ac9SJoonsoo Kim */ 245184fbbe21SMatthew Wilcox (Oracle) static void rmap_walk_file(struct folio *folio, 24526d4675e6SMinchan Kim struct rmap_walk_control *rwc, bool locked) 2453e9995ef9SHugh Dickins { 24542f031c6fSMatthew Wilcox (Oracle) struct address_space *mapping = folio_mapping(folio); 2455a8fa41adSKirill A. Shutemov pgoff_t pgoff_start, pgoff_end; 2456e9995ef9SHugh Dickins struct vm_area_struct *vma; 2457e9995ef9SHugh Dickins 24589f32624bSJoonsoo Kim /* 24599f32624bSJoonsoo Kim * The page lock not only makes sure that page->mapping cannot 24609f32624bSJoonsoo Kim * suddenly be NULLified by truncation, it also makes sure that the 24619f32624bSJoonsoo Kim * structure at mapping cannot be freed and reused yet, 2462c8c06efaSDavidlohr Bueso * so we can safely take mapping->i_mmap_rwsem. 24639f32624bSJoonsoo Kim */ 24642f031c6fSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 24659f32624bSJoonsoo Kim 2466e9995ef9SHugh Dickins if (!mapping) 24671df631aeSMinchan Kim return; 24683dec0ba0SDavidlohr Bueso 24692f031c6fSMatthew Wilcox (Oracle) pgoff_start = folio_pgoff(folio); 24702f031c6fSMatthew Wilcox (Oracle) pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; 24716d4675e6SMinchan Kim if (!locked) { 24726d4675e6SMinchan Kim if (i_mmap_trylock_read(mapping)) 24736d4675e6SMinchan Kim goto lookup; 24746d4675e6SMinchan Kim 24756d4675e6SMinchan Kim if (rwc->try_lock) { 24766d4675e6SMinchan Kim rwc->contended = true; 24776d4675e6SMinchan Kim return; 24786d4675e6SMinchan Kim } 24796d4675e6SMinchan Kim 24803dec0ba0SDavidlohr Bueso i_mmap_lock_read(mapping); 24816d4675e6SMinchan Kim } 24826d4675e6SMinchan Kim lookup: 2483a8fa41adSKirill A. Shutemov vma_interval_tree_foreach(vma, &mapping->i_mmap, 2484a8fa41adSKirill A. Shutemov pgoff_start, pgoff_end) { 24852f031c6fSMatthew Wilcox (Oracle) unsigned long address = vma_address(&folio->page, vma); 24860dd1c7bbSJoonsoo Kim 2487494334e4SHugh Dickins VM_BUG_ON_VMA(address == -EFAULT, vma); 2488ad12695fSAndrea Arcangeli cond_resched(); 2489ad12695fSAndrea Arcangeli 24900dd1c7bbSJoonsoo Kim if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 24910dd1c7bbSJoonsoo Kim continue; 24920dd1c7bbSJoonsoo Kim 24932f031c6fSMatthew Wilcox (Oracle) if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 24940dd1c7bbSJoonsoo Kim goto done; 24952f031c6fSMatthew Wilcox (Oracle) if (rwc->done && rwc->done(folio)) 24960dd1c7bbSJoonsoo Kim goto done; 2497e9995ef9SHugh Dickins } 24980dd1c7bbSJoonsoo Kim 24990dd1c7bbSJoonsoo Kim done: 2500b9773199SKirill A.
Shutemov if (!locked) 25013dec0ba0SDavidlohr Bueso i_mmap_unlock_read(mapping); 2502e9995ef9SHugh Dickins } 2503e9995ef9SHugh Dickins 25046d4675e6SMinchan Kim void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) 2505e9995ef9SHugh Dickins { 25062f031c6fSMatthew Wilcox (Oracle) if (unlikely(folio_test_ksm(folio))) 25072f031c6fSMatthew Wilcox (Oracle) rmap_walk_ksm(folio, rwc); 25082f031c6fSMatthew Wilcox (Oracle) else if (folio_test_anon(folio)) 25092f031c6fSMatthew Wilcox (Oracle) rmap_walk_anon(folio, rwc, false); 2510e9995ef9SHugh Dickins else 25112f031c6fSMatthew Wilcox (Oracle) rmap_walk_file(folio, rwc, false); 2512b9773199SKirill A. Shutemov } 2513b9773199SKirill A. Shutemov 2514b9773199SKirill A. Shutemov /* Like rmap_walk, but caller holds relevant rmap lock */ 25156d4675e6SMinchan Kim void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) 2516b9773199SKirill A. Shutemov { 2517b9773199SKirill A. Shutemov /* no ksm support for now */ 25182f031c6fSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); 25192f031c6fSMatthew Wilcox (Oracle) if (folio_test_anon(folio)) 25202f031c6fSMatthew Wilcox (Oracle) rmap_walk_anon(folio, rwc, true); 2521b9773199SKirill A. Shutemov else 25222f031c6fSMatthew Wilcox (Oracle) rmap_walk_file(folio, rwc, true); 2523e9995ef9SHugh Dickins } 25240fe6e20bSNaoya Horiguchi 2525e3390f67SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE 25260fe6e20bSNaoya Horiguchi /* 2527451b9514SKirill Tkhai * The following two functions are for anonymous (private mapped) hugepages. 25280fe6e20bSNaoya Horiguchi * Unlike common anonymous pages, anonymous hugepages have no accounting code 25290fe6e20bSNaoya Horiguchi * and no lru code, because we handle hugepages differently from common pages. 253028c5209dSDavid Hildenbrand * 253128c5209dSDavid Hildenbrand * RMAP_COMPOUND is ignored. 25320fe6e20bSNaoya Horiguchi */ 253328c5209dSDavid Hildenbrand void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, 253428c5209dSDavid Hildenbrand unsigned long address, rmap_t flags) 25350fe6e20bSNaoya Horiguchi { 2536db4e5dbdSMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 25370fe6e20bSNaoya Horiguchi struct anon_vma *anon_vma = vma->anon_vma; 25380fe6e20bSNaoya Horiguchi int first; 2539a850ea30SNaoya Horiguchi 2540db4e5dbdSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio)); 25410fe6e20bSNaoya Horiguchi BUG_ON(!anon_vma); 25420503ea8fSLiam R. 
Howlett /* address might be in next vma when migration races vma_merge */ 2543db4e5dbdSMatthew Wilcox (Oracle) first = atomic_inc_and_test(&folio->_entire_mapcount); 25446c287605SDavid Hildenbrand VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); 25456c287605SDavid Hildenbrand VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); 25460fe6e20bSNaoya Horiguchi if (first) 25475b4bd90fSMatthew Wilcox (Oracle) __page_set_anon_rmap(folio, page, vma, address, 254828c5209dSDavid Hildenbrand !!(flags & RMAP_EXCLUSIVE)); 25490fe6e20bSNaoya Horiguchi } 25500fe6e20bSNaoya Horiguchi 2551d0ce0e47SSidhartha Kumar void hugepage_add_new_anon_rmap(struct folio *folio, 25520fe6e20bSNaoya Horiguchi struct vm_area_struct *vma, unsigned long address) 25530fe6e20bSNaoya Horiguchi { 25540fe6e20bSNaoya Horiguchi BUG_ON(address < vma->vm_start || address >= vma->vm_end); 2555cb67f428SHugh Dickins /* increment count (starts at -1) */ 2556db4e5dbdSMatthew Wilcox (Oracle) atomic_set(&folio->_entire_mapcount, 0); 2557db4e5dbdSMatthew Wilcox (Oracle) folio_clear_hugetlb_restore_reserve(folio); 2558d0ce0e47SSidhartha Kumar __page_set_anon_rmap(folio, &folio->page, vma, address, 1); 25590fe6e20bSNaoya Horiguchi } 2560e3390f67SNaoya Horiguchi #endif /* CONFIG_HUGETLB_PAGE */ 2561
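/*
 * Illustrative sketch, not part of the original file: a minimal user of the
 * rmap_walk() interface defined above.  The names example_count_one() and
 * example_count_mapping_vmas() are made up for illustration; the
 * rmap_walk_control fields are the same ones used by try_to_migrate() and
 * folio_make_device_exclusive() earlier in this file.
 */
static bool example_count_one(struct folio *folio, struct vm_area_struct *vma,
			      unsigned long address, void *arg)
{
	/* Count each VMA the walk finds mapping this folio. */
	(*(int *)arg)++;
	return true;	/* keep walking the remaining VMAs */
}

static int __maybe_unused example_count_mapping_vmas(struct folio *folio)
{
	int count = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = example_count_one,
		.arg = &count,
		.anon_lock = folio_lock_anon_vma_read,
	};

	/* The folio must be locked, as for the other walkers in this file. */
	rmap_walk(folio, &rwc);
	return count;
}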
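/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * driver-side caller of make_device_exclusive_range() as described in its
 * kernel-doc above.  The function name example_make_page_exclusive() is made
 * up; only make_device_exclusive_range(), the mmap lock helpers, and the
 * unlock_page()/put_page() cleanup are real interfaces.  A real driver would
 * program its device mapping where indicated, under a device-specific lock
 * inside an MMU notifier that filters on @owner.
 */
#ifdef CONFIG_DEVICE_PRIVATE
static int __maybe_unused example_make_page_exclusive(struct mm_struct *mm,
						      unsigned long addr,
						      void *owner)
{
	struct page *page = NULL;
	int npages;

	/* The GUP call inside make_device_exclusive_range() needs mmap_lock. */
	mmap_read_lock(mm);
	npages = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
					     &page, owner);
	mmap_read_unlock(mm);
	if (npages < 0)
		return npages;
	if (!page)
		return -EBUSY;	/* not marked exclusive; caller may retry */

	/*
	 * The page is returned locked with a reference held.  Program the
	 * device mapping here, then release the lock and reference; the next
	 * CPU fault on this address revokes the exclusive entry.
	 */
	unlock_page(page);
	put_page(page);
	return 0;
}
#endif /* CONFIG_DEVICE_PRIVATE */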