xref: /linux/mm/rmap.c (revision 880a99b60d467eefd96322e27b0a8c0b805dfa43)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * mm/rmap.c - physical to virtual reverse mappings
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
51da177e4SLinus Torvalds  * Released under the General Public License (GPL).
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  * Simple, low overhead reverse mapping scheme.
81da177e4SLinus Torvalds  * Please try to keep this thing as modular as possible.
91da177e4SLinus Torvalds  *
101da177e4SLinus Torvalds  * Provides methods for unmapping each kind of mapped page:
111da177e4SLinus Torvalds  * the anon methods track anonymous pages, and
121da177e4SLinus Torvalds  * the file methods track pages belonging to an inode.
131da177e4SLinus Torvalds  *
141da177e4SLinus Torvalds  * Original design by Rik van Riel <riel@conectiva.com.br> 2001
151da177e4SLinus Torvalds  * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
161da177e4SLinus Torvalds  * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
1798f32602SHugh Dickins  * Contributions by Hugh Dickins 2003, 2004
181da177e4SLinus Torvalds  */
191da177e4SLinus Torvalds 
201da177e4SLinus Torvalds /*
211da177e4SLinus Torvalds  * Lock ordering in mm:
221da177e4SLinus Torvalds  *
239608703eSJan Kara  * inode->i_rwsem	(while writing or truncating, not reading or faulting)
24c1e8d7c6SMichel Lespinasse  *   mm->mmap_lock
25730633f0SJan Kara  *     mapping->invalidate_lock (in filemap_fault)
263a47c54fSMike Kravetz  *       page->flags PG_locked (lock_page)
278d9bfb26SMike Kravetz  *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
2855fd6fccSSuren Baghdasaryan  *           vma_start_write
29c8c06efaSDavidlohr Bueso  *             mapping->i_mmap_rwsem
305a505085SIngo Molnar  *               anon_vma->rwsem
31b8072f09SHugh Dickins  *                 mm->page_table_lock or pte_lock
325d337b91SHugh Dickins  *                   swap_lock (in swap_duplicate, swap_info_get)
331da177e4SLinus Torvalds  *                     mmlist_lock (in mmput, drain_mmlist and others)
34e621900aSMatthew Wilcox (Oracle)  *                     mapping->private_lock (in block_dirty_folio)
35e621900aSMatthew Wilcox (Oracle)  *                       folio_lock_memcg move_lock (in block_dirty_folio)
36b93b0163SMatthew Wilcox  *                         i_pages lock (widely used)
37e809c3feSMatthew Wilcox (Oracle)  *                           lruvec->lru_lock (in folio_lruvec_lock_irq)
38250df6edSDave Chinner  *                     inode->i_lock (in set_page_dirty's __mark_inode_dirty)
39f758eeabSChristoph Hellwig  *                     bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
401da177e4SLinus Torvalds  *                       sb_lock (within inode_lock in fs/fs-writeback.c)
41b93b0163SMatthew Wilcox  *                       i_pages lock (widely used, in set_page_dirty,
421da177e4SLinus Torvalds  *                                 in arch-dependent flush_dcache_mmap_lock,
43f758eeabSChristoph Hellwig  *                                 within bdi.wb->list_lock in __sync_single_inode)
446a46079cSAndi Kleen  *
459608703eSJan Kara  * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
466a46079cSAndi Kleen  *   ->tasklist_lock
476a46079cSAndi Kleen  *     pte map lock
48c0d0381aSMike Kravetz  *
498d9bfb26SMike Kravetz  * hugetlbfs PageHuge() pages take locks in this order:
50c0d0381aSMike Kravetz  *   hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
518d9bfb26SMike Kravetz  *     vma_lock (hugetlb specific lock for pmd_sharing)
528d9bfb26SMike Kravetz  *       mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
53c0d0381aSMike Kravetz  *         page->flags PG_locked (lock_page)
541da177e4SLinus Torvalds  */
551da177e4SLinus Torvalds 
561da177e4SLinus Torvalds #include <linux/mm.h>
576e84f315SIngo Molnar #include <linux/sched/mm.h>
5829930025SIngo Molnar #include <linux/sched/task.h>
591da177e4SLinus Torvalds #include <linux/pagemap.h>
601da177e4SLinus Torvalds #include <linux/swap.h>
611da177e4SLinus Torvalds #include <linux/swapops.h>
621da177e4SLinus Torvalds #include <linux/slab.h>
631da177e4SLinus Torvalds #include <linux/init.h>
645ad64688SHugh Dickins #include <linux/ksm.h>
651da177e4SLinus Torvalds #include <linux/rmap.h>
661da177e4SLinus Torvalds #include <linux/rcupdate.h>
67b95f1b31SPaul Gortmaker #include <linux/export.h>
688a9f3ccdSBalbir Singh #include <linux/memcontrol.h>
69cddb8a5cSAndrea Arcangeli #include <linux/mmu_notifier.h>
7064cdd548SKOSAKI Motohiro #include <linux/migrate.h>
710fe6e20bSNaoya Horiguchi #include <linux/hugetlb.h>
72444f84fdSBen Dooks #include <linux/huge_mm.h>
73ef5d437fSJan Kara #include <linux/backing-dev.h>
7433c3fc71SVladimir Davydov #include <linux/page_idle.h>
75a5430ddaSJérôme Glisse #include <linux/memremap.h>
76bce73e48SChristian Borntraeger #include <linux/userfaultfd_k.h>
77999dad82SPeter Xu #include <linux/mm_inline.h>
781da177e4SLinus Torvalds 
791da177e4SLinus Torvalds #include <asm/tlbflush.h>
801da177e4SLinus Torvalds 
814cc79b33SAnshuman Khandual #define CREATE_TRACE_POINTS
8272b252aeSMel Gorman #include <trace/events/tlb.h>
834cc79b33SAnshuman Khandual #include <trace/events/migrate.h>
8472b252aeSMel Gorman 
85b291f000SNick Piggin #include "internal.h"
86b291f000SNick Piggin 
87fdd2e5f8SAdrian Bunk static struct kmem_cache *anon_vma_cachep;
885beb4930SRik van Riel static struct kmem_cache *anon_vma_chain_cachep;
89fdd2e5f8SAdrian Bunk 
90fdd2e5f8SAdrian Bunk static inline struct anon_vma *anon_vma_alloc(void)
91fdd2e5f8SAdrian Bunk {
9201d8b20dSPeter Zijlstra 	struct anon_vma *anon_vma;
9301d8b20dSPeter Zijlstra 
9401d8b20dSPeter Zijlstra 	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
9501d8b20dSPeter Zijlstra 	if (anon_vma) {
9601d8b20dSPeter Zijlstra 		atomic_set(&anon_vma->refcount, 1);
972555283eSJann Horn 		anon_vma->num_children = 0;
982555283eSJann Horn 		anon_vma->num_active_vmas = 0;
997a3ef208SKonstantin Khlebnikov 		anon_vma->parent = anon_vma;
10001d8b20dSPeter Zijlstra 		/*
10101d8b20dSPeter Zijlstra 		 * Initialise the anon_vma root to point to itself. If called
10201d8b20dSPeter Zijlstra 		 * from fork, the root will be reset to the parent's anon_vma.
10301d8b20dSPeter Zijlstra 		 */
10401d8b20dSPeter Zijlstra 		anon_vma->root = anon_vma;
105fdd2e5f8SAdrian Bunk 	}
106fdd2e5f8SAdrian Bunk 
10701d8b20dSPeter Zijlstra 	return anon_vma;
10801d8b20dSPeter Zijlstra }
10901d8b20dSPeter Zijlstra 
11001d8b20dSPeter Zijlstra static inline void anon_vma_free(struct anon_vma *anon_vma)
111fdd2e5f8SAdrian Bunk {
11201d8b20dSPeter Zijlstra 	VM_BUG_ON(atomic_read(&anon_vma->refcount));
11388c22088SPeter Zijlstra 
11488c22088SPeter Zijlstra 	/*
1152f031c6fSMatthew Wilcox (Oracle) 	 * Synchronize against folio_lock_anon_vma_read() such that
11688c22088SPeter Zijlstra 	 * we can safely hold the lock without the anon_vma getting
11788c22088SPeter Zijlstra 	 * freed.
11888c22088SPeter Zijlstra 	 *
11988c22088SPeter Zijlstra 	 * Relies on the full mb implied by the atomic_dec_and_test() from
12088c22088SPeter Zijlstra 	 * put_anon_vma() against the acquire barrier implied by
1212f031c6fSMatthew Wilcox (Oracle) 	 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
12288c22088SPeter Zijlstra 	 *
1232f031c6fSMatthew Wilcox (Oracle) 	 * folio_lock_anon_vma_read()	VS	put_anon_vma()
1244fc3f1d6SIngo Molnar 	 *   down_read_trylock()		  atomic_dec_and_test()
12588c22088SPeter Zijlstra 	 *   LOCK				  MB
1264fc3f1d6SIngo Molnar 	 *   atomic_read()			  rwsem_is_locked()
12788c22088SPeter Zijlstra 	 *
12888c22088SPeter Zijlstra 	 * LOCK should suffice since the actual taking of the lock must
12988c22088SPeter Zijlstra 	 * happen _before_ what follows.
13088c22088SPeter Zijlstra 	 */
1317f39dda9SHugh Dickins 	might_sleep();
1325a505085SIngo Molnar 	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
1334fc3f1d6SIngo Molnar 		anon_vma_lock_write(anon_vma);
13408b52706SKonstantin Khlebnikov 		anon_vma_unlock_write(anon_vma);
13588c22088SPeter Zijlstra 	}
13688c22088SPeter Zijlstra 
137fdd2e5f8SAdrian Bunk 	kmem_cache_free(anon_vma_cachep, anon_vma);
138fdd2e5f8SAdrian Bunk }
1391da177e4SLinus Torvalds 
140dd34739cSLinus Torvalds static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
1415beb4930SRik van Riel {
142dd34739cSLinus Torvalds 	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
1435beb4930SRik van Riel }
1445beb4930SRik van Riel 
145e574b5fdSNamhyung Kim static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
1465beb4930SRik van Riel {
1475beb4930SRik van Riel 	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
1485beb4930SRik van Riel }
1495beb4930SRik van Riel 
1506583a843SKautuk Consul static void anon_vma_chain_link(struct vm_area_struct *vma,
1516583a843SKautuk Consul 				struct anon_vma_chain *avc,
1526583a843SKautuk Consul 				struct anon_vma *anon_vma)
1536583a843SKautuk Consul {
1546583a843SKautuk Consul 	avc->vma = vma;
1556583a843SKautuk Consul 	avc->anon_vma = anon_vma;
1566583a843SKautuk Consul 	list_add(&avc->same_vma, &vma->anon_vma_chain);
157bf181b9fSMichel Lespinasse 	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
1586583a843SKautuk Consul }
1596583a843SKautuk Consul 
160d9d332e0SLinus Torvalds /**
161d5a187daSVlastimil Babka  * __anon_vma_prepare - attach an anon_vma to a memory region
162d9d332e0SLinus Torvalds  * @vma: the memory region in question
163d9d332e0SLinus Torvalds  *
164d9d332e0SLinus Torvalds  * This makes sure the memory mapping described by 'vma' has
165d9d332e0SLinus Torvalds  * an 'anon_vma' attached to it, so that we can associate the
166d9d332e0SLinus Torvalds  * anonymous pages mapped into it with that anon_vma.
167d9d332e0SLinus Torvalds  *
168d5a187daSVlastimil Babka  * The common case will be that we already have one, which
169d5a187daSVlastimil Babka  * is handled inline by anon_vma_prepare(). But if
17023a0790aSFigo.zhang  * not, we either need to find an adjacent mapping whose
171d9d332e0SLinus Torvalds  * anon_vma we can re-use (very common when the only
172d9d332e0SLinus Torvalds  * reason for splitting a vma has been mprotect()), or we
173d9d332e0SLinus Torvalds  * allocate a new one.
174d9d332e0SLinus Torvalds  *
175d9d332e0SLinus Torvalds  * Anon-vma allocations are very subtle, because we may have
1762f031c6fSMatthew Wilcox (Oracle)  * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
177aaf1f990SMiaohe Lin  * and that may actually touch the rwsem even in the newly
178d9d332e0SLinus Torvalds  * allocated vma (it depends on RCU to make sure that the
179d9d332e0SLinus Torvalds  * anon_vma isn't actually destroyed).
180d9d332e0SLinus Torvalds  *
181d9d332e0SLinus Torvalds  * As a result, we need to do proper anon_vma locking even
182d9d332e0SLinus Torvalds  * for the new allocation. At the same time, we do not want
183d9d332e0SLinus Torvalds  * to do any locking for the common case of already having
184d9d332e0SLinus Torvalds  * an anon_vma.
185d9d332e0SLinus Torvalds  *
186c1e8d7c6SMichel Lespinasse  * This must be called with the mmap_lock held for reading.
187d9d332e0SLinus Torvalds  */
188d5a187daSVlastimil Babka int __anon_vma_prepare(struct vm_area_struct *vma)
1891da177e4SLinus Torvalds {
190d5a187daSVlastimil Babka 	struct mm_struct *mm = vma->vm_mm;
191d5a187daSVlastimil Babka 	struct anon_vma *anon_vma, *allocated;
1925beb4930SRik van Riel 	struct anon_vma_chain *avc;
1931da177e4SLinus Torvalds 
1941da177e4SLinus Torvalds 	might_sleep();
1951da177e4SLinus Torvalds 
196dd34739cSLinus Torvalds 	avc = anon_vma_chain_alloc(GFP_KERNEL);
1975beb4930SRik van Riel 	if (!avc)
1985beb4930SRik van Riel 		goto out_enomem;
1995beb4930SRik van Riel 
2001da177e4SLinus Torvalds 	anon_vma = find_mergeable_anon_vma(vma);
2011da177e4SLinus Torvalds 	allocated = NULL;
202d9d332e0SLinus Torvalds 	if (!anon_vma) {
2031da177e4SLinus Torvalds 		anon_vma = anon_vma_alloc();
2041da177e4SLinus Torvalds 		if (unlikely(!anon_vma))
2055beb4930SRik van Riel 			goto out_enomem_free_avc;
2062555283eSJann Horn 		anon_vma->num_children++; /* self-parent link for new root */
2071da177e4SLinus Torvalds 		allocated = anon_vma;
2081da177e4SLinus Torvalds 	}
2091da177e4SLinus Torvalds 
2104fc3f1d6SIngo Molnar 	anon_vma_lock_write(anon_vma);
2111da177e4SLinus Torvalds 	/* page_table_lock to protect against threads */
2121da177e4SLinus Torvalds 	spin_lock(&mm->page_table_lock);
2131da177e4SLinus Torvalds 	if (likely(!vma->anon_vma)) {
2141da177e4SLinus Torvalds 		vma->anon_vma = anon_vma;
2156583a843SKautuk Consul 		anon_vma_chain_link(vma, avc, anon_vma);
2162555283eSJann Horn 		anon_vma->num_active_vmas++;
2171da177e4SLinus Torvalds 		allocated = NULL;
21831f2b0ebSOleg Nesterov 		avc = NULL;
2191da177e4SLinus Torvalds 	}
2201da177e4SLinus Torvalds 	spin_unlock(&mm->page_table_lock);
22108b52706SKonstantin Khlebnikov 	anon_vma_unlock_write(anon_vma);
22231f2b0ebSOleg Nesterov 
22331f2b0ebSOleg Nesterov 	if (unlikely(allocated))
22401d8b20dSPeter Zijlstra 		put_anon_vma(allocated);
22531f2b0ebSOleg Nesterov 	if (unlikely(avc))
2265beb4930SRik van Riel 		anon_vma_chain_free(avc);
227d5a187daSVlastimil Babka 
2281da177e4SLinus Torvalds 	return 0;
2295beb4930SRik van Riel 
2305beb4930SRik van Riel  out_enomem_free_avc:
2315beb4930SRik van Riel 	anon_vma_chain_free(avc);
2325beb4930SRik van Riel  out_enomem:
2335beb4930SRik van Riel 	return -ENOMEM;
2341da177e4SLinus Torvalds }
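/*
 * Editor's note: a minimal sketch (not part of rmap.c) of how this slow
 * path is normally reached.  The wrapper below mirrors anon_vma_prepare()
 * as declared in include/linux/rmap.h, reproduced from memory purely for
 * illustration; fault paths are expected to call it before installing a
 * new anonymous page so rmap has an anon_vma to attach the mapping to:
 *
 *	static inline int anon_vma_prepare(struct vm_area_struct *vma)
 *	{
 *		if (likely(vma->anon_vma))
 *			return 0;		// common case: already attached
 *
 *		return __anon_vma_prepare(vma);	// slow path, defined above
 *	}
 */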
2351da177e4SLinus Torvalds 
236bb4aa396SLinus Torvalds /*
237bb4aa396SLinus Torvalds  * This is a useful helper function for locking the anon_vma root as
238bb4aa396SLinus Torvalds  * we traverse the vma->anon_vma_chain, looping over anon_vma's that
239bb4aa396SLinus Torvalds  * have the same vma.
240bb4aa396SLinus Torvalds  *
241bb4aa396SLinus Torvalds  * Such anon_vma's should have the same root, so you'd expect to see
242bb4aa396SLinus Torvalds  * just a single down_write of the root rwsem for the whole traversal.
243bb4aa396SLinus Torvalds  */
244bb4aa396SLinus Torvalds static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
245bb4aa396SLinus Torvalds {
246bb4aa396SLinus Torvalds 	struct anon_vma *new_root = anon_vma->root;
247bb4aa396SLinus Torvalds 	if (new_root != root) {
248bb4aa396SLinus Torvalds 		if (WARN_ON_ONCE(root))
2495a505085SIngo Molnar 			up_write(&root->rwsem);
250bb4aa396SLinus Torvalds 		root = new_root;
2515a505085SIngo Molnar 		down_write(&root->rwsem);
252bb4aa396SLinus Torvalds 	}
253bb4aa396SLinus Torvalds 	return root;
254bb4aa396SLinus Torvalds }
255bb4aa396SLinus Torvalds 
256bb4aa396SLinus Torvalds static inline void unlock_anon_vma_root(struct anon_vma *root)
257bb4aa396SLinus Torvalds {
258bb4aa396SLinus Torvalds 	if (root)
2595a505085SIngo Molnar 		up_write(&root->rwsem);
260bb4aa396SLinus Torvalds }
261bb4aa396SLinus Torvalds 
2625beb4930SRik van Riel /*
2635beb4930SRik van Riel  * Attach the anon_vmas from src to dst.
2645beb4930SRik van Riel  * Returns 0 on success, -ENOMEM on failure.
2657a3ef208SKonstantin Khlebnikov  *
2660503ea8fSLiam R. Howlett  * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(),
2670503ea8fSLiam R. Howlett  * copy_vma() and anon_vma_fork(). The first four want an exact copy of src,
2680503ea8fSLiam R. Howlett  * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to
2690503ea8fSLiam R. Howlett  * prevent endless growth of the anon_vma tree. Since dst->anon_vma is set to NULL
2700503ea8fSLiam R. Howlett  * before the call, we can identify this case by checking (!dst->anon_vma &&
2710503ea8fSLiam R. Howlett  * src->anon_vma).
27247b390d2SWei Yang  *
27347b390d2SWei Yang  * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
27447b390d2SWei Yang  * and reuse an existing anon_vma which has no vmas and only one child anon_vma.
27547b390d2SWei Yang  * This prevents the anon_vma hierarchy from degrading into an endless linear
27647b390d2SWei Yang  * chain under a constantly forking task. On the other hand, an anon_vma with
27747b390d2SWei Yang  * more than one child is not reused even if it has no live vma, so an rmap
27847b390d2SWei Yang  * walker has a good chance of avoiding a scan of the whole hierarchy when it
27947b390d2SWei Yang  * searches for where a page is mapped.
2805beb4930SRik van Riel  */
2815beb4930SRik van Riel int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
2825beb4930SRik van Riel {
2835beb4930SRik van Riel 	struct anon_vma_chain *avc, *pavc;
284bb4aa396SLinus Torvalds 	struct anon_vma *root = NULL;
2855beb4930SRik van Riel 
286646d87b4SLinus Torvalds 	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
287bb4aa396SLinus Torvalds 		struct anon_vma *anon_vma;
288bb4aa396SLinus Torvalds 
289dd34739cSLinus Torvalds 		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
290dd34739cSLinus Torvalds 		if (unlikely(!avc)) {
291dd34739cSLinus Torvalds 			unlock_anon_vma_root(root);
292dd34739cSLinus Torvalds 			root = NULL;
293dd34739cSLinus Torvalds 			avc = anon_vma_chain_alloc(GFP_KERNEL);
2945beb4930SRik van Riel 			if (!avc)
2955beb4930SRik van Riel 				goto enomem_failure;
296dd34739cSLinus Torvalds 		}
297bb4aa396SLinus Torvalds 		anon_vma = pavc->anon_vma;
298bb4aa396SLinus Torvalds 		root = lock_anon_vma_root(root, anon_vma);
299bb4aa396SLinus Torvalds 		anon_vma_chain_link(dst, avc, anon_vma);
3007a3ef208SKonstantin Khlebnikov 
3017a3ef208SKonstantin Khlebnikov 		/*
3022555283eSJann Horn 		 * Reuse existing anon_vma if it has no vma and only one
3032555283eSJann Horn 		 * anon_vma child.
3047a3ef208SKonstantin Khlebnikov 		 *
3052555283eSJann Horn 		 * Root anon_vma is never reused:
3067a3ef208SKonstantin Khlebnikov 		 * it has self-parent reference and at least one child.
3077a3ef208SKonstantin Khlebnikov 		 */
30847b390d2SWei Yang 		if (!dst->anon_vma && src->anon_vma &&
3092555283eSJann Horn 		    anon_vma->num_children < 2 &&
3102555283eSJann Horn 		    anon_vma->num_active_vmas == 0)
3117a3ef208SKonstantin Khlebnikov 			dst->anon_vma = anon_vma;
3125beb4930SRik van Riel 	}
3137a3ef208SKonstantin Khlebnikov 	if (dst->anon_vma)
3142555283eSJann Horn 		dst->anon_vma->num_active_vmas++;
315bb4aa396SLinus Torvalds 	unlock_anon_vma_root(root);
3165beb4930SRik van Riel 	return 0;
3175beb4930SRik van Riel 
3185beb4930SRik van Riel  enomem_failure:
3193fe89b3eSLeon Yu 	/*
320d8e454ebSMa Wupeng 	 * dst->anon_vma is dropped here otherwise its num_active_vmas can
321d8e454ebSMa Wupeng 	 * be incorrectly decremented in unlink_anon_vmas().
3223fe89b3eSLeon Yu 	 * We can safely do this because callers of anon_vma_clone() don't care
3233fe89b3eSLeon Yu 	 * about dst->anon_vma if anon_vma_clone() failed.
3243fe89b3eSLeon Yu 	 */
3253fe89b3eSLeon Yu 	dst->anon_vma = NULL;
3265beb4930SRik van Riel 	unlink_anon_vmas(dst);
3275beb4930SRik van Riel 	return -ENOMEM;
3281da177e4SLinus Torvalds }
3291da177e4SLinus Torvalds 
3305beb4930SRik van Riel /*
3315beb4930SRik van Riel  * Attach vma to its own anon_vma, as well as to the anon_vmas that
3325beb4930SRik van Riel  * the corresponding VMA in the parent process is attached to.
3335beb4930SRik van Riel  * Returns 0 on success, non-zero on failure.
3345beb4930SRik van Riel  */
3355beb4930SRik van Riel int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
3361da177e4SLinus Torvalds {
3375beb4930SRik van Riel 	struct anon_vma_chain *avc;
3385beb4930SRik van Riel 	struct anon_vma *anon_vma;
339c4ea95d7SDaniel Forrest 	int error;
3405beb4930SRik van Riel 
3415beb4930SRik van Riel 	/* Don't bother if the parent process has no anon_vma here. */
3425beb4930SRik van Riel 	if (!pvma->anon_vma)
3435beb4930SRik van Riel 		return 0;
3445beb4930SRik van Riel 
3457a3ef208SKonstantin Khlebnikov 	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
3467a3ef208SKonstantin Khlebnikov 	vma->anon_vma = NULL;
3477a3ef208SKonstantin Khlebnikov 
3485beb4930SRik van Riel 	/*
3495beb4930SRik van Riel 	 * First, attach the new VMA to the parent VMA's anon_vmas,
3505beb4930SRik van Riel 	 * so rmap can find non-COWed pages in child processes.
3515beb4930SRik van Riel 	 */
352c4ea95d7SDaniel Forrest 	error = anon_vma_clone(vma, pvma);
353c4ea95d7SDaniel Forrest 	if (error)
354c4ea95d7SDaniel Forrest 		return error;
3555beb4930SRik van Riel 
3567a3ef208SKonstantin Khlebnikov 	/* An existing anon_vma has been reused, all done then. */
3577a3ef208SKonstantin Khlebnikov 	if (vma->anon_vma)
3587a3ef208SKonstantin Khlebnikov 		return 0;
3597a3ef208SKonstantin Khlebnikov 
3605beb4930SRik van Riel 	/* Then add our own anon_vma. */
3615beb4930SRik van Riel 	anon_vma = anon_vma_alloc();
3625beb4930SRik van Riel 	if (!anon_vma)
3635beb4930SRik van Riel 		goto out_error;
3642555283eSJann Horn 	anon_vma->num_active_vmas++;
365dd34739cSLinus Torvalds 	avc = anon_vma_chain_alloc(GFP_KERNEL);
3665beb4930SRik van Riel 	if (!avc)
3675beb4930SRik van Riel 		goto out_error_free_anon_vma;
3685c341ee1SRik van Riel 
3695c341ee1SRik van Riel 	/*
370aaf1f990SMiaohe Lin 	 * The root anon_vma's rwsem is the lock actually used when we
3715c341ee1SRik van Riel 	 * lock any of the anon_vmas in this anon_vma tree.
3725c341ee1SRik van Riel 	 */
3735c341ee1SRik van Riel 	anon_vma->root = pvma->anon_vma->root;
3747a3ef208SKonstantin Khlebnikov 	anon_vma->parent = pvma->anon_vma;
37576545066SRik van Riel 	/*
37601d8b20dSPeter Zijlstra 	 * With refcounts, an anon_vma can stay around longer than the
37701d8b20dSPeter Zijlstra 	 * process it belongs to. The root anon_vma needs to be pinned until
37801d8b20dSPeter Zijlstra 	 * this anon_vma is freed, because the lock lives in the root.
37976545066SRik van Riel 	 */
38076545066SRik van Riel 	get_anon_vma(anon_vma->root);
3815beb4930SRik van Riel 	/* Mark this anon_vma as the one where our new (COWed) pages go. */
3825beb4930SRik van Riel 	vma->anon_vma = anon_vma;
3834fc3f1d6SIngo Molnar 	anon_vma_lock_write(anon_vma);
3845c341ee1SRik van Riel 	anon_vma_chain_link(vma, avc, anon_vma);
3852555283eSJann Horn 	anon_vma->parent->num_children++;
38608b52706SKonstantin Khlebnikov 	anon_vma_unlock_write(anon_vma);
3875beb4930SRik van Riel 
3885beb4930SRik van Riel 	return 0;
3895beb4930SRik van Riel 
3905beb4930SRik van Riel  out_error_free_anon_vma:
39101d8b20dSPeter Zijlstra 	put_anon_vma(anon_vma);
3925beb4930SRik van Riel  out_error:
3934946d54cSRik van Riel 	unlink_anon_vmas(vma);
3945beb4930SRik van Riel 	return -ENOMEM;
3955beb4930SRik van Riel }
3965beb4930SRik van Riel 
3975beb4930SRik van Riel void unlink_anon_vmas(struct vm_area_struct *vma)
3985beb4930SRik van Riel {
3995beb4930SRik van Riel 	struct anon_vma_chain *avc, *next;
400eee2acbaSPeter Zijlstra 	struct anon_vma *root = NULL;
4015beb4930SRik van Riel 
4025c341ee1SRik van Riel 	/*
4035c341ee1SRik van Riel 	 * Unlink each anon_vma chained to the VMA.  This list is ordered
4045c341ee1SRik van Riel 	 * from newest to oldest, ensuring the root anon_vma gets freed last.
4055c341ee1SRik van Riel 	 */
4065beb4930SRik van Riel 	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
407eee2acbaSPeter Zijlstra 		struct anon_vma *anon_vma = avc->anon_vma;
408eee2acbaSPeter Zijlstra 
409eee2acbaSPeter Zijlstra 		root = lock_anon_vma_root(root, anon_vma);
410bf181b9fSMichel Lespinasse 		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
411eee2acbaSPeter Zijlstra 
412eee2acbaSPeter Zijlstra 		/*
413eee2acbaSPeter Zijlstra 		 * Leave empty anon_vmas on the list - we'll need
414eee2acbaSPeter Zijlstra 		 * to free them outside the lock.
415eee2acbaSPeter Zijlstra 		 */
416f808c13fSDavidlohr Bueso 		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
4172555283eSJann Horn 			anon_vma->parent->num_children--;
418eee2acbaSPeter Zijlstra 			continue;
4197a3ef208SKonstantin Khlebnikov 		}
420eee2acbaSPeter Zijlstra 
421eee2acbaSPeter Zijlstra 		list_del(&avc->same_vma);
422eee2acbaSPeter Zijlstra 		anon_vma_chain_free(avc);
423eee2acbaSPeter Zijlstra 	}
424ee8ab190SLi Xinhai 	if (vma->anon_vma) {
4252555283eSJann Horn 		vma->anon_vma->num_active_vmas--;
426ee8ab190SLi Xinhai 
427ee8ab190SLi Xinhai 		/*
428ee8ab190SLi Xinhai 		 * The vma may still be used after unlink; a fresh anon_vma will be
429ee8ab190SLi Xinhai 		 * prepared when a fault is handled.
430ee8ab190SLi Xinhai 		 */
431ee8ab190SLi Xinhai 		vma->anon_vma = NULL;
432ee8ab190SLi Xinhai 	}
433eee2acbaSPeter Zijlstra 	unlock_anon_vma_root(root);
434eee2acbaSPeter Zijlstra 
435eee2acbaSPeter Zijlstra 	/*
436eee2acbaSPeter Zijlstra 	 * Iterate the list once more; it now only contains empty and unlinked
437eee2acbaSPeter Zijlstra 	 * anon_vmas, so destroy them. This could not be done earlier because
4385a505085SIngo Molnar 	 * __put_anon_vma() may need to write-acquire the anon_vma->root->rwsem.
439eee2acbaSPeter Zijlstra 	 */
440eee2acbaSPeter Zijlstra 	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
441eee2acbaSPeter Zijlstra 		struct anon_vma *anon_vma = avc->anon_vma;
442eee2acbaSPeter Zijlstra 
4432555283eSJann Horn 		VM_WARN_ON(anon_vma->num_children);
4442555283eSJann Horn 		VM_WARN_ON(anon_vma->num_active_vmas);
445eee2acbaSPeter Zijlstra 		put_anon_vma(anon_vma);
446eee2acbaSPeter Zijlstra 
4475beb4930SRik van Riel 		list_del(&avc->same_vma);
4485beb4930SRik van Riel 		anon_vma_chain_free(avc);
4495beb4930SRik van Riel 	}
4505beb4930SRik van Riel }
4515beb4930SRik van Riel 
45251cc5068SAlexey Dobriyan static void anon_vma_ctor(void *data)
4531da177e4SLinus Torvalds {
4541da177e4SLinus Torvalds 	struct anon_vma *anon_vma = data;
4551da177e4SLinus Torvalds 
4565a505085SIngo Molnar 	init_rwsem(&anon_vma->rwsem);
45783813267SPeter Zijlstra 	atomic_set(&anon_vma->refcount, 0);
458f808c13fSDavidlohr Bueso 	anon_vma->rb_root = RB_ROOT_CACHED;
4591da177e4SLinus Torvalds }
4601da177e4SLinus Torvalds 
4611da177e4SLinus Torvalds void __init anon_vma_init(void)
4621da177e4SLinus Torvalds {
4631da177e4SLinus Torvalds 	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
4645f0d5a3aSPaul E. McKenney 			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
4655d097056SVladimir Davydov 			anon_vma_ctor);
4665d097056SVladimir Davydov 	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
4675d097056SVladimir Davydov 			SLAB_PANIC|SLAB_ACCOUNT);
4681da177e4SLinus Torvalds }
4691da177e4SLinus Torvalds 
4701da177e4SLinus Torvalds /*
4716111e4caSPeter Zijlstra  * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
4726111e4caSPeter Zijlstra  *
4736111e4caSPeter Zijlstra  * Since there is no serialization whatsoever against page_remove_rmap(),
474ad8a20cfSMiaohe Lin  * the best this function can do is return an anon_vma, with its refcount
475ad8a20cfSMiaohe Lin  * raised, that might have been relevant to this page.
4766111e4caSPeter Zijlstra  *
4776111e4caSPeter Zijlstra  * The page might have been remapped to a different anon_vma or the anon_vma
4786111e4caSPeter Zijlstra  * returned may already be freed (and even reused).
4796111e4caSPeter Zijlstra  *
480bc658c96SPeter Zijlstra  * In case it was remapped to a different anon_vma, the new anon_vma will be a
481bc658c96SPeter Zijlstra  * child of the old anon_vma, and the anon_vma lifetime rules will therefore
482bc658c96SPeter Zijlstra  * ensure that any anon_vma obtained from the page will still be valid for as
483bc658c96SPeter Zijlstra  * long as we observe page_mapped() [ hence all those page_mapped() tests ].
484bc658c96SPeter Zijlstra  *
4856111e4caSPeter Zijlstra  * All users of this function must be very careful when walking the anon_vma
4866111e4caSPeter Zijlstra  * chain and verify that the page in question is indeed mapped in it
4876111e4caSPeter Zijlstra  * [ something equivalent to page_mapped_in_vma() ].
4886111e4caSPeter Zijlstra  *
489091e4299SMiles Chen  * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
490091e4299SMiles Chen  * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
491091e4299SMiles Chen  * if there is a mapcount, we can dereference the anon_vma after observing
492091e4299SMiles Chen  * those.
4931da177e4SLinus Torvalds  */
49429eea9b5SMatthew Wilcox (Oracle) struct anon_vma *folio_get_anon_vma(struct folio *folio)
4951da177e4SLinus Torvalds {
496746b18d4SPeter Zijlstra 	struct anon_vma *anon_vma = NULL;
4971da177e4SLinus Torvalds 	unsigned long anon_mapping;
4981da177e4SLinus Torvalds 
4991da177e4SLinus Torvalds 	rcu_read_lock();
50029eea9b5SMatthew Wilcox (Oracle) 	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
5013ca7b3c5SHugh Dickins 	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
5021da177e4SLinus Torvalds 		goto out;
50329eea9b5SMatthew Wilcox (Oracle) 	if (!folio_mapped(folio))
5041da177e4SLinus Torvalds 		goto out;
5051da177e4SLinus Torvalds 
5061da177e4SLinus Torvalds 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
507746b18d4SPeter Zijlstra 	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
508746b18d4SPeter Zijlstra 		anon_vma = NULL;
509746b18d4SPeter Zijlstra 		goto out;
510746b18d4SPeter Zijlstra 	}
511f1819427SHugh Dickins 
512f1819427SHugh Dickins 	/*
51329eea9b5SMatthew Wilcox (Oracle) 	 * If this folio is still mapped, then its anon_vma cannot have been
514746b18d4SPeter Zijlstra 	 * freed.  But if it has been unmapped, we have no security against the
515746b18d4SPeter Zijlstra 	 * anon_vma structure being freed and reused (for another anon_vma:
5165f0d5a3aSPaul E. McKenney 	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
517746b18d4SPeter Zijlstra 	 * above cannot corrupt).
518f1819427SHugh Dickins 	 */
51929eea9b5SMatthew Wilcox (Oracle) 	if (!folio_mapped(folio)) {
5207f39dda9SHugh Dickins 		rcu_read_unlock();
521746b18d4SPeter Zijlstra 		put_anon_vma(anon_vma);
5227f39dda9SHugh Dickins 		return NULL;
523746b18d4SPeter Zijlstra 	}
5241da177e4SLinus Torvalds out:
5251da177e4SLinus Torvalds 	rcu_read_unlock();
526746b18d4SPeter Zijlstra 
527746b18d4SPeter Zijlstra 	return anon_vma;
528746b18d4SPeter Zijlstra }
529746b18d4SPeter Zijlstra 
53088c22088SPeter Zijlstra /*
53129eea9b5SMatthew Wilcox (Oracle)  * Similar to folio_get_anon_vma() except it locks the anon_vma.
53288c22088SPeter Zijlstra  *
53388c22088SPeter Zijlstra  * It's a little more complex as it tries to keep the fast path to a single
53488c22088SPeter Zijlstra  * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
53529eea9b5SMatthew Wilcox (Oracle)  * reference like with folio_get_anon_vma() and then block on the anon_vma
5366d4675e6SMinchan Kim  * lock in the !rwc->try_lock case.
53788c22088SPeter Zijlstra  */
5386d4675e6SMinchan Kim struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
5396d4675e6SMinchan Kim 					  struct rmap_walk_control *rwc)
540746b18d4SPeter Zijlstra {
54188c22088SPeter Zijlstra 	struct anon_vma *anon_vma = NULL;
542eee0f252SHugh Dickins 	struct anon_vma *root_anon_vma;
54388c22088SPeter Zijlstra 	unsigned long anon_mapping;
544746b18d4SPeter Zijlstra 
545*880a99b6SAndrea Arcangeli retry:
54688c22088SPeter Zijlstra 	rcu_read_lock();
5479595d769SMatthew Wilcox (Oracle) 	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
54888c22088SPeter Zijlstra 	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
54988c22088SPeter Zijlstra 		goto out;
5509595d769SMatthew Wilcox (Oracle) 	if (!folio_mapped(folio))
55188c22088SPeter Zijlstra 		goto out;
55288c22088SPeter Zijlstra 
55388c22088SPeter Zijlstra 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
5544db0c3c2SJason Low 	root_anon_vma = READ_ONCE(anon_vma->root);
5554fc3f1d6SIngo Molnar 	if (down_read_trylock(&root_anon_vma->rwsem)) {
55688c22088SPeter Zijlstra 		/*
557*880a99b6SAndrea Arcangeli 		 * folio_move_anon_rmap() might have changed the anon_vma as we
558*880a99b6SAndrea Arcangeli 		 * might not hold the folio lock here.
559*880a99b6SAndrea Arcangeli 		 */
560*880a99b6SAndrea Arcangeli 		if (unlikely((unsigned long)READ_ONCE(folio->mapping) !=
561*880a99b6SAndrea Arcangeli 			     anon_mapping)) {
562*880a99b6SAndrea Arcangeli 			up_read(&root_anon_vma->rwsem);
563*880a99b6SAndrea Arcangeli 			rcu_read_unlock();
564*880a99b6SAndrea Arcangeli 			goto retry;
565*880a99b6SAndrea Arcangeli 		}
566*880a99b6SAndrea Arcangeli 
567*880a99b6SAndrea Arcangeli 		/*
5689595d769SMatthew Wilcox (Oracle) 		 * If the folio is still mapped, then this anon_vma is still
569eee0f252SHugh Dickins 		 * its anon_vma, and holding the rwsem ensures that it will
570bc658c96SPeter Zijlstra 		 * not go away, see anon_vma_free().
57188c22088SPeter Zijlstra 		 */
5729595d769SMatthew Wilcox (Oracle) 		if (!folio_mapped(folio)) {
5734fc3f1d6SIngo Molnar 			up_read(&root_anon_vma->rwsem);
57488c22088SPeter Zijlstra 			anon_vma = NULL;
57588c22088SPeter Zijlstra 		}
57688c22088SPeter Zijlstra 		goto out;
57788c22088SPeter Zijlstra 	}
57888c22088SPeter Zijlstra 
5796d4675e6SMinchan Kim 	if (rwc && rwc->try_lock) {
5806d4675e6SMinchan Kim 		anon_vma = NULL;
5816d4675e6SMinchan Kim 		rwc->contended = true;
5826d4675e6SMinchan Kim 		goto out;
5836d4675e6SMinchan Kim 	}
5846d4675e6SMinchan Kim 
58588c22088SPeter Zijlstra 	/* trylock failed, we have to sleep */
58688c22088SPeter Zijlstra 	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
58788c22088SPeter Zijlstra 		anon_vma = NULL;
58888c22088SPeter Zijlstra 		goto out;
58988c22088SPeter Zijlstra 	}
59088c22088SPeter Zijlstra 
5919595d769SMatthew Wilcox (Oracle) 	if (!folio_mapped(folio)) {
5927f39dda9SHugh Dickins 		rcu_read_unlock();
59388c22088SPeter Zijlstra 		put_anon_vma(anon_vma);
5947f39dda9SHugh Dickins 		return NULL;
59588c22088SPeter Zijlstra 	}
59688c22088SPeter Zijlstra 
59788c22088SPeter Zijlstra 	/* we pinned the anon_vma, it's safe to sleep */
59888c22088SPeter Zijlstra 	rcu_read_unlock();
5994fc3f1d6SIngo Molnar 	anon_vma_lock_read(anon_vma);
600746b18d4SPeter Zijlstra 
601*880a99b6SAndrea Arcangeli 	/*
602*880a99b6SAndrea Arcangeli 	 * folio_move_anon_rmap() might have changed the anon_vma as we might
603*880a99b6SAndrea Arcangeli 	 * not hold the folio lock here.
604*880a99b6SAndrea Arcangeli 	 */
605*880a99b6SAndrea Arcangeli 	if (unlikely((unsigned long)READ_ONCE(folio->mapping) !=
606*880a99b6SAndrea Arcangeli 		     anon_mapping)) {
607*880a99b6SAndrea Arcangeli 		anon_vma_unlock_read(anon_vma);
608*880a99b6SAndrea Arcangeli 		put_anon_vma(anon_vma);
609*880a99b6SAndrea Arcangeli 		anon_vma = NULL;
610*880a99b6SAndrea Arcangeli 		goto retry;
611*880a99b6SAndrea Arcangeli 	}
612*880a99b6SAndrea Arcangeli 
61388c22088SPeter Zijlstra 	if (atomic_dec_and_test(&anon_vma->refcount)) {
61488c22088SPeter Zijlstra 		/*
61588c22088SPeter Zijlstra 		 * Oops, we held the last refcount, release the lock
61688c22088SPeter Zijlstra 		 * and bail -- can't simply use put_anon_vma() because
6174fc3f1d6SIngo Molnar 		 * we'll deadlock on the anon_vma_lock_write() recursion.
61888c22088SPeter Zijlstra 		 */
6194fc3f1d6SIngo Molnar 		anon_vma_unlock_read(anon_vma);
62088c22088SPeter Zijlstra 		__put_anon_vma(anon_vma);
62188c22088SPeter Zijlstra 		anon_vma = NULL;
62288c22088SPeter Zijlstra 	}
62388c22088SPeter Zijlstra 
62488c22088SPeter Zijlstra 	return anon_vma;
62588c22088SPeter Zijlstra 
62688c22088SPeter Zijlstra out:
62788c22088SPeter Zijlstra 	rcu_read_unlock();
628746b18d4SPeter Zijlstra 	return anon_vma;
62934bbd704SOleg Nesterov }
63034bbd704SOleg Nesterov 
63172b252aeSMel Gorman #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
63272b252aeSMel Gorman /*
63372b252aeSMel Gorman  * Flush TLB entries for recently unmapped pages from remote CPUs. If a PTE
63472b252aeSMel Gorman  * was dirty when it was unmapped, it is important that it is flushed before
63572b252aeSMel Gorman  * any IO is initiated on the page, to prevent lost writes. Similarly, it
63672b252aeSMel Gorman  * must be flushed before the page is freed, to prevent data leakage.
63772b252aeSMel Gorman  */
63872b252aeSMel Gorman void try_to_unmap_flush(void)
63972b252aeSMel Gorman {
64072b252aeSMel Gorman 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
64172b252aeSMel Gorman 
64272b252aeSMel Gorman 	if (!tlb_ubc->flush_required)
64372b252aeSMel Gorman 		return;
64472b252aeSMel Gorman 
645e73ad5ffSAndy Lutomirski 	arch_tlbbatch_flush(&tlb_ubc->arch);
64672b252aeSMel Gorman 	tlb_ubc->flush_required = false;
647d950c947SMel Gorman 	tlb_ubc->writable = false;
64872b252aeSMel Gorman }
64972b252aeSMel Gorman 
650d950c947SMel Gorman /* Flush iff there are potentially writable TLB entries that can race with IO */
651d950c947SMel Gorman void try_to_unmap_flush_dirty(void)
652d950c947SMel Gorman {
653d950c947SMel Gorman 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
654d950c947SMel Gorman 
655d950c947SMel Gorman 	if (tlb_ubc->writable)
656d950c947SMel Gorman 		try_to_unmap_flush();
657d950c947SMel Gorman }
658d950c947SMel Gorman 
6595ee2fa2fSHuang Ying /*
6605ee2fa2fSHuang Ying  * Bits 0-14 of mm->tlb_flush_batched record pending generations.
6615ee2fa2fSHuang Ying  * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
6625ee2fa2fSHuang Ying  */
6635ee2fa2fSHuang Ying #define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
6645ee2fa2fSHuang Ying #define TLB_FLUSH_BATCH_PENDING_MASK			\
6655ee2fa2fSHuang Ying 	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
6665ee2fa2fSHuang Ying #define TLB_FLUSH_BATCH_PENDING_LARGE			\
6675ee2fa2fSHuang Ying 	(TLB_FLUSH_BATCH_PENDING_MASK / 2)
6685ee2fa2fSHuang Ying 
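/*
 * Editor's sketch (not part of rmap.c): a worked example of the encoding
 * above, assuming TLB_FLUSH_BATCH_FLUSHED_SHIFT == 16.  The low 15 bits
 * count deferred ("pending") flush generations, bits 16-30 count the
 * generations that have actually been flushed:
 *
 *	int batch = atomic_read(&mm->tlb_flush_batched);	// e.g. 0x00050007
 *	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;	// 7 queued
 *	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;	// 5 flushed
 *
 * pending != flushed means two generations of deferred flushes have not
 * reached the hardware yet, so flush_tlb_batched_pending() must flush.
 */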
669f73419bbSBarry Song static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
670f73419bbSBarry Song 				      unsigned long uaddr)
67172b252aeSMel Gorman {
67272b252aeSMel Gorman 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
673bdeb9188SUros Bizjak 	int batch;
6744d4b6d66SHuang Ying 	bool writable = pte_dirty(pteval);
6754d4b6d66SHuang Ying 
6764d4b6d66SHuang Ying 	if (!pte_accessible(mm, pteval))
6774d4b6d66SHuang Ying 		return;
67872b252aeSMel Gorman 
679f73419bbSBarry Song 	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
68072b252aeSMel Gorman 	tlb_ubc->flush_required = true;
681d950c947SMel Gorman 
682d950c947SMel Gorman 	/*
6833ea27719SMel Gorman 	 * Ensure compiler does not re-order the setting of tlb_flush_batched
6843ea27719SMel Gorman 	 * before the PTE is cleared.
6853ea27719SMel Gorman 	 */
6863ea27719SMel Gorman 	barrier();
6875ee2fa2fSHuang Ying 	batch = atomic_read(&mm->tlb_flush_batched);
6885ee2fa2fSHuang Ying retry:
6895ee2fa2fSHuang Ying 	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
6905ee2fa2fSHuang Ying 		/*
6915ee2fa2fSHuang Ying 		 * Prevent `pending' from catching up with `flushed' because of
6925ee2fa2fSHuang Ying 		 * overflow.  Reset `pending' and `flushed' to be 1 and 0 if
6935ee2fa2fSHuang Ying 		 * `pending' becomes large.
6945ee2fa2fSHuang Ying 		 */
695bdeb9188SUros Bizjak 		if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1))
6965ee2fa2fSHuang Ying 			goto retry;
6975ee2fa2fSHuang Ying 	} else {
6985ee2fa2fSHuang Ying 		atomic_inc(&mm->tlb_flush_batched);
6995ee2fa2fSHuang Ying 	}
7003ea27719SMel Gorman 
7013ea27719SMel Gorman 	/*
702d950c947SMel Gorman 	 * If the PTE was dirty then it's best to assume it's writable. The
703d950c947SMel Gorman 	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
704d950c947SMel Gorman 	 * before the page is queued for IO.
705d950c947SMel Gorman 	 */
706d950c947SMel Gorman 	if (writable)
707d950c947SMel Gorman 		tlb_ubc->writable = true;
70872b252aeSMel Gorman }
70972b252aeSMel Gorman 
71072b252aeSMel Gorman /*
71172b252aeSMel Gorman  * Returns true if the TLB flush should be deferred to the end of a batch of
71272b252aeSMel Gorman  * unmap operations to reduce IPIs.
71372b252aeSMel Gorman  */
71472b252aeSMel Gorman static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
71572b252aeSMel Gorman {
71672b252aeSMel Gorman 	if (!(flags & TTU_BATCH_FLUSH))
71772b252aeSMel Gorman 		return false;
71872b252aeSMel Gorman 
71965c8d30eSAnshuman Khandual 	return arch_tlbbatch_should_defer(mm);
72072b252aeSMel Gorman }
7213ea27719SMel Gorman 
7223ea27719SMel Gorman /*
7233ea27719SMel Gorman  * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
7243ea27719SMel Gorman  * releasing the PTL if TLB flushes are batched. It's possible for a parallel
7253ea27719SMel Gorman  * operation such as mprotect or munmap to race between reclaim unmapping
7263ea27719SMel Gorman  * the page and flushing the page. If this race occurs, it potentially allows
7273ea27719SMel Gorman  * access to data via a stale TLB entry. Tracking all mm's that have TLB
7283ea27719SMel Gorman  * batching in flight would be expensive during reclaim so instead track
7293ea27719SMel Gorman  * whether TLB batching occurred in the past and if so then do a flush here
7303ea27719SMel Gorman  * if required. This will cost one additional flush per reclaim cycle paid
7313ea27719SMel Gorman  * by the first operation at risk, such as mprotect and munmap.
7323ea27719SMel Gorman  *
7333ea27719SMel Gorman  * This must be called under the PTL so that an access to tlb_flush_batched
7343ea27719SMel Gorman  * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
7353ea27719SMel Gorman  * via the PTL.
7363ea27719SMel Gorman  */
7373ea27719SMel Gorman void flush_tlb_batched_pending(struct mm_struct *mm)
7383ea27719SMel Gorman {
7395ee2fa2fSHuang Ying 	int batch = atomic_read(&mm->tlb_flush_batched);
7405ee2fa2fSHuang Ying 	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
7415ee2fa2fSHuang Ying 	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;
7423ea27719SMel Gorman 
7435ee2fa2fSHuang Ying 	if (pending != flushed) {
744db6c1f6fSYicong Yang 		arch_flush_tlb_batched_pending(mm);
7453ea27719SMel Gorman 		/*
7465ee2fa2fSHuang Ying 		 * If new TLB flushes became pending while we were flushing, leave
7475ee2fa2fSHuang Ying 		 * mm->tlb_flush_batched as is, to avoid losing them.
7483ea27719SMel Gorman 		 */
7495ee2fa2fSHuang Ying 		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
7505ee2fa2fSHuang Ying 			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
7513ea27719SMel Gorman 	}
7523ea27719SMel Gorman }
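/*
 * Editor's sketch (illustrative only, loosely following the usual
 * zap/mprotect call pattern rather than quoting it): a page-table walker
 * that modifies PTEs is expected to call flush_tlb_batched_pending()
 * right after taking the PTL, so that a reclaim-batched flush cannot leave
 * a stale, writable TLB entry alive across its own modification:
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	if (!pte)
 *		return;
 *	flush_tlb_batched_pending(mm);	// close the race described above
 *	... modify or clear PTEs ...
 *	pte_unmap_unlock(pte, ptl);
 */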
75372b252aeSMel Gorman #else
754f73419bbSBarry Song static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
755f73419bbSBarry Song 				      unsigned long uaddr)
75672b252aeSMel Gorman {
75772b252aeSMel Gorman }
75872b252aeSMel Gorman 
75972b252aeSMel Gorman static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
76072b252aeSMel Gorman {
76172b252aeSMel Gorman 	return false;
76272b252aeSMel Gorman }
76372b252aeSMel Gorman #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
76472b252aeSMel Gorman 
7651da177e4SLinus Torvalds /*
766bf89c8c8SHuang Shijie  * At what user virtual address is page expected in vma?
767ab941e0fSNaoya Horiguchi  * Caller should check the page is actually part of the vma.
7681da177e4SLinus Torvalds  */
7691da177e4SLinus Torvalds unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
7701da177e4SLinus Torvalds {
771e05b3453SMatthew Wilcox (Oracle) 	struct folio *folio = page_folio(page);
772e05b3453SMatthew Wilcox (Oracle) 	if (folio_test_anon(folio)) {
773e05b3453SMatthew Wilcox (Oracle) 		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
7744829b906SHugh Dickins 		/*
7754829b906SHugh Dickins 		 * Note: swapoff's unuse_vma() is more efficient with this
7764829b906SHugh Dickins 		 * check, and needs it to match anon_vma when KSM is active.
7774829b906SHugh Dickins 		 */
7784829b906SHugh Dickins 		if (!vma->anon_vma || !page__anon_vma ||
7794829b906SHugh Dickins 		    vma->anon_vma->root != page__anon_vma->root)
78021d0d443SAndrea Arcangeli 			return -EFAULT;
78131657170SJue Wang 	} else if (!vma->vm_file) {
7821da177e4SLinus Torvalds 		return -EFAULT;
783e05b3453SMatthew Wilcox (Oracle) 	} else if (vma->vm_file->f_mapping != folio->mapping) {
7841da177e4SLinus Torvalds 		return -EFAULT;
78531657170SJue Wang 	}
786494334e4SHugh Dickins 
787494334e4SHugh Dickins 	return vma_address(page, vma);
7881da177e4SLinus Torvalds }
7891da177e4SLinus Torvalds 
79050722804SZach O'Keefe /*
79150722804SZach O'Keefe  * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
79250722804SZach O'Keefe  * NULL if it doesn't exist.  No guarantees / checks on what the pmd_t*
79350722804SZach O'Keefe  * represents.
79450722804SZach O'Keefe  */
7956219049aSBob Liu pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
7966219049aSBob Liu {
7976219049aSBob Liu 	pgd_t *pgd;
798c2febafcSKirill A. Shutemov 	p4d_t *p4d;
7996219049aSBob Liu 	pud_t *pud;
8006219049aSBob Liu 	pmd_t *pmd = NULL;
8016219049aSBob Liu 
8026219049aSBob Liu 	pgd = pgd_offset(mm, address);
8036219049aSBob Liu 	if (!pgd_present(*pgd))
8046219049aSBob Liu 		goto out;
8056219049aSBob Liu 
806c2febafcSKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
807c2febafcSKirill A. Shutemov 	if (!p4d_present(*p4d))
808c2febafcSKirill A. Shutemov 		goto out;
809c2febafcSKirill A. Shutemov 
810c2febafcSKirill A. Shutemov 	pud = pud_offset(p4d, address);
8116219049aSBob Liu 	if (!pud_present(*pud))
8126219049aSBob Liu 		goto out;
8136219049aSBob Liu 
8146219049aSBob Liu 	pmd = pmd_offset(pud, address);
8156219049aSBob Liu out:
8166219049aSBob Liu 	return pmd;
8176219049aSBob Liu }
8186219049aSBob Liu 
819b3ac0413SMatthew Wilcox (Oracle) struct folio_referenced_arg {
8209f32624bSJoonsoo Kim 	int mapcount;
8219f32624bSJoonsoo Kim 	int referenced;
8229f32624bSJoonsoo Kim 	unsigned long vm_flags;
8239f32624bSJoonsoo Kim 	struct mem_cgroup *memcg;
8249f32624bSJoonsoo Kim };
8251acbc3f9SYin Fengwei 
82681b4082dSNikita Danilov /*
827b3ac0413SMatthew Wilcox (Oracle)  * arg: folio_referenced_arg will be passed
8281da177e4SLinus Torvalds  */
8292f031c6fSMatthew Wilcox (Oracle) static bool folio_referenced_one(struct folio *folio,
8302f031c6fSMatthew Wilcox (Oracle) 		struct vm_area_struct *vma, unsigned long address, void *arg)
8311da177e4SLinus Torvalds {
832b3ac0413SMatthew Wilcox (Oracle) 	struct folio_referenced_arg *pra = arg;
833b3ac0413SMatthew Wilcox (Oracle) 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
8348749cfeaSVladimir Davydov 	int referenced = 0;
8351acbc3f9SYin Fengwei 	unsigned long start = address, ptes = 0;
8362da28bfdSAndrea Arcangeli 
8378eaededeSKirill A. Shutemov 	while (page_vma_mapped_walk(&pvmw)) {
8388eaededeSKirill A. Shutemov 		address = pvmw.address;
8392da28bfdSAndrea Arcangeli 
8401acbc3f9SYin Fengwei 		if (vma->vm_flags & VM_LOCKED) {
8411acbc3f9SYin Fengwei 			if (!folio_test_large(folio) || !pvmw.pte) {
84247d4f3eeSHugh Dickins 				/* Restore the mlock which got missed */
8431acbc3f9SYin Fengwei 				mlock_vma_folio(folio, vma);
8448eaededeSKirill A. Shutemov 				page_vma_mapped_walk_done(&pvmw);
8459f32624bSJoonsoo Kim 				pra->vm_flags |= VM_LOCKED;
846e4b82222SMinchan Kim 				return false; /* To break the loop */
8472da28bfdSAndrea Arcangeli 			}
8481acbc3f9SYin Fengwei 			/*
8491acbc3f9SYin Fengwei 			 * A large folio that is fully mapped into the VMA
8501acbc3f9SYin Fengwei 			 * is handled after the pvmw loop.
8511acbc3f9SYin Fengwei 			 *
8521acbc3f9SYin Fengwei 			 * A large folio that crosses VMA boundaries is
8531acbc3f9SYin Fengwei 			 * expected to be picked up by page reclaim, but
8541acbc3f9SYin Fengwei 			 * we should skip references to pages which lie in
8551acbc3f9SYin Fengwei 			 * the range of a VM_LOCKED vma: page reclaim
8561acbc3f9SYin Fengwei 			 * should only count references to pages outside
8571acbc3f9SYin Fengwei 			 * the range of the VM_LOCKED vma.
8581acbc3f9SYin Fengwei 			 */
8591acbc3f9SYin Fengwei 			ptes++;
8601acbc3f9SYin Fengwei 			pra->mapcount--;
8611acbc3f9SYin Fengwei 			continue;
8621acbc3f9SYin Fengwei 		}
8632da28bfdSAndrea Arcangeli 
8648eaededeSKirill A. Shutemov 		if (pvmw.pte) {
865c33c7948SRyan Roberts 			if (lru_gen_enabled() &&
866c33c7948SRyan Roberts 			    pte_young(ptep_get(pvmw.pte))) {
867018ee47fSYu Zhao 				lru_gen_look_around(&pvmw);
868018ee47fSYu Zhao 				referenced++;
869018ee47fSYu Zhao 			}
870018ee47fSYu Zhao 
8718eaededeSKirill A. Shutemov 			if (ptep_clear_flush_young_notify(vma, address,
8728788f678SYu Zhao 						pvmw.pte))
8731da177e4SLinus Torvalds 				referenced++;
8748749cfeaSVladimir Davydov 		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
8758eaededeSKirill A. Shutemov 			if (pmdp_clear_flush_young_notify(vma, address,
8768eaededeSKirill A. Shutemov 						pvmw.pmd))
8778749cfeaSVladimir Davydov 				referenced++;
8788749cfeaSVladimir Davydov 		} else {
879b3ac0413SMatthew Wilcox (Oracle) 			/* unexpected pmd-mapped folio? */
8808749cfeaSVladimir Davydov 			WARN_ON_ONCE(1);
8818749cfeaSVladimir Davydov 		}
8828eaededeSKirill A. Shutemov 
8838eaededeSKirill A. Shutemov 		pra->mapcount--;
8848eaededeSKirill A. Shutemov 	}
88571e3aac0SAndrea Arcangeli 
8861acbc3f9SYin Fengwei 	if ((vma->vm_flags & VM_LOCKED) &&
8871acbc3f9SYin Fengwei 			folio_test_large(folio) &&
8881acbc3f9SYin Fengwei 			folio_within_vma(folio, vma)) {
8891acbc3f9SYin Fengwei 		unsigned long s_align, e_align;
8901acbc3f9SYin Fengwei 
8911acbc3f9SYin Fengwei 		s_align = ALIGN_DOWN(start, PMD_SIZE);
8921acbc3f9SYin Fengwei 		e_align = ALIGN_DOWN(start + folio_size(folio) - 1, PMD_SIZE);
8931acbc3f9SYin Fengwei 
8941acbc3f9SYin Fengwei 		/* folio doesn't cross a page table boundary and is fully mapped */
8951acbc3f9SYin Fengwei 		if ((s_align == e_align) && (ptes == folio_nr_pages(folio))) {
8961acbc3f9SYin Fengwei 			/* Restore the mlock which got missed */
8971acbc3f9SYin Fengwei 			mlock_vma_folio(folio, vma);
8981acbc3f9SYin Fengwei 			pra->vm_flags |= VM_LOCKED;
8991acbc3f9SYin Fengwei 			return false; /* To break the loop */
9001acbc3f9SYin Fengwei 		}
9011acbc3f9SYin Fengwei 	}
9021acbc3f9SYin Fengwei 
90333c3fc71SVladimir Davydov 	if (referenced)
904b3ac0413SMatthew Wilcox (Oracle) 		folio_clear_idle(folio);
905b3ac0413SMatthew Wilcox (Oracle) 	if (folio_test_clear_young(folio))
90633c3fc71SVladimir Davydov 		referenced++;
90733c3fc71SVladimir Davydov 
9089f32624bSJoonsoo Kim 	if (referenced) {
9099f32624bSJoonsoo Kim 		pra->referenced++;
91047d4f3eeSHugh Dickins 		pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
9111da177e4SLinus Torvalds 	}
9121da177e4SLinus Torvalds 
9139f32624bSJoonsoo Kim 	if (!pra->mapcount)
914e4b82222SMinchan Kim 		return false; /* To break the loop */
9159f32624bSJoonsoo Kim 
916e4b82222SMinchan Kim 	return true;
9179f32624bSJoonsoo Kim }
9189f32624bSJoonsoo Kim 
919b3ac0413SMatthew Wilcox (Oracle) static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
9201da177e4SLinus Torvalds {
921b3ac0413SMatthew Wilcox (Oracle) 	struct folio_referenced_arg *pra = arg;
9229f32624bSJoonsoo Kim 	struct mem_cgroup *memcg = pra->memcg;
9231da177e4SLinus Torvalds 
9248788f678SYu Zhao 	/*
9258788f678SYu Zhao 	 * Ignore references from this mapping if it has no recency. If the
9268788f678SYu Zhao 	 * folio has been used in another mapping, we will catch it; if this
9278788f678SYu Zhao 	 * other mapping is already gone, the unmap path will have set the
9288788f678SYu Zhao 	 * referenced flag or activated the folio in zap_pte_range().
9298788f678SYu Zhao 	 */
9308788f678SYu Zhao 	if (!vma_has_recency(vma))
9318788f678SYu Zhao 		return true;
9328788f678SYu Zhao 
9338788f678SYu Zhao 	/*
9348788f678SYu Zhao 	 * If we are reclaiming on behalf of a cgroup, skip counting
9358788f678SYu Zhao 	 * references from other cgroups.
9368788f678SYu Zhao 	 */
9378788f678SYu Zhao 	if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
9389f32624bSJoonsoo Kim 		return true;
9391da177e4SLinus Torvalds 
9409f32624bSJoonsoo Kim 	return false;
9411da177e4SLinus Torvalds }
9421da177e4SLinus Torvalds 
9431da177e4SLinus Torvalds /**
944b3ac0413SMatthew Wilcox (Oracle)  * folio_referenced() - Test if the folio was referenced.
945b3ac0413SMatthew Wilcox (Oracle)  * @folio: The folio to test.
946b3ac0413SMatthew Wilcox (Oracle)  * @is_locked: Caller holds lock on the folio.
94772835c86SJohannes Weiner  * @memcg: target memory cgroup
948b3ac0413SMatthew Wilcox (Oracle)  * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
9491da177e4SLinus Torvalds  *
950b3ac0413SMatthew Wilcox (Oracle)  * Quick test_and_clear_referenced for all mappings of a folio.
951b3ac0413SMatthew Wilcox (Oracle)  *
9526d4675e6SMinchan Kim  * Return: The number of mappings which referenced the folio. Return -1 if
9536d4675e6SMinchan Kim  * the function bailed out due to rmap lock contention.
9541da177e4SLinus Torvalds  */
955b3ac0413SMatthew Wilcox (Oracle) int folio_referenced(struct folio *folio, int is_locked,
956b3ac0413SMatthew Wilcox (Oracle) 		     struct mem_cgroup *memcg, unsigned long *vm_flags)
9571da177e4SLinus Torvalds {
9585ad64688SHugh Dickins 	int we_locked = 0;
959b3ac0413SMatthew Wilcox (Oracle) 	struct folio_referenced_arg pra = {
960b3ac0413SMatthew Wilcox (Oracle) 		.mapcount = folio_mapcount(folio),
9619f32624bSJoonsoo Kim 		.memcg = memcg,
9629f32624bSJoonsoo Kim 	};
9639f32624bSJoonsoo Kim 	struct rmap_walk_control rwc = {
964b3ac0413SMatthew Wilcox (Oracle) 		.rmap_one = folio_referenced_one,
9659f32624bSJoonsoo Kim 		.arg = (void *)&pra,
9662f031c6fSMatthew Wilcox (Oracle) 		.anon_lock = folio_lock_anon_vma_read,
9676d4675e6SMinchan Kim 		.try_lock = true,
9688788f678SYu Zhao 		.invalid_vma = invalid_folio_referenced_vma,
9699f32624bSJoonsoo Kim 	};
9701da177e4SLinus Torvalds 
9716fe6b7e3SWu Fengguang 	*vm_flags = 0;
972059d8442SHuang Shijie 	if (!pra.mapcount)
9739f32624bSJoonsoo Kim 		return 0;
9749f32624bSJoonsoo Kim 
975b3ac0413SMatthew Wilcox (Oracle) 	if (!folio_raw_mapping(folio))
9769f32624bSJoonsoo Kim 		return 0;
9779f32624bSJoonsoo Kim 
978b3ac0413SMatthew Wilcox (Oracle) 	if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
979b3ac0413SMatthew Wilcox (Oracle) 		we_locked = folio_trylock(folio);
9809f32624bSJoonsoo Kim 		if (!we_locked)
9819f32624bSJoonsoo Kim 			return 1;
9825ad64688SHugh Dickins 	}
9839f32624bSJoonsoo Kim 
9842f031c6fSMatthew Wilcox (Oracle) 	rmap_walk(folio, &rwc);
9859f32624bSJoonsoo Kim 	*vm_flags = pra.vm_flags;
9869f32624bSJoonsoo Kim 
9875ad64688SHugh Dickins 	if (we_locked)
988b3ac0413SMatthew Wilcox (Oracle) 		folio_unlock(folio);
9899f32624bSJoonsoo Kim 
9906d4675e6SMinchan Kim 	return rwc.contended ? -1 : pra.referenced;
9911da177e4SLinus Torvalds }
9921da177e4SLinus Torvalds 
9936a8e0596SMuchun Song static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
994d08b3851SPeter Zijlstra {
9956a8e0596SMuchun Song 	int cleaned = 0;
9966a8e0596SMuchun Song 	struct vm_area_struct *vma = pvmw->vma;
997ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
9986a8e0596SMuchun Song 	unsigned long address = pvmw->address;
999d08b3851SPeter Zijlstra 
1000369ea824SJérôme Glisse 	/*
1001369ea824SJérôme Glisse 	 * We have to assume the worst case, i.e. pmd, for invalidation. Note
1002e83c09a2SMatthew Wilcox (Oracle) 	 * that the folio cannot be freed from this function.
1003369ea824SJérôme Glisse 	 */
10047d4a8be0SAlistair Popple 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0,
10057d4a8be0SAlistair Popple 				vma->vm_mm, address, vma_address_end(pvmw));
1006ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
1007369ea824SJérôme Glisse 
10086a8e0596SMuchun Song 	while (page_vma_mapped_walk(pvmw)) {
1009f27176cfSKirill A. Shutemov 		int ret = 0;
1010369ea824SJérôme Glisse 
10116a8e0596SMuchun Song 		address = pvmw->address;
10126a8e0596SMuchun Song 		if (pvmw->pte) {
10136a8e0596SMuchun Song 			pte_t *pte = pvmw->pte;
1014c33c7948SRyan Roberts 			pte_t entry = ptep_get(pte);
1015f27176cfSKirill A. Shutemov 
1016c33c7948SRyan Roberts 			if (!pte_dirty(entry) && !pte_write(entry))
1017f27176cfSKirill A. Shutemov 				continue;
1018d08b3851SPeter Zijlstra 
1019c33c7948SRyan Roberts 			flush_cache_page(vma, address, pte_pfn(entry));
1020785373b4SLinus Torvalds 			entry = ptep_clear_flush(vma, address, pte);
1021d08b3851SPeter Zijlstra 			entry = pte_wrprotect(entry);
1022c2fda5feSPeter Zijlstra 			entry = pte_mkclean(entry);
1023785373b4SLinus Torvalds 			set_pte_at(vma->vm_mm, address, pte, entry);
1024d08b3851SPeter Zijlstra 			ret = 1;
1025f27176cfSKirill A. Shutemov 		} else {
1026396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
10276a8e0596SMuchun Song 			pmd_t *pmd = pvmw->pmd;
1028f27176cfSKirill A. Shutemov 			pmd_t entry;
1029d08b3851SPeter Zijlstra 
1030f27176cfSKirill A. Shutemov 			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
1031f27176cfSKirill A. Shutemov 				continue;
1032f27176cfSKirill A. Shutemov 
10337f9c9b60SMuchun Song 			flush_cache_range(vma, address,
10347f9c9b60SMuchun Song 					  address + HPAGE_PMD_SIZE);
1035024eee0eSAneesh Kumar K.V 			entry = pmdp_invalidate(vma, address, pmd);
1036f27176cfSKirill A. Shutemov 			entry = pmd_wrprotect(entry);
1037f27176cfSKirill A. Shutemov 			entry = pmd_mkclean(entry);
1038785373b4SLinus Torvalds 			set_pmd_at(vma->vm_mm, address, pmd, entry);
1039f27176cfSKirill A. Shutemov 			ret = 1;
1040f27176cfSKirill A. Shutemov #else
1041e83c09a2SMatthew Wilcox (Oracle) 			/* unexpected pmd-mapped folio? */
1042f27176cfSKirill A. Shutemov 			WARN_ON_ONCE(1);
1043f27176cfSKirill A. Shutemov #endif
1044f27176cfSKirill A. Shutemov 		}
10452ec74c3eSSagi Grimberg 
10460f10851eSJérôme Glisse 		if (ret)
10476a8e0596SMuchun Song 			cleaned++;
10489853a407SJoonsoo Kim 	}
1049f27176cfSKirill A. Shutemov 
1050ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
1051369ea824SJérôme Glisse 
10526a8e0596SMuchun Song 	return cleaned;
10536a8e0596SMuchun Song }
10546a8e0596SMuchun Song 
10556a8e0596SMuchun Song static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
10566a8e0596SMuchun Song 			     unsigned long address, void *arg)
10576a8e0596SMuchun Song {
10586a8e0596SMuchun Song 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
10596a8e0596SMuchun Song 	int *cleaned = arg;
10606a8e0596SMuchun Song 
10616a8e0596SMuchun Song 	*cleaned += page_vma_mkclean_one(&pvmw);
10626a8e0596SMuchun Song 
1063e4b82222SMinchan Kim 	return true;
1064d08b3851SPeter Zijlstra }
1065d08b3851SPeter Zijlstra 
10669853a407SJoonsoo Kim static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
1067d08b3851SPeter Zijlstra {
10689853a407SJoonsoo Kim 	if (vma->vm_flags & VM_SHARED)
1069871beb8cSFengguang Wu 		return false;
1070d08b3851SPeter Zijlstra 
1071871beb8cSFengguang Wu 	return true;
1072d08b3851SPeter Zijlstra }
1073d08b3851SPeter Zijlstra 
1074d9c08e22SMatthew Wilcox (Oracle) int folio_mkclean(struct folio *folio)
1075d08b3851SPeter Zijlstra {
10769853a407SJoonsoo Kim 	int cleaned = 0;
10779853a407SJoonsoo Kim 	struct address_space *mapping;
10789853a407SJoonsoo Kim 	struct rmap_walk_control rwc = {
10799853a407SJoonsoo Kim 		.arg = (void *)&cleaned,
10809853a407SJoonsoo Kim 		.rmap_one = page_mkclean_one,
10819853a407SJoonsoo Kim 		.invalid_vma = invalid_mkclean_vma,
10829853a407SJoonsoo Kim 	};
1083d08b3851SPeter Zijlstra 
1084d9c08e22SMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
1085d08b3851SPeter Zijlstra 
1086d9c08e22SMatthew Wilcox (Oracle) 	if (!folio_mapped(folio))
10879853a407SJoonsoo Kim 		return 0;
1088d08b3851SPeter Zijlstra 
1089d9c08e22SMatthew Wilcox (Oracle) 	mapping = folio_mapping(folio);
10909853a407SJoonsoo Kim 	if (!mapping)
10919853a407SJoonsoo Kim 		return 0;
10929853a407SJoonsoo Kim 
10932f031c6fSMatthew Wilcox (Oracle) 	rmap_walk(folio, &rwc);
10949853a407SJoonsoo Kim 
10959853a407SJoonsoo Kim 	return cleaned;
1096d08b3851SPeter Zijlstra }
1097d9c08e22SMatthew Wilcox (Oracle) EXPORT_SYMBOL_GPL(folio_mkclean);
1098d08b3851SPeter Zijlstra 
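/*
 * Illustrative sketch, not part of rmap.c: a hypothetical caller
 * write-protecting a file-backed folio before writeback.  folio_mkclean()
 * requires the folio lock, per the BUG_ON above; the helper name is made
 * up for illustration.
 */
static int example_writeprotect_folio(struct folio *folio)
{
	int cleaned;

	folio_lock(folio);
	cleaned = folio_mkclean(folio);	/* wrprotect and clean every shared PTE */
	folio_unlock(folio);

	return cleaned;			/* number of PTEs/PMDs cleaned */
}
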
10991da177e4SLinus Torvalds /**
11006a8e0596SMuchun Song  * pfn_mkclean_range - Clean the PTEs (including PMDs) mapping the range
11016a8e0596SMuchun Song  *                     [@pfn, @pfn + @nr_pages) at offset @pgoff within the
11026a8e0596SMuchun Song  *                     shared mappings of @vma. Since clean PTEs should also
11036a8e0596SMuchun Song  *                     be read-only, write-protect them too.
11046a8e0596SMuchun Song  * @pfn: start pfn.
11056a8e0596SMuchun Song  * @nr_pages: number of physically contiguous pages starting with @pfn.
11066a8e0596SMuchun Song  * @pgoff: page offset that @pfn is mapped at.
11076a8e0596SMuchun Song  * @vma: vma that @pfn is mapped within.
11086a8e0596SMuchun Song  *
11096a8e0596SMuchun Song  * Returns the number of cleaned PTEs (including PMDs).
11106a8e0596SMuchun Song  */
11116a8e0596SMuchun Song int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
11126a8e0596SMuchun Song 		      struct vm_area_struct *vma)
11136a8e0596SMuchun Song {
11146a8e0596SMuchun Song 	struct page_vma_mapped_walk pvmw = {
11156a8e0596SMuchun Song 		.pfn		= pfn,
11166a8e0596SMuchun Song 		.nr_pages	= nr_pages,
11176a8e0596SMuchun Song 		.pgoff		= pgoff,
11186a8e0596SMuchun Song 		.vma		= vma,
11196a8e0596SMuchun Song 		.flags		= PVMW_SYNC,
11206a8e0596SMuchun Song 	};
11216a8e0596SMuchun Song 
11226a8e0596SMuchun Song 	if (invalid_mkclean_vma(vma, NULL))
11236a8e0596SMuchun Song 		return 0;
11246a8e0596SMuchun Song 
11256a8e0596SMuchun Song 	pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma);
11266a8e0596SMuchun Song 	VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);
11276a8e0596SMuchun Song 
11286a8e0596SMuchun Song 	return page_vma_mkclean_one(&pvmw);
11296a8e0596SMuchun Song }
11306a8e0596SMuchun Song 
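/*
 * Illustrative sketch, not part of rmap.c: how a DAX-style caller might
 * clean every shared mapping of a pfn range by walking the file rmap tree
 * (cf. the real user in fs/dax.c).  The helper name and its parameters are
 * hypothetical.
 */
static int example_clean_pfn_range(struct address_space *mapping,
				   pgoff_t index, unsigned long pfn,
				   unsigned long nr_pages)
{
	struct vm_area_struct *vma;
	int cleaned = 0;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index,
				  index + nr_pages - 1)
		cleaned += pfn_mkclean_range(pfn, nr_pages, index, vma);
	i_mmap_unlock_read(mapping);

	return cleaned;
}
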
1131b14224fbSMatthew Wilcox (Oracle) int folio_total_mapcount(struct folio *folio)
11329bd3155eSHugh Dickins {
1133b14224fbSMatthew Wilcox (Oracle) 	int mapcount = folio_entire_mapcount(folio);
1134b14224fbSMatthew Wilcox (Oracle) 	int nr_pages;
1135cb67f428SHugh Dickins 	int i;
1136cb67f428SHugh Dickins 
1137b14224fbSMatthew Wilcox (Oracle) 	/* In the common case, avoid the loop when no pages mapped by PTE */
1138eec20426SMatthew Wilcox (Oracle) 	if (folio_nr_pages_mapped(folio) == 0)
1139be5ef2d9SHugh Dickins 		return mapcount;
1140be5ef2d9SHugh Dickins 	/*
1141b14224fbSMatthew Wilcox (Oracle) 	 * Add all the PTE mappings of those pages mapped by PTE.
1142b14224fbSMatthew Wilcox (Oracle) 	 * Limit the loop to folio_nr_pages_mapped()?
1143be5ef2d9SHugh Dickins 	 * Perhaps: given all the raciness, that may be a good or a bad idea.
1144be5ef2d9SHugh Dickins 	 */
1145b14224fbSMatthew Wilcox (Oracle) 	nr_pages = folio_nr_pages(folio);
1146b14224fbSMatthew Wilcox (Oracle) 	for (i = 0; i < nr_pages; i++)
1147b14224fbSMatthew Wilcox (Oracle) 		mapcount += atomic_read(&folio_page(folio, i)->_mapcount);
1148be5ef2d9SHugh Dickins 
1149be5ef2d9SHugh Dickins 	/* But each of those _mapcounts was based on -1 */
1150b14224fbSMatthew Wilcox (Oracle) 	mapcount += nr_pages;
1151be5ef2d9SHugh Dickins 	return mapcount;
1152cb67f428SHugh Dickins }
1153cb67f428SHugh Dickins 
11546a8e0596SMuchun Song /**
115506968625SDavid Hildenbrand  * folio_move_anon_rmap - move a folio to our anon_vma
115606968625SDavid Hildenbrand  * @folio:	The folio to move to our anon_vma
115706968625SDavid Hildenbrand  * @vma:	The vma the folio belongs to
1158c44b6743SRik van Riel  *
115906968625SDavid Hildenbrand  * When a folio belongs exclusively to one process after a COW event,
116006968625SDavid Hildenbrand  * that folio can be moved into the anon_vma that belongs to just that
116106968625SDavid Hildenbrand  * process, so the rmap code will not search the parent or sibling processes.
1162c44b6743SRik van Riel  */
116306968625SDavid Hildenbrand void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma)
1164c44b6743SRik van Riel {
1165595af4c9SMatthew Wilcox (Oracle) 	void *anon_vma = vma->anon_vma;
1166c44b6743SRik van Riel 
1167595af4c9SMatthew Wilcox (Oracle) 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
116881d1b09cSSasha Levin 	VM_BUG_ON_VMA(!anon_vma, vma);
1169c44b6743SRik van Riel 
1170595af4c9SMatthew Wilcox (Oracle) 	anon_vma += PAGE_MAPPING_ANON;
1171414e2fb8SVladimir Davydov 	/*
1172414e2fb8SVladimir Davydov 	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
1173b3ac0413SMatthew Wilcox (Oracle) 	 * simultaneously, so a concurrent reader (eg folio_referenced()'s
1174b3ac0413SMatthew Wilcox (Oracle) 	 * folio_test_anon()) will not see one without the other.
1175414e2fb8SVladimir Davydov 	 */
1176595af4c9SMatthew Wilcox (Oracle) 	WRITE_ONCE(folio->mapping, anon_vma);
1177c44b6743SRik van Riel }
1178c44b6743SRik van Riel 
1179c44b6743SRik van Riel /**
1180c66db8c0SDavid Hildenbrand  * __folio_set_anon - set up a new anonymous rmap for a folio
1181c66db8c0SDavid Hildenbrand  * @folio:	The folio to set up the new anonymous rmap for.
1182c66db8c0SDavid Hildenbrand  * @vma:	VM area to add the folio to.
11834e1c1975SAndi Kleen  * @address:	User virtual address of the mapping
1184c66db8c0SDavid Hildenbrand  * @exclusive:	Whether the folio is exclusive to the process.
11851da177e4SLinus Torvalds  */
1186c66db8c0SDavid Hildenbrand static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
1187c66db8c0SDavid Hildenbrand 			     unsigned long address, bool exclusive)
11881da177e4SLinus Torvalds {
1189e8a03febSRik van Riel 	struct anon_vma *anon_vma = vma->anon_vma;
11902822c1aaSNick Piggin 
1191e8a03febSRik van Riel 	BUG_ON(!anon_vma);
1192ea90002bSLinus Torvalds 
1193ea90002bSLinus Torvalds 	/*
1194c66db8c0SDavid Hildenbrand 	 * If the folio isn't exclusive to this vma, we must use the _oldest_
1195c66db8c0SDavid Hildenbrand 	 * possible anon_vma for the folio mapping!
1196ea90002bSLinus Torvalds 	 */
11974e1c1975SAndi Kleen 	if (!exclusive)
1198288468c3SAndrea Arcangeli 		anon_vma = anon_vma->root;
1199ea90002bSLinus Torvalds 
120016f5e707SAlex Shi 	/*
12015b4bd90fSMatthew Wilcox (Oracle) 	 * page_idle does a lockless/optimistic rmap scan on folio->mapping.
120216f5e707SAlex Shi 	 * Make sure the compiler doesn't split the stores of anon_vma and
120316f5e707SAlex Shi 	 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
120416f5e707SAlex Shi 	 * could mistake the mapping for a struct address_space and crash.
120516f5e707SAlex Shi 	 */
12061da177e4SLinus Torvalds 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
12075b4bd90fSMatthew Wilcox (Oracle) 	WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
12085b4bd90fSMatthew Wilcox (Oracle) 	folio->index = linear_page_index(vma, address);
12091da177e4SLinus Torvalds }
12109617d95eSNick Piggin 
12119617d95eSNick Piggin /**
121243d8eac4SRandy Dunlap  * __page_check_anon_rmap - sanity check anonymous rmap addition
1213dba438bdSMatthew Wilcox (Oracle)  * @folio:	The folio containing @page.
1214dba438bdSMatthew Wilcox (Oracle)  * @page:	the page to check the mapping of
1215c97a9e10SNick Piggin  * @vma:	the vm area in which the mapping is added
1216c97a9e10SNick Piggin  * @address:	the user virtual address mapped
1217c97a9e10SNick Piggin  */
1218dba438bdSMatthew Wilcox (Oracle) static void __page_check_anon_rmap(struct folio *folio, struct page *page,
1219c97a9e10SNick Piggin 	struct vm_area_struct *vma, unsigned long address)
1220c97a9e10SNick Piggin {
1221c97a9e10SNick Piggin 	/*
1222c97a9e10SNick Piggin 	 * The page's anon-rmap details (mapping and index) are guaranteed to
1223c97a9e10SNick Piggin 	 * be set up correctly at this point.
1224c97a9e10SNick Piggin 	 *
1225c97a9e10SNick Piggin 	 * We have exclusion against page_add_anon_rmap because the caller
122690aaca85SMiaohe Lin 	 * always holds the page locked.
1227c97a9e10SNick Piggin 	 *
1228c97a9e10SNick Piggin 	 * We have exclusion against page_add_new_anon_rmap because those pages
1229c97a9e10SNick Piggin 	 * are initially only visible via the pagetables, and the pte is locked
1230c97a9e10SNick Piggin 	 * over the call to page_add_new_anon_rmap.
1231c97a9e10SNick Piggin 	 */
1232e05b3453SMatthew Wilcox (Oracle) 	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
1233e05b3453SMatthew Wilcox (Oracle) 			folio);
123430c46382SYang Shi 	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
123530c46382SYang Shi 		       page);
1236c97a9e10SNick Piggin }
1237c97a9e10SNick Piggin 
1238c97a9e10SNick Piggin /**
12399617d95eSNick Piggin  * page_add_anon_rmap - add pte mapping to an anonymous page
12409617d95eSNick Piggin  * @page:	the page to add the mapping to
12419617d95eSNick Piggin  * @vma:	the vm area in which the mapping is added
12429617d95eSNick Piggin  * @address:	the user virtual address mapped
1243f1e2db12SDavid Hildenbrand  * @flags:	the rmap flags
12449617d95eSNick Piggin  *
12455ad64688SHugh Dickins  * The caller needs to hold the pte lock, and the page must be locked in
124680e14822SHugh Dickins  * the anon_vma case: to serialize mapping,index checking after setting,
124780e14822SHugh Dickins  * and to ensure that PageAnon is not being upgraded racily to PageKsm
124880e14822SHugh Dickins  * (but PageKsm is never downgraded to PageAnon).
12499617d95eSNick Piggin  */
1250ee0800c2SMatthew Wilcox (Oracle) void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
1251ee0800c2SMatthew Wilcox (Oracle) 		unsigned long address, rmap_t flags)
1252ad8c2ee8SRik van Riel {
1253ee0800c2SMatthew Wilcox (Oracle) 	struct folio *folio = page_folio(page);
1254ee0800c2SMatthew Wilcox (Oracle) 	atomic_t *mapped = &folio->_nr_pages_mapped;
12559bd3155eSHugh Dickins 	int nr = 0, nr_pmdmapped = 0;
1256d281ee61SKirill A. Shutemov 	bool compound = flags & RMAP_COMPOUND;
1257132b180fSDavid Hildenbrand 	bool first;
125853f9263bSKirill A. Shutemov 
1259be5ef2d9SHugh Dickins 	/* Is page being mapped by PTE? Is this its first map to be added? */
1260be5ef2d9SHugh Dickins 	if (likely(!compound)) {
1261d8dd5e97SHugh Dickins 		first = atomic_inc_and_test(&page->_mapcount);
1262d8dd5e97SHugh Dickins 		nr = first;
1263ee0800c2SMatthew Wilcox (Oracle) 		if (first && folio_test_large(folio)) {
12644b51634cSHugh Dickins 			nr = atomic_inc_return_relaxed(mapped);
12656287b7daSHugh Dickins 			nr = (nr < COMPOUND_MAPPED);
126653f9263bSKirill A. Shutemov 		}
1267ee0800c2SMatthew Wilcox (Oracle) 	} else if (folio_test_pmd_mappable(folio)) {
1268be5ef2d9SHugh Dickins 		/* That test is redundant: it's for safety or to optimize out */
1269be5ef2d9SHugh Dickins 
1270ee0800c2SMatthew Wilcox (Oracle) 		first = atomic_inc_and_test(&folio->_entire_mapcount);
1271be5ef2d9SHugh Dickins 		if (first) {
12724b51634cSHugh Dickins 			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
12736287b7daSHugh Dickins 			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
1274ee0800c2SMatthew Wilcox (Oracle) 				nr_pmdmapped = folio_nr_pages(folio);
1275eec20426SMatthew Wilcox (Oracle) 				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
12766287b7daSHugh Dickins 				/* Raced ahead of a remove and another add? */
12776287b7daSHugh Dickins 				if (unlikely(nr < 0))
12786287b7daSHugh Dickins 					nr = 0;
12796287b7daSHugh Dickins 			} else {
12806287b7daSHugh Dickins 				/* Raced ahead of a remove of COMPOUND_MAPPED */
12816287b7daSHugh Dickins 				nr = 0;
12826287b7daSHugh Dickins 			}
1283be5ef2d9SHugh Dickins 		}
1284be5ef2d9SHugh Dickins 	}
1285cb67f428SHugh Dickins 
12869bd3155eSHugh Dickins 	if (nr_pmdmapped)
1287ee0800c2SMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
12889bd3155eSHugh Dickins 	if (nr)
1289ee0800c2SMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
12905ad64688SHugh Dickins 
1291c5c54003SDavid Hildenbrand 	if (unlikely(!folio_test_anon(folio))) {
1292c5c54003SDavid Hildenbrand 		VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
1293a1f34ee1SDavid Hildenbrand 		/*
1294a1f34ee1SDavid Hildenbrand 		 * For a PTE-mapped large folio, we only know that the single
1295a1f34ee1SDavid Hildenbrand 		 * PTE is exclusive. Further, __folio_set_anon() might not get
1296a1f34ee1SDavid Hildenbrand 		 * folio->index right when not given the address of the head
1297a1f34ee1SDavid Hildenbrand 		 * page.
1298a1f34ee1SDavid Hildenbrand 		 */
1299a1f34ee1SDavid Hildenbrand 		VM_WARN_ON_FOLIO(folio_test_large(folio) && !compound, folio);
1300c66db8c0SDavid Hildenbrand 		__folio_set_anon(folio, vma, address,
130114f9135dSDavid Hildenbrand 				 !!(flags & RMAP_EXCLUSIVE));
1302c5c54003SDavid Hildenbrand 	} else if (likely(!folio_test_ksm(folio))) {
1303dba438bdSMatthew Wilcox (Oracle) 		__page_check_anon_rmap(folio, page, vma, address);
1304c7c3dec1SJohannes Weiner 	}
1305c66db8c0SDavid Hildenbrand 	if (flags & RMAP_EXCLUSIVE)
1306c66db8c0SDavid Hildenbrand 		SetPageAnonExclusive(page);
1307132b180fSDavid Hildenbrand 	/* While PTE-mapping a THP we have a PMD and a PTE mapping. */
1308132b180fSDavid Hildenbrand 	VM_WARN_ON_FOLIO((atomic_read(&page->_mapcount) > 0 ||
1309132b180fSDavid Hildenbrand 			  (folio_test_large(folio) && folio_entire_mapcount(folio) > 1)) &&
1310132b180fSDavid Hildenbrand 			 PageAnonExclusive(page), folio);
1311cea86fe2SHugh Dickins 
13121acbc3f9SYin Fengwei 	/*
13131acbc3f9SYin Fengwei 	 * For large folio, only mlock it if it's fully mapped to VMA. It's
13141acbc3f9SYin Fengwei 	 * For a large folio, only mlock it if it's fully mapped to the VMA.
13151acbc3f9SYin Fengwei 	 * It's not easy to check whether the large folio is fully mapped
13161acbc3f9SYin Fengwei 	 * here, so only mlock normal 4K folios and leave page reclaim to
13171acbc3f9SYin Fengwei 	 * handle large folios.
13181acbc3f9SYin Fengwei 	if (!folio_test_large(folio))
13191acbc3f9SYin Fengwei 		mlock_vma_folio(folio, vma);
13201da177e4SLinus Torvalds }
13211da177e4SLinus Torvalds 
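/*
 * Illustrative sketch, not part of rmap.c: the typical ordering in a fault
 * path that maps an existing anonymous page by PTE.  The helper name is
 * hypothetical; the page lock and pte lock are assumed to be held by the
 * caller, and RMAP_NONE/RMAP_EXCLUSIVE are the real rmap_t flags.
 */
static void example_map_anon_pte(struct vm_area_struct *vma,
				 unsigned long addr, pte_t *pte,
				 struct page *page, bool exclusive)
{
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	page_add_anon_rmap(page, vma, addr,
			   exclusive ? RMAP_EXCLUSIVE : RMAP_NONE);
	set_pte_at(vma->vm_mm, addr, pte, entry);
}
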
132243d8eac4SRandy Dunlap /**
13234d510f3dSMatthew Wilcox (Oracle)  * folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
13244d510f3dSMatthew Wilcox (Oracle)  * @folio:	The folio to add the mapping to.
13259617d95eSNick Piggin  * @vma:	the vm area in which the mapping is added
13269617d95eSNick Piggin  * @address:	the user virtual address mapped
132740f2bbf7SDavid Hildenbrand  *
13284d510f3dSMatthew Wilcox (Oracle)  * Like page_add_anon_rmap() but must only be called on *new* folios.
13299617d95eSNick Piggin  * This means the inc-and-test can be bypassed.
13304d510f3dSMatthew Wilcox (Oracle)  * The folio does not have to be locked.
13314d510f3dSMatthew Wilcox (Oracle)  *
1332372cbd4dSRyan Roberts  * If the folio is pmd-mappable, it is accounted as a THP.  As the folio
13334d510f3dSMatthew Wilcox (Oracle)  * is new, it's assumed to be mapped exclusively by a single process.
13349617d95eSNick Piggin  */
13354d510f3dSMatthew Wilcox (Oracle) void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
13364d510f3dSMatthew Wilcox (Oracle) 		unsigned long address)
13379617d95eSNick Piggin {
1338372cbd4dSRyan Roberts 	int nr = folio_nr_pages(folio);
1339d281ee61SKirill A. Shutemov 
1340372cbd4dSRyan Roberts 	VM_BUG_ON_VMA(address < vma->vm_start ||
1341372cbd4dSRyan Roberts 			address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
13424d510f3dSMatthew Wilcox (Oracle) 	__folio_set_swapbacked(folio);
1343372cbd4dSRyan Roberts 	__folio_set_anon(folio, vma, address, true);
1344d8dd5e97SHugh Dickins 
1345372cbd4dSRyan Roberts 	if (likely(!folio_test_large(folio))) {
1346d8dd5e97SHugh Dickins 		/* increment count (starts at -1) */
13474d510f3dSMatthew Wilcox (Oracle) 		atomic_set(&folio->_mapcount, 0);
1348372cbd4dSRyan Roberts 		SetPageAnonExclusive(&folio->page);
1349372cbd4dSRyan Roberts 	} else if (!folio_test_pmd_mappable(folio)) {
1350372cbd4dSRyan Roberts 		int i;
1351372cbd4dSRyan Roberts 
1352372cbd4dSRyan Roberts 		for (i = 0; i < nr; i++) {
1353372cbd4dSRyan Roberts 			struct page *page = folio_page(folio, i);
1354372cbd4dSRyan Roberts 
1355372cbd4dSRyan Roberts 			/* increment count (starts at -1) */
1356372cbd4dSRyan Roberts 			atomic_set(&page->_mapcount, 0);
1357372cbd4dSRyan Roberts 			SetPageAnonExclusive(page);
1358372cbd4dSRyan Roberts 		}
1359372cbd4dSRyan Roberts 
1360372cbd4dSRyan Roberts 		atomic_set(&folio->_nr_pages_mapped, nr);
1361d8dd5e97SHugh Dickins 	} else {
136253f9263bSKirill A. Shutemov 		/* increment count (starts at -1) */
13634d510f3dSMatthew Wilcox (Oracle) 		atomic_set(&folio->_entire_mapcount, 0);
13644d510f3dSMatthew Wilcox (Oracle) 		atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED);
1365372cbd4dSRyan Roberts 		SetPageAnonExclusive(&folio->page);
13664d510f3dSMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr);
1367d281ee61SKirill A. Shutemov 	}
1368d8dd5e97SHugh Dickins 
13694d510f3dSMatthew Wilcox (Oracle) 	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
13709617d95eSNick Piggin }
13719617d95eSNick Piggin 
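/*
 * Illustrative sketch, not part of rmap.c: simplified ordering for a brand
 * new anonymous folio in a fault handler (cf. do_anonymous_page()).  The
 * helper name is hypothetical and pte permission bits (dirty, write,
 * young) are omitted for brevity.
 */
static void example_map_new_anon_folio(struct folio *folio,
				       struct vm_area_struct *vma,
				       unsigned long addr, pte_t *pte)
{
	pte_t entry = mk_pte(&folio->page, vma->vm_page_prot);

	folio_add_new_anon_rmap(folio, vma, addr);	/* folio need not be locked */
	folio_add_lru_vma(folio, vma);
	set_pte_at(vma->vm_mm, addr, pte, entry);
}
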
13721da177e4SLinus Torvalds /**
137386f35f69SYin Fengwei  * folio_add_file_rmap_range - add pte mapping to page range of a folio
137486f35f69SYin Fengwei  * @folio:	The folio to add the mapping to
137586f35f69SYin Fengwei  * @page:	The first page to add
137686f35f69SYin Fengwei  * @nr_pages:	The number of pages which will be mapped
1377cea86fe2SHugh Dickins  * @vma:	the vm area in which the mapping is added
1378e8b098fcSMike Rapoport  * @compound:	charge the page as compound or small page
13791da177e4SLinus Torvalds  *
138086f35f69SYin Fengwei  * The page range of the folio is defined by [@page, @page + @nr_pages).
138186f35f69SYin Fengwei  *
1382b8072f09SHugh Dickins  * The caller needs to hold the pte lock.
13831da177e4SLinus Torvalds  */
138486f35f69SYin Fengwei void folio_add_file_rmap_range(struct folio *folio, struct page *page,
138586f35f69SYin Fengwei 			unsigned int nr_pages, struct vm_area_struct *vma,
1386eb01a2adSMatthew Wilcox (Oracle) 			bool compound)
13871da177e4SLinus Torvalds {
1388eb01a2adSMatthew Wilcox (Oracle) 	atomic_t *mapped = &folio->_nr_pages_mapped;
138986f35f69SYin Fengwei 	unsigned int nr_pmdmapped = 0, first;
139086f35f69SYin Fengwei 	int nr = 0;
1391dd78feddSKirill A. Shutemov 
139286f35f69SYin Fengwei 	VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio);
13939bd3155eSHugh Dickins 
1394be5ef2d9SHugh Dickins 	/* Is page being mapped by PTE? Is this its first map to be added? */
1395be5ef2d9SHugh Dickins 	if (likely(!compound)) {
139686f35f69SYin Fengwei 		do {
1397d8dd5e97SHugh Dickins 			first = atomic_inc_and_test(&page->_mapcount);
1398eb01a2adSMatthew Wilcox (Oracle) 			if (first && folio_test_large(folio)) {
139986f35f69SYin Fengwei 				first = atomic_inc_return_relaxed(mapped);
140086f35f69SYin Fengwei 				first = (first < COMPOUND_MAPPED);
14019a73f61bSKirill A. Shutemov 			}
140286f35f69SYin Fengwei 
140386f35f69SYin Fengwei 			if (first)
140486f35f69SYin Fengwei 				nr++;
140586f35f69SYin Fengwei 		} while (page++, --nr_pages > 0);
1406eb01a2adSMatthew Wilcox (Oracle) 	} else if (folio_test_pmd_mappable(folio)) {
1407be5ef2d9SHugh Dickins 		/* That test is redundant: it's for safety or to optimize out */
1408be5ef2d9SHugh Dickins 
1409eb01a2adSMatthew Wilcox (Oracle) 		first = atomic_inc_and_test(&folio->_entire_mapcount);
1410be5ef2d9SHugh Dickins 		if (first) {
14114b51634cSHugh Dickins 			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
14126287b7daSHugh Dickins 			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
1413eb01a2adSMatthew Wilcox (Oracle) 				nr_pmdmapped = folio_nr_pages(folio);
1414eec20426SMatthew Wilcox (Oracle) 				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
14156287b7daSHugh Dickins 				/* Raced ahead of a remove and another add? */
14166287b7daSHugh Dickins 				if (unlikely(nr < 0))
14176287b7daSHugh Dickins 					nr = 0;
14186287b7daSHugh Dickins 			} else {
14196287b7daSHugh Dickins 				/* Raced ahead of a remove of COMPOUND_MAPPED */
14206287b7daSHugh Dickins 				nr = 0;
14216287b7daSHugh Dickins 			}
1422be5ef2d9SHugh Dickins 		}
1423be5ef2d9SHugh Dickins 	}
14249bd3155eSHugh Dickins 
14259bd3155eSHugh Dickins 	if (nr_pmdmapped)
1426eb01a2adSMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ?
14279bd3155eSHugh Dickins 			NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
14285d543f13SHugh Dickins 	if (nr)
1429eb01a2adSMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr);
1430cea86fe2SHugh Dickins 
14311acbc3f9SYin Fengwei 	/* See comments in page_add_anon_rmap() */
14321acbc3f9SYin Fengwei 	if (!folio_test_large(folio))
14331acbc3f9SYin Fengwei 		mlock_vma_folio(folio, vma);
14341da177e4SLinus Torvalds }
14351da177e4SLinus Torvalds 
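/*
 * Illustrative sketch, not part of rmap.c: mapping a run of pages from one
 * file folio by PTE, as a fault-around style caller might.  The helper
 * name is hypothetical; the pte lock is assumed to be held by the caller.
 */
static void example_map_file_range(struct folio *folio, struct page *page,
				   unsigned int nr, struct vm_area_struct *vma,
				   unsigned long addr, pte_t *pte)
{
	unsigned int i;

	folio_add_file_rmap_range(folio, page, nr, vma, false);
	for (i = 0; i < nr; i++, page++, addr += PAGE_SIZE)
		set_pte_at(vma->vm_mm, addr, pte + i,
			   mk_pte(page, vma->vm_page_prot));
}
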
14361da177e4SLinus Torvalds /**
143786f35f69SYin Fengwei  * page_add_file_rmap - add pte mapping to a file page
143886f35f69SYin Fengwei  * @page:	the page to add the mapping to
143986f35f69SYin Fengwei  * @vma:	the vm area in which the mapping is added
144086f35f69SYin Fengwei  * @compound:	charge the page as compound or small page
144186f35f69SYin Fengwei  *
144286f35f69SYin Fengwei  * The caller needs to hold the pte lock.
144386f35f69SYin Fengwei  */
144486f35f69SYin Fengwei void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
144586f35f69SYin Fengwei 		bool compound)
144686f35f69SYin Fengwei {
144786f35f69SYin Fengwei 	struct folio *folio = page_folio(page);
144886f35f69SYin Fengwei 	unsigned int nr_pages;
144986f35f69SYin Fengwei 
145086f35f69SYin Fengwei 	VM_WARN_ON_ONCE_PAGE(compound && !PageTransHuge(page), page);
145186f35f69SYin Fengwei 
145286f35f69SYin Fengwei 	if (likely(!compound))
145386f35f69SYin Fengwei 		nr_pages = 1;
145486f35f69SYin Fengwei 	else
145586f35f69SYin Fengwei 		nr_pages = folio_nr_pages(folio);
145686f35f69SYin Fengwei 
145786f35f69SYin Fengwei 	folio_add_file_rmap_range(folio, page, nr_pages, vma, compound);
145886f35f69SYin Fengwei }
145986f35f69SYin Fengwei 
146086f35f69SYin Fengwei /**
14611da177e4SLinus Torvalds  * page_remove_rmap - take down pte mapping from a page
14621da177e4SLinus Torvalds  * @page:	page to remove mapping from
1463cea86fe2SHugh Dickins  * @vma:	the vm area from which the mapping is removed
1464d281ee61SKirill A. Shutemov  * @compound:	uncharge the page as compound or small page
14651da177e4SLinus Torvalds  *
1466b8072f09SHugh Dickins  * The caller needs to hold the pte lock.
14671da177e4SLinus Torvalds  */
146862beb906SMatthew Wilcox (Oracle) void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
146962beb906SMatthew Wilcox (Oracle) 		bool compound)
14701da177e4SLinus Torvalds {
147162beb906SMatthew Wilcox (Oracle) 	struct folio *folio = page_folio(page);
147262beb906SMatthew Wilcox (Oracle) 	atomic_t *mapped = &folio->_nr_pages_mapped;
14739bd3155eSHugh Dickins 	int nr = 0, nr_pmdmapped = 0;
14749bd3155eSHugh Dickins 	bool last;
147562beb906SMatthew Wilcox (Oracle) 	enum node_stat_item idx;
14769bd3155eSHugh Dickins 
14779bd3155eSHugh Dickins 	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
14789bd3155eSHugh Dickins 
14799bd3155eSHugh Dickins 	/* Hugetlb pages are not counted in NR_*MAPPED */
148062beb906SMatthew Wilcox (Oracle) 	if (unlikely(folio_test_hugetlb(folio))) {
14819bd3155eSHugh Dickins 		/* hugetlb pages are always mapped with pmds */
148262beb906SMatthew Wilcox (Oracle) 		atomic_dec(&folio->_entire_mapcount);
14839bd3155eSHugh Dickins 		return;
14849bd3155eSHugh Dickins 	}
1485cb67f428SHugh Dickins 
1486be5ef2d9SHugh Dickins 	/* Is page being unmapped by PTE? Is this its last map to be removed? */
1487be5ef2d9SHugh Dickins 	if (likely(!compound)) {
1488d8dd5e97SHugh Dickins 		last = atomic_add_negative(-1, &page->_mapcount);
1489d8dd5e97SHugh Dickins 		nr = last;
149062beb906SMatthew Wilcox (Oracle) 		if (last && folio_test_large(folio)) {
14914b51634cSHugh Dickins 			nr = atomic_dec_return_relaxed(mapped);
14926287b7daSHugh Dickins 			nr = (nr < COMPOUND_MAPPED);
1493cb67f428SHugh Dickins 		}
149462beb906SMatthew Wilcox (Oracle) 	} else if (folio_test_pmd_mappable(folio)) {
1495be5ef2d9SHugh Dickins 		/* That test is redundant: it's for safety or to optimize out */
1496be5ef2d9SHugh Dickins 
149762beb906SMatthew Wilcox (Oracle) 		last = atomic_add_negative(-1, &folio->_entire_mapcount);
1498be5ef2d9SHugh Dickins 		if (last) {
14994b51634cSHugh Dickins 			nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped);
15006287b7daSHugh Dickins 			if (likely(nr < COMPOUND_MAPPED)) {
150162beb906SMatthew Wilcox (Oracle) 				nr_pmdmapped = folio_nr_pages(folio);
1502eec20426SMatthew Wilcox (Oracle) 				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
15036287b7daSHugh Dickins 				/* Raced ahead of another remove and an add? */
15046287b7daSHugh Dickins 				if (unlikely(nr < 0))
15056287b7daSHugh Dickins 					nr = 0;
15066287b7daSHugh Dickins 			} else {
15076287b7daSHugh Dickins 				/* An add of COMPOUND_MAPPED raced ahead */
15086287b7daSHugh Dickins 				nr = 0;
15096287b7daSHugh Dickins 			}
1510be5ef2d9SHugh Dickins 		}
1511be5ef2d9SHugh Dickins 	}
1512cb67f428SHugh Dickins 
15139bd3155eSHugh Dickins 	if (nr_pmdmapped) {
151462beb906SMatthew Wilcox (Oracle) 		if (folio_test_anon(folio))
151562beb906SMatthew Wilcox (Oracle) 			idx = NR_ANON_THPS;
151662beb906SMatthew Wilcox (Oracle) 		else if (folio_test_swapbacked(folio))
151762beb906SMatthew Wilcox (Oracle) 			idx = NR_SHMEM_PMDMAPPED;
151862beb906SMatthew Wilcox (Oracle) 		else
151962beb906SMatthew Wilcox (Oracle) 			idx = NR_FILE_PMDMAPPED;
152062beb906SMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped);
15219bd3155eSHugh Dickins 	}
15229bd3155eSHugh Dickins 	if (nr) {
152362beb906SMatthew Wilcox (Oracle) 		idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
152462beb906SMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(folio, idx, -nr);
152562beb906SMatthew Wilcox (Oracle) 
15269bd3155eSHugh Dickins 		/*
15277dc7c5efSRyan Roberts 		 * Queue an anon large folio for deferred split if at least one
152862beb906SMatthew Wilcox (Oracle) 		 * page of the folio is unmapped and at least one page
152962beb906SMatthew Wilcox (Oracle) 		 * is still mapped.
15309bd3155eSHugh Dickins 		 */
15317dc7c5efSRyan Roberts 		if (folio_test_large(folio) && folio_test_anon(folio))
15329bd3155eSHugh Dickins 			if (!compound || nr < nr_pmdmapped)
1533f158ed61SMatthew Wilcox (Oracle) 				deferred_split_folio(folio);
15349bd3155eSHugh Dickins 	}
15359a982250SKirill A. Shutemov 
153616f8c5b2SHugh Dickins 	/*
1537672aa27dSMatthew Wilcox (Oracle) 	 * It would be tidy to reset folio_test_anon mapping when fully
1538672aa27dSMatthew Wilcox (Oracle) 	 * unmapped, but that might overwrite a racing page_add_anon_rmap
1539672aa27dSMatthew Wilcox (Oracle) 	 * which increments mapcount after us but sets mapping before us:
1540672aa27dSMatthew Wilcox (Oracle) 	 * so leave the reset to free_pages_prepare, and remember that
1541672aa27dSMatthew Wilcox (Oracle) 	 * it's only reliable while mapped.
15421da177e4SLinus Torvalds 	 */
15439bd3155eSHugh Dickins 
15441acbc3f9SYin Fengwei 	munlock_vma_folio(folio, vma);
15451da177e4SLinus Torvalds }
15461da177e4SLinus Torvalds 
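/*
 * Illustrative sketch, not part of rmap.c: the unmap side for a single
 * PTE-mapped page, as a zap-style caller might do it.  The helper name is
 * hypothetical; TLB flushing, rss accounting and the final reference drop
 * are omitted for brevity.
 */
static void example_zap_one_pte(struct vm_area_struct *vma,
				unsigned long addr, pte_t *pte,
				struct page *page)
{
	/* pte lock held by the (hypothetical) caller */
	ptep_get_and_clear(vma->vm_mm, addr, pte);
	page_remove_rmap(page, vma, false);	/* small page: compound == false */
}
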
15471da177e4SLinus Torvalds /*
154852629506SJoonsoo Kim  * @arg: enum ttu_flags will be passed to this argument
15491da177e4SLinus Torvalds  */
15502f031c6fSMatthew Wilcox (Oracle) static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
155152629506SJoonsoo Kim 		     unsigned long address, void *arg)
15521da177e4SLinus Torvalds {
15531da177e4SLinus Torvalds 	struct mm_struct *mm = vma->vm_mm;
1554869f7ee6SMatthew Wilcox (Oracle) 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
15551da177e4SLinus Torvalds 	pte_t pteval;
1556c7ab0d2fSKirill A. Shutemov 	struct page *subpage;
15576c287605SDavid Hildenbrand 	bool anon_exclusive, ret = true;
1558ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
15594708f318SPalmer Dabbelt 	enum ttu_flags flags = (enum ttu_flags)(long)arg;
1560c33c7948SRyan Roberts 	unsigned long pfn;
1561935d4f0cSRyan Roberts 	unsigned long hsz = 0;
15621da177e4SLinus Torvalds 
1563732ed558SHugh Dickins 	/*
1564732ed558SHugh Dickins 	 * When racing against e.g. zap_pte_range() on another cpu,
1565732ed558SHugh Dickins 	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
15661fb08ac6SYang Shi 	 * try_to_unmap() may return before page_mapped() has become false,
1567732ed558SHugh Dickins 	 * if page table locking is skipped: use TTU_SYNC to wait for that.
1568732ed558SHugh Dickins 	 */
1569732ed558SHugh Dickins 	if (flags & TTU_SYNC)
1570732ed558SHugh Dickins 		pvmw.flags = PVMW_SYNC;
1571732ed558SHugh Dickins 
1572a98a2f0cSAlistair Popple 	if (flags & TTU_SPLIT_HUGE_PMD)
1573af28a988SMatthew Wilcox (Oracle) 		split_huge_pmd_address(vma, address, false, folio);
1574fec89c10SKirill A. Shutemov 
1575369ea824SJérôme Glisse 	/*
1576017b1660SMike Kravetz 	 * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
1577017b1660SMike Kravetz 	 * For hugetlb, it could be much worse if we need to do pud
1578017b1660SMike Kravetz 	 * invalidation in the case of pmd sharing.
1579017b1660SMike Kravetz 	 *
1580869f7ee6SMatthew Wilcox (Oracle) 	 * Note that the folio cannot be freed in this function, as the caller
1581869f7ee6SMatthew Wilcox (Oracle) 	 * of try_to_unmap() must hold a reference on the folio.
1582369ea824SJérôme Glisse 	 */
15832aff7a47SMatthew Wilcox (Oracle) 	range.end = vma_address_end(&pvmw);
15847d4a8be0SAlistair Popple 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
1585494334e4SHugh Dickins 				address, range.end);
1586869f7ee6SMatthew Wilcox (Oracle) 	if (folio_test_hugetlb(folio)) {
1587017b1660SMike Kravetz 		/*
1588017b1660SMike Kravetz 		 * If sharing is possible, start and end will be adjusted
1589017b1660SMike Kravetz 		 * accordingly.
1590017b1660SMike Kravetz 		 */
1591ac46d4f3SJérôme Glisse 		adjust_range_if_pmd_sharing_possible(vma, &range.start,
1592ac46d4f3SJérôme Glisse 						     &range.end);
1593935d4f0cSRyan Roberts 
1594935d4f0cSRyan Roberts 		/* We need the huge page size for set_huge_pte_at() */
1595935d4f0cSRyan Roberts 		hsz = huge_page_size(hstate_vma(vma));
1596017b1660SMike Kravetz 	}
1597ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
1598369ea824SJérôme Glisse 
1599c7ab0d2fSKirill A. Shutemov 	while (page_vma_mapped_walk(&pvmw)) {
1600cea86fe2SHugh Dickins 		/* Unexpected PMD-mapped THP? */
1601869f7ee6SMatthew Wilcox (Oracle) 		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
1602cea86fe2SHugh Dickins 
16031da177e4SLinus Torvalds 		/*
1604869f7ee6SMatthew Wilcox (Oracle) 		 * If the folio is in an mlock()d vma, we must not swap it out.
16051da177e4SLinus Torvalds 		 */
1606efdb6720SHugh Dickins 		if (!(flags & TTU_IGNORE_MLOCK) &&
1607efdb6720SHugh Dickins 		    (vma->vm_flags & VM_LOCKED)) {
1608cea86fe2SHugh Dickins 			/* Restore the mlock which got missed */
16091acbc3f9SYin Fengwei 			if (!folio_test_large(folio))
16101acbc3f9SYin Fengwei 				mlock_vma_folio(folio, vma);
1611c7ab0d2fSKirill A. Shutemov 			page_vma_mapped_walk_done(&pvmw);
1612efdb6720SHugh Dickins 			ret = false;
1613c7ab0d2fSKirill A. Shutemov 			break;
1614b87537d9SHugh Dickins 		}
1615c7ab0d2fSKirill A. Shutemov 
1616c33c7948SRyan Roberts 		pfn = pte_pfn(ptep_get(pvmw.pte));
1617c33c7948SRyan Roberts 		subpage = folio_page(folio, pfn - folio_pfn(folio));
1618785373b4SLinus Torvalds 		address = pvmw.address;
16196c287605SDavid Hildenbrand 		anon_exclusive = folio_test_anon(folio) &&
16206c287605SDavid Hildenbrand 				 PageAnonExclusive(subpage);
1621785373b4SLinus Torvalds 
1622dfc7ab57SBaolin Wang 		if (folio_test_hugetlb(folio)) {
16230506c31dSBaolin Wang 			bool anon = folio_test_anon(folio);
16240506c31dSBaolin Wang 
1625017b1660SMike Kravetz 			/*
1626a00a8759SBaolin Wang 			 * try_to_unmap() is only passed a hugetlb page
1627a00a8759SBaolin Wang 			 * in the case where the hugetlb page is poisoned.
1628a00a8759SBaolin Wang 			 */
1629a00a8759SBaolin Wang 			VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage);
1630a00a8759SBaolin Wang 			/*
163154205e9cSBaolin Wang 			 * huge_pmd_unshare may unmap an entire PMD page.
163254205e9cSBaolin Wang 			 * There is no way of knowing exactly which PMDs may
163354205e9cSBaolin Wang 			 * be cached for this mm, so we must flush them all.
163454205e9cSBaolin Wang 			 * start/end were already adjusted above to cover this
163554205e9cSBaolin Wang 			 * range.
1636017b1660SMike Kravetz 			 */
1637ac46d4f3SJérôme Glisse 			flush_cache_range(vma, range.start, range.end);
163854205e9cSBaolin Wang 
1639dfc7ab57SBaolin Wang 			/*
1640dfc7ab57SBaolin Wang 			 * To call huge_pmd_unshare, i_mmap_rwsem must be
1641dfc7ab57SBaolin Wang 			 * held in write mode.  Caller needs to explicitly
1642dfc7ab57SBaolin Wang 			 * do this outside rmap routines.
164340549ba8SMike Kravetz 			 *
164440549ba8SMike Kravetz 			 * We also must hold hugetlb vma_lock in write mode.
164540549ba8SMike Kravetz 			 * Lock order dictates acquiring vma_lock BEFORE
164640549ba8SMike Kravetz 			 * i_mmap_rwsem.  We can only try lock here and fail
164740549ba8SMike Kravetz 			 * if unsuccessful.
1648dfc7ab57SBaolin Wang 			 */
164940549ba8SMike Kravetz 			if (!anon) {
165040549ba8SMike Kravetz 				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
165140549ba8SMike Kravetz 				if (!hugetlb_vma_trylock_write(vma)) {
165240549ba8SMike Kravetz 					page_vma_mapped_walk_done(&pvmw);
165340549ba8SMike Kravetz 					ret = false;
165440549ba8SMike Kravetz 					break;
165540549ba8SMike Kravetz 				}
165640549ba8SMike Kravetz 				if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
165740549ba8SMike Kravetz 					hugetlb_vma_unlock_write(vma);
165840549ba8SMike Kravetz 					flush_tlb_range(vma,
165940549ba8SMike Kravetz 						range.start, range.end);
1660017b1660SMike Kravetz 					/*
166140549ba8SMike Kravetz 					 * The ref count of the PMD page was
166240549ba8SMike Kravetz 					 * dropped which is part of the way map
166340549ba8SMike Kravetz 					 * counting is done for shared PMDs.
166440549ba8SMike Kravetz 					 * Return 'true' here.  When there is
166540549ba8SMike Kravetz 					 * no other sharing, huge_pmd_unshare
166640549ba8SMike Kravetz 					 * returns false and we will unmap the
166740549ba8SMike Kravetz 					 * actual page and drop map count
1668017b1660SMike Kravetz 					 * to zero.
1669017b1660SMike Kravetz 					 */
1670017b1660SMike Kravetz 					page_vma_mapped_walk_done(&pvmw);
1671017b1660SMike Kravetz 					break;
1672017b1660SMike Kravetz 				}
167340549ba8SMike Kravetz 				hugetlb_vma_unlock_write(vma);
167440549ba8SMike Kravetz 			}
1675a00a8759SBaolin Wang 			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
167654205e9cSBaolin Wang 		} else {
1677c33c7948SRyan Roberts 			flush_cache_page(vma, address, pfn);
1678088b8aa5SDavid Hildenbrand 			/* Nuke the page table entry. */
1679088b8aa5SDavid Hildenbrand 			if (should_defer_flush(mm, flags)) {
168072b252aeSMel Gorman 				/*
1681c7ab0d2fSKirill A. Shutemov 				 * We clear the PTE but do not flush so potentially
1682869f7ee6SMatthew Wilcox (Oracle) 				 * a remote CPU could still be writing to the folio.
1683c7ab0d2fSKirill A. Shutemov 				 * If the entry was previously clean then the
1684c7ab0d2fSKirill A. Shutemov 				 * architecture must guarantee that a clear->dirty
1685c7ab0d2fSKirill A. Shutemov 				 * transition on a cached TLB entry is written through
1686c7ab0d2fSKirill A. Shutemov 				 * and traps if the PTE is unmapped.
168772b252aeSMel Gorman 				 */
1688785373b4SLinus Torvalds 				pteval = ptep_get_and_clear(mm, address, pvmw.pte);
168972b252aeSMel Gorman 
1690f73419bbSBarry Song 				set_tlb_ubc_flush_pending(mm, pteval, address);
169172b252aeSMel Gorman 			} else {
1692785373b4SLinus Torvalds 				pteval = ptep_clear_flush(vma, address, pvmw.pte);
169372b252aeSMel Gorman 			}
1694a00a8759SBaolin Wang 		}
16951da177e4SLinus Torvalds 
1696999dad82SPeter Xu 		/*
1697999dad82SPeter Xu 		 * Now the pte is cleared. If this pte was uffd-wp armed,
1698999dad82SPeter Xu 		 * we may want to replace a none pte with a marker pte if
1699999dad82SPeter Xu 		 * it's file-backed, so we don't lose the tracking info.
1700999dad82SPeter Xu 		 */
1701999dad82SPeter Xu 		pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
1702999dad82SPeter Xu 
1703869f7ee6SMatthew Wilcox (Oracle) 		/* Set the dirty flag on the folio now the pte is gone. */
17041da177e4SLinus Torvalds 		if (pte_dirty(pteval))
1705869f7ee6SMatthew Wilcox (Oracle) 			folio_mark_dirty(folio);
17061da177e4SLinus Torvalds 
1707365e9c87SHugh Dickins 		/* Update high watermark before we lower rss */
1708365e9c87SHugh Dickins 		update_hiwater_rss(mm);
1709365e9c87SHugh Dickins 
17106da6b1d4SNaoya Horiguchi 		if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) {
17115fd27b8eSPunit Agrawal 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
1712869f7ee6SMatthew Wilcox (Oracle) 			if (folio_test_hugetlb(folio)) {
1713869f7ee6SMatthew Wilcox (Oracle) 				hugetlb_count_sub(folio_nr_pages(folio), mm);
1714935d4f0cSRyan Roberts 				set_huge_pte_at(mm, address, pvmw.pte, pteval,
1715935d4f0cSRyan Roberts 						hsz);
17165d317b2bSNaoya Horiguchi 			} else {
1717869f7ee6SMatthew Wilcox (Oracle) 				dec_mm_counter(mm, mm_counter(&folio->page));
1718785373b4SLinus Torvalds 				set_pte_at(mm, address, pvmw.pte, pteval);
17195f24ae58SNaoya Horiguchi 			}
1720c7ab0d2fSKirill A. Shutemov 
1721bce73e48SChristian Borntraeger 		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
172245961722SKonstantin Weitz 			/*
172345961722SKonstantin Weitz 			 * The guest indicated that the page content is of no
172445961722SKonstantin Weitz 			 * interest anymore. Simply discard the pte, vmscan
172545961722SKonstantin Weitz 			 * will take care of the rest.
1726bce73e48SChristian Borntraeger 			 * A future reference will then fault in a new zero
1727bce73e48SChristian Borntraeger 			 * page. When userfaultfd is active, we must not drop
1728bce73e48SChristian Borntraeger 			 * this page though, as its main user (postcopy
1729bce73e48SChristian Borntraeger 			 * migration) will not expect userfaults on already
1730bce73e48SChristian Borntraeger 			 * copied pages.
173145961722SKonstantin Weitz 			 */
1732869f7ee6SMatthew Wilcox (Oracle) 			dec_mm_counter(mm, mm_counter(&folio->page));
1733869f7ee6SMatthew Wilcox (Oracle) 		} else if (folio_test_anon(folio)) {
1734cfeed8ffSDavid Hildenbrand 			swp_entry_t entry = page_swap_entry(subpage);
1735179ef71cSCyrill Gorcunov 			pte_t swp_pte;
17361da177e4SLinus Torvalds 			/*
17371da177e4SLinus Torvalds 			 * Store the swap location in the pte.
17381da177e4SLinus Torvalds 			 * See handle_pte_fault() ...
17391da177e4SLinus Torvalds 			 */
1740869f7ee6SMatthew Wilcox (Oracle) 			if (unlikely(folio_test_swapbacked(folio) !=
1741869f7ee6SMatthew Wilcox (Oracle) 					folio_test_swapcache(folio))) {
1742eb94a878SMinchan Kim 				WARN_ON_ONCE(1);
174383612a94SMinchan Kim 				ret = false;
1744eb94a878SMinchan Kim 				page_vma_mapped_walk_done(&pvmw);
1745eb94a878SMinchan Kim 				break;
1746eb94a878SMinchan Kim 			}
1747854e9ed0SMinchan Kim 
1748802a3a92SShaohua Li 			/* MADV_FREE page check */
1749869f7ee6SMatthew Wilcox (Oracle) 			if (!folio_test_swapbacked(folio)) {
17506c8e2a25SMauricio Faria de Oliveira 				int ref_count, map_count;
17516c8e2a25SMauricio Faria de Oliveira 
17526c8e2a25SMauricio Faria de Oliveira 				/*
17536c8e2a25SMauricio Faria de Oliveira 				 * Synchronize with gup_pte_range():
17546c8e2a25SMauricio Faria de Oliveira 				 * - clear PTE; barrier; read refcount
17556c8e2a25SMauricio Faria de Oliveira 				 * - inc refcount; barrier; read PTE
17566c8e2a25SMauricio Faria de Oliveira 				 */
17576c8e2a25SMauricio Faria de Oliveira 				smp_mb();
17586c8e2a25SMauricio Faria de Oliveira 
17596c8e2a25SMauricio Faria de Oliveira 				ref_count = folio_ref_count(folio);
17606c8e2a25SMauricio Faria de Oliveira 				map_count = folio_mapcount(folio);
17616c8e2a25SMauricio Faria de Oliveira 
17626c8e2a25SMauricio Faria de Oliveira 				/*
17636c8e2a25SMauricio Faria de Oliveira 				 * Order reads for page refcount and dirty flag
17646c8e2a25SMauricio Faria de Oliveira 				 * (see comments in __remove_mapping()).
17656c8e2a25SMauricio Faria de Oliveira 				 */
17666c8e2a25SMauricio Faria de Oliveira 				smp_rmb();
17676c8e2a25SMauricio Faria de Oliveira 
17686c8e2a25SMauricio Faria de Oliveira 				/*
17696c8e2a25SMauricio Faria de Oliveira 				 * The only page refs must be one from isolation
17706c8e2a25SMauricio Faria de Oliveira 				 * plus the rmap(s) (dropped by discard:).
17716c8e2a25SMauricio Faria de Oliveira 				 */
17726c8e2a25SMauricio Faria de Oliveira 				if (ref_count == 1 + map_count &&
17736c8e2a25SMauricio Faria de Oliveira 				    !folio_test_dirty(folio)) {
1774854e9ed0SMinchan Kim 					dec_mm_counter(mm, MM_ANONPAGES);
1775854e9ed0SMinchan Kim 					goto discard;
1776854e9ed0SMinchan Kim 				}
1777854e9ed0SMinchan Kim 
1778802a3a92SShaohua Li 				/*
1779869f7ee6SMatthew Wilcox (Oracle) 				 * If the folio was redirtied, it cannot be
1780802a3a92SShaohua Li 				 * discarded. Remap the page to page table.
1781802a3a92SShaohua Li 				 */
1782785373b4SLinus Torvalds 				set_pte_at(mm, address, pvmw.pte, pteval);
1783869f7ee6SMatthew Wilcox (Oracle) 				folio_set_swapbacked(folio);
1784e4b82222SMinchan Kim 				ret = false;
1785802a3a92SShaohua Li 				page_vma_mapped_walk_done(&pvmw);
1786802a3a92SShaohua Li 				break;
1787802a3a92SShaohua Li 			}
1788802a3a92SShaohua Li 
1789570a335bSHugh Dickins 			if (swap_duplicate(entry) < 0) {
1790785373b4SLinus Torvalds 				set_pte_at(mm, address, pvmw.pte, pteval);
1791e4b82222SMinchan Kim 				ret = false;
1792c7ab0d2fSKirill A. Shutemov 				page_vma_mapped_walk_done(&pvmw);
1793c7ab0d2fSKirill A. Shutemov 				break;
1794570a335bSHugh Dickins 			}
1795ca827d55SKhalid Aziz 			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1796322842eaSDavid Hildenbrand 				swap_free(entry);
1797ca827d55SKhalid Aziz 				set_pte_at(mm, address, pvmw.pte, pteval);
1798ca827d55SKhalid Aziz 				ret = false;
1799ca827d55SKhalid Aziz 				page_vma_mapped_walk_done(&pvmw);
1800ca827d55SKhalid Aziz 				break;
1801ca827d55SKhalid Aziz 			}
1802088b8aa5SDavid Hildenbrand 
1803088b8aa5SDavid Hildenbrand 			/* See page_try_share_anon_rmap(): clear PTE first. */
18046c287605SDavid Hildenbrand 			if (anon_exclusive &&
18056c287605SDavid Hildenbrand 			    page_try_share_anon_rmap(subpage)) {
18066c287605SDavid Hildenbrand 				swap_free(entry);
18076c287605SDavid Hildenbrand 				set_pte_at(mm, address, pvmw.pte, pteval);
18086c287605SDavid Hildenbrand 				ret = false;
18096c287605SDavid Hildenbrand 				page_vma_mapped_walk_done(&pvmw);
18106c287605SDavid Hildenbrand 				break;
18116c287605SDavid Hildenbrand 			}
18121da177e4SLinus Torvalds 			if (list_empty(&mm->mmlist)) {
18131da177e4SLinus Torvalds 				spin_lock(&mmlist_lock);
1814f412ac08SHugh Dickins 				if (list_empty(&mm->mmlist))
18151da177e4SLinus Torvalds 					list_add(&mm->mmlist, &init_mm.mmlist);
18161da177e4SLinus Torvalds 				spin_unlock(&mmlist_lock);
18171da177e4SLinus Torvalds 			}
1818d559db08SKAMEZAWA Hiroyuki 			dec_mm_counter(mm, MM_ANONPAGES);
1819b084d435SKAMEZAWA Hiroyuki 			inc_mm_counter(mm, MM_SWAPENTS);
1820179ef71cSCyrill Gorcunov 			swp_pte = swp_entry_to_pte(entry);
18211493a191SDavid Hildenbrand 			if (anon_exclusive)
18221493a191SDavid Hildenbrand 				swp_pte = pte_swp_mkexclusive(swp_pte);
1823179ef71cSCyrill Gorcunov 			if (pte_soft_dirty(pteval))
1824179ef71cSCyrill Gorcunov 				swp_pte = pte_swp_mksoft_dirty(swp_pte);
1825f45ec5ffSPeter Xu 			if (pte_uffd_wp(pteval))
1826f45ec5ffSPeter Xu 				swp_pte = pte_swp_mkuffd_wp(swp_pte);
1827785373b4SLinus Torvalds 			set_pte_at(mm, address, pvmw.pte, swp_pte);
18280f10851eSJérôme Glisse 		} else {
18290f10851eSJérôme Glisse 			/*
1830869f7ee6SMatthew Wilcox (Oracle) 			 * This is a locked file-backed folio,
1831869f7ee6SMatthew Wilcox (Oracle) 			 * so it cannot be removed from the page
1832869f7ee6SMatthew Wilcox (Oracle) 			 * cache and replaced by a new folio before
1833869f7ee6SMatthew Wilcox (Oracle) 			 * mmu_notifier_invalidate_range_end, so no
1834869f7ee6SMatthew Wilcox (Oracle) 			 * concurrent thread might update its page table
1835869f7ee6SMatthew Wilcox (Oracle) 			 * to point at a new folio while a device is
1836869f7ee6SMatthew Wilcox (Oracle) 			 * still using this folio.
18370f10851eSJérôme Glisse 			 *
1838ee65728eSMike Rapoport 			 * See Documentation/mm/mmu_notifier.rst
18390f10851eSJérôme Glisse 			 */
1840869f7ee6SMatthew Wilcox (Oracle) 			dec_mm_counter(mm, mm_counter_file(&folio->page));
18410f10851eSJérôme Glisse 		}
18420f10851eSJérôme Glisse discard:
1843869f7ee6SMatthew Wilcox (Oracle) 		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
1844b7435507SHugh Dickins 		if (vma->vm_flags & VM_LOCKED)
184596f97c43SLorenzo Stoakes 			mlock_drain_local();
1846869f7ee6SMatthew Wilcox (Oracle) 		folio_put(folio);
1847c7ab0d2fSKirill A. Shutemov 	}
1848369ea824SJérôme Glisse 
1849ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
1850369ea824SJérôme Glisse 
1851caed0f48SKOSAKI Motohiro 	return ret;
18521da177e4SLinus Torvalds }
18531da177e4SLinus Torvalds 
185452629506SJoonsoo Kim static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
185552629506SJoonsoo Kim {
1856222100eeSAnshuman Khandual 	return vma_is_temporary_stack(vma);
185752629506SJoonsoo Kim }
185852629506SJoonsoo Kim 
1859f3ad032cSKefeng Wang static int folio_not_mapped(struct folio *folio)
186052629506SJoonsoo Kim {
18612f031c6fSMatthew Wilcox (Oracle) 	return !folio_mapped(folio);
18622a52bcbcSKirill A. Shutemov }
186352629506SJoonsoo Kim 
18641da177e4SLinus Torvalds /**
1865869f7ee6SMatthew Wilcox (Oracle)  * try_to_unmap - Try to remove all page table mappings to a folio.
1866869f7ee6SMatthew Wilcox (Oracle)  * @folio: The folio to unmap.
186714fa31b8SAndi Kleen  * @flags: action and flags
18681da177e4SLinus Torvalds  *
18691da177e4SLinus Torvalds  * Tries to remove all the page table entries which are mapping this
1870869f7ee6SMatthew Wilcox (Oracle)  * folio.  It is the caller's responsibility to check if the folio is
1871869f7ee6SMatthew Wilcox (Oracle)  * still mapped if needed (use TTU_SYNC to prevent accounting races).
18721da177e4SLinus Torvalds  *
1873869f7ee6SMatthew Wilcox (Oracle)  * Context: Caller must hold the folio lock.
18741da177e4SLinus Torvalds  */
1875869f7ee6SMatthew Wilcox (Oracle) void try_to_unmap(struct folio *folio, enum ttu_flags flags)
18761da177e4SLinus Torvalds {
187752629506SJoonsoo Kim 	struct rmap_walk_control rwc = {
187852629506SJoonsoo Kim 		.rmap_one = try_to_unmap_one,
1879802a3a92SShaohua Li 		.arg = (void *)flags,
1880f3ad032cSKefeng Wang 		.done = folio_not_mapped,
18812f031c6fSMatthew Wilcox (Oracle) 		.anon_lock = folio_lock_anon_vma_read,
188252629506SJoonsoo Kim 	};
18831da177e4SLinus Torvalds 
1884a98a2f0cSAlistair Popple 	if (flags & TTU_RMAP_LOCKED)
18852f031c6fSMatthew Wilcox (Oracle) 		rmap_walk_locked(folio, &rwc);
1886a98a2f0cSAlistair Popple 	else
18872f031c6fSMatthew Wilcox (Oracle) 		rmap_walk(folio, &rwc);
1888a98a2f0cSAlistair Popple }
1889a98a2f0cSAlistair Popple 
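/*
 * Illustrative sketch, not part of rmap.c: how a reclaim-style caller
 * might use try_to_unmap() and then re-check the result, since the
 * function itself returns void.  The folio must be locked by the caller;
 * the helper name is hypothetical.
 */
static bool example_unmap_folio(struct folio *folio)
{
	if (folio_mapped(folio))
		try_to_unmap(folio, TTU_SYNC);

	return !folio_mapped(folio);
}
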
1890a98a2f0cSAlistair Popple /*
1891a98a2f0cSAlistair Popple  * @arg: enum ttu_flags will be passed to this argument.
1892a98a2f0cSAlistair Popple  *
1893a98a2f0cSAlistair Popple  * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
189464b586d1SHugh Dickins  * containing migration entries.
1895a98a2f0cSAlistair Popple  */
18962f031c6fSMatthew Wilcox (Oracle) static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
1897a98a2f0cSAlistair Popple 		     unsigned long address, void *arg)
1898a98a2f0cSAlistair Popple {
1899a98a2f0cSAlistair Popple 	struct mm_struct *mm = vma->vm_mm;
19004b8554c5SMatthew Wilcox (Oracle) 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1901a98a2f0cSAlistair Popple 	pte_t pteval;
1902a98a2f0cSAlistair Popple 	struct page *subpage;
19036c287605SDavid Hildenbrand 	bool anon_exclusive, ret = true;
1904a98a2f0cSAlistair Popple 	struct mmu_notifier_range range;
1905a98a2f0cSAlistair Popple 	enum ttu_flags flags = (enum ttu_flags)(long)arg;
1906c33c7948SRyan Roberts 	unsigned long pfn;
1907935d4f0cSRyan Roberts 	unsigned long hsz = 0;
1908a98a2f0cSAlistair Popple 
1909a98a2f0cSAlistair Popple 	/*
1910a98a2f0cSAlistair Popple 	 * When racing against e.g. zap_pte_range() on another cpu,
1911a98a2f0cSAlistair Popple 	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
1912a98a2f0cSAlistair Popple 	 * try_to_migrate() may return before page_mapped() has become false,
1913a98a2f0cSAlistair Popple 	 * if page table locking is skipped: use TTU_SYNC to wait for that.
1914a98a2f0cSAlistair Popple 	 */
1915a98a2f0cSAlistair Popple 	if (flags & TTU_SYNC)
1916a98a2f0cSAlistair Popple 		pvmw.flags = PVMW_SYNC;
1917a98a2f0cSAlistair Popple 
1918a98a2f0cSAlistair Popple 	/*
1919a98a2f0cSAlistair Popple 	 * unmap_page() in mm/huge_memory.c is the only user of migration with
1920a98a2f0cSAlistair Popple 	 * TTU_SPLIT_HUGE_PMD and it wants to freeze.
1921a98a2f0cSAlistair Popple 	 */
1922a98a2f0cSAlistair Popple 	if (flags & TTU_SPLIT_HUGE_PMD)
1923af28a988SMatthew Wilcox (Oracle) 		split_huge_pmd_address(vma, address, true, folio);
1924a98a2f0cSAlistair Popple 
1925a98a2f0cSAlistair Popple 	/*
1926a98a2f0cSAlistair Popple 	 * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
1927a98a2f0cSAlistair Popple 	 * For hugetlb, it could be much worse if we need to do pud
1928a98a2f0cSAlistair Popple 	 * invalidation in the case of pmd sharing.
1929a98a2f0cSAlistair Popple 	 *
1930a98a2f0cSAlistair Popple 	 * Note that the page cannot be freed in this function, as the caller
1931a98a2f0cSAlistair Popple 	 * of try_to_unmap() must hold a reference on the page.
1932a98a2f0cSAlistair Popple 	 */
19332aff7a47SMatthew Wilcox (Oracle) 	range.end = vma_address_end(&pvmw);
19347d4a8be0SAlistair Popple 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
1935a98a2f0cSAlistair Popple 				address, range.end);
19364b8554c5SMatthew Wilcox (Oracle) 	if (folio_test_hugetlb(folio)) {
1937a98a2f0cSAlistair Popple 		/*
1938a98a2f0cSAlistair Popple 		 * If sharing is possible, start and end will be adjusted
1939a98a2f0cSAlistair Popple 		 * accordingly.
1940a98a2f0cSAlistair Popple 		 */
1941a98a2f0cSAlistair Popple 		adjust_range_if_pmd_sharing_possible(vma, &range.start,
1942a98a2f0cSAlistair Popple 						     &range.end);
1943935d4f0cSRyan Roberts 
1944935d4f0cSRyan Roberts 		/* We need the huge page size for set_huge_pte_at() */
1945935d4f0cSRyan Roberts 		hsz = huge_page_size(hstate_vma(vma));
1946a98a2f0cSAlistair Popple 	}
1947a98a2f0cSAlistair Popple 	mmu_notifier_invalidate_range_start(&range);
1948a98a2f0cSAlistair Popple 
1949a98a2f0cSAlistair Popple 	while (page_vma_mapped_walk(&pvmw)) {
1950a98a2f0cSAlistair Popple #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1951a98a2f0cSAlistair Popple 		/* PMD-mapped THP migration entry */
1952a98a2f0cSAlistair Popple 		if (!pvmw.pte) {
19534b8554c5SMatthew Wilcox (Oracle) 			subpage = folio_page(folio,
19544b8554c5SMatthew Wilcox (Oracle) 				pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
19554b8554c5SMatthew Wilcox (Oracle) 			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
19564b8554c5SMatthew Wilcox (Oracle) 					!folio_test_pmd_mappable(folio), folio);
1957a98a2f0cSAlistair Popple 
19587f5abe60SDavid Hildenbrand 			if (set_pmd_migration_entry(&pvmw, subpage)) {
19597f5abe60SDavid Hildenbrand 				ret = false;
19607f5abe60SDavid Hildenbrand 				page_vma_mapped_walk_done(&pvmw);
19617f5abe60SDavid Hildenbrand 				break;
19627f5abe60SDavid Hildenbrand 			}
1963a98a2f0cSAlistair Popple 			continue;
1964a98a2f0cSAlistair Popple 		}
1965a98a2f0cSAlistair Popple #endif
1966a98a2f0cSAlistair Popple 
1967a98a2f0cSAlistair Popple 		/* Unexpected PMD-mapped THP? */
19684b8554c5SMatthew Wilcox (Oracle) 		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
1969a98a2f0cSAlistair Popple 
1970c33c7948SRyan Roberts 		pfn = pte_pfn(ptep_get(pvmw.pte));
1971c33c7948SRyan Roberts 
19721118234eSDavid Hildenbrand 		if (folio_is_zone_device(folio)) {
19731118234eSDavid Hildenbrand 			/*
19741118234eSDavid Hildenbrand 			 * Our PTE is a non-present device exclusive entry and
19751118234eSDavid Hildenbrand 			 * calculating the subpage as for the common case would
19761118234eSDavid Hildenbrand 			 * result in an invalid pointer.
19771118234eSDavid Hildenbrand 			 *
19781118234eSDavid Hildenbrand 			 * Since only PAGE_SIZE pages can currently be
19791118234eSDavid Hildenbrand 			 * migrated, just set it to page. This will need to be
19801118234eSDavid Hildenbrand 			 * changed when hugepage migrations to device private
19811118234eSDavid Hildenbrand 			 * memory are supported.
19821118234eSDavid Hildenbrand 			 */
19831118234eSDavid Hildenbrand 			VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio);
19841118234eSDavid Hildenbrand 			subpage = &folio->page;
19851118234eSDavid Hildenbrand 		} else {
1986c33c7948SRyan Roberts 			subpage = folio_page(folio, pfn - folio_pfn(folio));
19871118234eSDavid Hildenbrand 		}
1988a98a2f0cSAlistair Popple 		address = pvmw.address;
19896c287605SDavid Hildenbrand 		anon_exclusive = folio_test_anon(folio) &&
19906c287605SDavid Hildenbrand 				 PageAnonExclusive(subpage);
1991a98a2f0cSAlistair Popple 
1992dfc7ab57SBaolin Wang 		if (folio_test_hugetlb(folio)) {
19930506c31dSBaolin Wang 			bool anon = folio_test_anon(folio);
19940506c31dSBaolin Wang 
1995a98a2f0cSAlistair Popple 			/*
199654205e9cSBaolin Wang 			 * huge_pmd_unshare may unmap an entire PMD page.
199754205e9cSBaolin Wang 			 * There is no way of knowing exactly which PMDs may
199854205e9cSBaolin Wang 			 * be cached for this mm, so we must flush them all.
199954205e9cSBaolin Wang 			 * start/end were already adjusted above to cover this
200054205e9cSBaolin Wang 			 * range.
2001a98a2f0cSAlistair Popple 			 */
2002a98a2f0cSAlistair Popple 			flush_cache_range(vma, range.start, range.end);
200354205e9cSBaolin Wang 
2004dfc7ab57SBaolin Wang 			/*
2005dfc7ab57SBaolin Wang 			 * To call huge_pmd_unshare, i_mmap_rwsem must be
2006dfc7ab57SBaolin Wang 			 * held in write mode.  Caller needs to explicitly
2007dfc7ab57SBaolin Wang 			 * do this outside rmap routines.
200840549ba8SMike Kravetz 			 *
200940549ba8SMike Kravetz 			 * We also must hold hugetlb vma_lock in write mode.
201040549ba8SMike Kravetz 			 * Lock order dictates acquiring vma_lock BEFORE
201140549ba8SMike Kravetz 			 * i_mmap_rwsem.  We can only try lock here and
201240549ba8SMike Kravetz 			 * fail if unsuccessful.
2013dfc7ab57SBaolin Wang 			 */
201440549ba8SMike Kravetz 			if (!anon) {
201540549ba8SMike Kravetz 				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
201640549ba8SMike Kravetz 				if (!hugetlb_vma_trylock_write(vma)) {
201740549ba8SMike Kravetz 					page_vma_mapped_walk_done(&pvmw);
201840549ba8SMike Kravetz 					ret = false;
201940549ba8SMike Kravetz 					break;
202040549ba8SMike Kravetz 				}
202140549ba8SMike Kravetz 				if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
202240549ba8SMike Kravetz 					hugetlb_vma_unlock_write(vma);
202340549ba8SMike Kravetz 					flush_tlb_range(vma,
202440549ba8SMike Kravetz 						range.start, range.end);
2025a98a2f0cSAlistair Popple 
2026a98a2f0cSAlistair Popple 					/*
202740549ba8SMike Kravetz 					 * The ref count of the PMD page was
202840549ba8SMike Kravetz 					 * dropped, which is part of the way map
202940549ba8SMike Kravetz 					 * counting is done for shared PMDs.
203040549ba8SMike Kravetz 					 * Return 'true' here.  When there is
203140549ba8SMike Kravetz 					 * no other sharing, huge_pmd_unshare
203240549ba8SMike Kravetz 					 * returns false and we will unmap the
203340549ba8SMike Kravetz 					 * actual page and drop the map count
2034a98a2f0cSAlistair Popple 					 * to zero.
2035a98a2f0cSAlistair Popple 					 */
2036a98a2f0cSAlistair Popple 					page_vma_mapped_walk_done(&pvmw);
2037a98a2f0cSAlistair Popple 					break;
2038a98a2f0cSAlistair Popple 				}
203940549ba8SMike Kravetz 				hugetlb_vma_unlock_write(vma);
204040549ba8SMike Kravetz 			}
20415d4af619SBaolin Wang 			/* Nuke the hugetlb page table entry */
20425d4af619SBaolin Wang 			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
204354205e9cSBaolin Wang 		} else {
2044c33c7948SRyan Roberts 			flush_cache_page(vma, address, pfn);
2045a98a2f0cSAlistair Popple 			/* Nuke the page table entry. */
20467e12beb8SHuang Ying 			if (should_defer_flush(mm, flags)) {
20477e12beb8SHuang Ying 				/*
20487e12beb8SHuang Ying 				 * We clear the PTE but do not flush, so potentially
20497e12beb8SHuang Ying 				 * a remote CPU could still be writing to the folio.
20507e12beb8SHuang Ying 				 * If the entry was previously clean then the
20517e12beb8SHuang Ying 				 * architecture must guarantee that a clear->dirty
20527e12beb8SHuang Ying 				 * transition on a cached TLB entry is written through
20537e12beb8SHuang Ying 				 * and traps if the PTE is unmapped.
20547e12beb8SHuang Ying 				 */
20557e12beb8SHuang Ying 				pteval = ptep_get_and_clear(mm, address, pvmw.pte);
20567e12beb8SHuang Ying 
2057f73419bbSBarry Song 				set_tlb_ubc_flush_pending(mm, pteval, address);
20587e12beb8SHuang Ying 			} else {
2059a98a2f0cSAlistair Popple 				pteval = ptep_clear_flush(vma, address, pvmw.pte);
20605d4af619SBaolin Wang 			}
20617e12beb8SHuang Ying 		}
2062a98a2f0cSAlistair Popple 
20634b8554c5SMatthew Wilcox (Oracle) 		/* Set the dirty flag on the folio now the pte is gone. */
2064a98a2f0cSAlistair Popple 		if (pte_dirty(pteval))
20654b8554c5SMatthew Wilcox (Oracle) 			folio_mark_dirty(folio);
2066a98a2f0cSAlistair Popple 
2067a98a2f0cSAlistair Popple 		/* Update high watermark before we lower rss */
2068a98a2f0cSAlistair Popple 		update_hiwater_rss(mm);
2069a98a2f0cSAlistair Popple 
2070f25cbb7aSAlex Sierra 		if (folio_is_device_private(folio)) {
20714b8554c5SMatthew Wilcox (Oracle) 			unsigned long pfn = folio_pfn(folio);
2072a98a2f0cSAlistair Popple 			swp_entry_t entry;
2073a98a2f0cSAlistair Popple 			pte_t swp_pte;
2074a98a2f0cSAlistair Popple 
20756c287605SDavid Hildenbrand 			if (anon_exclusive)
20766c287605SDavid Hildenbrand 				BUG_ON(page_try_share_anon_rmap(subpage));
20776c287605SDavid Hildenbrand 
2078a98a2f0cSAlistair Popple 			/*
2079a98a2f0cSAlistair Popple 			 * Store the pfn of the page in a special migration
2080a98a2f0cSAlistair Popple 			 * pte. do_swap_page() will wait until the migration
2081a98a2f0cSAlistair Popple 			 * pte is removed and then restart fault handling.
2082a98a2f0cSAlistair Popple 			 */
20833d88705cSAlistair Popple 			entry = pte_to_swp_entry(pteval);
20843d88705cSAlistair Popple 			if (is_writable_device_private_entry(entry))
20853d88705cSAlistair Popple 				entry = make_writable_migration_entry(pfn);
20866c287605SDavid Hildenbrand 			else if (anon_exclusive)
20876c287605SDavid Hildenbrand 				entry = make_readable_exclusive_migration_entry(pfn);
20883d88705cSAlistair Popple 			else
20893d88705cSAlistair Popple 				entry = make_readable_migration_entry(pfn);
2090a98a2f0cSAlistair Popple 			swp_pte = swp_entry_to_pte(entry);
2091a98a2f0cSAlistair Popple 
2092a98a2f0cSAlistair Popple 			/*
2093a98a2f0cSAlistair Popple 			 * pteval maps a zone device page and is therefore
2094a98a2f0cSAlistair Popple 			 * a swap pte.
2095a98a2f0cSAlistair Popple 			 */
2096a98a2f0cSAlistair Popple 			if (pte_swp_soft_dirty(pteval))
2097a98a2f0cSAlistair Popple 				swp_pte = pte_swp_mksoft_dirty(swp_pte);
2098a98a2f0cSAlistair Popple 			if (pte_swp_uffd_wp(pteval))
2099a98a2f0cSAlistair Popple 				swp_pte = pte_swp_mkuffd_wp(swp_pte);
2100a98a2f0cSAlistair Popple 			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
21014cc79b33SAnshuman Khandual 			trace_set_migration_pte(pvmw.address, pte_val(swp_pte),
21024cc79b33SAnshuman Khandual 						compound_order(&folio->page));
2103a98a2f0cSAlistair Popple 			/*
2104a98a2f0cSAlistair Popple 			 * No need to invalidate here; it will synchronize
2105a98a2f0cSAlistair Popple 			 * against the special swap migration pte.
2106a98a2f0cSAlistair Popple 			 */
2107da358d5cSMatthew Wilcox (Oracle) 		} else if (PageHWPoison(subpage)) {
2108a98a2f0cSAlistair Popple 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
21094b8554c5SMatthew Wilcox (Oracle) 			if (folio_test_hugetlb(folio)) {
21104b8554c5SMatthew Wilcox (Oracle) 				hugetlb_count_sub(folio_nr_pages(folio), mm);
2111935d4f0cSRyan Roberts 				set_huge_pte_at(mm, address, pvmw.pte, pteval,
2112935d4f0cSRyan Roberts 						hsz);
2113a98a2f0cSAlistair Popple 			} else {
21144b8554c5SMatthew Wilcox (Oracle) 				dec_mm_counter(mm, mm_counter(&folio->page));
2115a98a2f0cSAlistair Popple 				set_pte_at(mm, address, pvmw.pte, pteval);
2116a98a2f0cSAlistair Popple 			}
2117a98a2f0cSAlistair Popple 
2118a98a2f0cSAlistair Popple 		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
2119a98a2f0cSAlistair Popple 			/*
2120a98a2f0cSAlistair Popple 			 * The guest indicated that the page content is of no
2121a98a2f0cSAlistair Popple 			 * interest anymore. Simply discard the pte, vmscan
2122a98a2f0cSAlistair Popple 			 * will take care of the rest.
2123a98a2f0cSAlistair Popple 			 * A future reference will then fault in a new zero
2124a98a2f0cSAlistair Popple 			 * page. When userfaultfd is active, we must not drop
2125a98a2f0cSAlistair Popple 			 * this page though, as its main user (postcopy
2126a98a2f0cSAlistair Popple 			 * migration) will not expect userfaults on already
2127a98a2f0cSAlistair Popple 			 * copied pages.
2128a98a2f0cSAlistair Popple 			 */
21294b8554c5SMatthew Wilcox (Oracle) 			dec_mm_counter(mm, mm_counter(&folio->page));
2130a98a2f0cSAlistair Popple 		} else {
2131a98a2f0cSAlistair Popple 			swp_entry_t entry;
2132a98a2f0cSAlistair Popple 			pte_t swp_pte;
2133a98a2f0cSAlistair Popple 
2134a98a2f0cSAlistair Popple 			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
21355d4af619SBaolin Wang 				if (folio_test_hugetlb(folio))
2136935d4f0cSRyan Roberts 					set_huge_pte_at(mm, address, pvmw.pte,
2137935d4f0cSRyan Roberts 							pteval, hsz);
21385d4af619SBaolin Wang 				else
2139a98a2f0cSAlistair Popple 					set_pte_at(mm, address, pvmw.pte, pteval);
2140a98a2f0cSAlistair Popple 				ret = false;
2141a98a2f0cSAlistair Popple 				page_vma_mapped_walk_done(&pvmw);
2142a98a2f0cSAlistair Popple 				break;
2143a98a2f0cSAlistair Popple 			}
21446c287605SDavid Hildenbrand 			VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) &&
21456c287605SDavid Hildenbrand 				       !anon_exclusive, subpage);
2146088b8aa5SDavid Hildenbrand 
2147088b8aa5SDavid Hildenbrand 			/* See page_try_share_anon_rmap(): clear PTE first. */
21486c287605SDavid Hildenbrand 			if (anon_exclusive &&
21496c287605SDavid Hildenbrand 			    page_try_share_anon_rmap(subpage)) {
21505d4af619SBaolin Wang 				if (folio_test_hugetlb(folio))
2151935d4f0cSRyan Roberts 					set_huge_pte_at(mm, address, pvmw.pte,
2152935d4f0cSRyan Roberts 							pteval, hsz);
21535d4af619SBaolin Wang 				else
21546c287605SDavid Hildenbrand 					set_pte_at(mm, address, pvmw.pte, pteval);
21556c287605SDavid Hildenbrand 				ret = false;
21566c287605SDavid Hildenbrand 				page_vma_mapped_walk_done(&pvmw);
21576c287605SDavid Hildenbrand 				break;
21586c287605SDavid Hildenbrand 			}
2159a98a2f0cSAlistair Popple 
2160a98a2f0cSAlistair Popple 			/*
2161a98a2f0cSAlistair Popple 			 * Store the pfn of the page in a special migration
2162a98a2f0cSAlistair Popple 			 * pte. do_swap_page() will wait until the migration
2163a98a2f0cSAlistair Popple 			 * pte is removed and then restart fault handling.
2164a98a2f0cSAlistair Popple 			 */
2165a98a2f0cSAlistair Popple 			if (pte_write(pteval))
2166a98a2f0cSAlistair Popple 				entry = make_writable_migration_entry(
2167a98a2f0cSAlistair Popple 							page_to_pfn(subpage));
21686c287605SDavid Hildenbrand 			else if (anon_exclusive)
21696c287605SDavid Hildenbrand 				entry = make_readable_exclusive_migration_entry(
21706c287605SDavid Hildenbrand 							page_to_pfn(subpage));
2171a98a2f0cSAlistair Popple 			else
2172a98a2f0cSAlistair Popple 				entry = make_readable_migration_entry(
2173a98a2f0cSAlistair Popple 							page_to_pfn(subpage));
21742e346877SPeter Xu 			if (pte_young(pteval))
21752e346877SPeter Xu 				entry = make_migration_entry_young(entry);
21762e346877SPeter Xu 			if (pte_dirty(pteval))
21772e346877SPeter Xu 				entry = make_migration_entry_dirty(entry);
2178a98a2f0cSAlistair Popple 			swp_pte = swp_entry_to_pte(entry);
2179a98a2f0cSAlistair Popple 			if (pte_soft_dirty(pteval))
2180a98a2f0cSAlistair Popple 				swp_pte = pte_swp_mksoft_dirty(swp_pte);
2181a98a2f0cSAlistair Popple 			if (pte_uffd_wp(pteval))
2182a98a2f0cSAlistair Popple 				swp_pte = pte_swp_mkuffd_wp(swp_pte);
21835d4af619SBaolin Wang 			if (folio_test_hugetlb(folio))
2184935d4f0cSRyan Roberts 				set_huge_pte_at(mm, address, pvmw.pte, swp_pte,
2185935d4f0cSRyan Roberts 						hsz);
21865d4af619SBaolin Wang 			else
2187a98a2f0cSAlistair Popple 				set_pte_at(mm, address, pvmw.pte, swp_pte);
21884cc79b33SAnshuman Khandual 			trace_set_migration_pte(address, pte_val(swp_pte),
21894cc79b33SAnshuman Khandual 						compound_order(&folio->page));
2190a98a2f0cSAlistair Popple 			/*
2191a98a2f0cSAlistair Popple 			 * No need to invalidate here; it will synchronize
2192a98a2f0cSAlistair Popple 			 * against the special swap migration pte.
2193a98a2f0cSAlistair Popple 			 */
2194a98a2f0cSAlistair Popple 		}
2195a98a2f0cSAlistair Popple 
21964b8554c5SMatthew Wilcox (Oracle) 		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
2197b7435507SHugh Dickins 		if (vma->vm_flags & VM_LOCKED)
219896f97c43SLorenzo Stoakes 			mlock_drain_local();
21994b8554c5SMatthew Wilcox (Oracle) 		folio_put(folio);
2200a98a2f0cSAlistair Popple 	}
2201a98a2f0cSAlistair Popple 
2202a98a2f0cSAlistair Popple 	mmu_notifier_invalidate_range_end(&range);
2203a98a2f0cSAlistair Popple 
2204a98a2f0cSAlistair Popple 	return ret;
2205a98a2f0cSAlistair Popple }
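
/*
 * Illustrative sketch (names ending in _sketch are made up for the example,
 * this is not an upstream helper): a condensed view of how try_to_migrate_one()
 * above turns a present pte into a migration swap pte for the read-only case,
 * preserving the young/dirty/soft-dirty/uffd-wp state. "pfn" and "old_pte" are
 * assumed to be supplied by the caller.
 */
static pte_t migration_swp_pte_sketch(unsigned long pfn, pte_t old_pte)
{
	swp_entry_t entry = make_readable_migration_entry(pfn);
	pte_t swp_pte;

	if (pte_young(old_pte))
		entry = make_migration_entry_young(entry);	/* keep accessed bit */
	if (pte_dirty(old_pte))
		entry = make_migration_entry_dirty(entry);	/* keep dirty bit */
	swp_pte = swp_entry_to_pte(entry);
	if (pte_soft_dirty(old_pte))
		swp_pte = pte_swp_mksoft_dirty(swp_pte);	/* carry soft-dirty */
	if (pte_uffd_wp(old_pte))
		swp_pte = pte_swp_mkuffd_wp(swp_pte);		/* carry uffd-wp */
	return swp_pte;
}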
2206a98a2f0cSAlistair Popple 
2207a98a2f0cSAlistair Popple /**
2208a98a2f0cSAlistair Popple  * try_to_migrate - try to replace all page table mappings with swap entries
22094b8554c5SMatthew Wilcox (Oracle)  * @folio: the folio to replace page table entries for
2210a98a2f0cSAlistair Popple  * @flags: action and flags
2211a98a2f0cSAlistair Popple  *
22124b8554c5SMatthew Wilcox (Oracle)  * Tries to remove all the page table entries which are mapping this folio and
22134b8554c5SMatthew Wilcox (Oracle)  * replace them with special swap entries. Caller must hold the folio lock.
2214a98a2f0cSAlistair Popple  */
22154b8554c5SMatthew Wilcox (Oracle) void try_to_migrate(struct folio *folio, enum ttu_flags flags)
2216a98a2f0cSAlistair Popple {
2217a98a2f0cSAlistair Popple 	struct rmap_walk_control rwc = {
2218a98a2f0cSAlistair Popple 		.rmap_one = try_to_migrate_one,
2219a98a2f0cSAlistair Popple 		.arg = (void *)flags,
2220f3ad032cSKefeng Wang 		.done = folio_not_mapped,
22212f031c6fSMatthew Wilcox (Oracle) 		.anon_lock = folio_lock_anon_vma_read,
2222a98a2f0cSAlistair Popple 	};
2223a98a2f0cSAlistair Popple 
2224a98a2f0cSAlistair Popple 	/*
2225a98a2f0cSAlistair Popple 	 * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED,
22267e12beb8SHuang Ying 	 * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags.
2227a98a2f0cSAlistair Popple 	 */
2228a98a2f0cSAlistair Popple 	if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
22297e12beb8SHuang Ying 					TTU_SYNC | TTU_BATCH_FLUSH)))
2230a98a2f0cSAlistair Popple 		return;
2231a98a2f0cSAlistair Popple 
2232f25cbb7aSAlex Sierra 	if (folio_is_zone_device(folio) &&
2233f25cbb7aSAlex Sierra 	    (!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
22346c855fceSHugh Dickins 		return;
22356c855fceSHugh Dickins 
223652629506SJoonsoo Kim 	/*
223752629506SJoonsoo Kim 	 * During exec, a temporary VMA is setup and later moved.
223852629506SJoonsoo Kim 	 * The VMA is moved under the anon_vma lock but not the
223952629506SJoonsoo Kim 	 * page tables leading to a race where migration cannot
224052629506SJoonsoo Kim 	 * find the migration ptes. Rather than increasing the
224152629506SJoonsoo Kim 	 * locking requirements of exec(), migration skips
224252629506SJoonsoo Kim 	 * temporary VMAs until after exec() completes.
224352629506SJoonsoo Kim 	 */
22444b8554c5SMatthew Wilcox (Oracle) 	if (!folio_test_ksm(folio) && folio_test_anon(folio))
224552629506SJoonsoo Kim 		rwc.invalid_vma = invalid_migration_vma;
224652629506SJoonsoo Kim 
22472a52bcbcSKirill A. Shutemov 	if (flags & TTU_RMAP_LOCKED)
22482f031c6fSMatthew Wilcox (Oracle) 		rmap_walk_locked(folio, &rwc);
22492a52bcbcSKirill A. Shutemov 	else
22502f031c6fSMatthew Wilcox (Oracle) 		rmap_walk(folio, &rwc);
2251b291f000SNick Piggin }
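
/*
 * Illustrative sketch (the _sketch name is made up, this is not how the
 * migration core is literally written): one plausible way a caller might
 * drive try_to_migrate() on an isolated, referenced folio. The
 * TTU_BATCH_FLUSH / try_to_unmap_flush() pairing mirrors the batched TLB
 * flush handling used above.
 */
static bool unmap_folio_for_migration_sketch(struct folio *folio)
{
	bool unmapped;

	if (!folio_trylock(folio))
		return false;
	if (folio_mapped(folio))
		try_to_migrate(folio, TTU_BATCH_FLUSH);
	try_to_unmap_flush();			/* drain any deferred TLB flushes */
	unmapped = !folio_mapped(folio);	/* all ptes now migration entries? */
	folio_unlock(folio);
	return unmapped;
}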
2252e9995ef9SHugh Dickins 
2253b756a3b5SAlistair Popple #ifdef CONFIG_DEVICE_PRIVATE
2254b756a3b5SAlistair Popple struct make_exclusive_args {
2255b756a3b5SAlistair Popple 	struct mm_struct *mm;
2256b756a3b5SAlistair Popple 	unsigned long address;
2257b756a3b5SAlistair Popple 	void *owner;
2258b756a3b5SAlistair Popple 	bool valid;
2259b756a3b5SAlistair Popple };
2260b756a3b5SAlistair Popple 
22612f031c6fSMatthew Wilcox (Oracle) static bool page_make_device_exclusive_one(struct folio *folio,
2262b756a3b5SAlistair Popple 		struct vm_area_struct *vma, unsigned long address, void *priv)
2263b756a3b5SAlistair Popple {
2264b756a3b5SAlistair Popple 	struct mm_struct *mm = vma->vm_mm;
22650d251485SMatthew Wilcox (Oracle) 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
2266b756a3b5SAlistair Popple 	struct make_exclusive_args *args = priv;
2267b756a3b5SAlistair Popple 	pte_t pteval;
2268b756a3b5SAlistair Popple 	struct page *subpage;
2269b756a3b5SAlistair Popple 	bool ret = true;
2270b756a3b5SAlistair Popple 	struct mmu_notifier_range range;
2271b756a3b5SAlistair Popple 	swp_entry_t entry;
2272b756a3b5SAlistair Popple 	pte_t swp_pte;
2273c33c7948SRyan Roberts 	pte_t ptent;
2274b756a3b5SAlistair Popple 
22757d4a8be0SAlistair Popple 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
2276b756a3b5SAlistair Popple 				      vma->vm_mm, address, min(vma->vm_end,
22770d251485SMatthew Wilcox (Oracle) 				      address + folio_size(folio)),
22780d251485SMatthew Wilcox (Oracle) 				      args->owner);
2279b756a3b5SAlistair Popple 	mmu_notifier_invalidate_range_start(&range);
2280b756a3b5SAlistair Popple 
2281b756a3b5SAlistair Popple 	while (page_vma_mapped_walk(&pvmw)) {
2282b756a3b5SAlistair Popple 		/* Unexpected PMD-mapped THP? */
22830d251485SMatthew Wilcox (Oracle) 		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2284b756a3b5SAlistair Popple 
2285c33c7948SRyan Roberts 		ptent = ptep_get(pvmw.pte);
2286c33c7948SRyan Roberts 		if (!pte_present(ptent)) {
2287b756a3b5SAlistair Popple 			ret = false;
2288b756a3b5SAlistair Popple 			page_vma_mapped_walk_done(&pvmw);
2289b756a3b5SAlistair Popple 			break;
2290b756a3b5SAlistair Popple 		}
2291b756a3b5SAlistair Popple 
22920d251485SMatthew Wilcox (Oracle) 		subpage = folio_page(folio,
2293c33c7948SRyan Roberts 				pte_pfn(ptent) - folio_pfn(folio));
2294b756a3b5SAlistair Popple 		address = pvmw.address;
2295b756a3b5SAlistair Popple 
2296b756a3b5SAlistair Popple 		/* Nuke the page table entry. */
2297c33c7948SRyan Roberts 		flush_cache_page(vma, address, pte_pfn(ptent));
2298b756a3b5SAlistair Popple 		pteval = ptep_clear_flush(vma, address, pvmw.pte);
2299b756a3b5SAlistair Popple 
23000d251485SMatthew Wilcox (Oracle) 		/* Set the dirty flag on the folio now the pte is gone. */
2301b756a3b5SAlistair Popple 		if (pte_dirty(pteval))
23020d251485SMatthew Wilcox (Oracle) 			folio_mark_dirty(folio);
2303b756a3b5SAlistair Popple 
2304b756a3b5SAlistair Popple 		/*
2305b756a3b5SAlistair Popple 		 * Check that our target page is still mapped at the expected
2306b756a3b5SAlistair Popple 		 * address.
2307b756a3b5SAlistair Popple 		 */
2308b756a3b5SAlistair Popple 		if (args->mm == mm && args->address == address &&
2309b756a3b5SAlistair Popple 		    pte_write(pteval))
2310b756a3b5SAlistair Popple 			args->valid = true;
2311b756a3b5SAlistair Popple 
2312b756a3b5SAlistair Popple 		/*
2313b756a3b5SAlistair Popple 		 * Store the pfn of the page in a special device-exclusive
2314b756a3b5SAlistair Popple 		 * swap pte. On a CPU fault, do_swap_page() notifies the
2315b756a3b5SAlistair Popple 		 * device and restores the original mapping.
2316b756a3b5SAlistair Popple 		 */
2317b756a3b5SAlistair Popple 		if (pte_write(pteval))
2318b756a3b5SAlistair Popple 			entry = make_writable_device_exclusive_entry(
2319b756a3b5SAlistair Popple 							page_to_pfn(subpage));
2320b756a3b5SAlistair Popple 		else
2321b756a3b5SAlistair Popple 			entry = make_readable_device_exclusive_entry(
2322b756a3b5SAlistair Popple 							page_to_pfn(subpage));
2323b756a3b5SAlistair Popple 		swp_pte = swp_entry_to_pte(entry);
2324b756a3b5SAlistair Popple 		if (pte_soft_dirty(pteval))
2325b756a3b5SAlistair Popple 			swp_pte = pte_swp_mksoft_dirty(swp_pte);
2326b756a3b5SAlistair Popple 		if (pte_uffd_wp(pteval))
2327b756a3b5SAlistair Popple 			swp_pte = pte_swp_mkuffd_wp(swp_pte);
2328b756a3b5SAlistair Popple 
2329b756a3b5SAlistair Popple 		set_pte_at(mm, address, pvmw.pte, swp_pte);
2330b756a3b5SAlistair Popple 
2331b756a3b5SAlistair Popple 		/*
2332b756a3b5SAlistair Popple 		 * There is a reference on the page for the swap entry which has
2333b756a3b5SAlistair Popple 		 * been removed, so we shouldn't take another.
2334b756a3b5SAlistair Popple 		 */
2335cea86fe2SHugh Dickins 		page_remove_rmap(subpage, vma, false);
2336b756a3b5SAlistair Popple 	}
2337b756a3b5SAlistair Popple 
2338b756a3b5SAlistair Popple 	mmu_notifier_invalidate_range_end(&range);
2339b756a3b5SAlistair Popple 
2340b756a3b5SAlistair Popple 	return ret;
2341b756a3b5SAlistair Popple }
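
/*
 * Illustrative sketch (the enum and function names are made up for the
 * example): telling apart the non-present pte flavours that the walkers in
 * this file may install, using the swapops.h predicates.
 */
enum nonpresent_kind_sketch {
	NP_OTHER_SKETCH,
	NP_MIGRATION_SKETCH,
	NP_DEVICE_PRIVATE_SKETCH,
	NP_DEVICE_EXCLUSIVE_SKETCH,
	NP_HWPOISON_SKETCH,
};

static enum nonpresent_kind_sketch classify_nonpresent_pte_sketch(pte_t pte)
{
	swp_entry_t entry;

	if (!is_swap_pte(pte))
		return NP_OTHER_SKETCH;			/* present or pte_none */
	entry = pte_to_swp_entry(pte);
	if (is_migration_entry(entry))
		return NP_MIGRATION_SKETCH;		/* installed by try_to_migrate_one() */
	if (is_device_private_entry(entry))
		return NP_DEVICE_PRIVATE_SKETCH;
	if (is_device_exclusive_entry(entry))
		return NP_DEVICE_EXCLUSIVE_SKETCH;	/* installed just above */
	if (is_hwpoison_entry(entry))
		return NP_HWPOISON_SKETCH;
	return NP_OTHER_SKETCH;				/* e.g. an ordinary swap entry */
}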
2342b756a3b5SAlistair Popple 
2343b756a3b5SAlistair Popple /**
23440d251485SMatthew Wilcox (Oracle)  * folio_make_device_exclusive - Mark the folio exclusively owned by a device.
23450d251485SMatthew Wilcox (Oracle)  * @folio: The folio to replace page table entries for.
23460d251485SMatthew Wilcox (Oracle)  * @mm: The mm_struct where the folio is expected to be mapped.
23470d251485SMatthew Wilcox (Oracle)  * @address: Address where the folio is expected to be mapped.
2348b756a3b5SAlistair Popple  * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks
2349b756a3b5SAlistair Popple  *
23500d251485SMatthew Wilcox (Oracle)  * Tries to remove all the page table entries which are mapping this
23510d251485SMatthew Wilcox (Oracle)  * folio and replace them with special device exclusive swap entries to
23520d251485SMatthew Wilcox (Oracle)  * grant a device exclusive access to the folio.
2353b756a3b5SAlistair Popple  *
23540d251485SMatthew Wilcox (Oracle)  * Context: Caller must hold the folio lock.
23550d251485SMatthew Wilcox (Oracle)  * Return: false if the page is still mapped, or if it could not be unmapped
2356b756a3b5SAlistair Popple  * from the expected address. Otherwise returns true (success).
2357b756a3b5SAlistair Popple  */
23580d251485SMatthew Wilcox (Oracle) static bool folio_make_device_exclusive(struct folio *folio,
23590d251485SMatthew Wilcox (Oracle) 		struct mm_struct *mm, unsigned long address, void *owner)
2360b756a3b5SAlistair Popple {
2361b756a3b5SAlistair Popple 	struct make_exclusive_args args = {
2362b756a3b5SAlistair Popple 		.mm = mm,
2363b756a3b5SAlistair Popple 		.address = address,
2364b756a3b5SAlistair Popple 		.owner = owner,
2365b756a3b5SAlistair Popple 		.valid = false,
2366b756a3b5SAlistair Popple 	};
2367b756a3b5SAlistair Popple 	struct rmap_walk_control rwc = {
2368b756a3b5SAlistair Popple 		.rmap_one = page_make_device_exclusive_one,
2369f3ad032cSKefeng Wang 		.done = folio_not_mapped,
23702f031c6fSMatthew Wilcox (Oracle) 		.anon_lock = folio_lock_anon_vma_read,
2371b756a3b5SAlistair Popple 		.arg = &args,
2372b756a3b5SAlistair Popple 	};
2373b756a3b5SAlistair Popple 
2374b756a3b5SAlistair Popple 	/*
23750d251485SMatthew Wilcox (Oracle) 	 * Restrict to anonymous folios for now to avoid potential writeback
23760d251485SMatthew Wilcox (Oracle) 	 * issues.
2377b756a3b5SAlistair Popple 	 */
23780d251485SMatthew Wilcox (Oracle) 	if (!folio_test_anon(folio))
2379b756a3b5SAlistair Popple 		return false;
2380b756a3b5SAlistair Popple 
23812f031c6fSMatthew Wilcox (Oracle) 	rmap_walk(folio, &rwc);
2382b756a3b5SAlistair Popple 
23830d251485SMatthew Wilcox (Oracle) 	return args.valid && !folio_mapcount(folio);
2384b756a3b5SAlistair Popple }
2385b756a3b5SAlistair Popple 
2386b756a3b5SAlistair Popple /**
2387b756a3b5SAlistair Popple  * make_device_exclusive_range() - Mark a range for exclusive use by a device
2388dd062302SAdrian Huang  * @mm: mm_struct of associated target process
2389b756a3b5SAlistair Popple  * @start: start of the region to mark for exclusive device access
2390b756a3b5SAlistair Popple  * @end: end address of region
2391b756a3b5SAlistair Popple  * @pages: returns the pages which were successfully marked for exclusive access
2392b756a3b5SAlistair Popple  * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
2393b756a3b5SAlistair Popple  *
2394b756a3b5SAlistair Popple  * Returns: number of pages found in the range by GUP. A page is marked for
2395b756a3b5SAlistair Popple  * exclusive access only if its entry in @pages is non-NULL.
2396b756a3b5SAlistair Popple  *
2397b756a3b5SAlistair Popple  * This function finds the ptes mapping page(s) within the given address range,
2398b756a3b5SAlistair Popple  * locks the pages and replaces the mappings with special swap entries that
2399b756a3b5SAlistair Popple  * prevent userspace CPU access. On fault these entries are replaced with the
2400b756a3b5SAlistair Popple  * original mapping after calling MMU notifiers.
2401b756a3b5SAlistair Popple  *
2402b756a3b5SAlistair Popple  * A driver using this to program access from a device must use an mmu notifier
2403b756a3b5SAlistair Popple  * critical section to hold a device-specific lock during programming. Once
2404b756a3b5SAlistair Popple  * programming is complete it should drop the page lock and reference, after
2405b756a3b5SAlistair Popple  * which point CPU access to the page will revoke the exclusive access.
2406b756a3b5SAlistair Popple  */
2407b756a3b5SAlistair Popple int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
2408b756a3b5SAlistair Popple 				unsigned long end, struct page **pages,
2409b756a3b5SAlistair Popple 				void *owner)
2410b756a3b5SAlistair Popple {
2411b756a3b5SAlistair Popple 	long npages = (end - start) >> PAGE_SHIFT;
2412b756a3b5SAlistair Popple 	long i;
2413b756a3b5SAlistair Popple 
2414b756a3b5SAlistair Popple 	npages = get_user_pages_remote(mm, start, npages,
2415b756a3b5SAlistair Popple 				       FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
2416ca5e8632SLorenzo Stoakes 				       pages, NULL);
2417b756a3b5SAlistair Popple 	if (npages < 0)
2418b756a3b5SAlistair Popple 		return npages;
2419b756a3b5SAlistair Popple 
2420b756a3b5SAlistair Popple 	for (i = 0; i < npages; i++, start += PAGE_SIZE) {
24210d251485SMatthew Wilcox (Oracle) 		struct folio *folio = page_folio(pages[i]);
24220d251485SMatthew Wilcox (Oracle) 		if (PageTail(pages[i]) || !folio_trylock(folio)) {
24230d251485SMatthew Wilcox (Oracle) 			folio_put(folio);
2424b756a3b5SAlistair Popple 			pages[i] = NULL;
2425b756a3b5SAlistair Popple 			continue;
2426b756a3b5SAlistair Popple 		}
2427b756a3b5SAlistair Popple 
24280d251485SMatthew Wilcox (Oracle) 		if (!folio_make_device_exclusive(folio, mm, start, owner)) {
24290d251485SMatthew Wilcox (Oracle) 			folio_unlock(folio);
24300d251485SMatthew Wilcox (Oracle) 			folio_put(folio);
2431b756a3b5SAlistair Popple 			pages[i] = NULL;
2432b756a3b5SAlistair Popple 		}
2433b756a3b5SAlistair Popple 	}
2434b756a3b5SAlistair Popple 
2435b756a3b5SAlistair Popple 	return npages;
2436b756a3b5SAlistair Popple }
2437b756a3b5SAlistair Popple EXPORT_SYMBOL_GPL(make_device_exclusive_range);
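
/*
 * Illustrative sketch (the _sketch name and the elided device-pte step are
 * made up for the example): minimal driver-side use of
 * make_device_exclusive_range(). A real driver must program its device pte
 * under a driver lock that its MMU_NOTIFY_EXCLUSIVE callback also takes.
 */
static int grant_device_exclusive_sketch(struct mm_struct *mm,
					 unsigned long start, unsigned long end,
					 void *owner)
{
	long npages = (end - start) >> PAGE_SHIFT;
	struct page **pages;
	long i;
	int got;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	mmap_read_lock(mm);
	got = make_device_exclusive_range(mm, start, end, pages, owner);
	mmap_read_unlock(mm);
	if (got < 0) {
		kfree(pages);
		return got;
	}

	for (i = 0; i < got; i++) {
		if (!pages[i])
			continue;	/* this page could not be made exclusive */
		/* ... program the device pte for pages[i] here (hypothetical) ... */
		unlock_page(pages[i]);	/* drop the lock taken by the walk */
		put_page(pages[i]);	/* drop the FOLL_GET reference */
	}
	kfree(pages);
	return 0;
}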
2438b756a3b5SAlistair Popple #endif
2439b756a3b5SAlistair Popple 
244001d8b20dSPeter Zijlstra void __put_anon_vma(struct anon_vma *anon_vma)
244176545066SRik van Riel {
244276545066SRik van Riel 	struct anon_vma *root = anon_vma->root;
244376545066SRik van Riel 
2444624483f3SAndrey Ryabinin 	anon_vma_free(anon_vma);
244501d8b20dSPeter Zijlstra 	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
244676545066SRik van Riel 		anon_vma_free(root);
244776545066SRik van Riel }
244876545066SRik van Riel 
24492f031c6fSMatthew Wilcox (Oracle) static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
24506d4675e6SMinchan Kim 					    struct rmap_walk_control *rwc)
2451faecd8ddSJoonsoo Kim {
2452faecd8ddSJoonsoo Kim 	struct anon_vma *anon_vma;
2453faecd8ddSJoonsoo Kim 
24540dd1c7bbSJoonsoo Kim 	if (rwc->anon_lock)
24556d4675e6SMinchan Kim 		return rwc->anon_lock(folio, rwc);
24560dd1c7bbSJoonsoo Kim 
2457faecd8ddSJoonsoo Kim 	/*
24582f031c6fSMatthew Wilcox (Oracle) 	 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read()
2459faecd8ddSJoonsoo Kim 	 * because that depends on page_mapped(); but not all its usages
2460c1e8d7c6SMichel Lespinasse 	 * are holding mmap_lock. Users without mmap_lock are required to
2461faecd8ddSJoonsoo Kim 	 * take a reference count to prevent the anon_vma from disappearing.
2462faecd8ddSJoonsoo Kim 	 */
2463e05b3453SMatthew Wilcox (Oracle) 	anon_vma = folio_anon_vma(folio);
2464faecd8ddSJoonsoo Kim 	if (!anon_vma)
2465faecd8ddSJoonsoo Kim 		return NULL;
2466faecd8ddSJoonsoo Kim 
24676d4675e6SMinchan Kim 	if (anon_vma_trylock_read(anon_vma))
24686d4675e6SMinchan Kim 		goto out;
24696d4675e6SMinchan Kim 
24706d4675e6SMinchan Kim 	if (rwc->try_lock) {
24716d4675e6SMinchan Kim 		anon_vma = NULL;
24726d4675e6SMinchan Kim 		rwc->contended = true;
24736d4675e6SMinchan Kim 		goto out;
24746d4675e6SMinchan Kim 	}
24756d4675e6SMinchan Kim 
2476faecd8ddSJoonsoo Kim 	anon_vma_lock_read(anon_vma);
24776d4675e6SMinchan Kim out:
2478faecd8ddSJoonsoo Kim 	return anon_vma;
2479faecd8ddSJoonsoo Kim }
2480faecd8ddSJoonsoo Kim 
2481e9995ef9SHugh Dickins /*
2482e8351ac9SJoonsoo Kim  * rmap_walk_anon - do something to an anonymous folio using the object-based
2483e8351ac9SJoonsoo Kim  * rmap method
248489be82b4SKemeng Shi  * @folio: the folio to be handled
2485e8351ac9SJoonsoo Kim  * @rwc: control variable according to each walk type
248689be82b4SKemeng Shi  * @locked: caller holds relevant rmap lock
2487e8351ac9SJoonsoo Kim  *
248889be82b4SKemeng Shi  * Find all the mappings of a folio using the mapping pointer and the vma
248989be82b4SKemeng Shi  * chains contained in the anon_vma struct it points to.
2490e9995ef9SHugh Dickins  */
249184fbbe21SMatthew Wilcox (Oracle) static void rmap_walk_anon(struct folio *folio,
24926d4675e6SMinchan Kim 		struct rmap_walk_control *rwc, bool locked)
2493e9995ef9SHugh Dickins {
2494e9995ef9SHugh Dickins 	struct anon_vma *anon_vma;
2495a8fa41adSKirill A. Shutemov 	pgoff_t pgoff_start, pgoff_end;
24965beb4930SRik van Riel 	struct anon_vma_chain *avc;
2497e9995ef9SHugh Dickins 
2498b9773199SKirill A. Shutemov 	if (locked) {
2499e05b3453SMatthew Wilcox (Oracle) 		anon_vma = folio_anon_vma(folio);
2500b9773199SKirill A. Shutemov 		/* anon_vma disappear under us? */
2501e05b3453SMatthew Wilcox (Oracle) 		VM_BUG_ON_FOLIO(!anon_vma, folio);
2502b9773199SKirill A. Shutemov 	} else {
25032f031c6fSMatthew Wilcox (Oracle) 		anon_vma = rmap_walk_anon_lock(folio, rwc);
2504b9773199SKirill A. Shutemov 	}
2505e9995ef9SHugh Dickins 	if (!anon_vma)
25061df631aeSMinchan Kim 		return;
2507faecd8ddSJoonsoo Kim 
25082f031c6fSMatthew Wilcox (Oracle) 	pgoff_start = folio_pgoff(folio);
25092f031c6fSMatthew Wilcox (Oracle) 	pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2510a8fa41adSKirill A. Shutemov 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
2511a8fa41adSKirill A. Shutemov 			pgoff_start, pgoff_end) {
25125beb4930SRik van Riel 		struct vm_area_struct *vma = avc->vma;
25132f031c6fSMatthew Wilcox (Oracle) 		unsigned long address = vma_address(&folio->page, vma);
25140dd1c7bbSJoonsoo Kim 
2515494334e4SHugh Dickins 		VM_BUG_ON_VMA(address == -EFAULT, vma);
2516ad12695fSAndrea Arcangeli 		cond_resched();
2517ad12695fSAndrea Arcangeli 
25180dd1c7bbSJoonsoo Kim 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
25190dd1c7bbSJoonsoo Kim 			continue;
25200dd1c7bbSJoonsoo Kim 
25212f031c6fSMatthew Wilcox (Oracle) 		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2522e9995ef9SHugh Dickins 			break;
25232f031c6fSMatthew Wilcox (Oracle) 		if (rwc->done && rwc->done(folio))
25240dd1c7bbSJoonsoo Kim 			break;
2525e9995ef9SHugh Dickins 	}
2526b9773199SKirill A. Shutemov 
2527b9773199SKirill A. Shutemov 	if (!locked)
25284fc3f1d6SIngo Molnar 		anon_vma_unlock_read(anon_vma);
2529e9995ef9SHugh Dickins }
2530e9995ef9SHugh Dickins 
2531e8351ac9SJoonsoo Kim /*
2532e8351ac9SJoonsoo Kim  * rmap_walk_file - do something to a file-backed folio using the object-based rmap method
253389be82b4SKemeng Shi  * @folio: the folio to be handled
2534e8351ac9SJoonsoo Kim  * @rwc: control variable according to each walk type
253589be82b4SKemeng Shi  * @locked: caller holds relevant rmap lock
2536e8351ac9SJoonsoo Kim  *
253789be82b4SKemeng Shi  * Find all the mappings of a folio using the mapping pointer and the vma chains
2538e8351ac9SJoonsoo Kim  * contained in the address_space struct it points to.
2539e8351ac9SJoonsoo Kim  */
254084fbbe21SMatthew Wilcox (Oracle) static void rmap_walk_file(struct folio *folio,
25416d4675e6SMinchan Kim 		struct rmap_walk_control *rwc, bool locked)
2542e9995ef9SHugh Dickins {
25432f031c6fSMatthew Wilcox (Oracle) 	struct address_space *mapping = folio_mapping(folio);
2544a8fa41adSKirill A. Shutemov 	pgoff_t pgoff_start, pgoff_end;
2545e9995ef9SHugh Dickins 	struct vm_area_struct *vma;
2546e9995ef9SHugh Dickins 
25479f32624bSJoonsoo Kim 	/*
25489f32624bSJoonsoo Kim 	 * The page lock not only makes sure that page->mapping cannot
25499f32624bSJoonsoo Kim 	 * suddenly be NULLified by truncation, it also makes sure that the
25509f32624bSJoonsoo Kim 	 * structure at mapping cannot be freed and reused yet,
2551c8c06efaSDavidlohr Bueso 	 * so we can safely take mapping->i_mmap_rwsem.
25529f32624bSJoonsoo Kim 	 */
25532f031c6fSMatthew Wilcox (Oracle) 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
25549f32624bSJoonsoo Kim 
2555e9995ef9SHugh Dickins 	if (!mapping)
25561df631aeSMinchan Kim 		return;
25573dec0ba0SDavidlohr Bueso 
25582f031c6fSMatthew Wilcox (Oracle) 	pgoff_start = folio_pgoff(folio);
25592f031c6fSMatthew Wilcox (Oracle) 	pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
25606d4675e6SMinchan Kim 	if (!locked) {
25616d4675e6SMinchan Kim 		if (i_mmap_trylock_read(mapping))
25626d4675e6SMinchan Kim 			goto lookup;
25636d4675e6SMinchan Kim 
25646d4675e6SMinchan Kim 		if (rwc->try_lock) {
25656d4675e6SMinchan Kim 			rwc->contended = true;
25666d4675e6SMinchan Kim 			return;
25676d4675e6SMinchan Kim 		}
25686d4675e6SMinchan Kim 
25693dec0ba0SDavidlohr Bueso 		i_mmap_lock_read(mapping);
25706d4675e6SMinchan Kim 	}
25716d4675e6SMinchan Kim lookup:
2572a8fa41adSKirill A. Shutemov 	vma_interval_tree_foreach(vma, &mapping->i_mmap,
2573a8fa41adSKirill A. Shutemov 			pgoff_start, pgoff_end) {
25742f031c6fSMatthew Wilcox (Oracle) 		unsigned long address = vma_address(&folio->page, vma);
25750dd1c7bbSJoonsoo Kim 
2576494334e4SHugh Dickins 		VM_BUG_ON_VMA(address == -EFAULT, vma);
2577ad12695fSAndrea Arcangeli 		cond_resched();
2578ad12695fSAndrea Arcangeli 
25790dd1c7bbSJoonsoo Kim 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
25800dd1c7bbSJoonsoo Kim 			continue;
25810dd1c7bbSJoonsoo Kim 
25822f031c6fSMatthew Wilcox (Oracle) 		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
25830dd1c7bbSJoonsoo Kim 			goto done;
25842f031c6fSMatthew Wilcox (Oracle) 		if (rwc->done && rwc->done(folio))
25850dd1c7bbSJoonsoo Kim 			goto done;
2586e9995ef9SHugh Dickins 	}
25870dd1c7bbSJoonsoo Kim 
25880dd1c7bbSJoonsoo Kim done:
2589b9773199SKirill A. Shutemov 	if (!locked)
25903dec0ba0SDavidlohr Bueso 		i_mmap_unlock_read(mapping);
2591e9995ef9SHugh Dickins }
2592e9995ef9SHugh Dickins 
25936d4675e6SMinchan Kim void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
2594e9995ef9SHugh Dickins {
25952f031c6fSMatthew Wilcox (Oracle) 	if (unlikely(folio_test_ksm(folio)))
25962f031c6fSMatthew Wilcox (Oracle) 		rmap_walk_ksm(folio, rwc);
25972f031c6fSMatthew Wilcox (Oracle) 	else if (folio_test_anon(folio))
25982f031c6fSMatthew Wilcox (Oracle) 		rmap_walk_anon(folio, rwc, false);
2599e9995ef9SHugh Dickins 	else
26002f031c6fSMatthew Wilcox (Oracle) 		rmap_walk_file(folio, rwc, false);
2601b9773199SKirill A. Shutemov }
2602b9773199SKirill A. Shutemov 
2603b9773199SKirill A. Shutemov /* Like rmap_walk, but caller holds relevant rmap lock */
26046d4675e6SMinchan Kim void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
2605b9773199SKirill A. Shutemov {
2606b9773199SKirill A. Shutemov 	/* no ksm support for now */
26072f031c6fSMatthew Wilcox (Oracle) 	VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
26082f031c6fSMatthew Wilcox (Oracle) 	if (folio_test_anon(folio))
26092f031c6fSMatthew Wilcox (Oracle) 		rmap_walk_anon(folio, rwc, true);
2610b9773199SKirill A. Shutemov 	else
26112f031c6fSMatthew Wilcox (Oracle) 		rmap_walk_file(folio, rwc, true);
2612e9995ef9SHugh Dickins }
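
/*
 * Illustrative sketch (the _sketch names are made up for the example): the
 * smallest useful rmap_walk_control, counting the VMAs that currently map a
 * folio. The folio is expected to be locked by the caller, as rmap_walk_file()
 * asserts above.
 */
static bool count_one_mapping_sketch(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address, void *arg)
{
	(*(unsigned long *)arg)++;
	return true;			/* keep walking the remaining VMAs */
}

static unsigned long count_folio_mappings_sketch(struct folio *folio)
{
	unsigned long nr = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = count_one_mapping_sketch,
		.arg = &nr,
	};

	rmap_walk(folio, &rwc);
	return nr;
}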
26130fe6e20bSNaoya Horiguchi 
2614e3390f67SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE
26150fe6e20bSNaoya Horiguchi /*
2616451b9514SKirill Tkhai  * The following two functions are for anonymous (privately mapped) hugepages.
26170fe6e20bSNaoya Horiguchi  * Unlike common anonymous pages, anonymous hugepages have no accounting code
26180fe6e20bSNaoya Horiguchi  * and no lru code, because we handle hugepages differently from common pages.
261928c5209dSDavid Hildenbrand  *
262028c5209dSDavid Hildenbrand  * RMAP_COMPOUND is ignored.
26210fe6e20bSNaoya Horiguchi  */
262209c55050SDavid Hildenbrand void hugepage_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
262328c5209dSDavid Hildenbrand 			    unsigned long address, rmap_t flags)
26240fe6e20bSNaoya Horiguchi {
2625c5c54003SDavid Hildenbrand 	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
2626c5c54003SDavid Hildenbrand 
2627132b180fSDavid Hildenbrand 	atomic_inc(&folio->_entire_mapcount);
2628c66db8c0SDavid Hildenbrand 	if (flags & RMAP_EXCLUSIVE)
262909c55050SDavid Hildenbrand 		SetPageAnonExclusive(&folio->page);
2630132b180fSDavid Hildenbrand 	VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
263109c55050SDavid Hildenbrand 			 PageAnonExclusive(&folio->page), folio);
26320fe6e20bSNaoya Horiguchi }
26330fe6e20bSNaoya Horiguchi 
2634d0ce0e47SSidhartha Kumar void hugepage_add_new_anon_rmap(struct folio *folio,
26350fe6e20bSNaoya Horiguchi 			struct vm_area_struct *vma, unsigned long address)
26360fe6e20bSNaoya Horiguchi {
26370fe6e20bSNaoya Horiguchi 	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
2638cb67f428SHugh Dickins 	/* increment count (starts at -1) */
2639db4e5dbdSMatthew Wilcox (Oracle) 	atomic_set(&folio->_entire_mapcount, 0);
2640db4e5dbdSMatthew Wilcox (Oracle) 	folio_clear_hugetlb_restore_reserve(folio);
2641c66db8c0SDavid Hildenbrand 	__folio_set_anon(folio, vma, address, true);
2642c66db8c0SDavid Hildenbrand 	SetPageAnonExclusive(&folio->page);
26430fe6e20bSNaoya Horiguchi }
2644e3390f67SNaoya Horiguchi #endif /* CONFIG_HUGETLB_PAGE */
2645