xref: /linux/mm/rmap.c (revision ef24e0aa078fa4965c6e925209780a32b325c0d8)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * mm/rmap.c - physical to virtual reverse mappings
4  *
5  * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
6  *
7  * Simple, low overhead reverse mapping scheme.
8  * Please try to keep this thing as modular as possible.
9  *
10  * Provides methods for unmapping each kind of mapped page:
11  * the anon methods track anonymous pages, and
12  * the file methods track pages belonging to an inode.
13  *
14  * Original design by Rik van Riel <riel@conectiva.com.br> 2001
15  * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
16  * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
17  * Contributions by Hugh Dickins 2003, 2004
18  */
19 
20 /*
21  * Lock ordering in mm:
22  *
23  * inode->i_rwsem	(while writing or truncating, not reading or faulting)
24  *   mm->mmap_lock
25  *     mapping->invalidate_lock (in filemap_fault)
26  *       folio_lock
27  *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
28  *           vma_start_write
29  *             mapping->i_mmap_rwsem
30  *               anon_vma->rwsem
31  *                 mm->page_table_lock or pte_lock
32  *                   swap_lock (in swap_duplicate, swap_info_get)
33  *                     mmlist_lock (in mmput, drain_mmlist and others)
34  *                     mapping->private_lock (in block_dirty_folio)
35  *                         i_pages lock (widely used)
36  *                           lruvec->lru_lock (in folio_lruvec_lock_irq)
37  *                     inode->i_lock (in set_page_dirty's __mark_inode_dirty)
38  *                     bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
39  *                       sb_lock (within inode_lock in fs/fs-writeback.c)
40  *                       i_pages lock (widely used, in set_page_dirty,
41  *                                 in arch-dependent flush_dcache_mmap_lock,
42  *                                 within bdi.wb->list_lock in __sync_single_inode)
43  *
44  * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
45  *   ->tasklist_lock
46  *     pte map lock
47  *
48  * hugetlbfs PageHuge() take locks in this order:
49  *   hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
50  *     vma_lock (hugetlb specific lock for pmd_sharing)
51  *       mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
52  *         folio_lock
53  */
54 
55 #include <linux/mm.h>
56 #include <linux/sched/mm.h>
57 #include <linux/sched/task.h>
58 #include <linux/pagemap.h>
59 #include <linux/swap.h>
60 #include <linux/leafops.h>
61 #include <linux/slab.h>
62 #include <linux/init.h>
63 #include <linux/ksm.h>
64 #include <linux/rmap.h>
65 #include <linux/rcupdate.h>
66 #include <linux/export.h>
67 #include <linux/memcontrol.h>
68 #include <linux/mmu_notifier.h>
69 #include <linux/migrate.h>
70 #include <linux/hugetlb.h>
71 #include <linux/huge_mm.h>
72 #include <linux/backing-dev.h>
73 #include <linux/page_idle.h>
74 #include <linux/memremap.h>
75 #include <linux/userfaultfd_k.h>
76 #include <linux/mm_inline.h>
77 #include <linux/oom.h>
78 
79 #include <asm/tlb.h>
80 
81 #define CREATE_TRACE_POINTS
82 #include <trace/events/migrate.h>
83 
84 #include "internal.h"
85 #include "swap.h"
86 
87 static struct kmem_cache *anon_vma_cachep;
88 static struct kmem_cache *anon_vma_chain_cachep;
89 
90 static inline struct anon_vma *anon_vma_alloc(void)
91 {
92 	struct anon_vma *anon_vma;
93 
94 	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
95 	if (anon_vma) {
96 		atomic_set(&anon_vma->refcount, 1);
97 		anon_vma->num_children = 0;
98 		anon_vma->num_active_vmas = 0;
99 		anon_vma->parent = anon_vma;
100 		/*
101 		 * Initialise the anon_vma root to point to itself. If called
102 		 * from fork, the root will be reset to the parents anon_vma.
103 		 */
104 		anon_vma->root = anon_vma;
105 	}
106 
107 	return anon_vma;
108 }
109 
110 static inline void anon_vma_free(struct anon_vma *anon_vma)
111 {
112 	VM_BUG_ON(atomic_read(&anon_vma->refcount));
113 
114 	/*
115 	 * Synchronize against folio_lock_anon_vma_read() such that
116 	 * we can safely hold the lock without the anon_vma getting
117 	 * freed.
118 	 *
119 	 * Relies on the full mb implied by the atomic_dec_and_test() from
120 	 * put_anon_vma() against the acquire barrier implied by
121 	 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
122 	 *
123 	 * folio_lock_anon_vma_read()	VS	put_anon_vma()
124 	 *   down_read_trylock()		  atomic_dec_and_test()
125 	 *   LOCK				  MB
126 	 *   atomic_read()			  rwsem_is_locked()
127 	 *
128 	 * LOCK should suffice since the actual taking of the lock must
129 	 * happen _before_ what follows.
130 	 */
131 	might_sleep();
132 	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
133 		anon_vma_lock_write(anon_vma);
134 		anon_vma_unlock_write(anon_vma);
135 	}
136 
137 	kmem_cache_free(anon_vma_cachep, anon_vma);
138 }
139 
140 static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
141 {
142 	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
143 }
144 
145 static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
146 {
147 	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
148 }
149 
150 static void anon_vma_chain_assign(struct vm_area_struct *vma,
151 				  struct anon_vma_chain *avc,
152 				  struct anon_vma *anon_vma)
153 {
154 	avc->vma = vma;
155 	avc->anon_vma = anon_vma;
156 	list_add(&avc->same_vma, &vma->anon_vma_chain);
157 }
158 
159 /**
160  * __anon_vma_prepare - attach an anon_vma to a memory region
161  * @vma: the memory region in question
162  *
163  * This makes sure the memory mapping described by 'vma' has
164  * an 'anon_vma' attached to it, so that we can associate the
165  * anonymous pages mapped into it with that anon_vma.
166  *
167  * The common case will be that we already have one, which
168  * is handled inline by anon_vma_prepare(). But if
169  * not we either need to find an adjacent mapping that we
170  * can re-use the anon_vma from (very common when the only
171  * reason for splitting a vma has been mprotect()), or we
172  * allocate a new one.
173  *
174  * Anon-vma allocations are very subtle, because we may have
175  * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
176  * and that may actually touch the rwsem even in the newly
177  * allocated vma (it depends on RCU to make sure that the
178  * anon_vma isn't actually destroyed).
179  *
180  * As a result, we need to do proper anon_vma locking even
181  * for the new allocation. At the same time, we do not want
182  * to do any locking for the common case of already having
183  * an anon_vma.
184  */
185 int __anon_vma_prepare(struct vm_area_struct *vma)
186 {
187 	struct mm_struct *mm = vma->vm_mm;
188 	struct anon_vma *anon_vma, *allocated;
189 	struct anon_vma_chain *avc;
190 
191 	mmap_assert_locked(mm);
192 	might_sleep();
193 
194 	avc = anon_vma_chain_alloc(GFP_KERNEL);
195 	if (!avc)
196 		goto out_enomem;
197 
198 	anon_vma = find_mergeable_anon_vma(vma);
199 	allocated = NULL;
200 	if (!anon_vma) {
201 		anon_vma = anon_vma_alloc();
202 		if (unlikely(!anon_vma))
203 			goto out_enomem_free_avc;
204 		anon_vma->num_children++; /* self-parent link for new root */
205 		allocated = anon_vma;
206 	}
207 
208 	anon_vma_lock_write(anon_vma);
209 	/* page_table_lock to protect against threads */
210 	spin_lock(&mm->page_table_lock);
211 	if (likely(!vma->anon_vma)) {
212 		vma->anon_vma = anon_vma;
213 		anon_vma_chain_assign(vma, avc, anon_vma);
214 		anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
215 		anon_vma->num_active_vmas++;
216 		allocated = NULL;
217 		avc = NULL;
218 	}
219 	spin_unlock(&mm->page_table_lock);
220 	anon_vma_unlock_write(anon_vma);
221 
222 	if (unlikely(allocated))
223 		put_anon_vma(allocated);
224 	if (unlikely(avc))
225 		anon_vma_chain_free(avc);
226 
227 	return 0;
228 
229  out_enomem_free_avc:
230 	anon_vma_chain_free(avc);
231  out_enomem:
232 	return -ENOMEM;
233 }
234 
235 static void check_anon_vma_clone(struct vm_area_struct *dst,
236 				 struct vm_area_struct *src,
237 				 enum vma_operation operation)
238 {
239 	/* The write lock must be held. */
240 	mmap_assert_write_locked(src->vm_mm);
241 	/* If not a fork then must be on same mm. */
242 	VM_WARN_ON_ONCE(operation != VMA_OP_FORK && dst->vm_mm != src->vm_mm);
243 
244 	/* If we have anything to do src->anon_vma must be provided. */
245 	VM_WARN_ON_ONCE(!src->anon_vma && !list_empty(&src->anon_vma_chain));
246 	VM_WARN_ON_ONCE(!src->anon_vma && dst->anon_vma);
247 	/* We are establishing a new anon_vma_chain. */
248 	VM_WARN_ON_ONCE(!list_empty(&dst->anon_vma_chain));
249 	/*
250 	 * On fork, dst->anon_vma is set NULL (temporarily). Otherwise, anon_vma
251 	 * must be the same across dst and src.
252 	 */
253 	VM_WARN_ON_ONCE(dst->anon_vma && dst->anon_vma != src->anon_vma);
254 	/*
255 	 * Essentially equivalent to above - if not a no-op, we should expect
256 	 * dst->anon_vma to be set for everything except a fork.
257 	 */
258 	VM_WARN_ON_ONCE(operation != VMA_OP_FORK && src->anon_vma &&
259 			!dst->anon_vma);
260 	/* For the anon_vma to be compatible, it can only be singular. */
261 	VM_WARN_ON_ONCE(operation == VMA_OP_MERGE_UNFAULTED &&
262 			!list_is_singular(&src->anon_vma_chain));
263 #ifdef CONFIG_PER_VMA_LOCK
264 	/* Only merging an unfaulted VMA leaves the destination attached. */
265 	VM_WARN_ON_ONCE(operation != VMA_OP_MERGE_UNFAULTED &&
266 			vma_is_attached(dst));
267 #endif
268 }
269 
270 static void maybe_reuse_anon_vma(struct vm_area_struct *dst,
271 		struct anon_vma *anon_vma)
272 {
273 	/* If already populated, nothing to do.*/
274 	if (dst->anon_vma)
275 		return;
276 
277 	/*
278 	 * We reuse an anon_vma if any linking VMAs were unmapped and it has
279 	 * only a single child at most.
280 	 */
281 	if (anon_vma->num_active_vmas > 0)
282 		return;
283 	if (anon_vma->num_children > 1)
284 		return;
285 
286 	dst->anon_vma = anon_vma;
287 	anon_vma->num_active_vmas++;
288 }
289 
290 static void cleanup_partial_anon_vmas(struct vm_area_struct *vma);
291 
292 /**
293  * anon_vma_clone - Establishes new anon_vma_chain objects in @dst linking to
294  * all of the anon_vma objects contained within @src anon_vma_chain's.
295  * @dst: The destination VMA with an empty anon_vma_chain.
296  * @src: The source VMA we wish to duplicate.
297  * @operation: The type of operation which resulted in the clone.
298  *
299  * This is the heart of the VMA side of the anon_vma implementation - we invoke
300  * this function whenever we need to set up a new VMA's anon_vma state.
301  *
302  * This is invoked for:
303  *
304  * - VMA Merge, but only when @dst is unfaulted and @src is faulted - meaning we
305  *   clone @src into @dst.
306  * - VMA split.
307  * - VMA (m)remap.
308  * - Fork of faulted VMA.
309  *
310  * In all cases other than fork this is simply a duplication. Fork additionally
311  * adds a new active anon_vma.
312  *
313  * ONLY in the case of fork do we try to 'reuse' existing anon_vma's in an
314  * anon_vma hierarchy, reusing anon_vma's which have no VMA associated with them
315  * but do have a single child. This is to avoid waste of memory when repeatedly
316  * forking.
317  *
318  * Returns: 0 on success, -ENOMEM on failure.
319  */
320 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src,
321 		   enum vma_operation operation)
322 {
323 	struct anon_vma_chain *avc, *pavc;
324 	struct anon_vma *active_anon_vma = src->anon_vma;
325 
326 	check_anon_vma_clone(dst, src, operation);
327 
328 	if (!active_anon_vma)
329 		return 0;
330 
331 	/*
332 	 * Allocate AVCs. We don't need an anon_vma lock for this as we
333 	 * are not updating the anon_vma rbtree nor are we changing
334 	 * anon_vma statistics.
335 	 *
336 	 * Either src, dst have the same mm for which we hold an exclusive mmap
337 	 * write lock, or we are forking and we hold it on src->vm_mm and dst is
338 	 * not yet accessible to other threads so there's no possibliity of the
339 	 * unlinked AVC's being observed yet.
340 	 */
341 	list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) {
342 		avc = anon_vma_chain_alloc(GFP_KERNEL);
343 		if (!avc)
344 			goto enomem_failure;
345 
346 		anon_vma_chain_assign(dst, avc, pavc->anon_vma);
347 	}
348 
349 	/*
350 	 * Now link the anon_vma's back to the newly inserted AVCs.
351 	 * Note that all anon_vma's share the same root.
352 	 */
353 	anon_vma_lock_write(src->anon_vma);
354 	list_for_each_entry_reverse(avc, &dst->anon_vma_chain, same_vma) {
355 		struct anon_vma *anon_vma = avc->anon_vma;
356 
357 		anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
358 		if (operation == VMA_OP_FORK)
359 			maybe_reuse_anon_vma(dst, anon_vma);
360 	}
361 
362 	if (operation != VMA_OP_FORK)
363 		dst->anon_vma->num_active_vmas++;
364 
365 	anon_vma_unlock_write(active_anon_vma);
366 	return 0;
367 
368  enomem_failure:
369 	cleanup_partial_anon_vmas(dst);
370 	return -ENOMEM;
371 }
372 
373 /*
374  * Attach vma to its own anon_vma, as well as to the anon_vmas that
375  * the corresponding VMA in the parent process is attached to.
376  * Returns 0 on success, non-zero on failure.
377  */
378 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
379 {
380 	struct anon_vma_chain *avc;
381 	struct anon_vma *anon_vma;
382 	int rc;
383 
384 	/* Don't bother if the parent process has no anon_vma here. */
385 	if (!pvma->anon_vma)
386 		return 0;
387 
388 	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
389 	vma->anon_vma = NULL;
390 
391 	anon_vma = anon_vma_alloc();
392 	if (!anon_vma)
393 		return -ENOMEM;
394 	avc = anon_vma_chain_alloc(GFP_KERNEL);
395 	if (!avc) {
396 		put_anon_vma(anon_vma);
397 		return -ENOMEM;
398 	}
399 
400 	/*
401 	 * First, attach the new VMA to the parent VMA's anon_vmas,
402 	 * so rmap can find non-COWed pages in child processes.
403 	 */
404 	rc = anon_vma_clone(vma, pvma, VMA_OP_FORK);
405 	/* An error arose or an existing anon_vma was reused, all done then. */
406 	if (rc || vma->anon_vma) {
407 		put_anon_vma(anon_vma);
408 		anon_vma_chain_free(avc);
409 		return rc;
410 	}
411 
412 	/*
413 	 * OK no reuse, so add our own anon_vma.
414 	 *
415 	 * Since it is not linked anywhere we can safely manipulate anon_vma
416 	 * fields without a lock.
417 	 */
418 
419 	anon_vma->num_active_vmas = 1;
420 	/*
421 	 * The root anon_vma's rwsem is the lock actually used when we
422 	 * lock any of the anon_vmas in this anon_vma tree.
423 	 */
424 	anon_vma->root = pvma->anon_vma->root;
425 	anon_vma->parent = pvma->anon_vma;
426 	/*
427 	 * With refcounts, an anon_vma can stay around longer than the
428 	 * process it belongs to. The root anon_vma needs to be pinned until
429 	 * this anon_vma is freed, because the lock lives in the root.
430 	 */
431 	get_anon_vma(anon_vma->root);
432 	/* Mark this anon_vma as the one where our new (COWed) pages go. */
433 	vma->anon_vma = anon_vma;
434 	anon_vma_chain_assign(vma, avc, anon_vma);
435 	/* Now let rmap see it. */
436 	anon_vma_lock_write(anon_vma);
437 	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
438 	anon_vma->parent->num_children++;
439 	anon_vma_unlock_write(anon_vma);
440 
441 	return 0;
442 }
443 
444 /*
445  * In the unfortunate case of anon_vma_clone() failing to allocate memory we
446  * have to clean things up.
447  *
448  * Since we allocate anon_vma_chain's before we insert them into the interval
449  * trees, we simply have to free up the AVC's and remove the entries from the
450  * VMA's anon_vma_chain.
451  */
452 static void cleanup_partial_anon_vmas(struct vm_area_struct *vma)
453 {
454 	struct anon_vma_chain *avc, *next;
455 
456 	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
457 		list_del(&avc->same_vma);
458 		anon_vma_chain_free(avc);
459 	}
460 }
461 
462 /**
463  * unlink_anon_vmas() - remove all links between a VMA and anon_vma's, freeing
464  * anon_vma_chain objects.
465  * @vma: The VMA whose links to anon_vma objects is to be severed.
466  *
467  * As part of the process anon_vma_chain's are freed,
468  * anon_vma->num_children,num_active_vmas is updated as required and, if the
469  * relevant anon_vma references no further VMAs, its reference count is
470  * decremented.
471  */
472 void unlink_anon_vmas(struct vm_area_struct *vma)
473 {
474 	struct anon_vma_chain *avc, *next;
475 	struct anon_vma *active_anon_vma = vma->anon_vma;
476 
477 	/* Always hold mmap lock, read-lock on unmap possibly. */
478 	mmap_assert_locked(vma->vm_mm);
479 
480 	/* Unfaulted is a no-op. */
481 	if (!active_anon_vma) {
482 		VM_WARN_ON_ONCE(!list_empty(&vma->anon_vma_chain));
483 		return;
484 	}
485 
486 	anon_vma_lock_write(active_anon_vma);
487 
488 	/*
489 	 * Unlink each anon_vma chained to the VMA.  This list is ordered
490 	 * from newest to oldest, ensuring the root anon_vma gets freed last.
491 	 */
492 	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
493 		struct anon_vma *anon_vma = avc->anon_vma;
494 
495 		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
496 
497 		/*
498 		 * Leave empty anon_vmas on the list - we'll need
499 		 * to free them outside the lock.
500 		 */
501 		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
502 			anon_vma->parent->num_children--;
503 			continue;
504 		}
505 
506 		list_del(&avc->same_vma);
507 		anon_vma_chain_free(avc);
508 	}
509 
510 	active_anon_vma->num_active_vmas--;
511 	/*
512 	 * vma would still be needed after unlink, and anon_vma will be prepared
513 	 * when handle fault.
514 	 */
515 	vma->anon_vma = NULL;
516 	anon_vma_unlock_write(active_anon_vma);
517 
518 
519 	/*
520 	 * Iterate the list once more, it now only contains empty and unlinked
521 	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
522 	 * needing to write-acquire the anon_vma->root->rwsem.
523 	 */
524 	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
525 		struct anon_vma *anon_vma = avc->anon_vma;
526 
527 		VM_WARN_ON(anon_vma->num_children);
528 		VM_WARN_ON(anon_vma->num_active_vmas);
529 		put_anon_vma(anon_vma);
530 
531 		list_del(&avc->same_vma);
532 		anon_vma_chain_free(avc);
533 	}
534 }
535 
536 static void anon_vma_ctor(void *data)
537 {
538 	struct anon_vma *anon_vma = data;
539 
540 	init_rwsem(&anon_vma->rwsem);
541 	atomic_set(&anon_vma->refcount, 0);
542 	anon_vma->rb_root = RB_ROOT_CACHED;
543 }
544 
545 void __init anon_vma_init(void)
546 {
547 	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
548 			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
549 			anon_vma_ctor);
550 	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
551 			SLAB_PANIC|SLAB_ACCOUNT);
552 }
553 
554 /*
555  * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
556  *
557  * Since there is no serialization what so ever against folio_remove_rmap_*()
558  * the best this function can do is return a refcount increased anon_vma
559  * that might have been relevant to this page.
560  *
561  * The page might have been remapped to a different anon_vma or the anon_vma
562  * returned may already be freed (and even reused).
563  *
564  * In case it was remapped to a different anon_vma, the new anon_vma will be a
565  * child of the old anon_vma, and the anon_vma lifetime rules will therefore
566  * ensure that any anon_vma obtained from the page will still be valid for as
567  * long as we observe page_mapped() [ hence all those page_mapped() tests ].
568  *
569  * All users of this function must be very careful when walking the anon_vma
570  * chain and verify that the page in question is indeed mapped in it
571  * [ something equivalent to page_mapped_in_vma() ].
572  *
573  * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
574  * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid
575  * if there is a mapcount, we can dereference the anon_vma after observing
576  * those.
577  *
578  * NOTE: the caller should hold folio lock when calling this.
579  */
580 struct anon_vma *folio_get_anon_vma(const struct folio *folio)
581 {
582 	struct anon_vma *anon_vma = NULL;
583 	unsigned long anon_mapping;
584 
585 	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
586 
587 	rcu_read_lock();
588 	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
589 	if ((anon_mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)
590 		goto out;
591 	if (!folio_mapped(folio))
592 		goto out;
593 
594 	anon_vma = (struct anon_vma *) (anon_mapping - FOLIO_MAPPING_ANON);
595 	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
596 		anon_vma = NULL;
597 		goto out;
598 	}
599 
600 	/*
601 	 * If this folio is still mapped, then its anon_vma cannot have been
602 	 * freed.  But if it has been unmapped, we have no security against the
603 	 * anon_vma structure being freed and reused (for another anon_vma:
604 	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
605 	 * above cannot corrupt).
606 	 */
607 	if (!folio_mapped(folio)) {
608 		rcu_read_unlock();
609 		put_anon_vma(anon_vma);
610 		return NULL;
611 	}
612 out:
613 	rcu_read_unlock();
614 
615 	return anon_vma;
616 }
617 
618 /*
619  * Similar to folio_get_anon_vma() except it locks the anon_vma.
620  *
621  * Its a little more complex as it tries to keep the fast path to a single
622  * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
623  * reference like with folio_get_anon_vma() and then block on the mutex
624  * on !rwc->try_lock case.
625  */
626 struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
627 					  struct rmap_walk_control *rwc)
628 {
629 	struct anon_vma *anon_vma = NULL;
630 	struct anon_vma *root_anon_vma;
631 	unsigned long anon_mapping;
632 
633 	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
634 
635 	rcu_read_lock();
636 	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
637 	if ((anon_mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)
638 		goto out;
639 	if (!folio_mapped(folio))
640 		goto out;
641 
642 	anon_vma = (struct anon_vma *) (anon_mapping - FOLIO_MAPPING_ANON);
643 	root_anon_vma = READ_ONCE(anon_vma->root);
644 	if (down_read_trylock(&root_anon_vma->rwsem)) {
645 		/*
646 		 * If the folio is still mapped, then this anon_vma is still
647 		 * its anon_vma, and holding the mutex ensures that it will
648 		 * not go away, see anon_vma_free().
649 		 */
650 		if (!folio_mapped(folio)) {
651 			up_read(&root_anon_vma->rwsem);
652 			anon_vma = NULL;
653 		}
654 		goto out;
655 	}
656 
657 	if (rwc && rwc->try_lock) {
658 		anon_vma = NULL;
659 		rwc->contended = true;
660 		goto out;
661 	}
662 
663 	/* trylock failed, we got to sleep */
664 	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
665 		anon_vma = NULL;
666 		goto out;
667 	}
668 
669 	if (!folio_mapped(folio)) {
670 		rcu_read_unlock();
671 		put_anon_vma(anon_vma);
672 		return NULL;
673 	}
674 
675 	/* we pinned the anon_vma, its safe to sleep */
676 	rcu_read_unlock();
677 	anon_vma_lock_read(anon_vma);
678 
679 	if (atomic_dec_and_test(&anon_vma->refcount)) {
680 		/*
681 		 * Oops, we held the last refcount, release the lock
682 		 * and bail -- can't simply use put_anon_vma() because
683 		 * we'll deadlock on the anon_vma_lock_write() recursion.
684 		 */
685 		anon_vma_unlock_read(anon_vma);
686 		__put_anon_vma(anon_vma);
687 		anon_vma = NULL;
688 	}
689 
690 	return anon_vma;
691 
692 out:
693 	rcu_read_unlock();
694 	return anon_vma;
695 }
696 
697 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
698 /*
699  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
700  * important if a PTE was dirty when it was unmapped that it's flushed
701  * before any IO is initiated on the page to prevent lost writes. Similarly,
702  * it must be flushed before freeing to prevent data leakage.
703  */
704 void try_to_unmap_flush(void)
705 {
706 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
707 
708 	if (!tlb_ubc->flush_required)
709 		return;
710 
711 	arch_tlbbatch_flush(&tlb_ubc->arch);
712 	tlb_ubc->flush_required = false;
713 	tlb_ubc->writable = false;
714 }
715 
716 /* Flush iff there are potentially writable TLB entries that can race with IO */
717 void try_to_unmap_flush_dirty(void)
718 {
719 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
720 
721 	if (tlb_ubc->writable)
722 		try_to_unmap_flush();
723 }
724 
725 /*
726  * Bits 0-14 of mm->tlb_flush_batched record pending generations.
727  * Bits 16-30 of mm->tlb_flush_batched bit record flushed generations.
728  */
729 #define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
730 #define TLB_FLUSH_BATCH_PENDING_MASK			\
731 	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
732 #define TLB_FLUSH_BATCH_PENDING_LARGE			\
733 	(TLB_FLUSH_BATCH_PENDING_MASK / 2)
734 
735 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
736 		unsigned long start, unsigned long end)
737 {
738 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
739 	int batch;
740 	bool writable = pte_dirty(pteval);
741 
742 	if (!pte_accessible(mm, pteval))
743 		return;
744 
745 	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, start, end);
746 	tlb_ubc->flush_required = true;
747 
748 	/*
749 	 * Ensure compiler does not re-order the setting of tlb_flush_batched
750 	 * before the PTE is cleared.
751 	 */
752 	barrier();
753 	batch = atomic_read(&mm->tlb_flush_batched);
754 retry:
755 	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
756 		/*
757 		 * Prevent `pending' from catching up with `flushed' because of
758 		 * overflow.  Reset `pending' and `flushed' to be 1 and 0 if
759 		 * `pending' becomes large.
760 		 */
761 		if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1))
762 			goto retry;
763 	} else {
764 		atomic_inc(&mm->tlb_flush_batched);
765 	}
766 
767 	/*
768 	 * If the PTE was dirty then it's best to assume it's writable. The
769 	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
770 	 * before the page is queued for IO.
771 	 */
772 	if (writable)
773 		tlb_ubc->writable = true;
774 }
775 
776 /*
777  * Returns true if the TLB flush should be deferred to the end of a batch of
778  * unmap operations to reduce IPIs.
779  */
780 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
781 {
782 	if (!(flags & TTU_BATCH_FLUSH))
783 		return false;
784 
785 	return arch_tlbbatch_should_defer(mm);
786 }
787 
788 /*
789  * Reclaim unmaps pages under the PTL but do not flush the TLB prior to
790  * releasing the PTL if TLB flushes are batched. It's possible for a parallel
791  * operation such as mprotect or munmap to race between reclaim unmapping
792  * the page and flushing the page. If this race occurs, it potentially allows
793  * access to data via a stale TLB entry. Tracking all mm's that have TLB
794  * batching in flight would be expensive during reclaim so instead track
795  * whether TLB batching occurred in the past and if so then do a flush here
796  * if required. This will cost one additional flush per reclaim cycle paid
797  * by the first operation at risk such as mprotect and mumap.
798  *
799  * This must be called under the PTL so that an access to tlb_flush_batched
800  * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
801  * via the PTL.
802  */
803 void flush_tlb_batched_pending(struct mm_struct *mm)
804 {
805 	int batch = atomic_read(&mm->tlb_flush_batched);
806 	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
807 	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;
808 
809 	if (pending != flushed) {
810 		flush_tlb_mm(mm);
811 		/*
812 		 * If the new TLB flushing is pending during flushing, leave
813 		 * mm->tlb_flush_batched as is, to avoid losing flushing.
814 		 */
815 		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
816 			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
817 	}
818 }
819 #else
820 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
821 		unsigned long start, unsigned long end)
822 {
823 }
824 
825 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
826 {
827 	return false;
828 }
829 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
830 
831 /**
832  * page_address_in_vma - The virtual address of a page in this VMA.
833  * @folio: The folio containing the page.
834  * @page: The page within the folio.
835  * @vma: The VMA we need to know the address in.
836  *
837  * Calculates the user virtual address of this page in the specified VMA.
838  * It is the caller's responsibility to check the page is actually
839  * within the VMA.  There may not currently be a PTE pointing at this
840  * page, but if a page fault occurs at this address, this is the page
841  * which will be accessed.
842  *
843  * Context: Caller should hold a reference to the folio.  Caller should
844  * hold a lock (eg the i_mmap_lock or the mmap_lock) which keeps the
845  * VMA from being altered.
846  *
847  * Return: The virtual address corresponding to this page in the VMA.
848  */
849 unsigned long page_address_in_vma(const struct folio *folio,
850 		const struct page *page, const struct vm_area_struct *vma)
851 {
852 	if (folio_test_anon(folio)) {
853 		struct anon_vma *anon_vma = folio_anon_vma(folio);
854 		/*
855 		 * Note: swapoff's unuse_vma() is more efficient with this
856 		 * check, and needs it to match anon_vma when KSM is active.
857 		 */
858 		if (!vma->anon_vma || !anon_vma ||
859 		    vma->anon_vma->root != anon_vma->root)
860 			return -EFAULT;
861 	} else if (!vma->vm_file) {
862 		return -EFAULT;
863 	} else if (vma->vm_file->f_mapping != folio->mapping) {
864 		return -EFAULT;
865 	}
866 
867 	/* KSM folios don't reach here because of the !anon_vma check */
868 	return vma_address(vma, page_pgoff(folio, page), 1);
869 }
870 
871 /*
872  * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
873  * NULL if it doesn't exist.  No guarantees / checks on what the pmd_t*
874  * represents.
875  */
876 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
877 {
878 	pgd_t *pgd;
879 	p4d_t *p4d;
880 	pud_t *pud;
881 	pmd_t *pmd = NULL;
882 
883 	pgd = pgd_offset(mm, address);
884 	if (!pgd_present(*pgd))
885 		goto out;
886 
887 	p4d = p4d_offset(pgd, address);
888 	if (!p4d_present(*p4d))
889 		goto out;
890 
891 	pud = pud_offset(p4d, address);
892 	if (!pud_present(*pud))
893 		goto out;
894 
895 	pmd = pmd_offset(pud, address);
896 out:
897 	return pmd;
898 }
899 
900 struct folio_referenced_arg {
901 	int mapcount;
902 	int referenced;
903 	vm_flags_t vm_flags;
904 	struct mem_cgroup *memcg;
905 };
906 
907 /*
908  * arg: folio_referenced_arg will be passed
909  */
910 static bool folio_referenced_one(struct folio *folio,
911 		struct vm_area_struct *vma, unsigned long address, void *arg)
912 {
913 	struct folio_referenced_arg *pra = arg;
914 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
915 	int ptes = 0, referenced = 0;
916 
917 	while (page_vma_mapped_walk(&pvmw)) {
918 		address = pvmw.address;
919 
920 		if (vma->vm_flags & VM_LOCKED) {
921 			ptes++;
922 			pra->mapcount--;
923 
924 			/* Only mlock fully mapped pages */
925 			if (pvmw.pte && ptes != pvmw.nr_pages)
926 				continue;
927 
928 			/*
929 			 * All PTEs must be protected by page table lock in
930 			 * order to mlock the page.
931 			 *
932 			 * If page table boundary has been cross, current ptl
933 			 * only protect part of ptes.
934 			 */
935 			if (pvmw.flags & PVMW_PGTABLE_CROSSED)
936 				continue;
937 
938 			/* Restore the mlock which got missed */
939 			mlock_vma_folio(folio, vma);
940 			page_vma_mapped_walk_done(&pvmw);
941 			pra->vm_flags |= VM_LOCKED;
942 			return false; /* To break the loop */
943 		}
944 
945 		/*
946 		 * Skip the non-shared swapbacked folio mapped solely by
947 		 * the exiting or OOM-reaped process. This avoids redundant
948 		 * swap-out followed by an immediate unmap.
949 		 */
950 		if ((!atomic_read(&vma->vm_mm->mm_users) ||
951 		    check_stable_address_space(vma->vm_mm)) &&
952 		    folio_test_anon(folio) && folio_test_swapbacked(folio) &&
953 		    !folio_maybe_mapped_shared(folio)) {
954 			pra->referenced = -1;
955 			page_vma_mapped_walk_done(&pvmw);
956 			return false;
957 		}
958 
959 		if (lru_gen_enabled() && pvmw.pte) {
960 			if (lru_gen_look_around(&pvmw))
961 				referenced++;
962 		} else if (pvmw.pte) {
963 			if (ptep_clear_flush_young_notify(vma, address,
964 						pvmw.pte))
965 				referenced++;
966 		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
967 			if (pmdp_clear_flush_young_notify(vma, address,
968 						pvmw.pmd))
969 				referenced++;
970 		} else {
971 			/* unexpected pmd-mapped folio? */
972 			WARN_ON_ONCE(1);
973 		}
974 
975 		pra->mapcount--;
976 	}
977 
978 	if (referenced)
979 		folio_clear_idle(folio);
980 	if (folio_test_clear_young(folio))
981 		referenced++;
982 
983 	if (referenced) {
984 		pra->referenced++;
985 		pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
986 	}
987 
988 	if (!pra->mapcount)
989 		return false; /* To break the loop */
990 
991 	return true;
992 }
993 
994 static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
995 {
996 	struct folio_referenced_arg *pra = arg;
997 	struct mem_cgroup *memcg = pra->memcg;
998 
999 	/*
1000 	 * Ignore references from this mapping if it has no recency. If the
1001 	 * folio has been used in another mapping, we will catch it; if this
1002 	 * other mapping is already gone, the unmap path will have set the
1003 	 * referenced flag or activated the folio in zap_pte_range().
1004 	 */
1005 	if (!vma_has_recency(vma))
1006 		return true;
1007 
1008 	/*
1009 	 * If we are reclaiming on behalf of a cgroup, skip counting on behalf
1010 	 * of references from different cgroups.
1011 	 */
1012 	if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
1013 		return true;
1014 
1015 	return false;
1016 }
1017 
1018 /**
1019  * folio_referenced() - Test if the folio was referenced.
1020  * @folio: The folio to test.
1021  * @is_locked: Caller holds lock on the folio.
1022  * @memcg: target memory cgroup
1023  * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
1024  *
1025  * Quick test_and_clear_referenced for all mappings of a folio,
1026  *
1027  * Return: The number of mappings which referenced the folio. Return -1 if
1028  * the function bailed out due to rmap lock contention.
1029  */
1030 int folio_referenced(struct folio *folio, int is_locked,
1031 		     struct mem_cgroup *memcg, vm_flags_t *vm_flags)
1032 {
1033 	bool we_locked = false;
1034 	struct folio_referenced_arg pra = {
1035 		.mapcount = folio_mapcount(folio),
1036 		.memcg = memcg,
1037 	};
1038 	struct rmap_walk_control rwc = {
1039 		.rmap_one = folio_referenced_one,
1040 		.arg = (void *)&pra,
1041 		.anon_lock = folio_lock_anon_vma_read,
1042 		.try_lock = true,
1043 		.invalid_vma = invalid_folio_referenced_vma,
1044 	};
1045 
1046 	*vm_flags = 0;
1047 	if (!pra.mapcount)
1048 		return 0;
1049 
1050 	if (!folio_raw_mapping(folio))
1051 		return 0;
1052 
1053 	if (!is_locked) {
1054 		we_locked = folio_trylock(folio);
1055 		if (!we_locked)
1056 			return 1;
1057 	}
1058 
1059 	rmap_walk(folio, &rwc);
1060 	*vm_flags = pra.vm_flags;
1061 
1062 	if (we_locked)
1063 		folio_unlock(folio);
1064 
1065 	return rwc.contended ? -1 : pra.referenced;
1066 }
1067 
1068 static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
1069 {
1070 	int cleaned = 0;
1071 	struct vm_area_struct *vma = pvmw->vma;
1072 	struct mmu_notifier_range range;
1073 	unsigned long address = pvmw->address;
1074 
1075 	/*
1076 	 * We have to assume the worse case ie pmd for invalidation. Note that
1077 	 * the folio can not be freed from this function.
1078 	 */
1079 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0,
1080 				vma->vm_mm, address, vma_address_end(pvmw));
1081 	mmu_notifier_invalidate_range_start(&range);
1082 
1083 	while (page_vma_mapped_walk(pvmw)) {
1084 		int ret = 0;
1085 
1086 		address = pvmw->address;
1087 		if (pvmw->pte) {
1088 			pte_t *pte = pvmw->pte;
1089 			pte_t entry = ptep_get(pte);
1090 
1091 			/*
1092 			 * PFN swap PTEs, such as device-exclusive ones, that
1093 			 * actually map pages are clean and not writable from a
1094 			 * CPU perspective. The MMU notifier takes care of any
1095 			 * device aspects.
1096 			 */
1097 			if (!pte_present(entry))
1098 				continue;
1099 			if (!pte_dirty(entry) && !pte_write(entry))
1100 				continue;
1101 
1102 			flush_cache_page(vma, address, pte_pfn(entry));
1103 			entry = ptep_clear_flush(vma, address, pte);
1104 			entry = pte_wrprotect(entry);
1105 			entry = pte_mkclean(entry);
1106 			set_pte_at(vma->vm_mm, address, pte, entry);
1107 			ret = 1;
1108 		} else {
1109 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1110 			pmd_t *pmd = pvmw->pmd;
1111 			pmd_t entry = pmdp_get(pmd);
1112 
1113 			/*
1114 			 * Please see the comment above (!pte_present).
1115 			 * A non present PMD is not writable from a CPU
1116 			 * perspective.
1117 			 */
1118 			if (!pmd_present(entry))
1119 				continue;
1120 			if (!pmd_dirty(entry) && !pmd_write(entry))
1121 				continue;
1122 
1123 			flush_cache_range(vma, address,
1124 					  address + HPAGE_PMD_SIZE);
1125 			entry = pmdp_invalidate(vma, address, pmd);
1126 			entry = pmd_wrprotect(entry);
1127 			entry = pmd_mkclean(entry);
1128 			set_pmd_at(vma->vm_mm, address, pmd, entry);
1129 			ret = 1;
1130 #else
1131 			/* unexpected pmd-mapped folio? */
1132 			WARN_ON_ONCE(1);
1133 #endif
1134 		}
1135 
1136 		if (ret)
1137 			cleaned++;
1138 	}
1139 
1140 	mmu_notifier_invalidate_range_end(&range);
1141 
1142 	return cleaned;
1143 }
1144 
1145 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
1146 			     unsigned long address, void *arg)
1147 {
1148 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
1149 	int *cleaned = arg;
1150 
1151 	*cleaned += page_vma_mkclean_one(&pvmw);
1152 
1153 	return true;
1154 }
1155 
1156 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
1157 {
1158 	if (vma->vm_flags & VM_SHARED)
1159 		return false;
1160 
1161 	return true;
1162 }
1163 
1164 int folio_mkclean(struct folio *folio)
1165 {
1166 	int cleaned = 0;
1167 	struct address_space *mapping;
1168 	struct rmap_walk_control rwc = {
1169 		.arg = (void *)&cleaned,
1170 		.rmap_one = page_mkclean_one,
1171 		.invalid_vma = invalid_mkclean_vma,
1172 	};
1173 
1174 	BUG_ON(!folio_test_locked(folio));
1175 
1176 	if (!folio_mapped(folio))
1177 		return 0;
1178 
1179 	mapping = folio_mapping(folio);
1180 	if (!mapping)
1181 		return 0;
1182 
1183 	rmap_walk(folio, &rwc);
1184 
1185 	return cleaned;
1186 }
1187 EXPORT_SYMBOL_GPL(folio_mkclean);
1188 
1189 struct wrprotect_file_state {
1190 	int cleaned;
1191 	pgoff_t pgoff;
1192 	unsigned long pfn;
1193 	unsigned long nr_pages;
1194 };
1195 
1196 static bool mapping_wrprotect_range_one(struct folio *folio,
1197 		struct vm_area_struct *vma, unsigned long address, void *arg)
1198 {
1199 	struct wrprotect_file_state *state = (struct wrprotect_file_state *)arg;
1200 	struct page_vma_mapped_walk pvmw = {
1201 		.pfn		= state->pfn,
1202 		.nr_pages	= state->nr_pages,
1203 		.pgoff		= state->pgoff,
1204 		.vma		= vma,
1205 		.address	= address,
1206 		.flags		= PVMW_SYNC,
1207 	};
1208 
1209 	state->cleaned += page_vma_mkclean_one(&pvmw);
1210 
1211 	return true;
1212 }
1213 
1214 static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
1215 			     pgoff_t pgoff_start, unsigned long nr_pages,
1216 			     struct rmap_walk_control *rwc, bool locked);
1217 
1218 /**
1219  * mapping_wrprotect_range() - Write-protect all mappings in a specified range.
1220  *
1221  * @mapping:	The mapping whose reverse mapping should be traversed.
1222  * @pgoff:	The page offset at which @pfn is mapped within @mapping.
1223  * @pfn:	The PFN of the page mapped in @mapping at @pgoff.
1224  * @nr_pages:	The number of physically contiguous base pages spanned.
1225  *
1226  * Traverses the reverse mapping, finding all VMAs which contain a shared
1227  * mapping of the pages in the specified range in @mapping, and write-protects
1228  * them (that is, updates the page tables to mark the mappings read-only such
1229  * that a write protection fault arises when the mappings are written to).
1230  *
1231  * The @pfn value need not refer to a folio, but rather can reference a kernel
1232  * allocation which is mapped into userland. We therefore do not require that
1233  * the page maps to a folio with a valid mapping or index field, rather the
1234  * caller specifies these in @mapping and @pgoff.
1235  *
1236  * Return: the number of write-protected PTEs, or an error.
1237  */
1238 int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
1239 		unsigned long pfn, unsigned long nr_pages)
1240 {
1241 	struct wrprotect_file_state state = {
1242 		.cleaned = 0,
1243 		.pgoff = pgoff,
1244 		.pfn = pfn,
1245 		.nr_pages = nr_pages,
1246 	};
1247 	struct rmap_walk_control rwc = {
1248 		.arg = (void *)&state,
1249 		.rmap_one = mapping_wrprotect_range_one,
1250 		.invalid_vma = invalid_mkclean_vma,
1251 	};
1252 
1253 	if (!mapping)
1254 		return 0;
1255 
1256 	__rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc,
1257 			 /* locked = */false);
1258 
1259 	return state.cleaned;
1260 }
1261 EXPORT_SYMBOL_GPL(mapping_wrprotect_range);
1262 
1263 /**
1264  * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of
1265  *                     [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff)
1266  *                     within the @vma of shared mappings. And since clean PTEs
1267  *                     should also be readonly, write protects them too.
1268  * @pfn: start pfn.
1269  * @nr_pages: number of physically contiguous pages srarting with @pfn.
1270  * @pgoff: page offset that the @pfn mapped with.
1271  * @vma: vma that @pfn mapped within.
1272  *
1273  * Returns the number of cleaned PTEs (including PMDs).
1274  */
1275 int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
1276 		      struct vm_area_struct *vma)
1277 {
1278 	struct page_vma_mapped_walk pvmw = {
1279 		.pfn		= pfn,
1280 		.nr_pages	= nr_pages,
1281 		.pgoff		= pgoff,
1282 		.vma		= vma,
1283 		.flags		= PVMW_SYNC,
1284 	};
1285 
1286 	if (invalid_mkclean_vma(vma, NULL))
1287 		return 0;
1288 
1289 	pvmw.address = vma_address(vma, pgoff, nr_pages);
1290 	VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);
1291 
1292 	return page_vma_mkclean_one(&pvmw);
1293 }
1294 
1295 static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped)
1296 {
1297 	int idx;
1298 
1299 	if (nr) {
1300 		idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
1301 		lruvec_stat_mod_folio(folio, idx, nr);
1302 	}
1303 	if (nr_pmdmapped) {
1304 		if (folio_test_anon(folio)) {
1305 			idx = NR_ANON_THPS;
1306 			lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
1307 		} else {
1308 			/* NR_*_PMDMAPPED are not maintained per-memcg */
1309 			idx = folio_test_swapbacked(folio) ?
1310 				NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED;
1311 			__mod_node_page_state(folio_pgdat(folio), idx,
1312 					      nr_pmdmapped);
1313 		}
1314 	}
1315 }
1316 
1317 static __always_inline void __folio_add_rmap(struct folio *folio,
1318 		struct page *page, int nr_pages, struct vm_area_struct *vma,
1319 		enum pgtable_level level)
1320 {
1321 	atomic_t *mapped = &folio->_nr_pages_mapped;
1322 	const int orig_nr_pages = nr_pages;
1323 	int first = 0, nr = 0, nr_pmdmapped = 0;
1324 
1325 	__folio_rmap_sanity_checks(folio, page, nr_pages, level);
1326 
1327 	switch (level) {
1328 	case PGTABLE_LEVEL_PTE:
1329 		if (!folio_test_large(folio)) {
1330 			nr = atomic_inc_and_test(&folio->_mapcount);
1331 			break;
1332 		}
1333 
1334 		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
1335 			nr = folio_add_return_large_mapcount(folio, orig_nr_pages, vma);
1336 			if (nr == orig_nr_pages)
1337 				/* Was completely unmapped. */
1338 				nr = folio_large_nr_pages(folio);
1339 			else
1340 				nr = 0;
1341 			break;
1342 		}
1343 
1344 		do {
1345 			first += atomic_inc_and_test(&page->_mapcount);
1346 		} while (page++, --nr_pages > 0);
1347 
1348 		if (first &&
1349 		    atomic_add_return_relaxed(first, mapped) < ENTIRELY_MAPPED)
1350 			nr = first;
1351 
1352 		folio_add_large_mapcount(folio, orig_nr_pages, vma);
1353 		break;
1354 	case PGTABLE_LEVEL_PMD:
1355 	case PGTABLE_LEVEL_PUD:
1356 		first = atomic_inc_and_test(&folio->_entire_mapcount);
1357 		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
1358 			if (level == PGTABLE_LEVEL_PMD && first)
1359 				nr_pmdmapped = folio_large_nr_pages(folio);
1360 			nr = folio_inc_return_large_mapcount(folio, vma);
1361 			if (nr == 1)
1362 				/* Was completely unmapped. */
1363 				nr = folio_large_nr_pages(folio);
1364 			else
1365 				nr = 0;
1366 			break;
1367 		}
1368 
1369 		if (first) {
1370 			nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped);
1371 			if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) {
1372 				nr_pages = folio_large_nr_pages(folio);
1373 				/*
1374 				 * We only track PMD mappings of PMD-sized
1375 				 * folios separately.
1376 				 */
1377 				if (level == PGTABLE_LEVEL_PMD)
1378 					nr_pmdmapped = nr_pages;
1379 				nr = nr_pages - (nr & FOLIO_PAGES_MAPPED);
1380 				/* Raced ahead of a remove and another add? */
1381 				if (unlikely(nr < 0))
1382 					nr = 0;
1383 			} else {
1384 				/* Raced ahead of a remove of ENTIRELY_MAPPED */
1385 				nr = 0;
1386 			}
1387 		}
1388 		folio_inc_large_mapcount(folio, vma);
1389 		break;
1390 	default:
1391 		BUILD_BUG();
1392 	}
1393 	__folio_mod_stat(folio, nr, nr_pmdmapped);
1394 }
1395 
1396 /**
1397  * folio_move_anon_rmap - move a folio to our anon_vma
1398  * @folio:	The folio to move to our anon_vma
1399  * @vma:	The vma the folio belongs to
1400  *
1401  * When a folio belongs exclusively to one process after a COW event,
1402  * that folio can be moved into the anon_vma that belongs to just that
1403  * process, so the rmap code will not search the parent or sibling processes.
1404  */
1405 void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma)
1406 {
1407 	void *anon_vma = vma->anon_vma;
1408 
1409 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1410 	VM_BUG_ON_VMA(!anon_vma, vma);
1411 
1412 	anon_vma += FOLIO_MAPPING_ANON;
1413 	/*
1414 	 * Ensure that anon_vma and the FOLIO_MAPPING_ANON bit are written
1415 	 * simultaneously, so a concurrent reader (eg folio_referenced()'s
1416 	 * folio_test_anon()) will not see one without the other.
1417 	 */
1418 	WRITE_ONCE(folio->mapping, anon_vma);
1419 }
1420 
1421 /**
1422  * __folio_set_anon - set up a new anonymous rmap for a folio
1423  * @folio:	The folio to set up the new anonymous rmap for.
1424  * @vma:	VM area to add the folio to.
1425  * @address:	User virtual address of the mapping
1426  * @exclusive:	Whether the folio is exclusive to the process.
1427  */
1428 static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
1429 			     unsigned long address, bool exclusive)
1430 {
1431 	struct anon_vma *anon_vma = vma->anon_vma;
1432 
1433 	BUG_ON(!anon_vma);
1434 
1435 	/*
1436 	 * If the folio isn't exclusive to this vma, we must use the _oldest_
1437 	 * possible anon_vma for the folio mapping!
1438 	 */
1439 	if (!exclusive)
1440 		anon_vma = anon_vma->root;
1441 
1442 	/*
1443 	 * page_idle does a lockless/optimistic rmap scan on folio->mapping.
1444 	 * Make sure the compiler doesn't split the stores of anon_vma and
1445 	 * the FOLIO_MAPPING_ANON type identifier, otherwise the rmap code
1446 	 * could mistake the mapping for a struct address_space and crash.
1447 	 */
1448 	anon_vma = (void *) anon_vma + FOLIO_MAPPING_ANON;
1449 	WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
1450 	folio->index = linear_page_index(vma, address);
1451 }
1452 
1453 /**
1454  * __page_check_anon_rmap - sanity check anonymous rmap addition
1455  * @folio:	The folio containing @page.
1456  * @page:	the page to check the mapping of
1457  * @vma:	the vm area in which the mapping is added
1458  * @address:	the user virtual address mapped
1459  */
1460 static void __page_check_anon_rmap(const struct folio *folio,
1461 		const struct page *page, struct vm_area_struct *vma,
1462 		unsigned long address)
1463 {
1464 	/*
1465 	 * The page's anon-rmap details (mapping and index) are guaranteed to
1466 	 * be set up correctly at this point.
1467 	 *
1468 	 * We have exclusion against folio_add_anon_rmap_*() because the caller
1469 	 * always holds the page locked.
1470 	 *
1471 	 * We have exclusion against folio_add_new_anon_rmap because those pages
1472 	 * are initially only visible via the pagetables, and the pte is locked
1473 	 * over the call to folio_add_new_anon_rmap.
1474 	 */
1475 	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
1476 			folio);
1477 	VM_BUG_ON_PAGE(page_pgoff(folio, page) != linear_page_index(vma, address),
1478 		       page);
1479 }
1480 
1481 static __always_inline void __folio_add_anon_rmap(struct folio *folio,
1482 		struct page *page, int nr_pages, struct vm_area_struct *vma,
1483 		unsigned long address, rmap_t flags, enum pgtable_level level)
1484 {
1485 	int i;
1486 
1487 	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
1488 
1489 	__folio_add_rmap(folio, page, nr_pages, vma, level);
1490 
1491 	if (likely(!folio_test_ksm(folio)))
1492 		__page_check_anon_rmap(folio, page, vma, address);
1493 
1494 	if (flags & RMAP_EXCLUSIVE) {
1495 		switch (level) {
1496 		case PGTABLE_LEVEL_PTE:
1497 			for (i = 0; i < nr_pages; i++)
1498 				SetPageAnonExclusive(page + i);
1499 			break;
1500 		case PGTABLE_LEVEL_PMD:
1501 			SetPageAnonExclusive(page);
1502 			break;
1503 		case PGTABLE_LEVEL_PUD:
1504 			/*
1505 			 * Keep the compiler happy, we don't support anonymous
1506 			 * PUD mappings.
1507 			 */
1508 			WARN_ON_ONCE(1);
1509 			break;
1510 		default:
1511 			BUILD_BUG();
1512 		}
1513 	}
1514 
1515 	VM_WARN_ON_FOLIO(!folio_test_large(folio) && PageAnonExclusive(page) &&
1516 			 atomic_read(&folio->_mapcount) > 0, folio);
1517 	for (i = 0; i < nr_pages; i++) {
1518 		struct page *cur_page = page + i;
1519 
1520 		VM_WARN_ON_FOLIO(folio_test_large(folio) &&
1521 				 folio_entire_mapcount(folio) > 1 &&
1522 				 PageAnonExclusive(cur_page), folio);
1523 		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
1524 			continue;
1525 
1526 		/*
1527 		 * While PTE-mapping a THP we have a PMD and a PTE
1528 		 * mapping.
1529 		 */
1530 		VM_WARN_ON_FOLIO(atomic_read(&cur_page->_mapcount) > 0 &&
1531 				 PageAnonExclusive(cur_page), folio);
1532 	}
1533 
1534 	/*
1535 	 * Only mlock it if the folio is fully mapped to the VMA.
1536 	 *
1537 	 * Partially mapped folios can be split on reclaim and part outside
1538 	 * of mlocked VMA can be evicted or freed.
1539 	 */
1540 	if (folio_nr_pages(folio) == nr_pages)
1541 		mlock_vma_folio(folio, vma);
1542 }
1543 
1544 /**
1545  * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio
1546  * @folio:	The folio to add the mappings to
1547  * @page:	The first page to add
1548  * @nr_pages:	The number of pages which will be mapped
1549  * @vma:	The vm area in which the mappings are added
1550  * @address:	The user virtual address of the first page to map
1551  * @flags:	The rmap flags
1552  *
1553  * The page range of folio is defined by [first_page, first_page + nr_pages)
1554  *
1555  * The caller needs to hold the page table lock, and the page must be locked in
1556  * the anon_vma case: to serialize mapping,index checking after setting,
1557  * and to ensure that an anon folio is not being upgraded racily to a KSM folio
1558  * (but KSM folios are never downgraded).
1559  */
1560 void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page,
1561 		int nr_pages, struct vm_area_struct *vma, unsigned long address,
1562 		rmap_t flags)
1563 {
1564 	__folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags,
1565 			      PGTABLE_LEVEL_PTE);
1566 }
1567 
1568 /**
1569  * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio
1570  * @folio:	The folio to add the mapping to
1571  * @page:	The first page to add
1572  * @vma:	The vm area in which the mapping is added
1573  * @address:	The user virtual address of the first page to map
1574  * @flags:	The rmap flags
1575  *
1576  * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR)
1577  *
1578  * The caller needs to hold the page table lock, and the page must be locked in
1579  * the anon_vma case: to serialize mapping,index checking after setting.
1580  */
1581 void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page,
1582 		struct vm_area_struct *vma, unsigned long address, rmap_t flags)
1583 {
1584 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1585 	__folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags,
1586 			      PGTABLE_LEVEL_PMD);
1587 #else
1588 	WARN_ON_ONCE(true);
1589 #endif
1590 }
1591 
1592 /**
1593  * folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
1594  * @folio:	The folio to add the mapping to.
1595  * @vma:	the vm area in which the mapping is added
1596  * @address:	the user virtual address mapped
1597  * @flags:	The rmap flags
1598  *
1599  * Like folio_add_anon_rmap_*() but must only be called on *new* folios.
1600  * This means the inc-and-test can be bypassed.
1601  * The folio doesn't necessarily need to be locked while it's exclusive
1602  * unless two threads map it concurrently. However, the folio must be
1603  * locked if it's shared.
1604  *
1605  * If the folio is pmd-mappable, it is accounted as a THP.
1606  */
1607 void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
1608 		unsigned long address, rmap_t flags)
1609 {
1610 	const bool exclusive = flags & RMAP_EXCLUSIVE;
1611 	int nr = 1, nr_pmdmapped = 0;
1612 
1613 	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
1614 	VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio);
1615 
1616 	/*
1617 	 * VM_DROPPABLE mappings don't swap; instead they're just dropped when
1618 	 * under memory pressure.
1619 	 */
1620 	if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE))
1621 		__folio_set_swapbacked(folio);
1622 	__folio_set_anon(folio, vma, address, exclusive);
1623 
1624 	if (likely(!folio_test_large(folio))) {
1625 		/* increment count (starts at -1) */
1626 		atomic_set(&folio->_mapcount, 0);
1627 		if (exclusive)
1628 			SetPageAnonExclusive(&folio->page);
1629 	} else if (!folio_test_pmd_mappable(folio)) {
1630 		int i;
1631 
1632 		nr = folio_large_nr_pages(folio);
1633 		for (i = 0; i < nr; i++) {
1634 			struct page *page = folio_page(folio, i);
1635 
1636 			if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
1637 				/* increment count (starts at -1) */
1638 				atomic_set(&page->_mapcount, 0);
1639 			if (exclusive)
1640 				SetPageAnonExclusive(page);
1641 		}
1642 
1643 		folio_set_large_mapcount(folio, nr, vma);
1644 		if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
1645 			atomic_set(&folio->_nr_pages_mapped, nr);
1646 	} else {
1647 		nr = folio_large_nr_pages(folio);
1648 		/* increment count (starts at -1) */
1649 		atomic_set(&folio->_entire_mapcount, 0);
1650 		folio_set_large_mapcount(folio, 1, vma);
1651 		if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
1652 			atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED);
1653 		if (exclusive)
1654 			SetPageAnonExclusive(&folio->page);
1655 		nr_pmdmapped = nr;
1656 	}
1657 
1658 	VM_WARN_ON_ONCE(address < vma->vm_start ||
1659 			address + (nr << PAGE_SHIFT) > vma->vm_end);
1660 
1661 	__folio_mod_stat(folio, nr, nr_pmdmapped);
1662 	mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
1663 }
1664 
1665 static __always_inline void __folio_add_file_rmap(struct folio *folio,
1666 		struct page *page, int nr_pages, struct vm_area_struct *vma,
1667 		enum pgtable_level level)
1668 {
1669 	VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
1670 
1671 	__folio_add_rmap(folio, page, nr_pages, vma, level);
1672 
1673 	/*
1674 	 * Only mlock it if the folio is fully mapped to the VMA.
1675 	 *
1676 	 * Partially mapped folios can be split on reclaim and part outside
1677 	 * of mlocked VMA can be evicted or freed.
1678 	 */
1679 	if (folio_nr_pages(folio) == nr_pages)
1680 		mlock_vma_folio(folio, vma);
1681 }
1682 
1683 /**
1684  * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio
1685  * @folio:	The folio to add the mappings to
1686  * @page:	The first page to add
1687  * @nr_pages:	The number of pages that will be mapped using PTEs
1688  * @vma:	The vm area in which the mappings are added
1689  *
1690  * The page range of the folio is defined by [page, page + nr_pages)
1691  *
1692  * The caller needs to hold the page table lock.
1693  */
1694 void folio_add_file_rmap_ptes(struct folio *folio, struct page *page,
1695 		int nr_pages, struct vm_area_struct *vma)
1696 {
1697 	__folio_add_file_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE);
1698 }
1699 
1700 /**
1701  * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio
1702  * @folio:	The folio to add the mapping to
1703  * @page:	The first page to add
1704  * @vma:	The vm area in which the mapping is added
1705  *
1706  * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
1707  *
1708  * The caller needs to hold the page table lock.
1709  */
1710 void folio_add_file_rmap_pmd(struct folio *folio, struct page *page,
1711 		struct vm_area_struct *vma)
1712 {
1713 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1714 	__folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD);
1715 #else
1716 	WARN_ON_ONCE(true);
1717 #endif
1718 }
1719 
1720 /**
1721  * folio_add_file_rmap_pud - add a PUD mapping to a page range of a folio
1722  * @folio:	The folio to add the mapping to
1723  * @page:	The first page to add
1724  * @vma:	The vm area in which the mapping is added
1725  *
1726  * The page range of the folio is defined by [page, page + HPAGE_PUD_NR)
1727  *
1728  * The caller needs to hold the page table lock.
1729  */
1730 void folio_add_file_rmap_pud(struct folio *folio, struct page *page,
1731 		struct vm_area_struct *vma)
1732 {
1733 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
1734 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1735 	__folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD);
1736 #else
1737 	WARN_ON_ONCE(true);
1738 #endif
1739 }
1740 
1741 static __always_inline void __folio_remove_rmap(struct folio *folio,
1742 		struct page *page, int nr_pages, struct vm_area_struct *vma,
1743 		enum pgtable_level level)
1744 {
1745 	atomic_t *mapped = &folio->_nr_pages_mapped;
1746 	int last = 0, nr = 0, nr_pmdmapped = 0;
1747 	bool partially_mapped = false;
1748 
1749 	__folio_rmap_sanity_checks(folio, page, nr_pages, level);
1750 
1751 	switch (level) {
1752 	case PGTABLE_LEVEL_PTE:
1753 		if (!folio_test_large(folio)) {
1754 			nr = atomic_add_negative(-1, &folio->_mapcount);
1755 			break;
1756 		}
1757 
1758 		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
1759 			nr = folio_sub_return_large_mapcount(folio, nr_pages, vma);
1760 			if (!nr) {
1761 				/* Now completely unmapped. */
1762 				nr = folio_large_nr_pages(folio);
1763 			} else {
1764 				partially_mapped = nr < folio_large_nr_pages(folio) &&
1765 						   !folio_entire_mapcount(folio);
1766 				nr = 0;
1767 			}
1768 			break;
1769 		}
1770 
1771 		folio_sub_large_mapcount(folio, nr_pages, vma);
1772 		do {
1773 			last += atomic_add_negative(-1, &page->_mapcount);
1774 		} while (page++, --nr_pages > 0);
1775 
1776 		if (last &&
1777 		    atomic_sub_return_relaxed(last, mapped) < ENTIRELY_MAPPED)
1778 			nr = last;
1779 
1780 		partially_mapped = nr && atomic_read(mapped);
1781 		break;
1782 	case PGTABLE_LEVEL_PMD:
1783 	case PGTABLE_LEVEL_PUD:
1784 		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
1785 			last = atomic_add_negative(-1, &folio->_entire_mapcount);
1786 			if (level == PGTABLE_LEVEL_PMD && last)
1787 				nr_pmdmapped = folio_large_nr_pages(folio);
1788 			nr = folio_dec_return_large_mapcount(folio, vma);
1789 			if (!nr) {
1790 				/* Now completely unmapped. */
1791 				nr = folio_large_nr_pages(folio);
1792 			} else {
1793 				partially_mapped = last &&
1794 						   nr < folio_large_nr_pages(folio);
1795 				nr = 0;
1796 			}
1797 			break;
1798 		}
1799 
1800 		folio_dec_large_mapcount(folio, vma);
1801 		last = atomic_add_negative(-1, &folio->_entire_mapcount);
1802 		if (last) {
1803 			nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped);
1804 			if (likely(nr < ENTIRELY_MAPPED)) {
1805 				nr_pages = folio_large_nr_pages(folio);
1806 				if (level == PGTABLE_LEVEL_PMD)
1807 					nr_pmdmapped = nr_pages;
1808 				nr = nr_pages - nr;
1809 				/* Raced ahead of another remove and an add? */
1810 				if (unlikely(nr < 0))
1811 					nr = 0;
1812 			} else {
1813 				/* An add of ENTIRELY_MAPPED raced ahead */
1814 				nr = 0;
1815 			}
1816 		}
1817 
1818 		partially_mapped = nr && nr < nr_pmdmapped;
1819 		break;
1820 	default:
1821 		BUILD_BUG();
1822 	}
1823 
1824 	/*
1825 	 * Queue anon large folio for deferred split if at least one page of
1826 	 * the folio is unmapped and at least one page is still mapped.
1827 	 *
1828 	 * Check partially_mapped first to ensure it is a large folio.
1829 	 *
1830 	 * Device private folios do not support deferred splitting and
1831 	 * shrinker based scanning of the folios to free.
1832 	 */
1833 	if (partially_mapped && folio_test_anon(folio) &&
1834 	    !folio_test_partially_mapped(folio) &&
1835 	    !folio_is_device_private(folio))
1836 		deferred_split_folio(folio, true);
1837 
1838 	__folio_mod_stat(folio, -nr, -nr_pmdmapped);
1839 
1840 	/*
1841 	 * It would be tidy to reset folio_test_anon mapping when fully
1842 	 * unmapped, but that might overwrite a racing folio_add_anon_rmap_*()
1843 	 * which increments mapcount after us but sets mapping before us:
1844 	 * so leave the reset to free_pages_prepare, and remember that
1845 	 * it's only reliable while mapped.
1846 	 */
1847 
1848 	munlock_vma_folio(folio, vma);
1849 }
1850 
1851 /**
1852  * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio
1853  * @folio:	The folio to remove the mappings from
1854  * @page:	The first page to remove
1855  * @nr_pages:	The number of pages that will be removed from the mapping
1856  * @vma:	The vm area from which the mappings are removed
1857  *
1858  * The page range of the folio is defined by [page, page + nr_pages)
1859  *
1860  * The caller needs to hold the page table lock.
1861  */
1862 void folio_remove_rmap_ptes(struct folio *folio, struct page *page,
1863 		int nr_pages, struct vm_area_struct *vma)
1864 {
1865 	__folio_remove_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE);
1866 }
1867 
1868 /**
1869  * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio
1870  * @folio:	The folio to remove the mapping from
1871  * @page:	The first page to remove
1872  * @vma:	The vm area from which the mapping is removed
1873  *
1874  * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
1875  *
1876  * The caller needs to hold the page table lock.
1877  */
1878 void folio_remove_rmap_pmd(struct folio *folio, struct page *page,
1879 		struct vm_area_struct *vma)
1880 {
1881 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1882 	__folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD);
1883 #else
1884 	WARN_ON_ONCE(true);
1885 #endif
1886 }
1887 
1888 /**
1889  * folio_remove_rmap_pud - remove a PUD mapping from a page range of a folio
1890  * @folio:	The folio to remove the mapping from
1891  * @page:	The first page to remove
1892  * @vma:	The vm area from which the mapping is removed
1893  *
1894  * The page range of the folio is defined by [page, page + HPAGE_PUD_NR)
1895  *
1896  * The caller needs to hold the page table lock.
1897  */
1898 void folio_remove_rmap_pud(struct folio *folio, struct page *page,
1899 		struct vm_area_struct *vma)
1900 {
1901 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
1902 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1903 	__folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD);
1904 #else
1905 	WARN_ON_ONCE(true);
1906 #endif
1907 }
1908 
1909 static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
1910 			struct page_vma_mapped_walk *pvmw,
1911 			enum ttu_flags flags, pte_t pte)
1912 {
1913 	unsigned long end_addr, addr = pvmw->address;
1914 	struct vm_area_struct *vma = pvmw->vma;
1915 	unsigned int max_nr;
1916 
1917 	if (flags & TTU_HWPOISON)
1918 		return 1;
1919 	if (!folio_test_large(folio))
1920 		return 1;
1921 
1922 	/* We may only batch within a single VMA and a single page table. */
1923 	end_addr = pmd_addr_end(addr, vma->vm_end);
1924 	max_nr = (end_addr - addr) >> PAGE_SHIFT;
1925 
1926 	/* We only support lazyfree batching for now ... */
1927 	if (!folio_test_anon(folio) || folio_test_swapbacked(folio))
1928 		return 1;
1929 	if (pte_unused(pte))
1930 		return 1;
1931 
1932 	return folio_pte_batch(folio, pvmw->pte, pte, max_nr);
1933 }
1934 
1935 /*
1936  * @arg: enum ttu_flags will be passed to this argument
1937  */
1938 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
1939 		     unsigned long address, void *arg)
1940 {
1941 	struct mm_struct *mm = vma->vm_mm;
1942 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1943 	bool anon_exclusive, ret = true;
1944 	pte_t pteval;
1945 	struct page *subpage;
1946 	struct mmu_notifier_range range;
1947 	enum ttu_flags flags = (enum ttu_flags)(long)arg;
1948 	unsigned long nr_pages = 1, end_addr;
1949 	unsigned long pfn;
1950 	unsigned long hsz = 0;
1951 	int ptes = 0;
1952 
1953 	/*
1954 	 * When racing against e.g. zap_pte_range() on another cpu,
1955 	 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
1956 	 * try_to_unmap() may return before page_mapped() has become false,
1957 	 * if page table locking is skipped: use TTU_SYNC to wait for that.
1958 	 */
1959 	if (flags & TTU_SYNC)
1960 		pvmw.flags = PVMW_SYNC;
1961 
1962 	/*
1963 	 * For THP, we have to assume the worse case ie pmd for invalidation.
1964 	 * For hugetlb, it could be much worse if we need to do pud
1965 	 * invalidation in the case of pmd sharing.
1966 	 *
1967 	 * Note that the folio can not be freed in this function as call of
1968 	 * try_to_unmap() must hold a reference on the folio.
1969 	 */
1970 	range.end = vma_address_end(&pvmw);
1971 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
1972 				address, range.end);
1973 	if (folio_test_hugetlb(folio)) {
1974 		/*
1975 		 * If sharing is possible, start and end will be adjusted
1976 		 * accordingly.
1977 		 */
1978 		adjust_range_if_pmd_sharing_possible(vma, &range.start,
1979 						     &range.end);
1980 
1981 		/* We need the huge page size for set_huge_pte_at() */
1982 		hsz = huge_page_size(hstate_vma(vma));
1983 	}
1984 	mmu_notifier_invalidate_range_start(&range);
1985 
1986 	while (page_vma_mapped_walk(&pvmw)) {
1987 		/*
1988 		 * If the folio is in an mlock()d vma, we must not swap it out.
1989 		 */
1990 		if (!(flags & TTU_IGNORE_MLOCK) &&
1991 		    (vma->vm_flags & VM_LOCKED)) {
1992 			ptes++;
1993 
1994 			/*
1995 			 * Set 'ret' to indicate the page cannot be unmapped.
1996 			 *
1997 			 * Do not jump to walk_abort immediately as additional
1998 			 * iteration might be required to detect fully mapped
1999 			 * folio an mlock it.
2000 			 */
2001 			ret = false;
2002 
2003 			/* Only mlock fully mapped pages */
2004 			if (pvmw.pte && ptes != pvmw.nr_pages)
2005 				continue;
2006 
2007 			/*
2008 			 * All PTEs must be protected by page table lock in
2009 			 * order to mlock the page.
2010 			 *
2011 			 * If page table boundary has been cross, current ptl
2012 			 * only protect part of ptes.
2013 			 */
2014 			if (pvmw.flags & PVMW_PGTABLE_CROSSED)
2015 				goto walk_done;
2016 
2017 			/* Restore the mlock which got missed */
2018 			mlock_vma_folio(folio, vma);
2019 			goto walk_done;
2020 		}
2021 
2022 		if (!pvmw.pte) {
2023 			if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
2024 				if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio))
2025 					goto walk_done;
2026 				/*
2027 				 * unmap_huge_pmd_locked has either already marked
2028 				 * the folio as swap-backed or decided to retain it
2029 				 * due to GUP or speculative references.
2030 				 */
2031 				goto walk_abort;
2032 			}
2033 
2034 			if (flags & TTU_SPLIT_HUGE_PMD) {
2035 				/*
2036 				 * We temporarily have to drop the PTL and
2037 				 * restart so we can process the PTE-mapped THP.
2038 				 */
2039 				split_huge_pmd_locked(vma, pvmw.address,
2040 						      pvmw.pmd, false);
2041 				flags &= ~TTU_SPLIT_HUGE_PMD;
2042 				page_vma_mapped_walk_restart(&pvmw);
2043 				continue;
2044 			}
2045 		}
2046 
2047 		/* Unexpected PMD-mapped THP? */
2048 		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2049 
2050 		/*
2051 		 * Handle PFN swap PTEs, such as device-exclusive ones, that
2052 		 * actually map pages.
2053 		 */
2054 		pteval = ptep_get(pvmw.pte);
2055 		if (likely(pte_present(pteval))) {
2056 			pfn = pte_pfn(pteval);
2057 		} else {
2058 			const softleaf_t entry = softleaf_from_pte(pteval);
2059 
2060 			pfn = softleaf_to_pfn(entry);
2061 			VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
2062 		}
2063 
2064 		subpage = folio_page(folio, pfn - folio_pfn(folio));
2065 		address = pvmw.address;
2066 		anon_exclusive = folio_test_anon(folio) &&
2067 				 PageAnonExclusive(subpage);
2068 
2069 		if (folio_test_hugetlb(folio)) {
2070 			bool anon = folio_test_anon(folio);
2071 
2072 			/*
2073 			 * The try_to_unmap() is only passed a hugetlb page
2074 			 * in the case where the hugetlb page is poisoned.
2075 			 */
2076 			VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage);
2077 			/*
2078 			 * huge_pmd_unshare may unmap an entire PMD page.
2079 			 * There is no way of knowing exactly which PMDs may
2080 			 * be cached for this mm, so we must flush them all.
2081 			 * start/end were already adjusted above to cover this
2082 			 * range.
2083 			 */
2084 			flush_cache_range(vma, range.start, range.end);
2085 
2086 			/*
2087 			 * To call huge_pmd_unshare, i_mmap_rwsem must be
2088 			 * held in write mode.  Caller needs to explicitly
2089 			 * do this outside rmap routines.
2090 			 *
2091 			 * We also must hold hugetlb vma_lock in write mode.
2092 			 * Lock order dictates acquiring vma_lock BEFORE
2093 			 * i_mmap_rwsem.  We can only try lock here and fail
2094 			 * if unsuccessful.
2095 			 */
2096 			if (!anon) {
2097 				struct mmu_gather tlb;
2098 
2099 				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
2100 				if (!hugetlb_vma_trylock_write(vma))
2101 					goto walk_abort;
2102 
2103 				tlb_gather_mmu_vma(&tlb, vma);
2104 				if (huge_pmd_unshare(&tlb, vma, address, pvmw.pte)) {
2105 					hugetlb_vma_unlock_write(vma);
2106 					huge_pmd_unshare_flush(&tlb, vma);
2107 					tlb_finish_mmu(&tlb);
2108 					/*
2109 					 * The PMD table was unmapped,
2110 					 * consequently unmapping the folio.
2111 					 */
2112 					goto walk_done;
2113 				}
2114 				hugetlb_vma_unlock_write(vma);
2115 				tlb_finish_mmu(&tlb);
2116 			}
2117 			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
2118 			if (pte_dirty(pteval))
2119 				folio_mark_dirty(folio);
2120 		} else if (likely(pte_present(pteval))) {
2121 			nr_pages = folio_unmap_pte_batch(folio, &pvmw, flags, pteval);
2122 			end_addr = address + nr_pages * PAGE_SIZE;
2123 			flush_cache_range(vma, address, end_addr);
2124 
2125 			/* Nuke the page table entry. */
2126 			pteval = get_and_clear_ptes(mm, address, pvmw.pte, nr_pages);
2127 			/*
2128 			 * We clear the PTE but do not flush so potentially
2129 			 * a remote CPU could still be writing to the folio.
2130 			 * If the entry was previously clean then the
2131 			 * architecture must guarantee that a clear->dirty
2132 			 * transition on a cached TLB entry is written through
2133 			 * and traps if the PTE is unmapped.
2134 			 */
2135 			if (should_defer_flush(mm, flags))
2136 				set_tlb_ubc_flush_pending(mm, pteval, address, end_addr);
2137 			else
2138 				flush_tlb_range(vma, address, end_addr);
2139 			if (pte_dirty(pteval))
2140 				folio_mark_dirty(folio);
2141 		} else {
2142 			pte_clear(mm, address, pvmw.pte);
2143 		}
2144 
2145 		/*
2146 		 * Now the pte is cleared. If this pte was uffd-wp armed,
2147 		 * we may want to replace a none pte with a marker pte if
2148 		 * it's file-backed, so we don't lose the tracking info.
2149 		 */
2150 		pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
2151 
2152 		/* Update high watermark before we lower rss */
2153 		update_hiwater_rss(mm);
2154 
2155 		if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) {
2156 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
2157 			if (folio_test_hugetlb(folio)) {
2158 				hugetlb_count_sub(folio_nr_pages(folio), mm);
2159 				set_huge_pte_at(mm, address, pvmw.pte, pteval,
2160 						hsz);
2161 			} else {
2162 				dec_mm_counter(mm, mm_counter(folio));
2163 				set_pte_at(mm, address, pvmw.pte, pteval);
2164 			}
2165 		} else if (likely(pte_present(pteval)) && pte_unused(pteval) &&
2166 			   !userfaultfd_armed(vma)) {
2167 			/*
2168 			 * The guest indicated that the page content is of no
2169 			 * interest anymore. Simply discard the pte, vmscan
2170 			 * will take care of the rest.
2171 			 * A future reference will then fault in a new zero
2172 			 * page. When userfaultfd is active, we must not drop
2173 			 * this page though, as its main user (postcopy
2174 			 * migration) will not expect userfaults on already
2175 			 * copied pages.
2176 			 */
2177 			dec_mm_counter(mm, mm_counter(folio));
2178 		} else if (folio_test_anon(folio)) {
2179 			swp_entry_t entry = page_swap_entry(subpage);
2180 			pte_t swp_pte;
2181 			/*
2182 			 * Store the swap location in the pte.
2183 			 * See handle_pte_fault() ...
2184 			 */
2185 			if (unlikely(folio_test_swapbacked(folio) !=
2186 					folio_test_swapcache(folio))) {
2187 				WARN_ON_ONCE(1);
2188 				goto walk_abort;
2189 			}
2190 
2191 			/* MADV_FREE page check */
2192 			if (!folio_test_swapbacked(folio)) {
2193 				int ref_count, map_count;
2194 
2195 				/*
2196 				 * Synchronize with gup_pte_range():
2197 				 * - clear PTE; barrier; read refcount
2198 				 * - inc refcount; barrier; read PTE
2199 				 */
2200 				smp_mb();
2201 
2202 				ref_count = folio_ref_count(folio);
2203 				map_count = folio_mapcount(folio);
2204 
2205 				/*
2206 				 * Order reads for page refcount and dirty flag
2207 				 * (see comments in __remove_mapping()).
2208 				 */
2209 				smp_rmb();
2210 
2211 				if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) {
2212 					/*
2213 					 * redirtied either using the page table or a previously
2214 					 * obtained GUP reference.
2215 					 */
2216 					set_ptes(mm, address, pvmw.pte, pteval, nr_pages);
2217 					folio_set_swapbacked(folio);
2218 					goto walk_abort;
2219 				} else if (ref_count != 1 + map_count) {
2220 					/*
2221 					 * Additional reference. Could be a GUP reference or any
2222 					 * speculative reference. GUP users must mark the folio
2223 					 * dirty if there was a modification. This folio cannot be
2224 					 * reclaimed right now either way, so act just like nothing
2225 					 * happened.
2226 					 * We'll come back here later and detect if the folio was
2227 					 * dirtied when the additional reference is gone.
2228 					 */
2229 					set_ptes(mm, address, pvmw.pte, pteval, nr_pages);
2230 					goto walk_abort;
2231 				}
2232 				add_mm_counter(mm, MM_ANONPAGES, -nr_pages);
2233 				goto discard;
2234 			}
2235 
2236 			if (folio_dup_swap(folio, subpage) < 0) {
2237 				set_pte_at(mm, address, pvmw.pte, pteval);
2238 				goto walk_abort;
2239 			}
2240 
2241 			/*
2242 			 * arch_unmap_one() is expected to be a NOP on
2243 			 * architectures where we could have PFN swap PTEs,
2244 			 * so we'll not check/care.
2245 			 */
2246 			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
2247 				folio_put_swap(folio, subpage);
2248 				set_pte_at(mm, address, pvmw.pte, pteval);
2249 				goto walk_abort;
2250 			}
2251 
2252 			/* See folio_try_share_anon_rmap(): clear PTE first. */
2253 			if (anon_exclusive &&
2254 			    folio_try_share_anon_rmap_pte(folio, subpage)) {
2255 				folio_put_swap(folio, subpage);
2256 				set_pte_at(mm, address, pvmw.pte, pteval);
2257 				goto walk_abort;
2258 			}
2259 			if (list_empty(&mm->mmlist)) {
2260 				spin_lock(&mmlist_lock);
2261 				if (list_empty(&mm->mmlist))
2262 					list_add(&mm->mmlist, &init_mm.mmlist);
2263 				spin_unlock(&mmlist_lock);
2264 			}
2265 			dec_mm_counter(mm, MM_ANONPAGES);
2266 			inc_mm_counter(mm, MM_SWAPENTS);
2267 			swp_pte = swp_entry_to_pte(entry);
2268 			if (anon_exclusive)
2269 				swp_pte = pte_swp_mkexclusive(swp_pte);
2270 			if (likely(pte_present(pteval))) {
2271 				if (pte_soft_dirty(pteval))
2272 					swp_pte = pte_swp_mksoft_dirty(swp_pte);
2273 				if (pte_uffd_wp(pteval))
2274 					swp_pte = pte_swp_mkuffd_wp(swp_pte);
2275 			} else {
2276 				if (pte_swp_soft_dirty(pteval))
2277 					swp_pte = pte_swp_mksoft_dirty(swp_pte);
2278 				if (pte_swp_uffd_wp(pteval))
2279 					swp_pte = pte_swp_mkuffd_wp(swp_pte);
2280 			}
2281 			set_pte_at(mm, address, pvmw.pte, swp_pte);
2282 		} else {
2283 			/*
2284 			 * This is a locked file-backed folio,
2285 			 * so it cannot be removed from the page
2286 			 * cache and replaced by a new folio before
2287 			 * mmu_notifier_invalidate_range_end, so no
2288 			 * concurrent thread might update its page table
2289 			 * to point at a new folio while a device is
2290 			 * still using this folio.
2291 			 *
2292 			 * See Documentation/mm/mmu_notifier.rst
2293 			 */
2294 			dec_mm_counter(mm, mm_counter_file(folio));
2295 		}
2296 discard:
2297 		if (unlikely(folio_test_hugetlb(folio))) {
2298 			hugetlb_remove_rmap(folio);
2299 		} else {
2300 			folio_remove_rmap_ptes(folio, subpage, nr_pages, vma);
2301 		}
2302 		if (vma->vm_flags & VM_LOCKED)
2303 			mlock_drain_local();
2304 		folio_put_refs(folio, nr_pages);
2305 
2306 		/*
2307 		 * If we are sure that we batched the entire folio and cleared
2308 		 * all PTEs, we can just optimize and stop right here.
2309 		 */
2310 		if (nr_pages == folio_nr_pages(folio))
2311 			goto walk_done;
2312 		continue;
2313 walk_abort:
2314 		ret = false;
2315 walk_done:
2316 		page_vma_mapped_walk_done(&pvmw);
2317 		break;
2318 	}
2319 
2320 	mmu_notifier_invalidate_range_end(&range);
2321 
2322 	return ret;
2323 }
2324 
2325 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
2326 {
2327 	return vma_is_temporary_stack(vma);
2328 }
2329 
2330 static int folio_not_mapped(struct folio *folio)
2331 {
2332 	return !folio_mapped(folio);
2333 }
2334 
2335 /**
2336  * try_to_unmap - Try to remove all page table mappings to a folio.
2337  * @folio: The folio to unmap.
2338  * @flags: action and flags
2339  *
2340  * Tries to remove all the page table entries which are mapping this
2341  * folio.  It is the caller's responsibility to check if the folio is
2342  * still mapped if needed (use TTU_SYNC to prevent accounting races).
2343  *
2344  * Context: Caller must hold the folio lock.
2345  */
2346 void try_to_unmap(struct folio *folio, enum ttu_flags flags)
2347 {
2348 	struct rmap_walk_control rwc = {
2349 		.rmap_one = try_to_unmap_one,
2350 		.arg = (void *)flags,
2351 		.done = folio_not_mapped,
2352 		.anon_lock = folio_lock_anon_vma_read,
2353 	};
2354 
2355 	if (flags & TTU_RMAP_LOCKED)
2356 		rmap_walk_locked(folio, &rwc);
2357 	else
2358 		rmap_walk(folio, &rwc);
2359 }
2360 
2361 /*
2362  * @arg: enum ttu_flags will be passed to this argument.
2363  *
2364  * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
2365  * containing migration entries.
2366  */
2367 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
2368 		     unsigned long address, void *arg)
2369 {
2370 	struct mm_struct *mm = vma->vm_mm;
2371 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
2372 	bool anon_exclusive, writable, ret = true;
2373 	pte_t pteval;
2374 	struct page *subpage;
2375 	struct mmu_notifier_range range;
2376 	enum ttu_flags flags = (enum ttu_flags)(long)arg;
2377 	unsigned long pfn;
2378 	unsigned long hsz = 0;
2379 
2380 	/*
2381 	 * When racing against e.g. zap_pte_range() on another cpu,
2382 	 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
2383 	 * try_to_migrate() may return before page_mapped() has become false,
2384 	 * if page table locking is skipped: use TTU_SYNC to wait for that.
2385 	 */
2386 	if (flags & TTU_SYNC)
2387 		pvmw.flags = PVMW_SYNC;
2388 
2389 	/*
2390 	 * For THP, we have to assume the worse case ie pmd for invalidation.
2391 	 * For hugetlb, it could be much worse if we need to do pud
2392 	 * invalidation in the case of pmd sharing.
2393 	 *
2394 	 * Note that the page can not be free in this function as call of
2395 	 * try_to_unmap() must hold a reference on the page.
2396 	 */
2397 	range.end = vma_address_end(&pvmw);
2398 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2399 				address, range.end);
2400 	if (folio_test_hugetlb(folio)) {
2401 		/*
2402 		 * If sharing is possible, start and end will be adjusted
2403 		 * accordingly.
2404 		 */
2405 		adjust_range_if_pmd_sharing_possible(vma, &range.start,
2406 						     &range.end);
2407 
2408 		/* We need the huge page size for set_huge_pte_at() */
2409 		hsz = huge_page_size(hstate_vma(vma));
2410 	}
2411 	mmu_notifier_invalidate_range_start(&range);
2412 
2413 	while (page_vma_mapped_walk(&pvmw)) {
2414 		/* PMD-mapped THP migration entry */
2415 		if (!pvmw.pte) {
2416 			__maybe_unused unsigned long pfn;
2417 			__maybe_unused pmd_t pmdval;
2418 
2419 			if (flags & TTU_SPLIT_HUGE_PMD) {
2420 				split_huge_pmd_locked(vma, pvmw.address,
2421 						      pvmw.pmd, true);
2422 				ret = false;
2423 				page_vma_mapped_walk_done(&pvmw);
2424 				break;
2425 			}
2426 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2427 			pmdval = pmdp_get(pvmw.pmd);
2428 			if (likely(pmd_present(pmdval)))
2429 				pfn = pmd_pfn(pmdval);
2430 			else
2431 				pfn = softleaf_to_pfn(softleaf_from_pmd(pmdval));
2432 
2433 			subpage = folio_page(folio, pfn - folio_pfn(folio));
2434 
2435 			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
2436 					!folio_test_pmd_mappable(folio), folio);
2437 
2438 			if (set_pmd_migration_entry(&pvmw, subpage)) {
2439 				ret = false;
2440 				page_vma_mapped_walk_done(&pvmw);
2441 				break;
2442 			}
2443 			continue;
2444 #endif
2445 		}
2446 
2447 		/* Unexpected PMD-mapped THP? */
2448 		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2449 
2450 		/*
2451 		 * Handle PFN swap PTEs, such as device-exclusive ones, that
2452 		 * actually map pages.
2453 		 */
2454 		pteval = ptep_get(pvmw.pte);
2455 		if (likely(pte_present(pteval))) {
2456 			pfn = pte_pfn(pteval);
2457 		} else {
2458 			const softleaf_t entry = softleaf_from_pte(pteval);
2459 
2460 			pfn = softleaf_to_pfn(entry);
2461 			VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
2462 		}
2463 
2464 		subpage = folio_page(folio, pfn - folio_pfn(folio));
2465 		address = pvmw.address;
2466 		anon_exclusive = folio_test_anon(folio) &&
2467 				 PageAnonExclusive(subpage);
2468 
2469 		if (folio_test_hugetlb(folio)) {
2470 			bool anon = folio_test_anon(folio);
2471 
2472 			/*
2473 			 * huge_pmd_unshare may unmap an entire PMD page.
2474 			 * There is no way of knowing exactly which PMDs may
2475 			 * be cached for this mm, so we must flush them all.
2476 			 * start/end were already adjusted above to cover this
2477 			 * range.
2478 			 */
2479 			flush_cache_range(vma, range.start, range.end);
2480 
2481 			/*
2482 			 * To call huge_pmd_unshare, i_mmap_rwsem must be
2483 			 * held in write mode.  Caller needs to explicitly
2484 			 * do this outside rmap routines.
2485 			 *
2486 			 * We also must hold hugetlb vma_lock in write mode.
2487 			 * Lock order dictates acquiring vma_lock BEFORE
2488 			 * i_mmap_rwsem.  We can only try lock here and
2489 			 * fail if unsuccessful.
2490 			 */
2491 			if (!anon) {
2492 				struct mmu_gather tlb;
2493 
2494 				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
2495 				if (!hugetlb_vma_trylock_write(vma)) {
2496 					page_vma_mapped_walk_done(&pvmw);
2497 					ret = false;
2498 					break;
2499 				}
2500 
2501 				tlb_gather_mmu_vma(&tlb, vma);
2502 				if (huge_pmd_unshare(&tlb, vma, address, pvmw.pte)) {
2503 					hugetlb_vma_unlock_write(vma);
2504 					huge_pmd_unshare_flush(&tlb, vma);
2505 					tlb_finish_mmu(&tlb);
2506 					/*
2507 					 * The PMD table was unmapped,
2508 					 * consequently unmapping the folio.
2509 					 */
2510 					page_vma_mapped_walk_done(&pvmw);
2511 					break;
2512 				}
2513 				hugetlb_vma_unlock_write(vma);
2514 				tlb_finish_mmu(&tlb);
2515 			}
2516 			/* Nuke the hugetlb page table entry */
2517 			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
2518 			if (pte_dirty(pteval))
2519 				folio_mark_dirty(folio);
2520 			writable = pte_write(pteval);
2521 		} else if (likely(pte_present(pteval))) {
2522 			flush_cache_page(vma, address, pfn);
2523 			/* Nuke the page table entry. */
2524 			if (should_defer_flush(mm, flags)) {
2525 				/*
2526 				 * We clear the PTE but do not flush so potentially
2527 				 * a remote CPU could still be writing to the folio.
2528 				 * If the entry was previously clean then the
2529 				 * architecture must guarantee that a clear->dirty
2530 				 * transition on a cached TLB entry is written through
2531 				 * and traps if the PTE is unmapped.
2532 				 */
2533 				pteval = ptep_get_and_clear(mm, address, pvmw.pte);
2534 
2535 				set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE);
2536 			} else {
2537 				pteval = ptep_clear_flush(vma, address, pvmw.pte);
2538 			}
2539 			if (pte_dirty(pteval))
2540 				folio_mark_dirty(folio);
2541 			writable = pte_write(pteval);
2542 		} else {
2543 			const softleaf_t entry = softleaf_from_pte(pteval);
2544 
2545 			pte_clear(mm, address, pvmw.pte);
2546 
2547 			writable = softleaf_is_device_private_write(entry);
2548 		}
2549 
2550 		VM_WARN_ON_FOLIO(writable && folio_test_anon(folio) &&
2551 				!anon_exclusive, folio);
2552 
2553 		/* Update high watermark before we lower rss */
2554 		update_hiwater_rss(mm);
2555 
2556 		if (PageHWPoison(subpage)) {
2557 			VM_WARN_ON_FOLIO(folio_is_device_private(folio), folio);
2558 
2559 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
2560 			if (folio_test_hugetlb(folio)) {
2561 				hugetlb_count_sub(folio_nr_pages(folio), mm);
2562 				set_huge_pte_at(mm, address, pvmw.pte, pteval,
2563 						hsz);
2564 			} else {
2565 				dec_mm_counter(mm, mm_counter(folio));
2566 				set_pte_at(mm, address, pvmw.pte, pteval);
2567 			}
2568 		} else if (likely(pte_present(pteval)) && pte_unused(pteval) &&
2569 			   !userfaultfd_armed(vma)) {
2570 			/*
2571 			 * The guest indicated that the page content is of no
2572 			 * interest anymore. Simply discard the pte, vmscan
2573 			 * will take care of the rest.
2574 			 * A future reference will then fault in a new zero
2575 			 * page. When userfaultfd is active, we must not drop
2576 			 * this page though, as its main user (postcopy
2577 			 * migration) will not expect userfaults on already
2578 			 * copied pages.
2579 			 */
2580 			dec_mm_counter(mm, mm_counter(folio));
2581 		} else {
2582 			swp_entry_t entry;
2583 			pte_t swp_pte;
2584 
2585 			/*
2586 			 * arch_unmap_one() is expected to be a NOP on
2587 			 * architectures where we could have PFN swap PTEs,
2588 			 * so we'll not check/care.
2589 			 */
2590 			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
2591 				if (folio_test_hugetlb(folio))
2592 					set_huge_pte_at(mm, address, pvmw.pte,
2593 							pteval, hsz);
2594 				else
2595 					set_pte_at(mm, address, pvmw.pte, pteval);
2596 				ret = false;
2597 				page_vma_mapped_walk_done(&pvmw);
2598 				break;
2599 			}
2600 
2601 			/* See folio_try_share_anon_rmap_pte(): clear PTE first. */
2602 			if (folio_test_hugetlb(folio)) {
2603 				if (anon_exclusive &&
2604 				    hugetlb_try_share_anon_rmap(folio)) {
2605 					set_huge_pte_at(mm, address, pvmw.pte,
2606 							pteval, hsz);
2607 					ret = false;
2608 					page_vma_mapped_walk_done(&pvmw);
2609 					break;
2610 				}
2611 			} else if (anon_exclusive &&
2612 				   folio_try_share_anon_rmap_pte(folio, subpage)) {
2613 				set_pte_at(mm, address, pvmw.pte, pteval);
2614 				ret = false;
2615 				page_vma_mapped_walk_done(&pvmw);
2616 				break;
2617 			}
2618 
2619 			/*
2620 			 * Store the pfn of the page in a special migration
2621 			 * pte. do_swap_page() will wait until the migration
2622 			 * pte is removed and then restart fault handling.
2623 			 */
2624 			if (writable)
2625 				entry = make_writable_migration_entry(
2626 							page_to_pfn(subpage));
2627 			else if (anon_exclusive)
2628 				entry = make_readable_exclusive_migration_entry(
2629 							page_to_pfn(subpage));
2630 			else
2631 				entry = make_readable_migration_entry(
2632 							page_to_pfn(subpage));
2633 			if (likely(pte_present(pteval))) {
2634 				if (pte_young(pteval))
2635 					entry = make_migration_entry_young(entry);
2636 				if (pte_dirty(pteval))
2637 					entry = make_migration_entry_dirty(entry);
2638 				swp_pte = swp_entry_to_pte(entry);
2639 				if (pte_soft_dirty(pteval))
2640 					swp_pte = pte_swp_mksoft_dirty(swp_pte);
2641 				if (pte_uffd_wp(pteval))
2642 					swp_pte = pte_swp_mkuffd_wp(swp_pte);
2643 			} else {
2644 				swp_pte = swp_entry_to_pte(entry);
2645 				if (pte_swp_soft_dirty(pteval))
2646 					swp_pte = pte_swp_mksoft_dirty(swp_pte);
2647 				if (pte_swp_uffd_wp(pteval))
2648 					swp_pte = pte_swp_mkuffd_wp(swp_pte);
2649 			}
2650 			if (folio_test_hugetlb(folio))
2651 				set_huge_pte_at(mm, address, pvmw.pte, swp_pte,
2652 						hsz);
2653 			else
2654 				set_pte_at(mm, address, pvmw.pte, swp_pte);
2655 			trace_set_migration_pte(address, pte_val(swp_pte),
2656 						folio_order(folio));
2657 			/*
2658 			 * No need to invalidate here it will synchronize on
2659 			 * against the special swap migration pte.
2660 			 */
2661 		}
2662 
2663 		if (unlikely(folio_test_hugetlb(folio)))
2664 			hugetlb_remove_rmap(folio);
2665 		else
2666 			folio_remove_rmap_pte(folio, subpage, vma);
2667 		if (vma->vm_flags & VM_LOCKED)
2668 			mlock_drain_local();
2669 		folio_put(folio);
2670 	}
2671 
2672 	mmu_notifier_invalidate_range_end(&range);
2673 
2674 	return ret;
2675 }
2676 
2677 /**
2678  * try_to_migrate - try to replace all page table mappings with swap entries
2679  * @folio: the folio to replace page table entries for
2680  * @flags: action and flags
2681  *
2682  * Tries to remove all the page table entries which are mapping this folio and
2683  * replace them with special swap entries. Caller must hold the folio lock.
2684  */
2685 void try_to_migrate(struct folio *folio, enum ttu_flags flags)
2686 {
2687 	struct rmap_walk_control rwc = {
2688 		.rmap_one = try_to_migrate_one,
2689 		.arg = (void *)flags,
2690 		.done = folio_not_mapped,
2691 		.anon_lock = folio_lock_anon_vma_read,
2692 	};
2693 
2694 	/*
2695 	 * Migration always ignores mlock and only supports TTU_RMAP_LOCKED and
2696 	 * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags.
2697 	 */
2698 	if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2699 					TTU_SYNC | TTU_BATCH_FLUSH)))
2700 		return;
2701 
2702 	if (folio_is_zone_device(folio) &&
2703 	    (!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
2704 		return;
2705 
2706 	/*
2707 	 * During exec, a temporary VMA is setup and later moved.
2708 	 * The VMA is moved under the anon_vma lock but not the
2709 	 * page tables leading to a race where migration cannot
2710 	 * find the migration ptes. Rather than increasing the
2711 	 * locking requirements of exec(), migration skips
2712 	 * temporary VMAs until after exec() completes.
2713 	 */
2714 	if (!folio_test_ksm(folio) && folio_test_anon(folio))
2715 		rwc.invalid_vma = invalid_migration_vma;
2716 
2717 	if (flags & TTU_RMAP_LOCKED)
2718 		rmap_walk_locked(folio, &rwc);
2719 	else
2720 		rmap_walk(folio, &rwc);
2721 }
2722 
2723 #ifdef CONFIG_DEVICE_PRIVATE
2724 /**
2725  * make_device_exclusive() - Mark a page for exclusive use by a device
2726  * @mm: mm_struct of associated target process
2727  * @addr: the virtual address to mark for exclusive device access
2728  * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
2729  * @foliop: folio pointer will be stored here on success.
2730  *
2731  * This function looks up the page mapped at the given address, grabs a
2732  * folio reference, locks the folio and replaces the PTE with special
2733  * device-exclusive PFN swap entry, preventing access through the process
2734  * page tables. The function will return with the folio locked and referenced.
2735  *
2736  * On fault, the device-exclusive entries are replaced with the original PTE
2737  * under folio lock, after calling MMU notifiers.
2738  *
2739  * Only anonymous non-hugetlb folios are supported and the VMA must have
2740  * write permissions such that we can fault in the anonymous page writable
2741  * in order to mark it exclusive. The caller must hold the mmap_lock in read
2742  * mode.
2743  *
2744  * A driver using this to program access from a device must use a mmu notifier
2745  * critical section to hold a device specific lock during programming. Once
2746  * programming is complete it should drop the folio lock and reference after
2747  * which point CPU access to the page will revoke the exclusive access.
2748  *
2749  * Notes:
2750  *   #. This function always operates on individual PTEs mapping individual
2751  *      pages. PMD-sized THPs are first remapped to be mapped by PTEs before
2752  *      the conversion happens on a single PTE corresponding to @addr.
2753  *   #. While concurrent access through the process page tables is prevented,
2754  *      concurrent access through other page references (e.g., earlier GUP
2755  *      invocation) is not handled and not supported.
2756  *   #. device-exclusive entries are considered "clean" and "old" by core-mm.
2757  *      Device drivers must update the folio state when informed by MMU
2758  *      notifiers.
2759  *
2760  * Returns: pointer to mapped page on success, otherwise a negative error.
2761  */
2762 struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr,
2763 		void *owner, struct folio **foliop)
2764 {
2765 	struct mmu_notifier_range range;
2766 	struct folio *folio, *fw_folio;
2767 	struct vm_area_struct *vma;
2768 	struct folio_walk fw;
2769 	struct page *page;
2770 	swp_entry_t entry;
2771 	pte_t swp_pte;
2772 	int ret;
2773 
2774 	mmap_assert_locked(mm);
2775 	addr = PAGE_ALIGN_DOWN(addr);
2776 
2777 	/*
2778 	 * Fault in the page writable and try to lock it; note that if the
2779 	 * address would already be marked for exclusive use by a device,
2780 	 * the GUP call would undo that first by triggering a fault.
2781 	 *
2782 	 * If any other device would already map this page exclusively, the
2783 	 * fault will trigger a conversion to an ordinary
2784 	 * (non-device-exclusive) PTE and issue a MMU_NOTIFY_EXCLUSIVE.
2785 	 */
2786 retry:
2787 	page = get_user_page_vma_remote(mm, addr,
2788 					FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
2789 					&vma);
2790 	if (IS_ERR(page))
2791 		return page;
2792 	folio = page_folio(page);
2793 
2794 	if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) {
2795 		folio_put(folio);
2796 		return ERR_PTR(-EOPNOTSUPP);
2797 	}
2798 
2799 	ret = folio_lock_killable(folio);
2800 	if (ret) {
2801 		folio_put(folio);
2802 		return ERR_PTR(ret);
2803 	}
2804 
2805 	/*
2806 	 * Inform secondary MMUs that we are going to convert this PTE to
2807 	 * device-exclusive, such that they unmap it now. Note that the
2808 	 * caller must filter this event out to prevent livelocks.
2809 	 */
2810 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
2811 				      mm, addr, addr + PAGE_SIZE, owner);
2812 	mmu_notifier_invalidate_range_start(&range);
2813 
2814 	/*
2815 	 * Let's do a second walk and make sure we still find the same page
2816 	 * mapped writable. Note that any page of an anonymous folio can
2817 	 * only be mapped writable using exactly one PTE ("exclusive"), so
2818 	 * there cannot be other mappings.
2819 	 */
2820 	fw_folio = folio_walk_start(&fw, vma, addr, 0);
2821 	if (fw_folio != folio || fw.page != page ||
2822 	    fw.level != FW_LEVEL_PTE || !pte_write(fw.pte)) {
2823 		if (fw_folio)
2824 			folio_walk_end(&fw, vma);
2825 		mmu_notifier_invalidate_range_end(&range);
2826 		folio_unlock(folio);
2827 		folio_put(folio);
2828 		goto retry;
2829 	}
2830 
2831 	/* Nuke the page table entry so we get the uptodate dirty bit. */
2832 	flush_cache_page(vma, addr, page_to_pfn(page));
2833 	fw.pte = ptep_clear_flush(vma, addr, fw.ptep);
2834 
2835 	/* Set the dirty flag on the folio now the PTE is gone. */
2836 	if (pte_dirty(fw.pte))
2837 		folio_mark_dirty(folio);
2838 
2839 	/*
2840 	 * Store the pfn of the page in a special device-exclusive PFN swap PTE.
2841 	 * do_swap_page() will trigger the conversion back while holding the
2842 	 * folio lock.
2843 	 */
2844 	entry = make_device_exclusive_entry(page_to_pfn(page));
2845 	swp_pte = swp_entry_to_pte(entry);
2846 	if (pte_soft_dirty(fw.pte))
2847 		swp_pte = pte_swp_mksoft_dirty(swp_pte);
2848 	/* The pte is writable, uffd-wp does not apply. */
2849 	set_pte_at(mm, addr, fw.ptep, swp_pte);
2850 
2851 	folio_walk_end(&fw, vma);
2852 	mmu_notifier_invalidate_range_end(&range);
2853 	*foliop = folio;
2854 	return page;
2855 }
2856 EXPORT_SYMBOL_GPL(make_device_exclusive);
2857 #endif
2858 
2859 void __put_anon_vma(struct anon_vma *anon_vma)
2860 {
2861 	struct anon_vma *root = anon_vma->root;
2862 
2863 	anon_vma_free(anon_vma);
2864 	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
2865 		anon_vma_free(root);
2866 }
2867 
2868 static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio,
2869 					    struct rmap_walk_control *rwc)
2870 {
2871 	struct anon_vma *anon_vma;
2872 
2873 	if (rwc->anon_lock)
2874 		return rwc->anon_lock(folio, rwc);
2875 
2876 	/*
2877 	 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read()
2878 	 * because that depends on page_mapped(); but not all its usages
2879 	 * are holding mmap_lock. Users without mmap_lock are required to
2880 	 * take a reference count to prevent the anon_vma disappearing
2881 	 */
2882 	anon_vma = folio_anon_vma(folio);
2883 	if (!anon_vma)
2884 		return NULL;
2885 
2886 	if (anon_vma_trylock_read(anon_vma))
2887 		goto out;
2888 
2889 	if (rwc->try_lock) {
2890 		anon_vma = NULL;
2891 		rwc->contended = true;
2892 		goto out;
2893 	}
2894 
2895 	anon_vma_lock_read(anon_vma);
2896 out:
2897 	return anon_vma;
2898 }
2899 
2900 /*
2901  * rmap_walk_anon - do something to anonymous page using the object-based
2902  * rmap method
2903  * @folio: the folio to be handled
2904  * @rwc: control variable according to each walk type
2905  * @locked: caller holds relevant rmap lock
2906  *
2907  * Find all the mappings of a folio using the mapping pointer and the vma
2908  * chains contained in the anon_vma struct it points to.
2909  */
2910 static void rmap_walk_anon(struct folio *folio,
2911 		struct rmap_walk_control *rwc, bool locked)
2912 {
2913 	struct anon_vma *anon_vma;
2914 	pgoff_t pgoff_start, pgoff_end;
2915 	struct anon_vma_chain *avc;
2916 
2917 	/*
2918 	 * The folio lock ensures that folio->mapping can't be changed under us
2919 	 * to an anon_vma with different root.
2920 	 */
2921 	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
2922 
2923 	if (locked) {
2924 		anon_vma = folio_anon_vma(folio);
2925 		/* anon_vma disappear under us? */
2926 		VM_BUG_ON_FOLIO(!anon_vma, folio);
2927 	} else {
2928 		anon_vma = rmap_walk_anon_lock(folio, rwc);
2929 	}
2930 	if (!anon_vma)
2931 		return;
2932 
2933 	pgoff_start = folio_pgoff(folio);
2934 	pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2935 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
2936 			pgoff_start, pgoff_end) {
2937 		struct vm_area_struct *vma = avc->vma;
2938 		unsigned long address = vma_address(vma, pgoff_start,
2939 				folio_nr_pages(folio));
2940 
2941 		VM_BUG_ON_VMA(address == -EFAULT, vma);
2942 		cond_resched();
2943 
2944 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2945 			continue;
2946 
2947 		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2948 			break;
2949 		if (rwc->done && rwc->done(folio))
2950 			break;
2951 	}
2952 
2953 	if (!locked)
2954 		anon_vma_unlock_read(anon_vma);
2955 }
2956 
2957 /**
2958  * __rmap_walk_file() - Traverse the reverse mapping for a file-backed mapping
2959  * of a page mapped within a specified page cache object at a specified offset.
2960  *
2961  * @folio: 		Either the folio whose mappings to traverse, or if NULL,
2962  * 			the callbacks specified in @rwc will be configured such
2963  * 			as to be able to look up mappings correctly.
2964  * @mapping: 		The page cache object whose mapping VMAs we intend to
2965  * 			traverse. If @folio is non-NULL, this should be equal to
2966  *			folio_mapping(folio).
2967  * @pgoff_start:	The offset within @mapping of the page which we are
2968  * 			looking up. If @folio is non-NULL, this should be equal
2969  * 			to folio_pgoff(folio).
2970  * @nr_pages:		The number of pages mapped by the mapping. If @folio is
2971  *			non-NULL, this should be equal to folio_nr_pages(folio).
2972  * @rwc:		The reverse mapping walk control object describing how
2973  *			the traversal should proceed.
2974  * @locked:		Is the @mapping already locked? If not, we acquire the
2975  *			lock.
2976  */
2977 static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
2978 			     pgoff_t pgoff_start, unsigned long nr_pages,
2979 			     struct rmap_walk_control *rwc, bool locked)
2980 {
2981 	pgoff_t pgoff_end = pgoff_start + nr_pages - 1;
2982 	struct vm_area_struct *vma;
2983 
2984 	VM_WARN_ON_FOLIO(folio && mapping != folio_mapping(folio), folio);
2985 	VM_WARN_ON_FOLIO(folio && pgoff_start != folio_pgoff(folio), folio);
2986 	VM_WARN_ON_FOLIO(folio && nr_pages != folio_nr_pages(folio), folio);
2987 
2988 	if (!locked) {
2989 		if (i_mmap_trylock_read(mapping))
2990 			goto lookup;
2991 
2992 		if (rwc->try_lock) {
2993 			rwc->contended = true;
2994 			return;
2995 		}
2996 
2997 		i_mmap_lock_read(mapping);
2998 	}
2999 lookup:
3000 	vma_interval_tree_foreach(vma, &mapping->i_mmap,
3001 			pgoff_start, pgoff_end) {
3002 		unsigned long address = vma_address(vma, pgoff_start, nr_pages);
3003 
3004 		VM_BUG_ON_VMA(address == -EFAULT, vma);
3005 		cond_resched();
3006 
3007 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
3008 			continue;
3009 
3010 		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
3011 			goto done;
3012 		if (rwc->done && rwc->done(folio))
3013 			goto done;
3014 	}
3015 done:
3016 	if (!locked)
3017 		i_mmap_unlock_read(mapping);
3018 }
3019 
3020 /*
3021  * rmap_walk_file - do something to file page using the object-based rmap method
3022  * @folio: the folio to be handled
3023  * @rwc: control variable according to each walk type
3024  * @locked: caller holds relevant rmap lock
3025  *
3026  * Find all the mappings of a folio using the mapping pointer and the vma chains
3027  * contained in the address_space struct it points to.
3028  */
3029 static void rmap_walk_file(struct folio *folio,
3030 		struct rmap_walk_control *rwc, bool locked)
3031 {
3032 	/*
3033 	 * The folio lock not only makes sure that folio->mapping cannot
3034 	 * suddenly be NULLified by truncation, it makes sure that the structure
3035 	 * at mapping cannot be freed and reused yet, so we can safely take
3036 	 * mapping->i_mmap_rwsem.
3037 	 */
3038 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3039 
3040 	if (!folio->mapping)
3041 		return;
3042 
3043 	__rmap_walk_file(folio, folio->mapping, folio->index,
3044 			 folio_nr_pages(folio), rwc, locked);
3045 }
3046 
3047 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
3048 {
3049 	if (unlikely(folio_test_ksm(folio)))
3050 		rmap_walk_ksm(folio, rwc);
3051 	else if (folio_test_anon(folio))
3052 		rmap_walk_anon(folio, rwc, false);
3053 	else
3054 		rmap_walk_file(folio, rwc, false);
3055 }
3056 
3057 /* Like rmap_walk, but caller holds relevant rmap lock */
3058 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
3059 {
3060 	/* no ksm support for now */
3061 	VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
3062 	if (folio_test_anon(folio))
3063 		rmap_walk_anon(folio, rwc, true);
3064 	else
3065 		rmap_walk_file(folio, rwc, true);
3066 }
3067 
3068 #ifdef CONFIG_HUGETLB_PAGE
3069 /*
3070  * The following two functions are for anonymous (private mapped) hugepages.
3071  * Unlike common anonymous pages, anonymous hugepages have no accounting code
3072  * and no lru code, because we handle hugepages differently from common pages.
3073  */
3074 void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
3075 		unsigned long address, rmap_t flags)
3076 {
3077 	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
3078 	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
3079 
3080 	atomic_inc(&folio->_entire_mapcount);
3081 	atomic_inc(&folio->_large_mapcount);
3082 	if (flags & RMAP_EXCLUSIVE)
3083 		SetPageAnonExclusive(&folio->page);
3084 	VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
3085 			 PageAnonExclusive(&folio->page), folio);
3086 }
3087 
3088 void hugetlb_add_new_anon_rmap(struct folio *folio,
3089 		struct vm_area_struct *vma, unsigned long address)
3090 {
3091 	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
3092 
3093 	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
3094 	/* increment count (starts at -1) */
3095 	atomic_set(&folio->_entire_mapcount, 0);
3096 	atomic_set(&folio->_large_mapcount, 0);
3097 	folio_clear_hugetlb_restore_reserve(folio);
3098 	__folio_set_anon(folio, vma, address, true);
3099 	SetPageAnonExclusive(&folio->page);
3100 }
3101 #endif /* CONFIG_HUGETLB_PAGE */
3102