// SPDX-License-Identifier: GPL-2.0
/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_rwsem	(while writing or truncating, not reading or faulting)
 *   mm->mmap_lock
 *     mapping->invalidate_lock (in filemap_fault)
 *       folio_lock
 *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
 *           vma_start_write
 *             mapping->i_mmap_rwsem
 *               anon_vma->rwsem
 *                 mm->page_table_lock or pte_lock
 *                   swap_lock (in swap_duplicate, swap_info_get)
 *                     mmlist_lock (in mmput, drain_mmlist and others)
 *                   mapping->private_lock (in block_dirty_folio)
 *                     i_pages lock (widely used)
 *                       lruvec->lru_lock (in folio_lruvec_lock_irq)
 *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                     sb_lock (within inode_lock in fs/fs-writeback.c)
 *                     i_pages lock (widely used, in set_page_dirty,
 *                                   in arch-dependent flush_dcache_mmap_lock,
 *                                   within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 *
 * hugetlbfs PageHuge() pages take locks in this order:
 *   hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *     vma_lock (hugetlb specific lock for pmd_sharing)
 *       mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
 *         folio_lock
 */
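
/*
 * Illustrative sketch (not part of the original file): a writer updating an
 * anon_vma interval tree respects the ordering above by taking the mmap lock
 * before the rmap lock, e.g.:
 *
 *	mmap_write_lock(mm);
 *	anon_vma_lock_write(vma->anon_vma);
 *	... update the rb_root interval tree ...
 *	anon_vma_unlock_write(vma->anon_vma);
 *	mmap_write_unlock(mm);
 */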

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/leafops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/mm_inline.h>
#include <linux/oom.h>

#include <asm/tlb.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"
#include "swap.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->num_children = 0;
		anon_vma->num_active_vmas = 0;
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against folio_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
	 *
	 * folio_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_assign(struct vm_area_struct *vma,
				  struct anon_vma_chain *avc,
				  struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
 * and that may actually touch the rwsem even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	mmap_assert_locked(mm);
	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		anon_vma->num_children++; /* self-parent link for new root */
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_assign(vma, avc, anon_vma);
		anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
		anon_vma->num_active_vmas++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

out_enomem_free_avc:
	anon_vma_chain_free(avc);
out_enomem:
	return -ENOMEM;
}
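
/*
 * Illustrative caller sketch (hedged, simplified from fault-path usage):
 * callers go through the inline anon_vma_prepare() wrapper, which only drops
 * into __anon_vma_prepare() when vma->anon_vma is still NULL:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	... anonymous pages may now be mapped into vma ...
 */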

static void check_anon_vma_clone(struct vm_area_struct *dst,
				 struct vm_area_struct *src,
				 enum vma_operation operation)
{
	/* The write lock must be held. */
	mmap_assert_write_locked(src->vm_mm);
	/* If not a fork then must be on same mm. */
	VM_WARN_ON_ONCE(operation != VMA_OP_FORK && dst->vm_mm != src->vm_mm);

	/* If we have anything to do, src->anon_vma must be provided. */
	VM_WARN_ON_ONCE(!src->anon_vma && !list_empty(&src->anon_vma_chain));
	VM_WARN_ON_ONCE(!src->anon_vma && dst->anon_vma);
	/* We are establishing a new anon_vma_chain. */
	VM_WARN_ON_ONCE(!list_empty(&dst->anon_vma_chain));
	/*
	 * On fork, dst->anon_vma is set to NULL (temporarily). Otherwise,
	 * anon_vma must be the same across dst and src.
	 */
	VM_WARN_ON_ONCE(dst->anon_vma && dst->anon_vma != src->anon_vma);
	/*
	 * Essentially equivalent to the above - if not a no-op, we should
	 * expect dst->anon_vma to be set for everything except a fork.
	 */
	VM_WARN_ON_ONCE(operation != VMA_OP_FORK && src->anon_vma &&
			!dst->anon_vma);
	/* For the anon_vma to be compatible, it can only be singular. */
	VM_WARN_ON_ONCE(operation == VMA_OP_MERGE_UNFAULTED &&
			!list_is_singular(&src->anon_vma_chain));
#ifdef CONFIG_PER_VMA_LOCK
	/* Only merging an unfaulted VMA leaves the destination attached. */
	VM_WARN_ON_ONCE(operation != VMA_OP_MERGE_UNFAULTED &&
			vma_is_attached(dst));
#endif
}

static void maybe_reuse_anon_vma(struct vm_area_struct *dst,
				 struct anon_vma *anon_vma)
{
	/* If already populated, nothing to do. */
	if (dst->anon_vma)
		return;

	/*
	 * We reuse an anon_vma if any linking VMAs were unmapped and it has
	 * only a single child at most.
	 */
	if (anon_vma->num_active_vmas > 0)
		return;
	if (anon_vma->num_children > 1)
		return;

	dst->anon_vma = anon_vma;
	anon_vma->num_active_vmas++;
}

static void cleanup_partial_anon_vmas(struct vm_area_struct *vma);

/**
 * anon_vma_clone - Establishes new anon_vma_chain objects in @dst linking to
 * all of the anon_vma objects contained within @src's anon_vma_chain.
 * @dst: The destination VMA with an empty anon_vma_chain.
 * @src: The source VMA we wish to duplicate.
 * @operation: The type of operation which resulted in the clone.
 *
 * This is the heart of the VMA side of the anon_vma implementation - we invoke
 * this function whenever we need to set up a new VMA's anon_vma state.
 *
 * This is invoked for:
 *
 * - VMA merge, but only when @dst is unfaulted and @src is faulted - meaning we
 *   clone @src into @dst.
 * - VMA split.
 * - VMA (m)remap.
 * - Fork of a faulted VMA.
 *
 * In all cases other than fork this is simply a duplication. Fork additionally
 * adds a new active anon_vma.
 *
 * ONLY in the case of fork do we try to 'reuse' existing anon_vma's in an
 * anon_vma hierarchy, reusing anon_vma's which have no VMA associated with them
 * but do have a single child. This is to avoid waste of memory when repeatedly
 * forking.
 *
 * Returns: 0 on success, -ENOMEM on failure.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src,
		   enum vma_operation operation)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *active_anon_vma = src->anon_vma;

	check_anon_vma_clone(dst, src, operation);

	if (!active_anon_vma)
		return 0;

	/*
	 * Allocate AVCs. We don't need an anon_vma lock for this as we
	 * are not updating the anon_vma rbtree nor are we changing
	 * anon_vma statistics.
	 *
	 * Either src and dst have the same mm, for which we hold an exclusive
	 * mmap write lock, or we are forking and hold it on src->vm_mm while
	 * dst is not yet accessible to other threads, so there's no
	 * possibility of the unlinked AVCs being observed yet.
	 */
	list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) {
		avc = anon_vma_chain_alloc(GFP_KERNEL);
		if (!avc)
			goto enomem_failure;

		anon_vma_chain_assign(dst, avc, pavc->anon_vma);
	}

	/*
	 * Now link the anon_vma's back to the newly inserted AVCs.
	 * Note that all anon_vma's share the same root.
	 */
	anon_vma_lock_write(active_anon_vma);
	list_for_each_entry_reverse(avc, &dst->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
		if (operation == VMA_OP_FORK)
			maybe_reuse_anon_vma(dst, anon_vma);
	}

	if (operation != VMA_OP_FORK)
		dst->anon_vma->num_active_vmas++;

	anon_vma_unlock_write(active_anon_vma);
	return 0;

enomem_failure:
	cleanup_partial_anon_vmas(dst);
	return -ENOMEM;
}
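
/*
 * Illustrative sketch of the failure contract (hedged; "operation" stands in
 * for whichever enum vma_operation value applies): on -ENOMEM the partially
 * built chain has already been torn down, so @dst needs no further cleanup:
 *
 *	if (anon_vma_clone(dst, src, operation))
 *		return -ENOMEM;	 // dst->anon_vma_chain is empty again
 */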

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int rc;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		return -ENOMEM;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc) {
		put_anon_vma(anon_vma);
		return -ENOMEM;
	}

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	rc = anon_vma_clone(vma, pvma, VMA_OP_FORK);
	/* An error arose or an existing anon_vma was reused, all done then. */
	if (rc || vma->anon_vma) {
		put_anon_vma(anon_vma);
		anon_vma_chain_free(avc);
		return rc;
	}

	/*
	 * OK, no reuse, so add our own anon_vma.
	 *
	 * Since it is not linked anywhere we can safely manipulate anon_vma
	 * fields without a lock.
	 */

	anon_vma->num_active_vmas = 1;
	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_chain_assign(vma, avc, anon_vma);
	/* Now let rmap see it. */
	anon_vma_lock_write(anon_vma);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
	anon_vma->parent->num_children++;
	anon_vma_unlock_write(anon_vma);

	return 0;
}
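
/*
 * Illustrative sketch (hedged, modelled on the fork path): dup_mmap() walks
 * the parent's VMAs and runs something along these lines for each child copy:
 *
 *	if (anon_vma_fork(child_vma, parent_vma))
 *		goto fail_nomem;
 */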

/*
 * In the unfortunate case of anon_vma_clone() failing to allocate memory we
 * have to clean things up.
 *
 * Since we allocate anon_vma_chain's before we insert them into the interval
 * trees, we simply have to free up the AVC's and remove the entries from the
 * VMA's anon_vma_chain.
 */
static void cleanup_partial_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;

	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

/**
 * unlink_anon_vmas() - remove all links between a VMA and anon_vma's, freeing
 * anon_vma_chain objects.
 * @vma: The VMA whose links to anon_vma objects are to be severed.
 *
 * As part of the process anon_vma_chain's are freed,
 * anon_vma->num_children/num_active_vmas are updated as required and, if the
 * relevant anon_vma references no further VMAs, its reference count is
 * decremented.
 */
void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *active_anon_vma = vma->anon_vma;

	/* The mmap lock is always held, possibly only for reading on unmap. */
	mmap_assert_locked(vma->vm_mm);

	/* Unfaulted is a no-op. */
	if (!active_anon_vma) {
		VM_WARN_ON_ONCE(!list_empty(&vma->anon_vma_chain));
		return;
	}

	anon_vma_lock_write(active_anon_vma);

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->num_children--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}

	active_anon_vma->num_active_vmas--;
	/*
	 * The vma may still be used after the unlink; a fresh anon_vma will
	 * be prepared if a fault is ever handled on it again.
	 */
	vma->anon_vma = NULL;
	anon_vma_unlock_write(active_anon_vma);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do so before due to
	 * __put_anon_vma() needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->num_children);
		VM_WARN_ON(anon_vma->num_active_vmas);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}
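
/*
 * Illustrative sketch (hedged): after unlink_anon_vmas() the VMA is fully
 * detached from rmap; a later anonymous fault simply starts over:
 *
 *	unlink_anon_vmas(vma);		// vma->anon_vma is now NULL
 *	...
 *	anon_vma_prepare(vma);		// a future fault attaches a fresh one
 */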

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against folio_remove_rmap_*()
 * the best this function can do is return an anon_vma with an increased
 * refcount that might have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 *
 * NOTE: the caller should hold the folio lock when calling this.
 */
struct anon_vma *folio_get_anon_vma(const struct folio *folio)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - FOLIO_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this folio is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}
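
/*
 * Illustrative usage sketch (hedged): callers pair the elevated refcount
 * with put_anon_vma() and must re-validate the mapping themselves:
 *
 *	anon_vma = folio_get_anon_vma(folio);
 *	if (anon_vma) {
 *		... walk, re-checking that the folio is still mapped ...
 *		put_anon_vma(anon_vma);
 *	}
 */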

/*
 * Similar to folio_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with folio_get_anon_vma() and then block on the mutex
 * in the !rwc->try_lock case.
 */
struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
					  struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - FOLIO_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the folio is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!folio_mapped(folio)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	if (rwc && rwc->try_lock) {
		anon_vma = NULL;
		rwc->contended = true;
		goto out;
	}

	/* trylock failed, we have to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}
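
/*
 * Illustrative usage sketch (hedged): a non-NULL return is read-locked and
 * must be released with anon_vma_unlock_read(); with rwc->try_lock set, a
 * NULL return plus rwc->contended tells the caller the lock was contended:
 *
 *	anon_vma = folio_lock_anon_vma_read(folio, rwc);
 *	if (!anon_vma)
 *		return;		// unmapped, freed, or contended
 *	... walk the anon_vma interval tree ...
 *	anon_vma_unlock_read(anon_vma);
 */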

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. If a PTE
 * was dirty when it was unmapped, it is important that it be flushed before
 * any IO is initiated on the page, to prevent lost writes. Similarly, it
 * must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

/*
 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
 */
#define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
#define TLB_FLUSH_BATCH_PENDING_MASK	\
	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
#define TLB_FLUSH_BATCH_PENDING_LARGE	\
	(TLB_FLUSH_BATCH_PENDING_MASK / 2)
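
/*
 * Worked example of the encoding above: with TLB_FLUSH_BATCH_FLUSHED_SHIFT
 * of 16, TLB_FLUSH_BATCH_PENDING_MASK is 0x7fff and the _LARGE threshold is
 * 0x3fff. A tlb_flush_batched value of 0x00030005 therefore decodes to
 * pending == 5 and flushed == 3, meaning two generations of batched unmaps
 * still await a TLB flush.
 */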

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
				      unsigned long start, unsigned long end)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	int batch;
	bool writable = pte_dirty(pteval);

	if (!pte_accessible(mm, pteval))
		return;

	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, start, end);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure the compiler does not re-order the setting of
	 * tlb_flush_batched before the PTE is cleared.
	 */
	barrier();
	batch = atomic_read(&mm->tlb_flush_batched);
retry:
	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
		/*
		 * Prevent `pending' from catching up with `flushed' because of
		 * overflow. Reset `pending' and `flushed' to be 1 and 0 if
		 * `pending' becomes large.
		 */
		if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1))
			goto retry;
	} else {
		atomic_inc(&mm->tlb_flush_batched);
	}

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	return arch_tlbbatch_should_defer(mm);
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	int batch = atomic_read(&mm->tlb_flush_batched);
	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;

	if (pending != flushed) {
		flush_tlb_mm(mm);
		/*
		 * If a new TLB flush became pending while we were flushing,
		 * the cmpxchg fails and mm->tlb_flush_batched is left as-is,
		 * so that flush is not lost.
		 */
		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
	}
}
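
/*
 * Illustrative sketch (hedged): PTE-modifying operations that can race with
 * batched reclaim call this under the PTL before depending on the old TLB
 * state:
 *
 *	spin_lock(ptl);
 *	flush_tlb_batched_pending(mm);
 *	... modify PTEs ...
 *	spin_unlock(ptl);
 */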
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
				      unsigned long start, unsigned long end)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

/**
 * page_address_in_vma - The virtual address of a page in this VMA.
 * @folio: The folio containing the page.
 * @page: The page within the folio.
 * @vma: The VMA we need to know the address in.
 *
 * Calculates the user virtual address of this page in the specified VMA.
 * It is the caller's responsibility to check the page is actually
 * within the VMA. There may not currently be a PTE pointing at this
 * page, but if a page fault occurs at this address, this is the page
 * which will be accessed.
 *
 * Context: Caller should hold a reference to the folio. Caller should
 * hold a lock (eg the i_mmap_lock or the mmap_lock) which keeps the
 * VMA from being altered.
 *
 * Return: The virtual address corresponding to this page in the VMA.
 */
unsigned long page_address_in_vma(const struct folio *folio,
		const struct page *page, const struct vm_area_struct *vma)
{
	if (folio_test_anon(folio)) {
		struct anon_vma *anon_vma = folio_anon_vma(folio);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !anon_vma ||
		    vma->anon_vma->root != anon_vma->root)
			return -EFAULT;
	} else if (!vma->vm_file) {
		return -EFAULT;
	} else if (vma->vm_file->f_mapping != folio->mapping) {
		return -EFAULT;
	}

	/* KSM folios don't reach here because of the !anon_vma check */
	return vma_address(vma, page_pgoff(folio, page), 1);
}

/*
 * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
 * NULL if it doesn't exist. No guarantees / checks on what the pmd_t*
 * represents.
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
out:
	return pmd;
}
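
/*
 * Illustrative usage sketch (hedged, in the style of the THP collapse code):
 * look the pmd up first and bail out if the upper levels aren't populated:
 *
 *	pmd = mm_find_pmd(mm, address);
 *	if (!pmd)
 *		goto out;
 *	... lock and inspect *pmd ...
 */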

struct folio_referenced_arg {
	int mapcount;
	int referenced;
	vm_flags_t vm_flags;
	struct mem_cgroup *memcg;
};

/*
 * arg: folio_referenced_arg will be passed
 */
static bool folio_referenced_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address, void *arg)
{
	struct folio_referenced_arg *pra = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	int ptes = 0, referenced = 0;
	unsigned int nr;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;
		nr = 1;

		if (vma->vm_flags & VM_LOCKED) {
			ptes++;
			pra->mapcount--;

			/* Only mlock fully mapped pages */
			if (pvmw.pte && ptes != pvmw.nr_pages)
				continue;

			/*
			 * All PTEs must be protected by the page table lock
			 * in order to mlock the page.
			 *
			 * If a page table boundary has been crossed, the
			 * current ptl only protects part of the PTEs.
			 */
			if (pvmw.flags & PVMW_PGTABLE_CROSSED)
				continue;

			/* Restore the mlock which got missed */
			mlock_vma_folio(folio, vma);
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false; /* To break the loop */
		}

		/*
		 * Skip the non-shared swapbacked folio mapped solely by
		 * the exiting or OOM-reaped process. This avoids redundant
		 * swap-out followed by an immediate unmap.
		 */
		if ((!atomic_read(&vma->vm_mm->mm_users) ||
		     check_stable_address_space(vma->vm_mm)) &&
		    folio_test_anon(folio) && folio_test_swapbacked(folio) &&
		    !folio_maybe_mapped_shared(folio)) {
			pra->referenced = -1;
			page_vma_mapped_walk_done(&pvmw);
			return false;
		}

		if (lru_gen_enabled() && pvmw.pte) {
			if (lru_gen_look_around(&pvmw))
				referenced++;
		} else if (pvmw.pte) {
			if (folio_test_large(folio)) {
				unsigned long end_addr = pmd_addr_end(address, vma->vm_end);
				unsigned int max_nr = (end_addr - address) >> PAGE_SHIFT;
				pte_t pteval = ptep_get(pvmw.pte);

				nr = folio_pte_batch(folio, pvmw.pte,
						     pteval, max_nr);
			}

			ptes += nr;
			if (clear_flush_young_ptes_notify(vma, address, pvmw.pte, nr))
				referenced++;
			/* Skip the batched PTEs */
			pvmw.pte += nr - 1;
			pvmw.address += (nr - 1) * PAGE_SIZE;
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
							  pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped folio? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount -= nr;
		/*
		 * If we are sure that we batched the entire folio,
		 * we can just optimize and stop right here.
		 */
		if (ptes == pvmw.nr_pages) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	if (referenced)
		folio_clear_idle(folio);
	if (folio_test_clear_young(folio))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
	}

	if (!pra->mapcount)
		return false; /* To break the loop */

	return true;
}

static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct folio_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	/*
	 * Ignore references from this mapping if it has no recency. If the
	 * folio has been used in another mapping, we will catch it; if this
	 * other mapping is already gone, the unmap path will have set the
	 * referenced flag or activated the folio in zap_pte_range().
	 */
	if (!vma_has_recency(vma))
		return true;

	/*
	 * If we are reclaiming on behalf of a cgroup, skip counting on behalf
	 * of references from different cgroups.
	 */
	if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * folio_referenced() - Test if the folio was referenced.
 * @folio: The folio to test.
 * @is_locked: Caller holds lock on the folio.
 * @memcg: target memory cgroup
 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
 *
 * Quick test_and_clear_referenced for all mappings of a folio.
 *
 * Return: The number of mappings which referenced the folio. Return -1 if
 * the function bailed out due to rmap lock contention.
 */
int folio_referenced(struct folio *folio, int is_locked,
		     struct mem_cgroup *memcg, vm_flags_t *vm_flags)
{
	bool we_locked = false;
	struct folio_referenced_arg pra = {
		.mapcount = folio_mapcount(folio),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = folio_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = folio_lock_anon_vma_read,
		.try_lock = true,
		.invalid_vma = invalid_folio_referenced_vma,
	};

	*vm_flags = 0;
	if (!pra.mapcount)
		return 0;

	if (!folio_raw_mapping(folio))
		return 0;

	if (!is_locked) {
		we_locked = folio_trylock(folio);
		if (!we_locked)
			return 1;
	}

	rmap_walk(folio, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		folio_unlock(folio);

	return rwc.contended ? -1 : pra.referenced;
}
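
/*
 * Illustrative usage sketch (hedged, modelled on the reclaim code, where
 * sc->target_mem_cgroup is reclaim's target cgroup): the -1 "contended"
 * result is typically treated as "keep the folio for now":
 *
 *	referenced = folio_referenced(folio, 1, sc->target_mem_cgroup,
 *				      &vm_flags);
 *	if (referenced == -1)
 *		return FOLIOREF_KEEP;
 */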

static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
{
	int cleaned = 0;
	struct vm_area_struct *vma = pvmw->vma;
	struct mmu_notifier_range range;
	unsigned long address = pvmw->address;

	/*
	 * We have to assume the worst case, i.e. pmd, for invalidation. Note
	 * that the folio cannot be freed from this function.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0,
				vma->vm_mm, address, vma_address_end(pvmw));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(pvmw)) {
		int ret = 0;

		address = pvmw->address;
		if (pvmw->pte) {
			pte_t *pte = pvmw->pte;
			pte_t entry = ptep_get(pte);

			/*
			 * PFN swap PTEs, such as device-exclusive ones, that
			 * actually map pages are clean and not writable from a
			 * CPU perspective. The MMU notifier takes care of any
			 * device aspects.
			 */
			if (!pte_present(entry))
				continue;
			if (!pte_dirty(entry) && !pte_write(entry))
				continue;

			flush_cache_page(vma, address, pte_pfn(entry));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			pmd_t *pmd = pvmw->pmd;
			pmd_t entry = pmdp_get(pmd);

			/*
			 * Please see the comment above (!pte_present).
			 * A non-present PMD is not writable from a CPU
			 * perspective.
			 */
			if (!pmd_present(entry))
				continue;
			if (!pmd_dirty(entry) && !pmd_write(entry))
				continue;

			flush_cache_range(vma, address,
					  address + HPAGE_PMD_SIZE);
			entry = pmdp_invalidate(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped folio? */
			WARN_ON_ONCE(1);
#endif
		}

		if (ret)
			cleaned++;
	}

	mmu_notifier_invalidate_range_end(&range);

	return cleaned;
}

static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
	int *cleaned = arg;

	*cleaned += page_vma_mkclean_one(&pvmw);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int folio_mkclean(struct folio *folio)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!folio_test_locked(folio));

	if (!folio_mapped(folio))
		return 0;

	mapping = folio_mapping(folio);
	if (!mapping)
		return 0;

	rmap_walk(folio, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(folio_mkclean);
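
/*
 * Illustrative usage sketch (hedged): writeback-style callers write-protect
 * all mappings before starting IO, so any later write refaults and redirties
 * the folio:
 *
 *	folio_lock(folio);
 *	if (folio_mkclean(folio))
 *		folio_mark_dirty(folio);
 *	folio_unlock(folio);
 */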

struct wrprotect_file_state {
	int cleaned;
	pgoff_t pgoff;
	unsigned long pfn;
	unsigned long nr_pages;
};

static bool mapping_wrprotect_range_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address, void *arg)
{
	struct wrprotect_file_state *state = (struct wrprotect_file_state *)arg;
	struct page_vma_mapped_walk pvmw = {
		.pfn = state->pfn,
		.nr_pages = state->nr_pages,
		.pgoff = state->pgoff,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,
	};

	state->cleaned += page_vma_mkclean_one(&pvmw);

	return true;
}

static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
			     pgoff_t pgoff_start, unsigned long nr_pages,
			     struct rmap_walk_control *rwc, bool locked);

/**
 * mapping_wrprotect_range() - Write-protect all mappings in a specified range.
 *
 * @mapping: The mapping whose reverse mapping should be traversed.
 * @pgoff: The page offset at which @pfn is mapped within @mapping.
 * @pfn: The PFN of the page mapped in @mapping at @pgoff.
 * @nr_pages: The number of physically contiguous base pages spanned.
 *
 * Traverses the reverse mapping, finding all VMAs which contain a shared
 * mapping of the pages in the specified range in @mapping, and write-protects
 * them (that is, updates the page tables to mark the mappings read-only such
 * that a write protection fault arises when the mappings are written to).
 *
 * The @pfn value need not refer to a folio, but rather can reference a kernel
 * allocation which is mapped into userland. We therefore do not require that
 * the page maps to a folio with a valid mapping or index field, rather the
 * caller specifies these in @mapping and @pgoff.
 *
 * Return: the number of write-protected PTEs, or an error.
 */
int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
			    unsigned long pfn, unsigned long nr_pages)
{
	struct wrprotect_file_state state = {
		.cleaned = 0,
		.pgoff = pgoff,
		.pfn = pfn,
		.nr_pages = nr_pages,
	};
	struct rmap_walk_control rwc = {
		.arg = (void *)&state,
		.rmap_one = mapping_wrprotect_range_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	if (!mapping)
		return 0;

	__rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc,
			 /* locked = */false);

	return state.cleaned;
}
EXPORT_SYMBOL_GPL(mapping_wrprotect_range);

/**
 * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped within the range
 *                     [@pfn, @pfn + @nr_pages) at the specified offset (@pgoff)
 *                     within the @vma of shared mappings. And since clean PTEs
 *                     should also be read-only, write-protects them too.
 * @pfn: start pfn.
 * @nr_pages: number of physically contiguous pages starting with @pfn.
 * @pgoff: page offset that the @pfn is mapped with.
 * @vma: vma that @pfn is mapped within.
 *
 * Returns the number of cleaned PTEs (including PMDs).
 */
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
		      struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn = pfn,
		.nr_pages = nr_pages,
		.pgoff = pgoff,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	if (invalid_mkclean_vma(vma, NULL))
		return 0;

	pvmw.address = vma_address(vma, pgoff, nr_pages);
	VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);

	return page_vma_mkclean_one(&pvmw);
}

static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped)
{
	int idx;

	if (nr) {
		idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
		lruvec_stat_mod_folio(folio, idx, nr);
	}
	if (nr_pmdmapped) {
		if (folio_test_anon(folio)) {
			idx = NR_ANON_THPS;
			lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
		} else {
			/* NR_*_PMDMAPPED are not maintained per-memcg */
			idx = folio_test_swapbacked(folio) ?
				NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED;
			__mod_node_page_state(folio_pgdat(folio), idx,
					      nr_pmdmapped);
		}
	}
}

static __always_inline void __folio_add_rmap(struct folio *folio,
		struct page *page, int nr_pages, struct vm_area_struct *vma,
		enum pgtable_level level)
{
	atomic_t *mapped = &folio->_nr_pages_mapped;
	const int orig_nr_pages = nr_pages;
	int first = 0, nr = 0, nr_pmdmapped = 0;

	__folio_rmap_sanity_checks(folio, page, nr_pages, level);

	switch (level) {
	case PGTABLE_LEVEL_PTE:
		if (!folio_test_large(folio)) {
			nr = atomic_inc_and_test(&folio->_mapcount);
			break;
		}

		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
			nr = folio_add_return_large_mapcount(folio, orig_nr_pages, vma);
			if (nr == orig_nr_pages)
				/* Was completely unmapped. */
				nr = folio_large_nr_pages(folio);
			else
				nr = 0;
			break;
		}

		do {
			first += atomic_inc_and_test(&page->_mapcount);
		} while (page++, --nr_pages > 0);

		if (first &&
		    atomic_add_return_relaxed(first, mapped) < ENTIRELY_MAPPED)
			nr = first;

		folio_add_large_mapcount(folio, orig_nr_pages, vma);
		break;
	case PGTABLE_LEVEL_PMD:
	case PGTABLE_LEVEL_PUD:
		first = atomic_inc_and_test(&folio->_entire_mapcount);
		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
			if (level == PGTABLE_LEVEL_PMD && first)
				nr_pmdmapped = folio_large_nr_pages(folio);
			nr = folio_inc_return_large_mapcount(folio, vma);
			if (nr == 1)
				/* Was completely unmapped. */
				nr = folio_large_nr_pages(folio);
			else
				nr = 0;
			break;
		}

		if (first) {
			nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped);
			if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) {
				nr_pages = folio_large_nr_pages(folio);
				/*
				 * We only track PMD mappings of PMD-sized
				 * folios separately.
				 */
				if (level == PGTABLE_LEVEL_PMD)
					nr_pmdmapped = nr_pages;
				nr = nr_pages - (nr & FOLIO_PAGES_MAPPED);
				/* Raced ahead of a remove and another add? */
				if (unlikely(nr < 0))
					nr = 0;
			} else {
				/* Raced ahead of a remove of ENTIRELY_MAPPED */
				nr = 0;
			}
		}
		folio_inc_large_mapcount(folio, vma);
		break;
	default:
		BUILD_BUG();
	}
	__folio_mod_stat(folio, nr, nr_pmdmapped);
}

/**
 * folio_move_anon_rmap - move a folio to our anon_vma
 * @folio: The folio to move to our anon_vma
 * @vma: The vma the folio belongs to
 *
 * When a folio belongs exclusively to one process after a COW event,
 * that folio can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling processes.
 */
void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma)
{
	void *anon_vma = vma->anon_vma;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma += FOLIO_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the FOLIO_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg folio_referenced()'s
	 * folio_test_anon()) will not see one without the other.
	 */
	WRITE_ONCE(folio->mapping, anon_vma);
}
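
/*
 * Illustrative sketch (hedged, modelled on the COW-reuse path): once a
 * process is known to own the folio exclusively, it is moved under the
 * folio lock:
 *
 *	if (folio_trylock(folio)) {
 *		folio_move_anon_rmap(folio, vma);
 *		folio_unlock(folio);
 *	}
 */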

/**
 * __folio_set_anon - set up a new anonymous rmap for a folio
 * @folio: The folio to set up the new anonymous rmap for.
 * @vma: VM area to add the folio to.
 * @address: User virtual address of the mapping
 * @exclusive: Whether the folio is exclusive to the process.
 */
static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
			     unsigned long address, bool exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	/*
	 * If the folio isn't exclusive to this vma, we must use the _oldest_
	 * possible anon_vma for the folio mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	/*
	 * page_idle does a lockless/optimistic rmap scan on folio->mapping.
	 * Make sure the compiler doesn't split the stores of anon_vma and
	 * the FOLIO_MAPPING_ANON type identifier, otherwise the rmap code
	 * could mistake the mapping for a struct address_space and crash.
	 */
	anon_vma = (void *) anon_vma + FOLIO_MAPPING_ANON;
	WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
	folio->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @folio: The folio containing @page.
 * @page: the page to check the mapping of
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(const struct folio *folio,
		const struct page *page, struct vm_area_struct *vma,
		unsigned long address)
{
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against folio_add_anon_rmap_*() because the caller
	 * always holds the page locked.
	 *
	 * We have exclusion against folio_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to folio_add_new_anon_rmap.
	 */
	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
			folio);
	VM_BUG_ON_PAGE(page_pgoff(folio, page) != linear_page_index(vma, address),
		       page);
}

static __always_inline void __folio_add_anon_rmap(struct folio *folio,
		struct page *page, int nr_pages, struct vm_area_struct *vma,
		unsigned long address, rmap_t flags, enum pgtable_level level)
{
	int i;

	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);

	__folio_add_rmap(folio, page, nr_pages, vma, level);

	if (likely(!folio_test_ksm(folio)))
		__page_check_anon_rmap(folio, page, vma, address);

	if (flags & RMAP_EXCLUSIVE) {
		switch (level) {
		case PGTABLE_LEVEL_PTE:
			for (i = 0; i < nr_pages; i++)
				SetPageAnonExclusive(page + i);
			break;
		case PGTABLE_LEVEL_PMD:
			SetPageAnonExclusive(page);
			break;
		case PGTABLE_LEVEL_PUD:
			/*
			 * Keep the compiler happy, we don't support anonymous
			 * PUD mappings.
			 */
			WARN_ON_ONCE(1);
			break;
		default:
			BUILD_BUG();
		}
	}

	VM_WARN_ON_FOLIO(!folio_test_large(folio) && PageAnonExclusive(page) &&
			 atomic_read(&folio->_mapcount) > 0, folio);
	for (i = 0; i < nr_pages; i++) {
		struct page *cur_page = page + i;

		VM_WARN_ON_FOLIO(folio_test_large(folio) &&
				 folio_entire_mapcount(folio) > 1 &&
				 PageAnonExclusive(cur_page), folio);
		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
			continue;

		/*
		 * While PTE-mapping a THP we have a PMD and a PTE
		 * mapping.
		 */
		VM_WARN_ON_FOLIO(atomic_read(&cur_page->_mapcount) > 0 &&
				 PageAnonExclusive(cur_page), folio);
	}

	/*
	 * Only mlock it if the folio is fully mapped to the VMA.
	 *
	 * Partially mapped folios can be split on reclaim, and the part
	 * outside of the mlocked VMA can be evicted or freed.
	 */
	if (folio_nr_pages(folio) == nr_pages)
		mlock_vma_folio(folio, vma);
}
1565
1566 /**
1567 * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio
1568 * @folio: The folio to add the mappings to
1569 * @page: The first page to add
1570 * @nr_pages: The number of pages which will be mapped
1571 * @vma: The vm area in which the mappings are added
1572 * @address: The user virtual address of the first page to map
1573 * @flags: The rmap flags
1574 *
1575 * The page range of folio is defined by [first_page, first_page + nr_pages)
1576 *
1577 * The caller needs to hold the page table lock, and the page must be locked in
1578 * the anon_vma case: to serialize mapping,index checking after setting,
1579 * and to ensure that an anon folio is not being upgraded racily to a KSM folio
1580 * (but KSM folios are never downgraded).
1581 */
folio_add_anon_rmap_ptes(struct folio * folio,struct page * page,int nr_pages,struct vm_area_struct * vma,unsigned long address,rmap_t flags)1582 void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page,
1583 int nr_pages, struct vm_area_struct *vma, unsigned long address,
1584 rmap_t flags)
1585 {
1586 __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags,
1587 PGTABLE_LEVEL_PTE);
1588 }
1589
1590 /**
1591 * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio
1592 * @folio: The folio to add the mapping to
1593 * @page: The first page to add
1594 * @vma: The vm area in which the mapping is added
1595 * @address: The user virtual address of the first page to map
1596 * @flags: The rmap flags
1597 *
1598 * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR)
1599 *
1600 * The caller needs to hold the page table lock, and the page must be locked in
1601 * the anon_vma case: to serialize mapping,index checking after setting.
1602 */
folio_add_anon_rmap_pmd(struct folio * folio,struct page * page,struct vm_area_struct * vma,unsigned long address,rmap_t flags)1603 void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page,
1604 struct vm_area_struct *vma, unsigned long address, rmap_t flags)
1605 {
1606 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1607 __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags,
1608 PGTABLE_LEVEL_PMD);
1609 #else
1610 WARN_ON_ONCE(true);
1611 #endif
1612 }

/**
 * folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
 * @folio: The folio to add the mapping to.
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @flags: The rmap flags
 *
 * Like folio_add_anon_rmap_*() but must only be called on *new* folios.
 * This means the inc-and-test can be bypassed.
 * The folio doesn't necessarily need to be locked while it's exclusive
 * unless two threads map it concurrently. However, the folio must be
 * locked if it's shared.
 *
 * If the folio is pmd-mappable, it is accounted as a THP.
 */
void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
		unsigned long address, rmap_t flags)
{
	const bool exclusive = flags & RMAP_EXCLUSIVE;
	int nr = 1, nr_pmdmapped = 0;

	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
	VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio);

	/*
	 * VM_DROPPABLE mappings don't swap; instead they're just dropped when
	 * under memory pressure.
	 */
	if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE))
		__folio_set_swapbacked(folio);
	__folio_set_anon(folio, vma, address, exclusive);

	if (likely(!folio_test_large(folio))) {
		/* increment count (starts at -1) */
		atomic_set(&folio->_mapcount, 0);
		if (exclusive)
			SetPageAnonExclusive(&folio->page);
	} else if (!folio_test_pmd_mappable(folio)) {
		int i;

		nr = folio_large_nr_pages(folio);
		for (i = 0; i < nr; i++) {
			struct page *page = folio_page(folio, i);

			if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
				/* increment count (starts at -1) */
				atomic_set(&page->_mapcount, 0);
			if (exclusive)
				SetPageAnonExclusive(page);
		}

		folio_set_large_mapcount(folio, nr, vma);
		if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
			atomic_set(&folio->_nr_pages_mapped, nr);
	} else {
		nr = folio_large_nr_pages(folio);
		/* increment count (starts at -1) */
		atomic_set(&folio->_entire_mapcount, 0);
		folio_set_large_mapcount(folio, 1, vma);
		if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
			atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED);
		if (exclusive)
			SetPageAnonExclusive(&folio->page);
		nr_pmdmapped = nr;
	}

	VM_WARN_ON_ONCE(address < vma->vm_start ||
			address + (nr << PAGE_SHIFT) > vma->vm_end);

	__folio_mod_stat(folio, nr, nr_pmdmapped);
	mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
}
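
/*
 * Example usage of folio_add_new_anon_rmap() (a minimal sketch of an
 * anonymous-fault-style caller; real callers such as do_anonymous_page()
 * also handle memcg charging, uffd and error paths):
 *
 *	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);
 *	__folio_mark_uptodate(folio);
 *	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
 *	folio_add_lru_vma(folio, vma);
 *	set_pte_at(mm, addr, pte, mk_pte(&folio->page, vma->vm_page_prot));
 */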

static __always_inline void __folio_add_file_rmap(struct folio *folio,
		struct page *page, int nr_pages, struct vm_area_struct *vma,
		enum pgtable_level level)
{
	VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);

	__folio_add_rmap(folio, page, nr_pages, vma, level);

	/*
	 * Only mlock it if the folio is fully mapped to the VMA.
	 *
	 * Partially mapped folios can be split on reclaim, and the part
	 * outside of the mlocked VMA can be evicted or freed.
	 */
	if (folio_nr_pages(folio) == nr_pages)
		mlock_vma_folio(folio, vma);
}

/**
 * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio
 * @folio: The folio to add the mappings to
 * @page: The first page to add
 * @nr_pages: The number of pages that will be mapped using PTEs
 * @vma: The vm area in which the mappings are added
 *
 * The page range of the folio is defined by [page, page + nr_pages)
 *
 * The caller needs to hold the page table lock.
 */
void folio_add_file_rmap_ptes(struct folio *folio, struct page *page,
		int nr_pages, struct vm_area_struct *vma)
{
	__folio_add_file_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE);
}
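
/*
 * Example usage of folio_add_file_rmap_ptes() (a minimal sketch of a
 * file-fault-style caller mapping nr_pages pages of a pagecache folio
 * with the page table lock held; compare set_pte_range() in mm/memory.c):
 *
 *	folio_add_file_rmap_ptes(folio, page, nr_pages, vma);
 *	set_ptes(mm, addr, pte, mk_pte(page, vma->vm_page_prot), nr_pages);
 */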

/**
 * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio
 * @folio: The folio to add the mapping to
 * @page: The first page to add
 * @vma: The vm area in which the mapping is added
 *
 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
 *
 * The caller needs to hold the page table lock.
 */
void folio_add_file_rmap_pmd(struct folio *folio, struct page *page,
		struct vm_area_struct *vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	__folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD);
#else
	WARN_ON_ONCE(true);
#endif
}

/**
 * folio_add_file_rmap_pud - add a PUD mapping to a page range of a folio
 * @folio: The folio to add the mapping to
 * @page: The first page to add
 * @vma: The vm area in which the mapping is added
 *
 * The page range of the folio is defined by [page, page + HPAGE_PUD_NR)
 *
 * The caller needs to hold the page table lock.
 */
void folio_add_file_rmap_pud(struct folio *folio, struct page *page,
		struct vm_area_struct *vma)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
	__folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD);
#else
	WARN_ON_ONCE(true);
#endif
}

static __always_inline void __folio_remove_rmap(struct folio *folio,
		struct page *page, int nr_pages, struct vm_area_struct *vma,
		enum pgtable_level level)
{
	atomic_t *mapped = &folio->_nr_pages_mapped;
	int last = 0, nr = 0, nr_pmdmapped = 0;
	bool partially_mapped = false;

	__folio_rmap_sanity_checks(folio, page, nr_pages, level);

	switch (level) {
	case PGTABLE_LEVEL_PTE:
		if (!folio_test_large(folio)) {
			nr = atomic_add_negative(-1, &folio->_mapcount);
			break;
		}

		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
			nr = folio_sub_return_large_mapcount(folio, nr_pages, vma);
			if (!nr) {
				/* Now completely unmapped. */
				nr = folio_large_nr_pages(folio);
			} else {
				partially_mapped = nr < folio_large_nr_pages(folio) &&
						   !folio_entire_mapcount(folio);
				nr = 0;
			}
			break;
		}

		folio_sub_large_mapcount(folio, nr_pages, vma);
		do {
			last += atomic_add_negative(-1, &page->_mapcount);
		} while (page++, --nr_pages > 0);

		if (last &&
		    atomic_sub_return_relaxed(last, mapped) < ENTIRELY_MAPPED)
			nr = last;

		partially_mapped = nr && atomic_read(mapped);
		break;
	case PGTABLE_LEVEL_PMD:
	case PGTABLE_LEVEL_PUD:
		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
			last = atomic_add_negative(-1, &folio->_entire_mapcount);
			if (level == PGTABLE_LEVEL_PMD && last)
				nr_pmdmapped = folio_large_nr_pages(folio);
			nr = folio_dec_return_large_mapcount(folio, vma);
			if (!nr) {
				/* Now completely unmapped. */
				nr = folio_large_nr_pages(folio);
			} else {
				partially_mapped = last &&
						   nr < folio_large_nr_pages(folio);
				nr = 0;
			}
			break;
		}

		folio_dec_large_mapcount(folio, vma);
		last = atomic_add_negative(-1, &folio->_entire_mapcount);
		if (last) {
			nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped);
			if (likely(nr < ENTIRELY_MAPPED)) {
				nr_pages = folio_large_nr_pages(folio);
				if (level == PGTABLE_LEVEL_PMD)
					nr_pmdmapped = nr_pages;
				nr = nr_pages - nr;
				/* Raced ahead of another remove and an add? */
				if (unlikely(nr < 0))
					nr = 0;
			} else {
				/* An add of ENTIRELY_MAPPED raced ahead */
				nr = 0;
			}
		}

		partially_mapped = nr && nr < nr_pmdmapped;
		break;
	default:
		BUILD_BUG();
	}

	/*
	 * Queue anon large folio for deferred split if at least one page of
	 * the folio is unmapped and at least one page is still mapped.
	 *
	 * Check partially_mapped first to ensure it is a large folio.
	 *
	 * Device private folios do not support deferred splitting and
	 * shrinker based scanning of the folios to free.
	 */
	if (partially_mapped && folio_test_anon(folio) &&
	    !folio_test_partially_mapped(folio) &&
	    !folio_is_device_private(folio))
		deferred_split_folio(folio, true);

	__folio_mod_stat(folio, -nr, -nr_pmdmapped);

	/*
	 * It would be tidy to reset folio_test_anon mapping when fully
	 * unmapped, but that might overwrite a racing folio_add_anon_rmap_*()
	 * which increments mapcount after us but sets mapping before us:
	 * so leave the reset to free_pages_prepare, and remember that
	 * it's only reliable while mapped.
	 */

	munlock_vma_folio(folio, vma);
}
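
/*
 * Worked example for the PMD branch above (a sketch of the
 * !CONFIG_NO_PAGE_MAPCOUNT accounting, for a 512-page folio whose only
 * mapping is one PMD): _entire_mapcount drops 0 -> -1, so "last" is set;
 * subtracting ENTIRELY_MAPPED from _nr_pages_mapped leaves 0, which is
 * below ENTIRELY_MAPPED, so nr = 512 - 0 = 512 and nr_pmdmapped = 512:
 * the folio became fully unmapped. Had 100 of its pages also been
 * PTE-mapped, the subtraction would instead leave 100, giving
 * nr = 512 - 100 = 412 with partially_mapped true, and the folio would be
 * queued for deferred split.
 */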

/**
 * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio
 * @folio: The folio to remove the mappings from
 * @page: The first page to remove
 * @nr_pages: The number of pages that will be removed from the mapping
 * @vma: The vm area from which the mappings are removed
 *
 * The page range of the folio is defined by [page, page + nr_pages)
 *
 * The caller needs to hold the page table lock.
 */
void folio_remove_rmap_ptes(struct folio *folio, struct page *page,
		int nr_pages, struct vm_area_struct *vma)
{
	__folio_remove_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE);
}

/**
 * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio
 * @folio: The folio to remove the mapping from
 * @page: The first page to remove
 * @vma: The vm area from which the mapping is removed
 *
 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
 *
 * The caller needs to hold the page table lock.
 */
void folio_remove_rmap_pmd(struct folio *folio, struct page *page,
		struct vm_area_struct *vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	__folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD);
#else
	WARN_ON_ONCE(true);
#endif
}

/**
 * folio_remove_rmap_pud - remove a PUD mapping from a page range of a folio
 * @folio: The folio to remove the mapping from
 * @page: The first page to remove
 * @vma: The vm area from which the mapping is removed
 *
 * The page range of the folio is defined by [page, page + HPAGE_PUD_NR)
 *
 * The caller needs to hold the page table lock.
 */
void folio_remove_rmap_pud(struct folio *folio, struct page *page,
		struct vm_area_struct *vma)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
	__folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD);
#else
	WARN_ON_ONCE(true);
#endif
}
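
/*
 * Example usage of folio_remove_rmap_ptes() (a minimal zap-style sketch,
 * mirroring what try_to_unmap_one() below does: the caller holds the PTL
 * and has already cleared the nr PTEs it is tearing down):
 *
 *	pteval = get_and_clear_ptes(mm, addr, pte, nr);
 *	folio_remove_rmap_ptes(folio, page, nr, vma);
 *	folio_put_refs(folio, nr);
 */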

static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
			struct page_vma_mapped_walk *pvmw,
			enum ttu_flags flags, pte_t pte)
{
	unsigned long end_addr, addr = pvmw->address;
	struct vm_area_struct *vma = pvmw->vma;
	unsigned int max_nr;

	if (flags & TTU_HWPOISON)
		return 1;
	if (!folio_test_large(folio))
		return 1;

	/* We may only batch within a single VMA and a single page table. */
	end_addr = pmd_addr_end(addr, vma->vm_end);
	max_nr = (end_addr - addr) >> PAGE_SHIFT;

	/* We only support batching of lazyfree or file folios for now ... */
	if (folio_test_anon(folio) && folio_test_swapbacked(folio))
		return 1;

	if (pte_unused(pte))
		return 1;

	if (userfaultfd_wp(vma))
		return 1;

	return folio_pte_batch(folio, pvmw->pte, pte, max_nr);
}
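
/*
 * Worked example for the batch limit above (a sketch assuming 4K pages and
 * 2M PMD page tables): for addr == 0x201000 in a large enough VMA,
 * pmd_addr_end() rounds up to the next PMD boundary at 0x400000, so
 * max_nr == (0x400000 - 0x201000) >> PAGE_SHIFT == 511 PTEs at most can
 * be batched in one call.
 */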

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
		unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	bool anon_exclusive, ret = true;
	pte_t pteval;
	struct page *subpage;
	struct mmu_notifier_range range;
	enum ttu_flags flags = (enum ttu_flags)(long)arg;
	unsigned long nr_pages = 1, end_addr;
	unsigned long pfn;
	unsigned long hsz = 0;
	int ptes = 0;

	/*
	 * When racing against e.g. zap_pte_range() on another cpu,
	 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
	 * try_to_unmap() may return before page_mapped() has become false,
	 * if page table locking is skipped: use TTU_SYNC to wait for that.
	 */
	if (flags & TTU_SYNC)
		pvmw.flags = PVMW_SYNC;

	/*
	 * For THP, we have to assume the worst case, i.e. a PMD, for
	 * invalidation. For hugetlb, it could be much worse if we need to do
	 * PUD invalidation in the case of PMD sharing.
	 *
	 * Note that the folio cannot be freed in this function, as the caller
	 * of try_to_unmap() must hold a reference on the folio.
	 */
	range.end = vma_address_end(&pvmw);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				address, range.end);
	if (folio_test_hugetlb(folio)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
		 */
		adjust_range_if_pmd_sharing_possible(vma, &range.start,
						     &range.end);

		/* We need the huge page size for set_huge_pte_at() */
		hsz = huge_page_size(hstate_vma(vma));
	}
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		/*
		 * If the folio is in an mlock()d vma, we must not swap it out.
		 */
		if (!(flags & TTU_IGNORE_MLOCK) &&
		    (vma->vm_flags & VM_LOCKED)) {
			ptes++;

			/*
			 * Set 'ret' to indicate the page cannot be unmapped.
			 *
			 * Do not jump to walk_abort immediately, as additional
			 * iterations might be required to detect a fully
			 * mapped folio and mlock it.
			 */
			ret = false;

			/* Only mlock fully mapped pages */
			if (pvmw.pte && ptes != pvmw.nr_pages)
				continue;

			/*
			 * All PTEs must be protected by the page table lock in
			 * order to mlock the page.
			 *
			 * If a page table boundary has been crossed, the
			 * current PTL protects only part of the PTEs.
			 */
			if (pvmw.flags & PVMW_PGTABLE_CROSSED)
				goto walk_done;

			/* Restore the mlock which got missed */
			mlock_vma_folio(folio, vma);
			goto walk_done;
		}

		if (!pvmw.pte) {
			if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
				if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio))
					goto walk_done;
				/*
				 * unmap_huge_pmd_locked has either already marked
				 * the folio as swap-backed or decided to retain it
				 * due to GUP or speculative references.
				 */
				goto walk_abort;
			}

			if (flags & TTU_SPLIT_HUGE_PMD) {
				/*
				 * We temporarily have to drop the PTL and
				 * restart so we can process the PTE-mapped THP.
				 */
				split_huge_pmd_locked(vma, pvmw.address,
						      pvmw.pmd, false);
				flags &= ~TTU_SPLIT_HUGE_PMD;
				page_vma_mapped_walk_restart(&pvmw);
				continue;
			}
		}

		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_FOLIO(!pvmw.pte, folio);

		/*
		 * Handle PFN swap PTEs, such as device-exclusive ones, that
		 * actually map pages.
		 */
		pteval = ptep_get(pvmw.pte);
		if (likely(pte_present(pteval))) {
			pfn = pte_pfn(pteval);
		} else {
			const softleaf_t entry = softleaf_from_pte(pteval);

			pfn = softleaf_to_pfn(entry);
			VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
		}

		subpage = folio_page(folio, pfn - folio_pfn(folio));
		address = pvmw.address;
		anon_exclusive = folio_test_anon(folio) &&
				 PageAnonExclusive(subpage);

		if (folio_test_hugetlb(folio)) {
			bool anon = folio_test_anon(folio);

			/*
			 * try_to_unmap() is only ever passed a hugetlb folio
			 * in the case where that folio is poisoned.
			 */
			VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage);
			/*
			 * huge_pmd_unshare may unmap an entire PMD page.
			 * There is no way of knowing exactly which PMDs may
			 * be cached for this mm, so we must flush them all.
			 * start/end were already adjusted above to cover this
			 * range.
			 */
			flush_cache_range(vma, range.start, range.end);

			/*
			 * To call huge_pmd_unshare, i_mmap_rwsem must be
			 * held in write mode. The caller needs to explicitly
			 * do this outside rmap routines.
			 *
			 * We also must hold the hugetlb vma_lock in write mode.
			 * Lock order dictates acquiring vma_lock BEFORE
			 * i_mmap_rwsem. We can only trylock here and fail
			 * if unsuccessful.
			 */
			if (!anon) {
				struct mmu_gather tlb;

				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
				if (!hugetlb_vma_trylock_write(vma))
					goto walk_abort;

				tlb_gather_mmu_vma(&tlb, vma);
				if (huge_pmd_unshare(&tlb, vma, address, pvmw.pte)) {
					hugetlb_vma_unlock_write(vma);
					huge_pmd_unshare_flush(&tlb, vma);
					tlb_finish_mmu(&tlb);
					/*
					 * The PMD table was unmapped,
					 * consequently unmapping the folio.
					 */
					goto walk_done;
				}
				hugetlb_vma_unlock_write(vma);
				tlb_finish_mmu(&tlb);
			}
			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
			if (pte_dirty(pteval))
				folio_mark_dirty(folio);
		} else if (likely(pte_present(pteval))) {
			nr_pages = folio_unmap_pte_batch(folio, &pvmw, flags, pteval);
			end_addr = address + nr_pages * PAGE_SIZE;
			flush_cache_range(vma, address, end_addr);

			/* Nuke the page table entry. */
			pteval = get_and_clear_ptes(mm, address, pvmw.pte, nr_pages);
			/*
			 * We clear the PTE but do not flush, so potentially
			 * a remote CPU could still be writing to the folio.
			 * If the entry was previously clean then the
			 * architecture must guarantee that a clear->dirty
			 * transition on a cached TLB entry is written through
			 * and traps if the PTE is unmapped.
			 */
			if (should_defer_flush(mm, flags))
				set_tlb_ubc_flush_pending(mm, pteval, address, end_addr);
			else
				flush_tlb_range(vma, address, end_addr);
			if (pte_dirty(pteval))
				folio_mark_dirty(folio);
		} else {
			pte_clear(mm, address, pvmw.pte);
		}

		/*
		 * Now the pte is cleared. If this pte was uffd-wp armed,
		 * we may want to replace a none pte with a marker pte if
		 * it's file-backed, so we don't lose the tracking info.
		 */
		pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) {
			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			if (folio_test_hugetlb(folio)) {
				hugetlb_count_sub(folio_nr_pages(folio), mm);
				set_huge_pte_at(mm, address, pvmw.pte, pteval,
						hsz);
			} else {
				dec_mm_counter(mm, mm_counter(folio));
				set_pte_at(mm, address, pvmw.pte, pteval);
			}
		} else if (likely(pte_present(pteval)) && pte_unused(pteval) &&
			   !userfaultfd_armed(vma)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 * A future reference will then fault in a new zero
			 * page. When userfaultfd is active, we must not drop
			 * this page though, as its main user (postcopy
			 * migration) will not expect userfaults on already
			 * copied pages.
			 */
			dec_mm_counter(mm, mm_counter(folio));
		} else if (folio_test_anon(folio)) {
			swp_entry_t entry = page_swap_entry(subpage);
			pte_t swp_pte;
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (unlikely(folio_test_swapbacked(folio) !=
					folio_test_swapcache(folio))) {
				WARN_ON_ONCE(1);
				goto walk_abort;
			}

			/* MADV_FREE page check */
			if (!folio_test_swapbacked(folio)) {
				int ref_count, map_count;

				/*
				 * Synchronize with gup_pte_range():
				 * - clear PTE; barrier; read refcount
				 * - inc refcount; barrier; read PTE
				 */
				smp_mb();

				ref_count = folio_ref_count(folio);
				map_count = folio_mapcount(folio);

				/*
				 * Order reads for page refcount and dirty flag
				 * (see comments in __remove_mapping()).
				 */
				smp_rmb();

				if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) {
					/*
					 * Redirtied either using the page table or
					 * a previously obtained GUP reference.
					 */
					set_ptes(mm, address, pvmw.pte, pteval, nr_pages);
					folio_set_swapbacked(folio);
					goto walk_abort;
				} else if (ref_count != 1 + map_count) {
					/*
					 * Additional reference. Could be a GUP reference or any
					 * speculative reference. GUP users must mark the folio
					 * dirty if there was a modification. This folio cannot be
					 * reclaimed right now either way, so act just like nothing
					 * happened.
					 * We'll come back here later and detect if the folio was
					 * dirtied when the additional reference is gone.
					 */
					set_ptes(mm, address, pvmw.pte, pteval, nr_pages);
					goto walk_abort;
				}
				add_mm_counter(mm, MM_ANONPAGES, -nr_pages);
				goto discard;
			}

			if (folio_dup_swap(folio, subpage) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				goto walk_abort;
			}

			/*
			 * arch_unmap_one() is expected to be a NOP on
			 * architectures where we could have PFN swap PTEs,
			 * so we'll not check/care.
			 */
			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				folio_put_swap(folio, subpage);
				set_pte_at(mm, address, pvmw.pte, pteval);
				goto walk_abort;
			}

			/* See folio_try_share_anon_rmap(): clear PTE first. */
			if (anon_exclusive &&
			    folio_try_share_anon_rmap_pte(folio, subpage)) {
				folio_put_swap(folio, subpage);
				set_pte_at(mm, address, pvmw.pte, pteval);
				goto walk_abort;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (anon_exclusive)
				swp_pte = pte_swp_mkexclusive(swp_pte);
			if (likely(pte_present(pteval))) {
				if (pte_soft_dirty(pteval))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_uffd_wp(pteval))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			} else {
				if (pte_swp_soft_dirty(pteval))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_swp_uffd_wp(pteval))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			}
			set_pte_at(mm, address, pvmw.pte, swp_pte);
		} else {
			/*
			 * This is a locked file-backed folio,
			 * so it cannot be removed from the page
			 * cache and replaced by a new folio before
			 * mmu_notifier_invalidate_range_end, so no
			 * concurrent thread might update its page table
			 * to point at a new folio while a device is
			 * still using this folio.
			 *
			 * See Documentation/mm/mmu_notifier.rst
			 */
			add_mm_counter(mm, mm_counter_file(folio), -nr_pages);
		}
discard:
		if (unlikely(folio_test_hugetlb(folio))) {
			hugetlb_remove_rmap(folio);
		} else {
			folio_remove_rmap_ptes(folio, subpage, nr_pages, vma);
		}
		if (vma->vm_flags & VM_LOCKED)
			mlock_drain_local();
		folio_put_refs(folio, nr_pages);

		/*
		 * If we are sure that we batched the entire folio and cleared
		 * all PTEs, we can just optimize and stop right here.
		 */
		if (nr_pages == folio_nr_pages(folio))
			goto walk_done;
		continue;
walk_abort:
		ret = false;
walk_done:
		page_vma_mapped_walk_done(&pvmw);
		break;
	}

	mmu_notifier_invalidate_range_end(&range);

	return ret;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return vma_is_temporary_stack(vma);
}

static int folio_not_mapped(struct folio *folio)
{
	return !folio_mapped(folio);
}

/**
 * try_to_unmap - Try to remove all page table mappings to a folio.
 * @folio: The folio to unmap.
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * folio. It is the caller's responsibility to check if the folio is
 * still mapped if needed (use TTU_SYNC to prevent accounting races).
 *
 * Context: Caller must hold the folio lock.
 */
void try_to_unmap(struct folio *folio, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = folio_not_mapped,
		.anon_lock = folio_lock_anon_vma_read,
	};

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(folio, &rwc);
	else
		rmap_walk(folio, &rwc);
}
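
/*
 * Example usage of try_to_unmap() (a minimal reclaim-style sketch;
 * "reclaim_it" is a placeholder for what a real caller such as
 * shrink_folio_list() would do next):
 *
 *	if (folio_trylock(folio)) {
 *		try_to_unmap(folio, TTU_BATCH_FLUSH);
 *		if (!folio_mapped(folio))
 *			reclaim_it(folio);
 *		folio_unlock(folio);
 *	}
 */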

/*
 * @arg: enum ttu_flags will be passed to this argument.
 *
 * If TTU_SPLIT_HUGE_PMD is specified, any PMD mappings will be split into
 * PTEs containing migration entries.
 */
static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
		unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	bool anon_exclusive, writable, ret = true;
	pte_t pteval;
	struct page *subpage;
	struct mmu_notifier_range range;
	enum ttu_flags flags = (enum ttu_flags)(long)arg;
	unsigned long pfn;
	unsigned long hsz = 0;

	/*
	 * When racing against e.g. zap_pte_range() on another cpu,
	 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
	 * try_to_migrate() may return before page_mapped() has become false,
	 * if page table locking is skipped: use TTU_SYNC to wait for that.
	 */
	if (flags & TTU_SYNC)
		pvmw.flags = PVMW_SYNC;

	/*
	 * For THP, we have to assume the worst case, i.e. a PMD, for
	 * invalidation. For hugetlb, it could be much worse if we need to do
	 * PUD invalidation in the case of PMD sharing.
	 *
	 * Note that the folio cannot be freed in this function, as the caller
	 * of try_to_migrate() must hold a reference on the folio.
	 */
	range.end = vma_address_end(&pvmw);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				address, range.end);
	if (folio_test_hugetlb(folio)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
		 */
		adjust_range_if_pmd_sharing_possible(vma, &range.start,
						     &range.end);

		/* We need the huge page size for set_huge_pte_at() */
		hsz = huge_page_size(hstate_vma(vma));
	}
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			__maybe_unused unsigned long pfn;
			__maybe_unused pmd_t pmdval;

			if (flags & TTU_SPLIT_HUGE_PMD) {
				split_huge_pmd_locked(vma, pvmw.address,
						      pvmw.pmd, true);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
			pmdval = pmdp_get(pvmw.pmd);
			if (likely(pmd_present(pmdval)))
				pfn = pmd_pfn(pmdval);
			else
				pfn = softleaf_to_pfn(softleaf_from_pmd(pmdval));

			subpage = folio_page(folio, pfn - folio_pfn(folio));

			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);

			if (set_pmd_migration_entry(&pvmw, subpage)) {
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			continue;
#endif
		}

		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_FOLIO(!pvmw.pte, folio);

		/*
		 * Handle PFN swap PTEs, such as device-exclusive ones, that
		 * actually map pages.
		 */
		pteval = ptep_get(pvmw.pte);
		if (likely(pte_present(pteval))) {
			pfn = pte_pfn(pteval);
		} else {
			const softleaf_t entry = softleaf_from_pte(pteval);

			pfn = softleaf_to_pfn(entry);
			VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
		}

		subpage = folio_page(folio, pfn - folio_pfn(folio));
		address = pvmw.address;
		anon_exclusive = folio_test_anon(folio) &&
				 PageAnonExclusive(subpage);

		if (folio_test_hugetlb(folio)) {
			bool anon = folio_test_anon(folio);

			/*
			 * huge_pmd_unshare may unmap an entire PMD page.
			 * There is no way of knowing exactly which PMDs may
			 * be cached for this mm, so we must flush them all.
			 * start/end were already adjusted above to cover this
			 * range.
			 */
			flush_cache_range(vma, range.start, range.end);

			/*
			 * To call huge_pmd_unshare, i_mmap_rwsem must be
			 * held in write mode. The caller needs to explicitly
			 * do this outside rmap routines.
			 *
			 * We also must hold the hugetlb vma_lock in write mode.
			 * Lock order dictates acquiring vma_lock BEFORE
			 * i_mmap_rwsem. We can only trylock here and
			 * fail if unsuccessful.
			 */
			if (!anon) {
				struct mmu_gather tlb;

				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
				if (!hugetlb_vma_trylock_write(vma)) {
					page_vma_mapped_walk_done(&pvmw);
					ret = false;
					break;
				}

				tlb_gather_mmu_vma(&tlb, vma);
				if (huge_pmd_unshare(&tlb, vma, address, pvmw.pte)) {
					hugetlb_vma_unlock_write(vma);
					huge_pmd_unshare_flush(&tlb, vma);
					tlb_finish_mmu(&tlb);
					/*
					 * The PMD table was unmapped,
					 * consequently unmapping the folio.
					 */
					page_vma_mapped_walk_done(&pvmw);
					break;
				}
				hugetlb_vma_unlock_write(vma);
				tlb_finish_mmu(&tlb);
			}
			/* Nuke the hugetlb page table entry */
			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
			if (pte_dirty(pteval))
				folio_mark_dirty(folio);
			writable = pte_write(pteval);
		} else if (likely(pte_present(pteval))) {
			flush_cache_page(vma, address, pfn);
			/* Nuke the page table entry. */
			if (should_defer_flush(mm, flags)) {
				/*
				 * We clear the PTE but do not flush, so potentially
				 * a remote CPU could still be writing to the folio.
				 * If the entry was previously clean then the
				 * architecture must guarantee that a clear->dirty
				 * transition on a cached TLB entry is written through
				 * and traps if the PTE is unmapped.
				 */
				pteval = ptep_get_and_clear(mm, address, pvmw.pte);

				set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE);
			} else {
				pteval = ptep_clear_flush(vma, address, pvmw.pte);
			}
			if (pte_dirty(pteval))
				folio_mark_dirty(folio);
			writable = pte_write(pteval);
		} else {
			const softleaf_t entry = softleaf_from_pte(pteval);

			pte_clear(mm, address, pvmw.pte);

			writable = softleaf_is_device_private_write(entry);
		}

		VM_WARN_ON_FOLIO(writable && folio_test_anon(folio) &&
				 !anon_exclusive, folio);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (PageHWPoison(subpage)) {
			VM_WARN_ON_FOLIO(folio_is_device_private(folio), folio);

			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			if (folio_test_hugetlb(folio)) {
				hugetlb_count_sub(folio_nr_pages(folio), mm);
				set_huge_pte_at(mm, address, pvmw.pte, pteval,
						hsz);
			} else {
				dec_mm_counter(mm, mm_counter(folio));
				set_pte_at(mm, address, pvmw.pte, pteval);
			}
		} else if (likely(pte_present(pteval)) && pte_unused(pteval) &&
			   !userfaultfd_armed(vma)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 * A future reference will then fault in a new zero
			 * page. When userfaultfd is active, we must not drop
			 * this page though, as its main user (postcopy
			 * migration) will not expect userfaults on already
			 * copied pages.
			 */
			dec_mm_counter(mm, mm_counter(folio));
		} else {
			swp_entry_t entry;
			pte_t swp_pte;

			/*
			 * arch_unmap_one() is expected to be a NOP on
			 * architectures where we could have PFN swap PTEs,
			 * so we'll not check/care.
			 */
			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				if (folio_test_hugetlb(folio))
					set_huge_pte_at(mm, address, pvmw.pte,
							pteval, hsz);
				else
					set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/* See folio_try_share_anon_rmap_pte(): clear PTE first. */
			if (folio_test_hugetlb(folio)) {
				if (anon_exclusive &&
				    hugetlb_try_share_anon_rmap(folio)) {
					set_huge_pte_at(mm, address, pvmw.pte,
							pteval, hsz);
					ret = false;
					page_vma_mapped_walk_done(&pvmw);
					break;
				}
			} else if (anon_exclusive &&
				   folio_try_share_anon_rmap_pte(folio, subpage)) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			if (writable)
				entry = make_writable_migration_entry(
							page_to_pfn(subpage));
			else if (anon_exclusive)
				entry = make_readable_exclusive_migration_entry(
							page_to_pfn(subpage));
			else
				entry = make_readable_migration_entry(
							page_to_pfn(subpage));
			if (likely(pte_present(pteval))) {
				if (pte_young(pteval))
					entry = make_migration_entry_young(entry);
				if (pte_dirty(pteval))
					entry = make_migration_entry_dirty(entry);
				swp_pte = swp_entry_to_pte(entry);
				if (pte_soft_dirty(pteval))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_uffd_wp(pteval))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			} else {
				swp_pte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(pteval))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_swp_uffd_wp(pteval))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			}
			if (folio_test_hugetlb(folio))
				set_huge_pte_at(mm, address, pvmw.pte, swp_pte,
						hsz);
			else
				set_pte_at(mm, address, pvmw.pte, swp_pte);
			trace_set_migration_pte(address, pte_val(swp_pte),
						folio_order(folio));
			/*
			 * No need to invalidate here: it will synchronize
			 * against the special swap migration pte.
			 */
		}

		if (unlikely(folio_test_hugetlb(folio)))
			hugetlb_remove_rmap(folio);
		else
			folio_remove_rmap_pte(folio, subpage, vma);
		if (vma->vm_flags & VM_LOCKED)
			mlock_drain_local();
		folio_put(folio);
	}

	mmu_notifier_invalidate_range_end(&range);

	return ret;
}

/**
 * try_to_migrate - try to replace all page table mappings with swap entries
 * @folio: the folio to replace page table entries for
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this folio and
 * replace them with special swap entries. Caller must hold the folio lock.
 */
void try_to_migrate(struct folio *folio, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_migrate_one,
		.arg = (void *)flags,
		.done = folio_not_mapped,
		.anon_lock = folio_lock_anon_vma_read,
	};

	/*
	 * Migration always ignores mlock and only supports the
	 * TTU_RMAP_LOCKED, TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH
	 * flags.
	 */
	if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
					TTU_SYNC | TTU_BATCH_FLUSH)))
		return;

	if (folio_is_zone_device(folio) &&
	    (!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
		return;

	/*
	 * During exec, a temporary VMA is set up and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables, leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
	if (!folio_test_ksm(folio) && folio_test_anon(folio))
		rwc.invalid_vma = invalid_migration_vma;

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(folio, &rwc);
	else
		rmap_walk(folio, &rwc);
}
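
/*
 * Example usage of try_to_migrate() (a minimal migration-style sketch;
 * "copy_and_switch" is a placeholder for moving the contents and the
 * pagecache/anon state from src to dst, as migrate_pages() orchestrates):
 *
 *	folio_lock(src);
 *	try_to_migrate(src, 0);
 *	if (!folio_mapped(src)) {
 *		copy_and_switch(src, dst);
 *		remove_migration_ptes(src, dst, 0);
 *	}
 *	folio_unlock(src);
 */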

#ifdef CONFIG_DEVICE_PRIVATE
/**
 * make_device_exclusive() - Mark a page for exclusive use by a device
 * @mm: mm_struct of associated target process
 * @addr: the virtual address to mark for exclusive device access
 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
 * @foliop: folio pointer will be stored here on success.
 *
 * This function looks up the page mapped at the given address, grabs a
 * folio reference, locks the folio and replaces the PTE with a special
 * device-exclusive PFN swap entry, preventing access through the process
 * page tables. The function will return with the folio locked and referenced.
 *
 * On fault, the device-exclusive entries are replaced with the original PTE
 * under folio lock, after calling MMU notifiers.
 *
 * Only anonymous non-hugetlb folios are supported and the VMA must have
 * write permissions such that we can fault in the anonymous page writable
 * in order to mark it exclusive. The caller must hold the mmap_lock in read
 * mode.
 *
 * A driver using this to program access from a device must use an MMU
 * notifier critical section to hold a device-specific lock during
 * programming. Once programming is complete, it should drop the folio lock
 * and reference, after which point CPU access to the page will revoke the
 * exclusive access.
 *
 * Notes:
 * #. This function always operates on individual PTEs mapping individual
 *    pages. PMD-sized THPs are first remapped to be mapped by PTEs before
 *    the conversion happens on a single PTE corresponding to @addr.
 * #. While concurrent access through the process page tables is prevented,
 *    concurrent access through other page references (e.g., earlier GUP
 *    invocation) is not handled and not supported.
 * #. device-exclusive entries are considered "clean" and "old" by core-mm.
 *    Device drivers must update the folio state when informed by MMU
 *    notifiers.
 *
 * Returns: pointer to mapped page on success, otherwise a negative error.
 */
struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr,
		void *owner, struct folio **foliop)
{
	struct mmu_notifier_range range;
	struct folio *folio, *fw_folio;
	struct vm_area_struct *vma;
	struct folio_walk fw;
	struct page *page;
	swp_entry_t entry;
	pte_t swp_pte;
	int ret;

	mmap_assert_locked(mm);
	addr = PAGE_ALIGN_DOWN(addr);

	/*
	 * Fault in the page writable and try to lock it; note that if the
	 * address would already be marked for exclusive use by a device,
	 * the GUP call would undo that first by triggering a fault.
	 *
	 * If any other device would already map this page exclusively, the
	 * fault will trigger a conversion to an ordinary
	 * (non-device-exclusive) PTE and issue a MMU_NOTIFY_EXCLUSIVE.
	 */
retry:
	page = get_user_page_vma_remote(mm, addr,
					FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
					&vma);
	if (IS_ERR(page))
		return page;
	folio = page_folio(page);

	if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) {
		folio_put(folio);
		return ERR_PTR(-EOPNOTSUPP);
	}

	ret = folio_lock_killable(folio);
	if (ret) {
		folio_put(folio);
		return ERR_PTR(ret);
	}

	/*
	 * Inform secondary MMUs that we are going to convert this PTE to
	 * device-exclusive, such that they unmap it now. Note that the
	 * caller must filter this event out to prevent livelocks.
	 */
	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
				      mm, addr, addr + PAGE_SIZE, owner);
	mmu_notifier_invalidate_range_start(&range);

	/*
	 * Let's do a second walk and make sure we still find the same page
	 * mapped writable. Note that any page of an anonymous folio can
	 * only be mapped writable using exactly one PTE ("exclusive"), so
	 * there cannot be other mappings.
	 */
	fw_folio = folio_walk_start(&fw, vma, addr, 0);
	if (fw_folio != folio || fw.page != page ||
	    fw.level != FW_LEVEL_PTE || !pte_write(fw.pte)) {
		if (fw_folio)
			folio_walk_end(&fw, vma);
		mmu_notifier_invalidate_range_end(&range);
		folio_unlock(folio);
		folio_put(folio);
		goto retry;
	}

	/* Nuke the page table entry so we get the uptodate dirty bit. */
	flush_cache_page(vma, addr, page_to_pfn(page));
	fw.pte = ptep_clear_flush(vma, addr, fw.ptep);

	/* Set the dirty flag on the folio now the PTE is gone. */
	if (pte_dirty(fw.pte))
		folio_mark_dirty(folio);

	/*
	 * Store the pfn of the page in a special device-exclusive PFN swap PTE.
	 * do_swap_page() will trigger the conversion back while holding the
	 * folio lock.
	 */
	entry = make_device_exclusive_entry(page_to_pfn(page));
	swp_pte = swp_entry_to_pte(entry);
	if (pte_soft_dirty(fw.pte))
		swp_pte = pte_swp_mksoft_dirty(swp_pte);
	/* The pte is writable, uffd-wp does not apply. */
	set_pte_at(mm, addr, fw.ptep, swp_pte);

	folio_walk_end(&fw, vma);
	mmu_notifier_invalidate_range_end(&range);
	*foliop = folio;
	return page;
}
EXPORT_SYMBOL_GPL(make_device_exclusive);
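
/*
 * Example usage of make_device_exclusive() (a minimal driver-side sketch;
 * "program_device_mapping" and "driver_owner" are placeholders for
 * driver-specific code, which must also filter out the MMU_NOTIFY_EXCLUSIVE
 * event this call triggers):
 *
 *	mmap_read_lock(mm);
 *	page = make_device_exclusive(mm, addr, driver_owner, &folio);
 *	if (!IS_ERR(page)) {
 *		program_device_mapping(page);
 *		folio_unlock(folio);
 *		folio_put(folio);
 *	}
 *	mmap_read_unlock(mm);
 */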
#endif

void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	anon_vma_free(anon_vma);
	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);
}

static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio,
					    struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;

	if (rwc->anon_lock)
		return rwc->anon_lock(folio, rwc);

	/*
	 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_lock. Users without mmap_lock are required to
	 * take a reference count to prevent the anon_vma disappearing.
	 */
	anon_vma = folio_anon_vma(folio);
	if (!anon_vma)
		return NULL;

	if (anon_vma_trylock_read(anon_vma))
		goto out;

	if (rwc->try_lock) {
		anon_vma = NULL;
		rwc->contended = true;
		goto out;
	}

	anon_vma_lock_read(anon_vma);
out:
	return anon_vma;
}

/*
 * rmap_walk_anon - do something to an anonymous folio using the object-based
 * rmap method
 * @folio: the folio to be handled
 * @rwc: control variable according to each walk type
 * @locked: caller holds relevant rmap lock
 *
 * Find all the mappings of a folio using the mapping pointer and the vma
 * chains contained in the anon_vma struct it points to.
 */
static void rmap_walk_anon(struct folio *folio,
		struct rmap_walk_control *rwc, bool locked)
{
	struct anon_vma *anon_vma;
	pgoff_t pgoff_start, pgoff_end;
	struct anon_vma_chain *avc;

	/*
	 * The folio lock ensures that folio->mapping can't be changed under us
	 * to an anon_vma with a different root.
	 */
	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);

	if (locked) {
		anon_vma = folio_anon_vma(folio);
		/* Did the anon_vma disappear under us? */
		VM_BUG_ON_FOLIO(!anon_vma, folio);
	} else {
		anon_vma = rmap_walk_anon_lock(folio, rwc);
	}
	if (!anon_vma)
		return;

	pgoff_start = folio_pgoff(folio);
	pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
			pgoff_start, pgoff_end) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(vma, pgoff_start,
				folio_nr_pages(folio));

		VM_BUG_ON_VMA(address == -EFAULT, vma);
		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
			break;
		if (rwc->done && rwc->done(folio))
			break;
	}

	if (!locked)
		anon_vma_unlock_read(anon_vma);
}

/**
 * __rmap_walk_file() - Traverse the reverse mapping for a file-backed mapping
 * of a page mapped within a specified page cache object at a specified offset.
 *
 * @folio: Either the folio whose mappings to traverse, or NULL, in which
 *         case the callbacks specified in @rwc must be configured so as
 *         to be able to look up mappings correctly.
 * @mapping: The page cache object whose mapping VMAs we intend to
 *         traverse. If @folio is non-NULL, this should be equal to
 *         folio_mapping(folio).
 * @pgoff_start: The offset within @mapping of the page which we are
 *         looking up. If @folio is non-NULL, this should be equal
 *         to folio_pgoff(folio).
 * @nr_pages: The number of pages mapped by the mapping. If @folio is
 *         non-NULL, this should be equal to folio_nr_pages(folio).
 * @rwc: The reverse mapping walk control object describing how
 *         the traversal should proceed.
 * @locked: Is the @mapping already locked? If not, we acquire the
 *         lock.
 */
static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
			     pgoff_t pgoff_start, unsigned long nr_pages,
			     struct rmap_walk_control *rwc, bool locked)
{
	pgoff_t pgoff_end = pgoff_start + nr_pages - 1;
	struct vm_area_struct *vma;

	VM_WARN_ON_FOLIO(folio && mapping != folio_mapping(folio), folio);
	VM_WARN_ON_FOLIO(folio && pgoff_start != folio_pgoff(folio), folio);
	VM_WARN_ON_FOLIO(folio && nr_pages != folio_nr_pages(folio), folio);

	if (!locked) {
		if (i_mmap_trylock_read(mapping))
			goto lookup;

		if (rwc->try_lock) {
			rwc->contended = true;
			return;
		}

		i_mmap_lock_read(mapping);
	}
lookup:
	vma_interval_tree_foreach(vma, &mapping->i_mmap,
			pgoff_start, pgoff_end) {
		unsigned long address = vma_address(vma, pgoff_start, nr_pages);

		VM_BUG_ON_VMA(address == -EFAULT, vma);
		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
			goto done;
		if (rwc->done && rwc->done(folio))
			goto done;
	}
done:
	if (!locked)
		i_mmap_unlock_read(mapping);
}

/*
 * rmap_walk_file - do something to file page using the object-based rmap method
 * @folio: the folio to be handled
 * @rwc: control variable according to each walk type
 * @locked: caller holds relevant rmap lock
 *
 * Find all the mappings of a folio using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 */
static void rmap_walk_file(struct folio *folio,
		struct rmap_walk_control *rwc, bool locked)
{
	/*
	 * The folio lock not only makes sure that folio->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the structure
	 * at mapping cannot be freed and reused yet, so we can safely take
	 * mapping->i_mmap_rwsem.
	 */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (!folio->mapping)
		return;

	__rmap_walk_file(folio, folio->mapping, folio->index,
			 folio_nr_pages(folio), rwc, locked);
}

void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
{
	if (unlikely(folio_test_ksm(folio)))
		rmap_walk_ksm(folio, rwc);
	else if (folio_test_anon(folio))
		rmap_walk_anon(folio, rwc, false);
	else
		rmap_walk_file(folio, rwc, false);
}
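
/*
 * Example usage of rmap_walk() (a minimal sketch of a custom walk;
 * "my_rmap_one" and "my_state" are placeholders for caller-supplied
 * pieces, where the callback returns false to stop the walk early):
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one = my_rmap_one,
 *		.arg = &my_state,
 *	};
 *
 *	folio_lock(folio);
 *	rmap_walk(folio, &rwc);
 *	folio_unlock(folio);
 */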

/* Like rmap_walk, but caller holds relevant rmap lock */
void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
{
	/* no ksm support for now */
	VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
	if (folio_test_anon(folio))
		rmap_walk_anon(folio, rwc, true);
	else
		rmap_walk_file(folio, rwc, true);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following two functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
		unsigned long address, rmap_t flags)
{
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);

	atomic_inc(&folio->_entire_mapcount);
	atomic_inc(&folio->_large_mapcount);
	if (flags & RMAP_EXCLUSIVE)
		SetPageAnonExclusive(&folio->page);
	VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
			 PageAnonExclusive(&folio->page), folio);
}

void hugetlb_add_new_anon_rmap(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address)
{
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);

	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	/* increment count (starts at -1) */
	atomic_set(&folio->_entire_mapcount, 0);
	atomic_set(&folio->_large_mapcount, 0);
	folio_clear_hugetlb_restore_reserve(folio);
	__folio_set_anon(folio, vma, address, true);
	SetPageAnonExclusive(&folio->page);
}
#endif /* CONFIG_HUGETLB_PAGE */
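
/*
 * Example usage of hugetlb_add_new_anon_rmap() (a minimal sketch of a
 * hugetlb anonymous-fault-style caller; real code in mm/hugetlb.c also
 * handles reservations, the pagecache and error paths, and "new_pte"
 * stands in for a huge PTE built by the fault handler):
 *
 *	folio_lock(folio);
 *	hugetlb_add_new_anon_rmap(folio, vma, haddr);
 *	set_huge_pte_at(mm, haddr, ptep, new_pte, huge_page_size(h));
 *	folio_unlock(folio);
 */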