// SPDX-License-Identifier: GPL-2.0
/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_rwsem	(while writing or truncating, not reading or faulting)
 *   mm->mmap_lock
 *     mapping->invalidate_lock (in filemap_fault)
 *       folio_lock
 *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
 *           vma_start_write
 *             mapping->i_mmap_rwsem
 *               anon_vma->rwsem
 *                 mm->page_table_lock or pte_lock
 *                   swap_lock (in swap_duplicate, swap_info_get)
 *                     mmlist_lock (in mmput, drain_mmlist and others)
 *                     mapping->private_lock (in block_dirty_folio)
 *                       i_pages lock (widely used)
 *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
 *                     inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                     bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                       sb_lock (within inode_lock in fs/fs-writeback.c)
 *                       i_pages lock (widely used, in set_page_dirty,
 *                                 in arch-dependent flush_dcache_mmap_lock,
 *                                 within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 *
 * hugetlbfs PageHuge() take locks in this order:
 *   hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *     vma_lock (hugetlb specific lock for pmd_sharing)
 *       mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
 *         folio_lock
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/leafops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/mm_inline.h>
#include <linux/oom.h>

#include <asm/tlb.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"
#include "swap.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->num_children = 0;
		anon_vma->num_active_vmas = 0;
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against folio_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
	 *
	 * folio_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_assign(struct vm_area_struct *vma,
				  struct anon_vma_chain *avc,
				  struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
 * and that may actually touch the rwsem even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
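 *
 * Concretely, the code below takes the anon_vma write lock, then re-checks
 * vma->anon_vma under mm->page_table_lock and only installs the found or
 * freshly allocated anon_vma if no other thread has set one in the meantime.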
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	mmap_assert_locked(mm);
	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		anon_vma->num_children++; /* self-parent link for new root */
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_assign(vma, avc, anon_vma);
		anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
		anon_vma->num_active_vmas++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}

static void check_anon_vma_clone(struct vm_area_struct *dst,
				 struct vm_area_struct *src,
				 enum vma_operation operation)
{
	/* The write lock must be held. */
	mmap_assert_write_locked(src->vm_mm);
	/* If not a fork then must be on same mm. */
	VM_WARN_ON_ONCE(operation != VMA_OP_FORK && dst->vm_mm != src->vm_mm);

	/* If we have anything to do src->anon_vma must be provided. */
	VM_WARN_ON_ONCE(!src->anon_vma && !list_empty(&src->anon_vma_chain));
	VM_WARN_ON_ONCE(!src->anon_vma && dst->anon_vma);
	/* We are establishing a new anon_vma_chain. */
	VM_WARN_ON_ONCE(!list_empty(&dst->anon_vma_chain));
	/*
	 * On fork, dst->anon_vma is set NULL (temporarily). Otherwise, anon_vma
	 * must be the same across dst and src.
	 */
	VM_WARN_ON_ONCE(dst->anon_vma && dst->anon_vma != src->anon_vma);
	/*
	 * Essentially equivalent to above - if not a no-op, we should expect
	 * dst->anon_vma to be set for everything except a fork.
	 */
	VM_WARN_ON_ONCE(operation != VMA_OP_FORK && src->anon_vma &&
			!dst->anon_vma);
	/* For the anon_vma to be compatible, it can only be singular. */
	VM_WARN_ON_ONCE(operation == VMA_OP_MERGE_UNFAULTED &&
			!list_is_singular(&src->anon_vma_chain));
#ifdef CONFIG_PER_VMA_LOCK
	/* Only merging an unfaulted VMA leaves the destination attached. */
	VM_WARN_ON_ONCE(operation != VMA_OP_MERGE_UNFAULTED &&
			vma_is_attached(dst));
#endif
}

static void maybe_reuse_anon_vma(struct vm_area_struct *dst,
				 struct anon_vma *anon_vma)
{
	/* If already populated, nothing to do. */
	if (dst->anon_vma)
		return;

	/*
	 * We reuse an anon_vma if any linking VMAs were unmapped and it has
	 * only a single child at most.
	 */
	if (anon_vma->num_active_vmas > 0)
		return;
	if (anon_vma->num_children > 1)
		return;

	dst->anon_vma = anon_vma;
	anon_vma->num_active_vmas++;
}

static void cleanup_partial_anon_vmas(struct vm_area_struct *vma);

/**
 * anon_vma_clone - Establishes new anon_vma_chain objects in @dst linking to
 * all of the anon_vma objects contained within @src anon_vma_chain's.
 * @dst: The destination VMA with an empty anon_vma_chain.
 * @src: The source VMA we wish to duplicate.
 * @operation: The type of operation which resulted in the clone.
 *
 * This is the heart of the VMA side of the anon_vma implementation - we invoke
 * this function whenever we need to set up a new VMA's anon_vma state.
 *
 * This is invoked for:
 *
 * - VMA Merge, but only when @dst is unfaulted and @src is faulted - meaning we
 *   clone @src into @dst.
 * - VMA split.
 * - VMA (m)remap.
 * - Fork of faulted VMA.
 *
 * In all cases other than fork this is simply a duplication. Fork additionally
 * adds a new active anon_vma.
 *
 * ONLY in the case of fork do we try to 'reuse' existing anon_vma's in an
 * anon_vma hierarchy, reusing anon_vma's which have no VMA associated with them
 * but do have a single child. This is to avoid waste of memory when repeatedly
 * forking.
 *
 * Returns: 0 on success, -ENOMEM on failure.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src,
		   enum vma_operation operation)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *active_anon_vma = src->anon_vma;

	check_anon_vma_clone(dst, src, operation);

	if (!active_anon_vma)
		return 0;

	/*
	 * Allocate AVCs. We don't need an anon_vma lock for this as we
	 * are not updating the anon_vma rbtree nor are we changing
	 * anon_vma statistics.
	 *
	 * Either src, dst have the same mm for which we hold an exclusive mmap
	 * write lock, or we are forking and we hold it on src->vm_mm and dst is
	 * not yet accessible to other threads so there's no possibility of the
	 * unlinked AVC's being observed yet.
	 */
	list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) {
		avc = anon_vma_chain_alloc(GFP_KERNEL);
		if (!avc)
			goto enomem_failure;

		anon_vma_chain_assign(dst, avc, pavc->anon_vma);
	}

	/*
	 * Now link the anon_vma's back to the newly inserted AVCs.
	 * Note that all anon_vma's share the same root.
	 */
	anon_vma_lock_write(src->anon_vma);
	list_for_each_entry_reverse(avc, &dst->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
		if (operation == VMA_OP_FORK)
			maybe_reuse_anon_vma(dst, anon_vma);
	}

	if (operation != VMA_OP_FORK)
		dst->anon_vma->num_active_vmas++;

	anon_vma_unlock_write(active_anon_vma);
	return 0;

 enomem_failure:
	cleanup_partial_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int rc;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		return -ENOMEM;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc) {
		put_anon_vma(anon_vma);
		return -ENOMEM;
	}

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	rc = anon_vma_clone(vma, pvma, VMA_OP_FORK);
	/* An error arose or an existing anon_vma was reused, all done then. */
	if (rc || vma->anon_vma) {
		put_anon_vma(anon_vma);
		anon_vma_chain_free(avc);
		return rc;
	}

	/*
	 * OK no reuse, so add our own anon_vma.
	 *
	 * Since it is not linked anywhere we can safely manipulate anon_vma
	 * fields without a lock.
	 */

	anon_vma->num_active_vmas = 1;
	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_chain_assign(vma, avc, anon_vma);
	/* Now let rmap see it. */
	anon_vma_lock_write(anon_vma);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
	anon_vma->parent->num_children++;
	anon_vma_unlock_write(anon_vma);

	return 0;
}

/*
 * In the unfortunate case of anon_vma_clone() failing to allocate memory we
 * have to clean things up.
 *
 * Since we allocate anon_vma_chain's before we insert them into the interval
 * trees, we simply have to free up the AVC's and remove the entries from the
 * VMA's anon_vma_chain.
 */
static void cleanup_partial_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;

	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}

	/*
	 * The anon_vma assigned to this VMA is no longer valid, as we were not
	 * able to correctly clone AVC state. Avoid inconsistent anon_vma tree
	 * state by resetting.
	 */
	vma->anon_vma = NULL;
}

/**
 * unlink_anon_vmas() - remove all links between a VMA and anon_vma's, freeing
 * anon_vma_chain objects.
 * @vma: The VMA whose links to anon_vma objects is to be severed.
 *
 * As part of the process anon_vma_chain's are freed,
 * anon_vma->num_children,num_active_vmas is updated as required and, if the
 * relevant anon_vma references no further VMAs, its reference count is
 * decremented.
 */
void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *active_anon_vma = vma->anon_vma;

	/* Always hold mmap lock, read-lock on unmap possibly. */
	mmap_assert_locked(vma->vm_mm);

	/* Unfaulted is a no-op. */
	if (!active_anon_vma) {
		VM_WARN_ON_ONCE(!list_empty(&vma->anon_vma_chain));
		return;
	}

	anon_vma_lock_write(active_anon_vma);

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
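		 * (Freeing them here would mean calling __put_anon_vma(),
		 * which needs to write-acquire anon_vma->root->rwsem - the
		 * very lock we are holding.)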
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->num_children--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}

	active_anon_vma->num_active_vmas--;
	/*
	 * vma would still be needed after unlink, and anon_vma will be
	 * prepared again when handling a fault.
	 */
	vma->anon_vma = NULL;
	anon_vma_unlock_write(active_anon_vma);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->num_children);
		VM_WARN_ON(anon_vma->num_active_vmas);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against folio_remove_rmap_*()
 * the best this function can do is return a refcount increased anon_vma
 * that might have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 *
 * NOTE: the caller should hold folio lock when calling this.
 */
struct anon_vma *folio_get_anon_vma(const struct folio *folio)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - FOLIO_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this folio is still mapped, then its anon_vma cannot have been
	 * freed.
	 * But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to folio_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with folio_get_anon_vma() and then block on the mutex
 * in the !rwc->try_lock case.
 */
struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
					  struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - FOLIO_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the folio is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!folio_mapped(folio)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	if (rwc && rwc->try_lock) {
		anon_vma = NULL;
		rwc->contended = true;
		goto out;
	}

	/* trylock failed, we've got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
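 *
 * The flush below is a no-op unless set_tlb_ubc_flush_pending() has marked a
 * batched flush as required for the current task.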
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

/*
 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
 */
#define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
#define TLB_FLUSH_BATCH_PENDING_MASK	\
	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
#define TLB_FLUSH_BATCH_PENDING_LARGE	\
	(TLB_FLUSH_BATCH_PENDING_MASK / 2)

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
				      unsigned long start, unsigned long end)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	int batch;
	bool writable = pte_dirty(pteval);

	if (!pte_accessible(mm, pteval))
		return;

	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, start, end);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure compiler does not re-order the setting of tlb_flush_batched
	 * before the PTE is cleared.
	 */
	barrier();
	batch = atomic_read(&mm->tlb_flush_batched);
retry:
	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
		/*
		 * Prevent `pending' from catching up with `flushed' because of
		 * overflow. Reset `pending' and `flushed' to be 1 and 0 if
		 * `pending' becomes large.
		 */
		if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1))
			goto retry;
	} else {
		atomic_inc(&mm->tlb_flush_batched);
	}

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	return arch_tlbbatch_should_defer(mm);
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
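 *
 * For example: reclaim clears a dirty PTE and defers the flush; before the
 * batched flush happens, mprotect() makes the range read-only. Without the
 * flush below, a stale writable TLB entry could still permit writes after
 * the protection change.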
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	int batch = atomic_read(&mm->tlb_flush_batched);
	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;

	if (pending != flushed) {
		flush_tlb_mm(mm);
		/*
		 * If a new TLB flush became pending while we were flushing,
		 * leave mm->tlb_flush_batched as is, to avoid losing that
		 * flush.
		 */
		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
				      unsigned long start, unsigned long end)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

/**
 * page_address_in_vma - The virtual address of a page in this VMA.
 * @folio: The folio containing the page.
 * @page: The page within the folio.
 * @vma: The VMA we need to know the address in.
 *
 * Calculates the user virtual address of this page in the specified VMA.
 * It is the caller's responsibility to check the page is actually
 * within the VMA. There may not currently be a PTE pointing at this
 * page, but if a page fault occurs at this address, this is the page
 * which will be accessed.
 *
 * Context: Caller should hold a reference to the folio. Caller should
 * hold a lock (eg the i_mmap_lock or the mmap_lock) which keeps the
 * VMA from being altered.
 *
 * Return: The virtual address corresponding to this page in the VMA.
 */
unsigned long page_address_in_vma(const struct folio *folio,
		const struct page *page, const struct vm_area_struct *vma)
{
	if (folio_test_anon(folio)) {
		struct anon_vma *anon_vma = folio_anon_vma(folio);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !anon_vma ||
		    vma->anon_vma->root != anon_vma->root)
			return -EFAULT;
	} else if (!vma->vm_file) {
		return -EFAULT;
	} else if (vma->vm_file->f_mapping != folio->mapping) {
		return -EFAULT;
	}

	/* KSM folios don't reach here because of the !anon_vma check */
	return vma_address(vma, page_pgoff(folio, page), 1);
}

/*
 * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
 * NULL if it doesn't exist. No guarantees / checks on what the pmd_t*
 * represents.
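 * (It may be none, a huge/leaf entry, or point to a PTE page table - callers
 * must check for themselves.)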
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
out:
	return pmd;
}

struct folio_referenced_arg {
	int mapcount;
	int referenced;
	vm_flags_t vm_flags;
	struct mem_cgroup *memcg;
};

/*
 * arg: folio_referenced_arg will be passed
 */
static bool folio_referenced_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address, void *arg)
{
	struct folio_referenced_arg *pra = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	int ptes = 0, referenced = 0;
	unsigned int nr;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;
		nr = 1;

		if (vma->vm_flags & VM_LOCKED) {
			ptes++;
			pra->mapcount--;

			/* Only mlock fully mapped pages */
			if (pvmw.pte && ptes != pvmw.nr_pages)
				continue;

			/*
			 * All PTEs must be protected by page table lock in
			 * order to mlock the page.
			 *
			 * If a page table boundary has been crossed, the
			 * current ptl only protects part of the ptes.
			 */
			if (pvmw.flags & PVMW_PGTABLE_CROSSED)
				continue;

			/* Restore the mlock which got missed */
			mlock_vma_folio(folio, vma);
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false;	/* To break the loop */
		}

		/*
		 * Skip the non-shared swapbacked folio mapped solely by
		 * the exiting or OOM-reaped process. This avoids redundant
		 * swap-out followed by an immediate unmap.
		 */
		if ((!atomic_read(&vma->vm_mm->mm_users) ||
		     check_stable_address_space(vma->vm_mm)) &&
		    folio_test_anon(folio) && folio_test_swapbacked(folio) &&
		    !folio_maybe_mapped_shared(folio)) {
			pra->referenced = -1;
			page_vma_mapped_walk_done(&pvmw);
			return false;
		}

		if (pvmw.pte && folio_test_large(folio)) {
			const unsigned long end_addr = pmd_addr_end(address, vma->vm_end);
			const unsigned int max_nr = (end_addr - address) >> PAGE_SHIFT;
			pte_t pteval = ptep_get(pvmw.pte);

			nr = folio_pte_batch(folio, pvmw.pte, pteval, max_nr);
		}

		if (lru_gen_enabled() && pvmw.pte) {
			if (lru_gen_look_around(&pvmw, nr))
				referenced++;
		} else if (pvmw.pte) {
			if (clear_flush_young_ptes_notify(vma, address, pvmw.pte, nr))
				referenced++;
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped folio? */
			WARN_ON_ONCE(1);
		}

		ptes += nr;
		pra->mapcount -= nr;
		/*
		 * If we are sure that we batched the entire folio,
		 * we can just optimize and stop right here.
		 */
		if (ptes == pvmw.nr_pages) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}

		/* Skip the batched PTEs */
		pvmw.pte += nr - 1;
		pvmw.address += (nr - 1) * PAGE_SIZE;
	}

	if (referenced)
		folio_clear_idle(folio);
	if (folio_test_clear_young(folio))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
	}

	if (!pra->mapcount)
		return false;	/* To break the loop */

	return true;
}

static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct folio_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	/*
	 * Ignore references from this mapping if it has no recency. If the
	 * folio has been used in another mapping, we will catch it; if this
	 * other mapping is already gone, the unmap path will have set the
	 * referenced flag or activated the folio in zap_pte_range().
	 */
	if (!vma_has_recency(vma))
		return true;

	/*
	 * If we are reclaiming on behalf of a cgroup, skip counting on behalf
	 * of references from different cgroups.
	 */
	if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * folio_referenced() - Test if the folio was referenced.
 * @folio: The folio to test.
 * @is_locked: Caller holds lock on the folio.
 * @memcg: target memory cgroup
 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
 *
 * Quick test_and_clear_referenced for all mappings of a folio.
 *
 * Return: The number of mappings which referenced the folio. Return -1 if
 * the function bailed out due to rmap lock contention.
 */
int folio_referenced(struct folio *folio, int is_locked,
		     struct mem_cgroup *memcg, vm_flags_t *vm_flags)
{
	bool we_locked = false;
	struct folio_referenced_arg pra = {
		.mapcount = folio_mapcount(folio),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = folio_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = folio_lock_anon_vma_read,
		.try_lock = true,
		.invalid_vma = invalid_folio_referenced_vma,
	};

	VM_WARN_ON_ONCE_FOLIO(folio_is_zone_device(folio), folio);
	*vm_flags = 0;
	if (!pra.mapcount)
		return 0;

	if (!folio_raw_mapping(folio))
		return 0;

	if (!is_locked) {
		we_locked = folio_trylock(folio);
		if (!we_locked)
			return 1;
	}

	rmap_walk(folio, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		folio_unlock(folio);

	return rwc.contended ? -1 : pra.referenced;
}

static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
{
	int cleaned = 0;
	struct vm_area_struct *vma = pvmw->vma;
	struct mmu_notifier_range range;
	unsigned long address = pvmw->address;

	/*
	 * We have to assume the worst case, i.e. pmd for invalidation. Note
	 * that the folio can not be freed from this function.
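	 * The invalidation range passed to the MMU notifier therefore spans
	 * the whole range covered by the walk (vma_address_end(pvmw)) rather
	 * than a single page.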
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0,
				vma->vm_mm, address, vma_address_end(pvmw));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(pvmw)) {
		int ret = 0;

		address = pvmw->address;
		if (pvmw->pte) {
			pte_t *pte = pvmw->pte;
			pte_t entry = ptep_get(pte);

			/*
			 * PFN swap PTEs, such as device-exclusive ones, that
			 * actually map pages are clean and not writable from a
			 * CPU perspective. The MMU notifier takes care of any
			 * device aspects.
			 */
			if (!pte_present(entry))
				continue;
			if (!pte_dirty(entry) && !pte_write(entry))
				continue;

			flush_cache_page(vma, address, pte_pfn(entry));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			pmd_t *pmd = pvmw->pmd;
			pmd_t entry = pmdp_get(pmd);

			/*
			 * Please see the comment above (!pte_present).
			 * A non present PMD is not writable from a CPU
			 * perspective.
			 */
			if (!pmd_present(entry))
				continue;
			if (!pmd_dirty(entry) && !pmd_write(entry))
				continue;

			flush_cache_range(vma, address,
					  address + HPAGE_PMD_SIZE);
			entry = pmdp_invalidate(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped folio? */
			WARN_ON_ONCE(1);
#endif
		}

		if (ret)
			cleaned++;
	}

	mmu_notifier_invalidate_range_end(&range);

	return cleaned;
}

static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
	int *cleaned = arg;

	*cleaned += page_vma_mkclean_one(&pvmw);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int folio_mkclean(struct folio *folio)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!folio_test_locked(folio));

	if (!folio_mapped(folio))
		return 0;

	mapping = folio_mapping(folio);
	if (!mapping)
		return 0;

	rmap_walk(folio, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(folio_mkclean);

struct wrprotect_file_state {
	int cleaned;
	pgoff_t pgoff;
	unsigned long pfn;
	unsigned long nr_pages;
};

static bool mapping_wrprotect_range_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address, void *arg)
{
	struct wrprotect_file_state *state = (struct wrprotect_file_state *)arg;
	struct page_vma_mapped_walk pvmw = {
		.pfn = state->pfn,
		.nr_pages = state->nr_pages,
		.pgoff = state->pgoff,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,
	};

	state->cleaned += page_vma_mkclean_one(&pvmw);

	return true;
}

static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
			     pgoff_t pgoff_start, unsigned long nr_pages,
			     struct rmap_walk_control *rwc, bool locked);

/**
 * mapping_wrprotect_range() - Write-protect all mappings in a specified range.
 *
 * @mapping:	The mapping whose reverse mapping should be traversed.
 * @pgoff:	The page offset at which @pfn is mapped within @mapping.
 * @pfn:	The PFN of the page mapped in @mapping at @pgoff.
 * @nr_pages:	The number of physically contiguous base pages spanned.
 *
 * Traverses the reverse mapping, finding all VMAs which contain a shared
 * mapping of the pages in the specified range in @mapping, and write-protects
 * them (that is, updates the page tables to mark the mappings read-only such
 * that a write protection fault arises when the mappings are written to).
 *
 * The @pfn value need not refer to a folio, but rather can reference a kernel
 * allocation which is mapped into userland. We therefore do not require that
 * the page maps to a folio with a valid mapping or index field, rather the
 * caller specifies these in @mapping and @pgoff.
 *
 * Return: the number of write-protected PTEs, or an error.
 */
int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
		unsigned long pfn, unsigned long nr_pages)
{
	struct wrprotect_file_state state = {
		.cleaned = 0,
		.pgoff = pgoff,
		.pfn = pfn,
		.nr_pages = nr_pages,
	};
	struct rmap_walk_control rwc = {
		.arg = (void *)&state,
		.rmap_one = mapping_wrprotect_range_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	if (!mapping)
		return 0;

	__rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc,
			 /* locked = */false);

	return state.cleaned;
}
EXPORT_SYMBOL_GPL(mapping_wrprotect_range);

/**
 * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of
 *                     [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff)
 *                     within the @vma of shared mappings. And since clean PTEs
 *                     should also be readonly, write protects them too.
 * @pfn: start pfn.
 * @nr_pages: number of physically contiguous pages starting with @pfn.
 * @pgoff: page offset that @pfn is mapped with.
 * @vma: vma that @pfn is mapped within.
 *
 * Returns the number of cleaned PTEs (including PMDs).
 */
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
		      struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn = pfn,
		.nr_pages = nr_pages,
		.pgoff = pgoff,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	if (invalid_mkclean_vma(vma, NULL))
		return 0;

	pvmw.address = vma_address(vma, pgoff, nr_pages);
	VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);

	return page_vma_mkclean_one(&pvmw);
}

static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped)
{
	int idx;

	if (nr) {
		idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
		lruvec_stat_mod_folio(folio, idx, nr);
	}
	if (nr_pmdmapped) {
		if (folio_test_anon(folio)) {
			idx = NR_ANON_THPS;
			lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
		} else {
			/* NR_*_PMDMAPPED are not maintained per-memcg */
			idx = folio_test_swapbacked(folio) ?
				NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED;
			__mod_node_page_state(folio_pgdat(folio), idx,
					      nr_pmdmapped);
		}
	}
}

static __always_inline void __folio_add_rmap(struct folio *folio,
		struct page *page, int nr_pages, struct vm_area_struct *vma,
		enum pgtable_level level)
{
	atomic_t *mapped = &folio->_nr_pages_mapped;
	const int orig_nr_pages = nr_pages;
	int first = 0, nr = 0, nr_pmdmapped = 0;

	__folio_rmap_sanity_checks(folio, page, nr_pages, level);

	switch (level) {
	case PGTABLE_LEVEL_PTE:
		if (!folio_test_large(folio)) {
			nr = atomic_inc_and_test(&folio->_mapcount);
			break;
		}

		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
			nr = folio_add_return_large_mapcount(folio, orig_nr_pages, vma);
			if (nr == orig_nr_pages)
				/* Was completely unmapped. */
				nr = folio_large_nr_pages(folio);
			else
				nr = 0;
			break;
		}

		do {
			first += atomic_inc_and_test(&page->_mapcount);
		} while (page++, --nr_pages > 0);

		if (first &&
		    atomic_add_return_relaxed(first, mapped) < ENTIRELY_MAPPED)
			nr = first;

		folio_add_large_mapcount(folio, orig_nr_pages, vma);
		break;
	case PGTABLE_LEVEL_PMD:
	case PGTABLE_LEVEL_PUD:
		first = atomic_inc_and_test(&folio->_entire_mapcount);
		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
			if (level == PGTABLE_LEVEL_PMD && first)
				nr_pmdmapped = folio_large_nr_pages(folio);
			nr = folio_inc_return_large_mapcount(folio, vma);
			if (nr == 1)
				/* Was completely unmapped. */
				nr = folio_large_nr_pages(folio);
			else
				nr = 0;
			break;
		}

		if (first) {
			nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped);
			if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) {
				nr_pages = folio_large_nr_pages(folio);
				/*
				 * We only track PMD mappings of PMD-sized
				 * folios separately.
				 */
				if (level == PGTABLE_LEVEL_PMD)
					nr_pmdmapped = nr_pages;
				nr = nr_pages - (nr & FOLIO_PAGES_MAPPED);
				/* Raced ahead of a remove and another add? */
				if (unlikely(nr < 0))
					nr = 0;
			} else {
				/* Raced ahead of a remove of ENTIRELY_MAPPED */
				nr = 0;
			}
		}
		folio_inc_large_mapcount(folio, vma);
		break;
	default:
		BUILD_BUG();
	}
	__folio_mod_stat(folio, nr, nr_pmdmapped);
}

/**
 * folio_move_anon_rmap - move a folio to our anon_vma
 * @folio:	The folio to move to our anon_vma
 * @vma:	The vma the folio belongs to
 *
 * When a folio belongs exclusively to one process after a COW event,
 * that folio can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling processes.
 */
void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma)
{
	void *anon_vma = vma->anon_vma;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma += FOLIO_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the FOLIO_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg folio_referenced()'s
	 * folio_test_anon()) will not see one without the other.
	 */
	WRITE_ONCE(folio->mapping, anon_vma);
}

/**
 * __folio_set_anon - set up a new anonymous rmap for a folio
 * @folio:	The folio to set up the new anonymous rmap for.
 * @vma:	VM area to add the folio to.
 * @address:	User virtual address of the mapping
 * @exclusive:	Whether the folio is exclusive to the process.
 */
static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
			     unsigned long address, bool exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	/*
	 * If the folio isn't exclusive to this vma, we must use the _oldest_
	 * possible anon_vma for the folio mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	/*
	 * page_idle does a lockless/optimistic rmap scan on folio->mapping.
	 * Make sure the compiler doesn't split the stores of anon_vma and
	 * the FOLIO_MAPPING_ANON type identifier, otherwise the rmap code
	 * could mistake the mapping for a struct address_space and crash.
	 */
	anon_vma = (void *) anon_vma + FOLIO_MAPPING_ANON;
	WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
	folio->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @folio:	The folio containing @page.
 * @page:	the page to check the mapping of
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(const struct folio *folio,
		const struct page *page, struct vm_area_struct *vma,
		unsigned long address)
{
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against folio_add_anon_rmap_*() because the caller
	 * always holds the page locked.
	 *
	 * We have exclusion against folio_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to folio_add_new_anon_rmap.
	 */
	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
			folio);
	VM_BUG_ON_PAGE(page_pgoff(folio, page) != linear_page_index(vma, address),
		       page);
}

static __always_inline void __folio_add_anon_rmap(struct folio *folio,
		struct page *page, int nr_pages, struct vm_area_struct *vma,
		unsigned long address, rmap_t flags, enum pgtable_level level)
{
	int i;

	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);

	__folio_add_rmap(folio, page, nr_pages, vma, level);

	if (likely(!folio_test_ksm(folio)))
		__page_check_anon_rmap(folio, page, vma, address);

	if (flags & RMAP_EXCLUSIVE) {
		switch (level) {
		case PGTABLE_LEVEL_PTE:
			for (i = 0; i < nr_pages; i++)
				SetPageAnonExclusive(page + i);
			break;
		case PGTABLE_LEVEL_PMD:
			SetPageAnonExclusive(page);
			break;
		case PGTABLE_LEVEL_PUD:
			/*
			 * Keep the compiler happy, we don't support anonymous
			 * PUD mappings.
			 */
			WARN_ON_ONCE(1);
			break;
		default:
			BUILD_BUG();
		}
	}

	VM_WARN_ON_FOLIO(!folio_test_large(folio) && PageAnonExclusive(page) &&
			 atomic_read(&folio->_mapcount) > 0, folio);
	for (i = 0; i < nr_pages; i++) {
		struct page *cur_page = page + i;

		VM_WARN_ON_FOLIO(folio_test_large(folio) &&
				 folio_entire_mapcount(folio) > 1 &&
				 PageAnonExclusive(cur_page), folio);
		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
			continue;

		/*
		 * While PTE-mapping a THP we have a PMD and a PTE
		 * mapping.
		 */
		VM_WARN_ON_FOLIO(atomic_read(&cur_page->_mapcount) > 0 &&
				 PageAnonExclusive(cur_page), folio);
	}

	/*
	 * Only mlock it if the folio is fully mapped to the VMA.
	 *
	 * Partially mapped folios can be split on reclaim and part outside
	 * of mlocked VMA can be evicted or freed.
	 */
	if (folio_nr_pages(folio) == nr_pages)
		mlock_vma_folio(folio, vma);
}

/**
 * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio
 * @folio:	The folio to add the mappings to
 * @page:	The first page to add
 * @nr_pages:	The number of pages which will be mapped
 * @vma:	The vm area in which the mappings are added
 * @address:	The user virtual address of the first page to map
 * @flags:	The rmap flags
 *
 * The page range of folio is defined by [first_page, first_page + nr_pages)
 *
 * The caller needs to hold the page table lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that an anon folio is not being upgraded racily to a KSM folio
 * (but KSM folios are never downgraded).
 */
void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page,
		int nr_pages, struct vm_area_struct *vma, unsigned long address,
		rmap_t flags)
{
	__folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags,
			      PGTABLE_LEVEL_PTE);
}

/**
 * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio
 * @folio:	The folio to add the mapping to
 * @page:	The first page to add
 * @vma:	The vm area in which the mapping is added
 * @address:	The user virtual address of the first page to map
 * @flags:	The rmap flags
 *
 * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR)
 *
 * The caller needs to hold the page table lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting.
 */
void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page,
		struct vm_area_struct *vma, unsigned long address, rmap_t flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	__folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags,
			      PGTABLE_LEVEL_PMD);
#else
	WARN_ON_ONCE(true);
#endif
}

/**
 * folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
 * @folio:	The folio to add the mapping to.
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @flags:	The rmap flags
 *
 * Like folio_add_anon_rmap_*() but must only be called on *new* folios.
 * This means the inc-and-test can be bypassed.
 * The folio doesn't necessarily need to be locked while it's exclusive
 * unless two threads map it concurrently. However, the folio must be
 * locked if it's shared.
 *
 * If the folio is pmd-mappable, it is accounted as a THP.
 */
void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
		unsigned long address, rmap_t flags)
{
	const bool exclusive = flags & RMAP_EXCLUSIVE;
	int nr = 1, nr_pmdmapped = 0;

	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
	VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio);

	/*
	 * VM_DROPPABLE mappings don't swap; instead they're just dropped when
	 * under memory pressure.
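	 * (Hence such folios are not marked swap-backed below.)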
	 */
	if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE))
		__folio_set_swapbacked(folio);
	__folio_set_anon(folio, vma, address, exclusive);

	if (likely(!folio_test_large(folio))) {
		/* increment count (starts at -1) */
		atomic_set(&folio->_mapcount, 0);
		if (exclusive)
			SetPageAnonExclusive(&folio->page);
	} else if (!folio_test_pmd_mappable(folio)) {
		int i;

		nr = folio_large_nr_pages(folio);
		for (i = 0; i < nr; i++) {
			struct page *page = folio_page(folio, i);

			if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
				/* increment count (starts at -1) */
				atomic_set(&page->_mapcount, 0);
			if (exclusive)
				SetPageAnonExclusive(page);
		}

		folio_set_large_mapcount(folio, nr, vma);
		if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
			atomic_set(&folio->_nr_pages_mapped, nr);
	} else {
		nr = folio_large_nr_pages(folio);
		/* increment count (starts at -1) */
		atomic_set(&folio->_entire_mapcount, 0);
		folio_set_large_mapcount(folio, 1, vma);
		if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
			atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED);
		if (exclusive)
			SetPageAnonExclusive(&folio->page);
		nr_pmdmapped = nr;
	}

	VM_WARN_ON_ONCE(address < vma->vm_start ||
			address + (nr << PAGE_SHIFT) > vma->vm_end);

	__folio_mod_stat(folio, nr, nr_pmdmapped);
	mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
}

static __always_inline void __folio_add_file_rmap(struct folio *folio,
		struct page *page, int nr_pages, struct vm_area_struct *vma,
		enum pgtable_level level)
{
	VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);

	__folio_add_rmap(folio, page, nr_pages, vma, level);

	/*
	 * Only mlock it if the folio is fully mapped to the VMA.
	 *
	 * Partially mapped folios can be split on reclaim and part outside
	 * of mlocked VMA can be evicted or freed.
	 */
	if (folio_nr_pages(folio) == nr_pages)
		mlock_vma_folio(folio, vma);
}

/**
 * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio
 * @folio:	The folio to add the mappings to
 * @page:	The first page to add
 * @nr_pages:	The number of pages that will be mapped using PTEs
 * @vma:	The vm area in which the mappings are added
 *
 * The page range of the folio is defined by [page, page + nr_pages)
 *
 * The caller needs to hold the page table lock.
 */
void folio_add_file_rmap_ptes(struct folio *folio, struct page *page,
		int nr_pages, struct vm_area_struct *vma)
{
	__folio_add_file_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE);
}

/**
 * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio
 * @folio:	The folio to add the mapping to
 * @page:	The first page to add
 * @vma:	The vm area in which the mapping is added
 *
 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
 *
 * The caller needs to hold the page table lock.
 */
void folio_add_file_rmap_pmd(struct folio *folio, struct page *page,
		struct vm_area_struct *vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	__folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD);
#else
	WARN_ON_ONCE(true);
#endif
}

/**
 * folio_add_file_rmap_pud - add a PUD mapping to a page range of a folio
 * @folio:	The folio to add the mapping to
 * @page:	The first page to add
 * @vma:	The vm area in which the mapping is added
 *
 * The page range of the folio is defined by [page, page + HPAGE_PUD_NR)
 *
 * The caller needs to hold the page table lock.
 */
void folio_add_file_rmap_pud(struct folio *folio, struct page *page,
		struct vm_area_struct *vma)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
	__folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD);
#else
	WARN_ON_ONCE(true);
#endif
}

static __always_inline void __folio_remove_rmap(struct folio *folio,
		struct page *page, int nr_pages, struct vm_area_struct *vma,
		enum pgtable_level level)
{
	atomic_t *mapped = &folio->_nr_pages_mapped;
	int last = 0, nr = 0, nr_pmdmapped = 0;
	bool partially_mapped = false;

	__folio_rmap_sanity_checks(folio, page, nr_pages, level);

	switch (level) {
	case PGTABLE_LEVEL_PTE:
		if (!folio_test_large(folio)) {
			nr = atomic_add_negative(-1, &folio->_mapcount);
			break;
		}

		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
			nr = folio_sub_return_large_mapcount(folio, nr_pages, vma);
			if (!nr) {
				/* Now completely unmapped. */
				nr = folio_large_nr_pages(folio);
			} else {
				partially_mapped = nr < folio_large_nr_pages(folio) &&
						   !folio_entire_mapcount(folio);
				nr = 0;
			}
			break;
		}

		folio_sub_large_mapcount(folio, nr_pages, vma);
		do {
			last += atomic_add_negative(-1, &page->_mapcount);
		} while (page++, --nr_pages > 0);

		if (last &&
		    atomic_sub_return_relaxed(last, mapped) < ENTIRELY_MAPPED)
			nr = last;

		partially_mapped = nr && atomic_read(mapped);
		break;
	case PGTABLE_LEVEL_PMD:
	case PGTABLE_LEVEL_PUD:
		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
			last = atomic_add_negative(-1, &folio->_entire_mapcount);
			if (level == PGTABLE_LEVEL_PMD && last)
				nr_pmdmapped = folio_large_nr_pages(folio);
			nr = folio_dec_return_large_mapcount(folio, vma);
			if (!nr) {
				/* Now completely unmapped. */
				nr = folio_large_nr_pages(folio);
			} else {
				partially_mapped = last &&
						   nr < folio_large_nr_pages(folio);
				nr = 0;
			}
			break;
		}

		folio_dec_large_mapcount(folio, vma);
		last = atomic_add_negative(-1, &folio->_entire_mapcount);
		if (last) {
			nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped);
			if (likely(nr < ENTIRELY_MAPPED)) {
				nr_pages = folio_large_nr_pages(folio);
				if (level == PGTABLE_LEVEL_PMD)
					nr_pmdmapped = nr_pages;
				nr = nr_pages - nr;
				/* Raced ahead of another remove and an add? */
*/ 1840 if (unlikely(nr < 0)) 1841 nr = 0; 1842 } else { 1843 /* An add of ENTIRELY_MAPPED raced ahead */ 1844 nr = 0; 1845 } 1846 } 1847 1848 partially_mapped = nr && nr < nr_pmdmapped; 1849 break; 1850 default: 1851 BUILD_BUG(); 1852 } 1853 1854 /* 1855 * Queue anon large folio for deferred split if at least one page of 1856 * the folio is unmapped and at least one page is still mapped. 1857 * 1858 * Check partially_mapped first to ensure it is a large folio. 1859 * 1860 * Device private folios do not support deferred splitting and 1861 * shrinker based scanning of the folios to free. 1862 */ 1863 if (partially_mapped && folio_test_anon(folio) && 1864 !folio_test_partially_mapped(folio) && 1865 !folio_is_device_private(folio)) 1866 deferred_split_folio(folio, true); 1867 1868 __folio_mod_stat(folio, -nr, -nr_pmdmapped); 1869 1870 /* 1871 * It would be tidy to reset folio_test_anon mapping when fully 1872 * unmapped, but that might overwrite a racing folio_add_anon_rmap_*() 1873 * which increments mapcount after us but sets mapping before us: 1874 * so leave the reset to free_pages_prepare, and remember that 1875 * it's only reliable while mapped. 1876 */ 1877 1878 munlock_vma_folio(folio, vma); 1879 } 1880 1881 /** 1882 * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio 1883 * @folio: The folio to remove the mappings from 1884 * @page: The first page to remove 1885 * @nr_pages: The number of pages that will be removed from the mapping 1886 * @vma: The vm area from which the mappings are removed 1887 * 1888 * The page range of the folio is defined by [page, page + nr_pages) 1889 * 1890 * The caller needs to hold the page table lock. 1891 */ 1892 void folio_remove_rmap_ptes(struct folio *folio, struct page *page, 1893 int nr_pages, struct vm_area_struct *vma) 1894 { 1895 __folio_remove_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE); 1896 } 1897 1898 /** 1899 * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio 1900 * @folio: The folio to remove the mapping from 1901 * @page: The first page to remove 1902 * @vma: The vm area from which the mapping is removed 1903 * 1904 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) 1905 * 1906 * The caller needs to hold the page table lock. 1907 */ 1908 void folio_remove_rmap_pmd(struct folio *folio, struct page *page, 1909 struct vm_area_struct *vma) 1910 { 1911 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1912 __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD); 1913 #else 1914 WARN_ON_ONCE(true); 1915 #endif 1916 } 1917 1918 /** 1919 * folio_remove_rmap_pud - remove a PUD mapping from a page range of a folio 1920 * @folio: The folio to remove the mapping from 1921 * @page: The first page to remove 1922 * @vma: The vm area from which the mapping is removed 1923 * 1924 * The page range of the folio is defined by [page, page + HPAGE_PUD_NR) 1925 * 1926 * The caller needs to hold the page table lock. 
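 *
 * An illustrative sketch (not a caller in this file, placeholder locals)
 * of tearing down such a mapping while still holding the page table lock:
 *
 *	pudp_huge_clear_flush(vma, addr, pud);
 *	folio_remove_rmap_pud(folio, page, vma);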
1927 */ 1928 void folio_remove_rmap_pud(struct folio *folio, struct page *page, 1929 struct vm_area_struct *vma) 1930 { 1931 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ 1932 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 1933 __folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD); 1934 #else 1935 WARN_ON_ONCE(true); 1936 #endif 1937 } 1938 1939 static inline unsigned int folio_unmap_pte_batch(struct folio *folio, 1940 struct page_vma_mapped_walk *pvmw, 1941 enum ttu_flags flags, pte_t pte) 1942 { 1943 unsigned long end_addr, addr = pvmw->address; 1944 struct vm_area_struct *vma = pvmw->vma; 1945 unsigned int max_nr; 1946 1947 if (flags & TTU_HWPOISON) 1948 return 1; 1949 if (!folio_test_large(folio)) 1950 return 1; 1951 1952 /* We may only batch within a single VMA and a single page table. */ 1953 end_addr = pmd_addr_end(addr, vma->vm_end); 1954 max_nr = (end_addr - addr) >> PAGE_SHIFT; 1955 1956 /* We only support lazyfree or file folios batching for now ... */ 1957 if (folio_test_anon(folio) && folio_test_swapbacked(folio)) 1958 return 1; 1959 1960 if (pte_unused(pte)) 1961 return 1; 1962 1963 if (userfaultfd_wp(vma)) 1964 return 1; 1965 1966 /* 1967 * If unmap fails, we need to restore the ptes. To avoid accidentally 1968 * upgrading write permissions for ptes that were not originally 1969 * writable, and to avoid losing the soft-dirty bit, use the 1970 * appropriate FPB flags. 1971 */ 1972 return folio_pte_batch_flags(folio, vma, pvmw->pte, &pte, max_nr, 1973 FPB_RESPECT_WRITE | FPB_RESPECT_SOFT_DIRTY); 1974 } 1975 1976 /* 1977 * @arg: enum ttu_flags will be passed to this argument 1978 */ 1979 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, 1980 unsigned long address, void *arg) 1981 { 1982 struct mm_struct *mm = vma->vm_mm; 1983 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 1984 bool anon_exclusive, ret = true; 1985 pte_t pteval; 1986 struct page *subpage; 1987 struct mmu_notifier_range range; 1988 enum ttu_flags flags = (enum ttu_flags)(long)arg; 1989 unsigned long nr_pages = 1, end_addr; 1990 unsigned long pfn; 1991 unsigned long hsz = 0; 1992 int ptes = 0; 1993 1994 /* 1995 * When racing against e.g. zap_pte_range() on another cpu, 1996 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(), 1997 * try_to_unmap() may return before page_mapped() has become false, 1998 * if page table locking is skipped: use TTU_SYNC to wait for that. 1999 */ 2000 if (flags & TTU_SYNC) 2001 pvmw.flags = PVMW_SYNC; 2002 2003 /* 2004 * For THP, we have to assume the worse case ie pmd for invalidation. 2005 * For hugetlb, it could be much worse if we need to do pud 2006 * invalidation in the case of pmd sharing. 2007 * 2008 * Note that the folio can not be freed in this function as call of 2009 * try_to_unmap() must hold a reference on the folio. 2010 */ 2011 range.end = vma_address_end(&pvmw); 2012 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 2013 address, range.end); 2014 if (folio_test_hugetlb(folio)) { 2015 /* 2016 * If sharing is possible, start and end will be adjusted 2017 * accordingly. 2018 */ 2019 adjust_range_if_pmd_sharing_possible(vma, &range.start, 2020 &range.end); 2021 2022 /* We need the huge page size for set_huge_pte_at() */ 2023 hsz = huge_page_size(hstate_vma(vma)); 2024 } 2025 mmu_notifier_invalidate_range_start(&range); 2026 2027 while (page_vma_mapped_walk(&pvmw)) { 2028 /* 2029 * If the folio is in an mlock()d vma, we must not swap it out. 
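		 * Instead of unmapping it, the code below counts the PTEs seen
		 * so far and, once the whole folio is known to be mapped under
		 * the current page table lock, restores the missed mlock
		 * before stopping the walk.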
2030 */
2031 if (!(flags & TTU_IGNORE_MLOCK) &&
2032 (vma->vm_flags & VM_LOCKED)) {
2033 ptes++;
2034 
2035 /*
2036 * Set 'ret' to indicate the page cannot be unmapped.
2037 *
2038 * Do not jump to walk_abort immediately as additional
2039 * iterations might be required to detect a fully mapped
2040 * folio and mlock it.
2041 */
2042 ret = false;
2043 
2044 /* Only mlock fully mapped pages */
2045 if (pvmw.pte && ptes != pvmw.nr_pages)
2046 continue;
2047 
2048 /*
2049 * All PTEs must be protected by page table lock in
2050 * order to mlock the page.
2051 *
2052 * If a page table boundary has been crossed, the current
2053 * ptl only protects part of the ptes.
2054 */
2055 if (pvmw.flags & PVMW_PGTABLE_CROSSED)
2056 goto walk_done;
2057 
2058 /* Restore the mlock which got missed */
2059 mlock_vma_folio(folio, vma);
2060 goto walk_done;
2061 }
2062 
2063 if (!pvmw.pte) {
2064 if (folio_test_lazyfree(folio)) {
2065 if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio))
2066 goto walk_done;
2067 /*
2068 * unmap_huge_pmd_locked has either already marked
2069 * the folio as swap-backed or decided to retain it
2070 * due to GUP or speculative references.
2071 */
2072 goto walk_abort;
2073 }
2074 
2075 if (flags & TTU_SPLIT_HUGE_PMD) {
2076 /*
2077 * We temporarily have to drop the PTL and
2078 * restart so we can process the PTE-mapped THP.
2079 */
2080 split_huge_pmd_locked(vma, pvmw.address,
2081 pvmw.pmd, false);
2082 flags &= ~TTU_SPLIT_HUGE_PMD;
2083 page_vma_mapped_walk_restart(&pvmw);
2084 continue;
2085 }
2086 }
2087 
2088 /* Unexpected PMD-mapped THP? */
2089 VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2090 
2091 /*
2092 * Handle PFN swap PTEs, such as device-exclusive ones, that
2093 * actually map pages.
2094 */
2095 pteval = ptep_get(pvmw.pte);
2096 if (likely(pte_present(pteval))) {
2097 pfn = pte_pfn(pteval);
2098 } else {
2099 const softleaf_t entry = softleaf_from_pte(pteval);
2100 
2101 pfn = softleaf_to_pfn(entry);
2102 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
2103 }
2104 
2105 subpage = folio_page(folio, pfn - folio_pfn(folio));
2106 address = pvmw.address;
2107 anon_exclusive = folio_test_anon(folio) &&
2108 PageAnonExclusive(subpage);
2109 
2110 if (folio_test_hugetlb(folio)) {
2111 bool anon = folio_test_anon(folio);
2112 
2113 /*
2114 * try_to_unmap() is only passed a hugetlb page
2115 * in the case where the hugetlb page is poisoned.
2116 */
2117 VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage);
2118 /*
2119 * huge_pmd_unshare may unmap an entire PMD page.
2120 * There is no way of knowing exactly which PMDs may
2121 * be cached for this mm, so we must flush them all.
2122 * start/end were already adjusted above to cover this
2123 * range.
2124 */
2125 flush_cache_range(vma, range.start, range.end);
2126 
2127 /*
2128 * To call huge_pmd_unshare, i_mmap_rwsem must be
2129 * held in write mode. Caller needs to explicitly
2130 * do this outside rmap routines.
2131 *
2132 * We also must hold hugetlb vma_lock in write mode.
2133 * Lock order dictates acquiring vma_lock BEFORE
2134 * i_mmap_rwsem. We can only try lock here and fail
2135 * if unsuccessful.
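			 * (i_mmap_rwsem is already held by our caller, see the
			 * TTU_RMAP_LOCKED check below, so the vma_lock can only
			 * be taken opportunistically here.)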
2136 */ 2137 if (!anon) { 2138 struct mmu_gather tlb; 2139 2140 VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 2141 if (!hugetlb_vma_trylock_write(vma)) 2142 goto walk_abort; 2143 2144 tlb_gather_mmu_vma(&tlb, vma); 2145 if (huge_pmd_unshare(&tlb, vma, address, pvmw.pte)) { 2146 hugetlb_vma_unlock_write(vma); 2147 huge_pmd_unshare_flush(&tlb, vma); 2148 tlb_finish_mmu(&tlb); 2149 /* 2150 * The PMD table was unmapped, 2151 * consequently unmapping the folio. 2152 */ 2153 goto walk_done; 2154 } 2155 hugetlb_vma_unlock_write(vma); 2156 tlb_finish_mmu(&tlb); 2157 } 2158 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); 2159 if (pte_dirty(pteval)) 2160 folio_mark_dirty(folio); 2161 } else if (likely(pte_present(pteval))) { 2162 nr_pages = folio_unmap_pte_batch(folio, &pvmw, flags, pteval); 2163 end_addr = address + nr_pages * PAGE_SIZE; 2164 flush_cache_range(vma, address, end_addr); 2165 2166 /* Nuke the page table entry. */ 2167 pteval = get_and_clear_ptes(mm, address, pvmw.pte, nr_pages); 2168 /* 2169 * We clear the PTE but do not flush so potentially 2170 * a remote CPU could still be writing to the folio. 2171 * If the entry was previously clean then the 2172 * architecture must guarantee that a clear->dirty 2173 * transition on a cached TLB entry is written through 2174 * and traps if the PTE is unmapped. 2175 */ 2176 if (should_defer_flush(mm, flags)) 2177 set_tlb_ubc_flush_pending(mm, pteval, address, end_addr); 2178 else 2179 flush_tlb_range(vma, address, end_addr); 2180 if (pte_dirty(pteval)) 2181 folio_mark_dirty(folio); 2182 } else { 2183 pte_clear(mm, address, pvmw.pte); 2184 } 2185 2186 /* 2187 * Now the pte is cleared. If this pte was uffd-wp armed, 2188 * we may want to replace a none pte with a marker pte if 2189 * it's file-backed, so we don't lose the tracking info. 2190 */ 2191 pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); 2192 2193 /* Update high watermark before we lower rss */ 2194 update_hiwater_rss(mm); 2195 2196 if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) { 2197 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 2198 if (folio_test_hugetlb(folio)) { 2199 hugetlb_count_sub(folio_nr_pages(folio), mm); 2200 set_huge_pte_at(mm, address, pvmw.pte, pteval, 2201 hsz); 2202 } else { 2203 dec_mm_counter(mm, mm_counter(folio)); 2204 set_pte_at(mm, address, pvmw.pte, pteval); 2205 } 2206 } else if (likely(pte_present(pteval)) && pte_unused(pteval) && 2207 !userfaultfd_armed(vma)) { 2208 /* 2209 * The guest indicated that the page content is of no 2210 * interest anymore. Simply discard the pte, vmscan 2211 * will take care of the rest. 2212 * A future reference will then fault in a new zero 2213 * page. When userfaultfd is active, we must not drop 2214 * this page though, as its main user (postcopy 2215 * migration) will not expect userfaults on already 2216 * copied pages. 2217 */ 2218 dec_mm_counter(mm, mm_counter(folio)); 2219 } else if (folio_test_anon(folio)) { 2220 swp_entry_t entry = page_swap_entry(subpage); 2221 pte_t swp_pte; 2222 /* 2223 * Store the swap location in the pte. 2224 * See handle_pte_fault() ... 
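			 * do_swap_page() will use the swap entry stored here to
			 * locate the data again on the next fault.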
2225 */ 2226 if (unlikely(folio_test_swapbacked(folio) != 2227 folio_test_swapcache(folio))) { 2228 WARN_ON_ONCE(1); 2229 goto walk_abort; 2230 } 2231 2232 /* MADV_FREE page check */ 2233 if (!folio_test_swapbacked(folio)) { 2234 int ref_count, map_count; 2235 2236 /* 2237 * Synchronize with gup_pte_range(): 2238 * - clear PTE; barrier; read refcount 2239 * - inc refcount; barrier; read PTE 2240 */ 2241 smp_mb(); 2242 2243 ref_count = folio_ref_count(folio); 2244 map_count = folio_mapcount(folio); 2245 2246 /* 2247 * Order reads for page refcount and dirty flag 2248 * (see comments in __remove_mapping()). 2249 */ 2250 smp_rmb(); 2251 2252 if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { 2253 /* 2254 * redirtied either using the page table or a previously 2255 * obtained GUP reference. 2256 */ 2257 set_ptes(mm, address, pvmw.pte, pteval, nr_pages); 2258 folio_set_swapbacked(folio); 2259 goto walk_abort; 2260 } else if (ref_count != 1 + map_count) { 2261 /* 2262 * Additional reference. Could be a GUP reference or any 2263 * speculative reference. GUP users must mark the folio 2264 * dirty if there was a modification. This folio cannot be 2265 * reclaimed right now either way, so act just like nothing 2266 * happened. 2267 * We'll come back here later and detect if the folio was 2268 * dirtied when the additional reference is gone. 2269 */ 2270 set_ptes(mm, address, pvmw.pte, pteval, nr_pages); 2271 goto walk_abort; 2272 } 2273 add_mm_counter(mm, MM_ANONPAGES, -nr_pages); 2274 goto discard; 2275 } 2276 2277 if (folio_dup_swap(folio, subpage) < 0) { 2278 set_pte_at(mm, address, pvmw.pte, pteval); 2279 goto walk_abort; 2280 } 2281 2282 /* 2283 * arch_unmap_one() is expected to be a NOP on 2284 * architectures where we could have PFN swap PTEs, 2285 * so we'll not check/care. 2286 */ 2287 if (arch_unmap_one(mm, vma, address, pteval) < 0) { 2288 folio_put_swap(folio, subpage); 2289 set_pte_at(mm, address, pvmw.pte, pteval); 2290 goto walk_abort; 2291 } 2292 2293 /* See folio_try_share_anon_rmap(): clear PTE first. */ 2294 if (anon_exclusive && 2295 folio_try_share_anon_rmap_pte(folio, subpage)) { 2296 folio_put_swap(folio, subpage); 2297 set_pte_at(mm, address, pvmw.pte, pteval); 2298 goto walk_abort; 2299 } 2300 if (list_empty(&mm->mmlist)) { 2301 spin_lock(&mmlist_lock); 2302 if (list_empty(&mm->mmlist)) 2303 list_add(&mm->mmlist, &init_mm.mmlist); 2304 spin_unlock(&mmlist_lock); 2305 } 2306 dec_mm_counter(mm, MM_ANONPAGES); 2307 inc_mm_counter(mm, MM_SWAPENTS); 2308 swp_pte = swp_entry_to_pte(entry); 2309 if (anon_exclusive) 2310 swp_pte = pte_swp_mkexclusive(swp_pte); 2311 if (likely(pte_present(pteval))) { 2312 if (pte_soft_dirty(pteval)) 2313 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2314 if (pte_uffd_wp(pteval)) 2315 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2316 } else { 2317 if (pte_swp_soft_dirty(pteval)) 2318 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2319 if (pte_swp_uffd_wp(pteval)) 2320 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2321 } 2322 set_pte_at(mm, address, pvmw.pte, swp_pte); 2323 } else { 2324 /* 2325 * This is a locked file-backed folio, 2326 * so it cannot be removed from the page 2327 * cache and replaced by a new folio before 2328 * mmu_notifier_invalidate_range_end, so no 2329 * concurrent thread might update its page table 2330 * to point at a new folio while a device is 2331 * still using this folio. 
2332 * 2333 * See Documentation/mm/mmu_notifier.rst 2334 */ 2335 add_mm_counter(mm, mm_counter_file(folio), -nr_pages); 2336 } 2337 discard: 2338 if (unlikely(folio_test_hugetlb(folio))) { 2339 hugetlb_remove_rmap(folio); 2340 } else { 2341 folio_remove_rmap_ptes(folio, subpage, nr_pages, vma); 2342 } 2343 if (vma->vm_flags & VM_LOCKED) 2344 mlock_drain_local(); 2345 folio_put_refs(folio, nr_pages); 2346 2347 /* 2348 * If we are sure that we batched the entire folio and cleared 2349 * all PTEs, we can just optimize and stop right here. 2350 */ 2351 if (nr_pages == folio_nr_pages(folio)) 2352 goto walk_done; 2353 continue; 2354 walk_abort: 2355 ret = false; 2356 walk_done: 2357 page_vma_mapped_walk_done(&pvmw); 2358 break; 2359 } 2360 2361 mmu_notifier_invalidate_range_end(&range); 2362 2363 return ret; 2364 } 2365 2366 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) 2367 { 2368 return vma_is_temporary_stack(vma); 2369 } 2370 2371 static int folio_not_mapped(struct folio *folio) 2372 { 2373 return !folio_mapped(folio); 2374 } 2375 2376 /** 2377 * try_to_unmap - Try to remove all page table mappings to a folio. 2378 * @folio: The folio to unmap. 2379 * @flags: action and flags 2380 * 2381 * Tries to remove all the page table entries which are mapping this 2382 * folio. It is the caller's responsibility to check if the folio is 2383 * still mapped if needed (use TTU_SYNC to prevent accounting races). 2384 * 2385 * Context: Caller must hold the folio lock. 2386 */ 2387 void try_to_unmap(struct folio *folio, enum ttu_flags flags) 2388 { 2389 struct rmap_walk_control rwc = { 2390 .rmap_one = try_to_unmap_one, 2391 .arg = (void *)flags, 2392 .done = folio_not_mapped, 2393 .anon_lock = folio_lock_anon_vma_read, 2394 }; 2395 2396 if (flags & TTU_RMAP_LOCKED) 2397 rmap_walk_locked(folio, &rwc); 2398 else 2399 rmap_walk(folio, &rwc); 2400 } 2401 2402 /* 2403 * @arg: enum ttu_flags will be passed to this argument. 2404 * 2405 * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs 2406 * containing migration entries. 2407 */ 2408 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, 2409 unsigned long address, void *arg) 2410 { 2411 struct mm_struct *mm = vma->vm_mm; 2412 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 2413 bool anon_exclusive, writable, ret = true; 2414 pte_t pteval; 2415 struct page *subpage; 2416 struct mmu_notifier_range range; 2417 enum ttu_flags flags = (enum ttu_flags)(long)arg; 2418 unsigned long pfn; 2419 unsigned long hsz = 0; 2420 2421 /* 2422 * When racing against e.g. zap_pte_range() on another cpu, 2423 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(), 2424 * try_to_migrate() may return before page_mapped() has become false, 2425 * if page table locking is skipped: use TTU_SYNC to wait for that. 2426 */ 2427 if (flags & TTU_SYNC) 2428 pvmw.flags = PVMW_SYNC; 2429 2430 /* 2431 * For THP, we have to assume the worse case ie pmd for invalidation. 2432 * For hugetlb, it could be much worse if we need to do pud 2433 * invalidation in the case of pmd sharing. 2434 * 2435 * Note that the page can not be free in this function as call of 2436 * try_to_unmap() must hold a reference on the page. 2437 */ 2438 range.end = vma_address_end(&pvmw); 2439 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 2440 address, range.end); 2441 if (folio_test_hugetlb(folio)) { 2442 /* 2443 * If sharing is possible, start and end will be adjusted 2444 * accordingly. 
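		 * (With PMD sharing the affected range can span a whole
		 * PUD_SIZE area, so the notifier range is widened to cover it.)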
2445 */ 2446 adjust_range_if_pmd_sharing_possible(vma, &range.start, 2447 &range.end); 2448 2449 /* We need the huge page size for set_huge_pte_at() */ 2450 hsz = huge_page_size(hstate_vma(vma)); 2451 } 2452 mmu_notifier_invalidate_range_start(&range); 2453 2454 while (page_vma_mapped_walk(&pvmw)) { 2455 /* PMD-mapped THP migration entry */ 2456 if (!pvmw.pte) { 2457 __maybe_unused unsigned long pfn; 2458 __maybe_unused pmd_t pmdval; 2459 2460 if (flags & TTU_SPLIT_HUGE_PMD) { 2461 /* 2462 * split_huge_pmd_locked() might leave the 2463 * folio mapped through PTEs. Retry the walk 2464 * so we can detect this scenario and properly 2465 * abort the walk. 2466 */ 2467 split_huge_pmd_locked(vma, pvmw.address, 2468 pvmw.pmd, true); 2469 flags &= ~TTU_SPLIT_HUGE_PMD; 2470 page_vma_mapped_walk_restart(&pvmw); 2471 continue; 2472 } 2473 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 2474 pmdval = pmdp_get(pvmw.pmd); 2475 if (likely(pmd_present(pmdval))) 2476 pfn = pmd_pfn(pmdval); 2477 else 2478 pfn = softleaf_to_pfn(softleaf_from_pmd(pmdval)); 2479 2480 subpage = folio_page(folio, pfn - folio_pfn(folio)); 2481 2482 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || 2483 !folio_test_pmd_mappable(folio), folio); 2484 2485 if (set_pmd_migration_entry(&pvmw, subpage)) { 2486 ret = false; 2487 page_vma_mapped_walk_done(&pvmw); 2488 break; 2489 } 2490 continue; 2491 #endif 2492 } 2493 2494 /* Unexpected PMD-mapped THP? */ 2495 VM_BUG_ON_FOLIO(!pvmw.pte, folio); 2496 2497 /* 2498 * Handle PFN swap PTEs, such as device-exclusive ones, that 2499 * actually map pages. 2500 */ 2501 pteval = ptep_get(pvmw.pte); 2502 if (likely(pte_present(pteval))) { 2503 pfn = pte_pfn(pteval); 2504 } else { 2505 const softleaf_t entry = softleaf_from_pte(pteval); 2506 2507 pfn = softleaf_to_pfn(entry); 2508 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); 2509 } 2510 2511 subpage = folio_page(folio, pfn - folio_pfn(folio)); 2512 address = pvmw.address; 2513 anon_exclusive = folio_test_anon(folio) && 2514 PageAnonExclusive(subpage); 2515 2516 if (folio_test_hugetlb(folio)) { 2517 bool anon = folio_test_anon(folio); 2518 2519 /* 2520 * huge_pmd_unshare may unmap an entire PMD page. 2521 * There is no way of knowing exactly which PMDs may 2522 * be cached for this mm, so we must flush them all. 2523 * start/end were already adjusted above to cover this 2524 * range. 2525 */ 2526 flush_cache_range(vma, range.start, range.end); 2527 2528 /* 2529 * To call huge_pmd_unshare, i_mmap_rwsem must be 2530 * held in write mode. Caller needs to explicitly 2531 * do this outside rmap routines. 2532 * 2533 * We also must hold hugetlb vma_lock in write mode. 2534 * Lock order dictates acquiring vma_lock BEFORE 2535 * i_mmap_rwsem. We can only try lock here and 2536 * fail if unsuccessful. 2537 */ 2538 if (!anon) { 2539 struct mmu_gather tlb; 2540 2541 VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 2542 if (!hugetlb_vma_trylock_write(vma)) { 2543 page_vma_mapped_walk_done(&pvmw); 2544 ret = false; 2545 break; 2546 } 2547 2548 tlb_gather_mmu_vma(&tlb, vma); 2549 if (huge_pmd_unshare(&tlb, vma, address, pvmw.pte)) { 2550 hugetlb_vma_unlock_write(vma); 2551 huge_pmd_unshare_flush(&tlb, vma); 2552 tlb_finish_mmu(&tlb); 2553 /* 2554 * The PMD table was unmapped, 2555 * consequently unmapping the folio. 
2556 */ 2557 page_vma_mapped_walk_done(&pvmw); 2558 break; 2559 } 2560 hugetlb_vma_unlock_write(vma); 2561 tlb_finish_mmu(&tlb); 2562 } 2563 /* Nuke the hugetlb page table entry */ 2564 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); 2565 if (pte_dirty(pteval)) 2566 folio_mark_dirty(folio); 2567 writable = pte_write(pteval); 2568 } else if (likely(pte_present(pteval))) { 2569 flush_cache_page(vma, address, pfn); 2570 /* Nuke the page table entry. */ 2571 if (should_defer_flush(mm, flags)) { 2572 /* 2573 * We clear the PTE but do not flush so potentially 2574 * a remote CPU could still be writing to the folio. 2575 * If the entry was previously clean then the 2576 * architecture must guarantee that a clear->dirty 2577 * transition on a cached TLB entry is written through 2578 * and traps if the PTE is unmapped. 2579 */ 2580 pteval = ptep_get_and_clear(mm, address, pvmw.pte); 2581 2582 set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE); 2583 } else { 2584 pteval = ptep_clear_flush(vma, address, pvmw.pte); 2585 } 2586 if (pte_dirty(pteval)) 2587 folio_mark_dirty(folio); 2588 writable = pte_write(pteval); 2589 } else { 2590 const softleaf_t entry = softleaf_from_pte(pteval); 2591 2592 pte_clear(mm, address, pvmw.pte); 2593 2594 writable = softleaf_is_device_private_write(entry); 2595 } 2596 2597 VM_WARN_ON_FOLIO(writable && folio_test_anon(folio) && 2598 !anon_exclusive, folio); 2599 2600 /* Update high watermark before we lower rss */ 2601 update_hiwater_rss(mm); 2602 2603 if (PageHWPoison(subpage)) { 2604 VM_WARN_ON_FOLIO(folio_is_device_private(folio), folio); 2605 2606 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 2607 if (folio_test_hugetlb(folio)) { 2608 hugetlb_count_sub(folio_nr_pages(folio), mm); 2609 set_huge_pte_at(mm, address, pvmw.pte, pteval, 2610 hsz); 2611 } else { 2612 dec_mm_counter(mm, mm_counter(folio)); 2613 set_pte_at(mm, address, pvmw.pte, pteval); 2614 } 2615 } else if (likely(pte_present(pteval)) && pte_unused(pteval) && 2616 !userfaultfd_armed(vma)) { 2617 /* 2618 * The guest indicated that the page content is of no 2619 * interest anymore. Simply discard the pte, vmscan 2620 * will take care of the rest. 2621 * A future reference will then fault in a new zero 2622 * page. When userfaultfd is active, we must not drop 2623 * this page though, as its main user (postcopy 2624 * migration) will not expect userfaults on already 2625 * copied pages. 2626 */ 2627 dec_mm_counter(mm, mm_counter(folio)); 2628 } else { 2629 swp_entry_t entry; 2630 pte_t swp_pte; 2631 2632 /* 2633 * arch_unmap_one() is expected to be a NOP on 2634 * architectures where we could have PFN swap PTEs, 2635 * so we'll not check/care. 2636 */ 2637 if (arch_unmap_one(mm, vma, address, pteval) < 0) { 2638 if (folio_test_hugetlb(folio)) 2639 set_huge_pte_at(mm, address, pvmw.pte, 2640 pteval, hsz); 2641 else 2642 set_pte_at(mm, address, pvmw.pte, pteval); 2643 ret = false; 2644 page_vma_mapped_walk_done(&pvmw); 2645 break; 2646 } 2647 2648 /* See folio_try_share_anon_rmap_pte(): clear PTE first. 
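			 * The PTE was already cleared above; that ordering is what
			 * lets folio_try_share_anon_rmap_pte() synchronize against
			 * concurrent GUP-fast pins.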
*/
2649 if (folio_test_hugetlb(folio)) {
2650 if (anon_exclusive &&
2651 hugetlb_try_share_anon_rmap(folio)) {
2652 set_huge_pte_at(mm, address, pvmw.pte,
2653 pteval, hsz);
2654 ret = false;
2655 page_vma_mapped_walk_done(&pvmw);
2656 break;
2657 }
2658 } else if (anon_exclusive &&
2659 folio_try_share_anon_rmap_pte(folio, subpage)) {
2660 set_pte_at(mm, address, pvmw.pte, pteval);
2661 ret = false;
2662 page_vma_mapped_walk_done(&pvmw);
2663 break;
2664 }
2665 
2666 /*
2667 * Store the pfn of the page in a special migration
2668 * pte. do_swap_page() will wait until the migration
2669 * pte is removed and then restart fault handling.
2670 */
2671 if (writable)
2672 entry = make_writable_migration_entry(
2673 page_to_pfn(subpage));
2674 else if (anon_exclusive)
2675 entry = make_readable_exclusive_migration_entry(
2676 page_to_pfn(subpage));
2677 else
2678 entry = make_readable_migration_entry(
2679 page_to_pfn(subpage));
2680 if (likely(pte_present(pteval))) {
2681 if (pte_young(pteval))
2682 entry = make_migration_entry_young(entry);
2683 if (pte_dirty(pteval))
2684 entry = make_migration_entry_dirty(entry);
2685 swp_pte = swp_entry_to_pte(entry);
2686 if (pte_soft_dirty(pteval))
2687 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2688 if (pte_uffd_wp(pteval))
2689 swp_pte = pte_swp_mkuffd_wp(swp_pte);
2690 } else {
2691 swp_pte = swp_entry_to_pte(entry);
2692 if (pte_swp_soft_dirty(pteval))
2693 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2694 if (pte_swp_uffd_wp(pteval))
2695 swp_pte = pte_swp_mkuffd_wp(swp_pte);
2696 }
2697 if (folio_test_hugetlb(folio))
2698 set_huge_pte_at(mm, address, pvmw.pte, swp_pte,
2699 hsz);
2700 else
2701 set_pte_at(mm, address, pvmw.pte, swp_pte);
2702 trace_set_migration_pte(address, pte_val(swp_pte),
2703 folio_order(folio));
2704 /*
2705 * No need to invalidate here, it will synchronize
2706 * against the special swap migration pte.
2707 */
2708 }
2709 
2710 if (unlikely(folio_test_hugetlb(folio)))
2711 hugetlb_remove_rmap(folio);
2712 else
2713 folio_remove_rmap_pte(folio, subpage, vma);
2714 if (vma->vm_flags & VM_LOCKED)
2715 mlock_drain_local();
2716 folio_put(folio);
2717 }
2718 
2719 mmu_notifier_invalidate_range_end(&range);
2720 
2721 return ret;
2722 }
2723 
2724 /**
2725 * try_to_migrate - try to replace all page table mappings with swap entries
2726 * @folio: the folio to replace page table entries for
2727 * @flags: action and flags
2728 *
2729 * Tries to remove all the page table entries which are mapping this folio and
2730 * replace them with special swap entries. Caller must hold the folio lock.
2731 */
2732 void try_to_migrate(struct folio *folio, enum ttu_flags flags)
2733 {
2734 struct rmap_walk_control rwc = {
2735 .rmap_one = try_to_migrate_one,
2736 .arg = (void *)flags,
2737 .done = folio_not_mapped,
2738 .anon_lock = folio_lock_anon_vma_read,
2739 };
2740 
2741 /*
2742 * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED,
2743 * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags.
2744 */
2745 if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2746 TTU_SYNC | TTU_BATCH_FLUSH)))
2747 return;
2748 
2749 if (folio_is_zone_device(folio) &&
2750 (!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
2751 return;
2752 
2753 /*
2754 * During exec, a temporary VMA is set up and later moved.
2755 * The VMA is moved under the anon_vma lock but not the
2756 * page tables, leading to a race where migration cannot
2757 * find the migration ptes.
Rather than increasing the 2758 * locking requirements of exec(), migration skips 2759 * temporary VMAs until after exec() completes. 2760 */ 2761 if (!folio_test_ksm(folio) && folio_test_anon(folio)) 2762 rwc.invalid_vma = invalid_migration_vma; 2763 2764 if (flags & TTU_RMAP_LOCKED) 2765 rmap_walk_locked(folio, &rwc); 2766 else 2767 rmap_walk(folio, &rwc); 2768 } 2769 2770 #ifdef CONFIG_DEVICE_PRIVATE 2771 /** 2772 * make_device_exclusive() - Mark a page for exclusive use by a device 2773 * @mm: mm_struct of associated target process 2774 * @addr: the virtual address to mark for exclusive device access 2775 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering 2776 * @foliop: folio pointer will be stored here on success. 2777 * 2778 * This function looks up the page mapped at the given address, grabs a 2779 * folio reference, locks the folio and replaces the PTE with special 2780 * device-exclusive PFN swap entry, preventing access through the process 2781 * page tables. The function will return with the folio locked and referenced. 2782 * 2783 * On fault, the device-exclusive entries are replaced with the original PTE 2784 * under folio lock, after calling MMU notifiers. 2785 * 2786 * Only anonymous non-hugetlb folios are supported and the VMA must have 2787 * write permissions such that we can fault in the anonymous page writable 2788 * in order to mark it exclusive. The caller must hold the mmap_lock in read 2789 * mode. 2790 * 2791 * A driver using this to program access from a device must use a mmu notifier 2792 * critical section to hold a device specific lock during programming. Once 2793 * programming is complete it should drop the folio lock and reference after 2794 * which point CPU access to the page will revoke the exclusive access. 2795 * 2796 * Notes: 2797 * #. This function always operates on individual PTEs mapping individual 2798 * pages. PMD-sized THPs are first remapped to be mapped by PTEs before 2799 * the conversion happens on a single PTE corresponding to @addr. 2800 * #. While concurrent access through the process page tables is prevented, 2801 * concurrent access through other page references (e.g., earlier GUP 2802 * invocation) is not handled and not supported. 2803 * #. device-exclusive entries are considered "clean" and "old" by core-mm. 2804 * Device drivers must update the folio state when informed by MMU 2805 * notifiers. 2806 * 2807 * Returns: pointer to mapped page on success, otherwise a negative error. 2808 */ 2809 struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, 2810 void *owner, struct folio **foliop) 2811 { 2812 struct mmu_notifier_range range; 2813 struct folio *folio, *fw_folio; 2814 struct vm_area_struct *vma; 2815 struct folio_walk fw; 2816 struct page *page; 2817 swp_entry_t entry; 2818 pte_t swp_pte; 2819 int ret; 2820 2821 mmap_assert_locked(mm); 2822 addr = PAGE_ALIGN_DOWN(addr); 2823 2824 /* 2825 * Fault in the page writable and try to lock it; note that if the 2826 * address would already be marked for exclusive use by a device, 2827 * the GUP call would undo that first by triggering a fault. 2828 * 2829 * If any other device would already map this page exclusively, the 2830 * fault will trigger a conversion to an ordinary 2831 * (non-device-exclusive) PTE and issue a MMU_NOTIFY_EXCLUSIVE. 
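	 * GUP with FOLL_SPLIT_PMD below also makes sure a PMD-mapped THP
	 * gets remapped by PTEs first, so we only ever have to convert a
	 * single PTE here.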
2832 */ 2833 retry: 2834 page = get_user_page_vma_remote(mm, addr, 2835 FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, 2836 &vma); 2837 if (IS_ERR(page)) 2838 return page; 2839 folio = page_folio(page); 2840 2841 if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) { 2842 folio_put(folio); 2843 return ERR_PTR(-EOPNOTSUPP); 2844 } 2845 2846 ret = folio_lock_killable(folio); 2847 if (ret) { 2848 folio_put(folio); 2849 return ERR_PTR(ret); 2850 } 2851 2852 /* 2853 * Inform secondary MMUs that we are going to convert this PTE to 2854 * device-exclusive, such that they unmap it now. Note that the 2855 * caller must filter this event out to prevent livelocks. 2856 */ 2857 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, 2858 mm, addr, addr + PAGE_SIZE, owner); 2859 mmu_notifier_invalidate_range_start(&range); 2860 2861 /* 2862 * Let's do a second walk and make sure we still find the same page 2863 * mapped writable. Note that any page of an anonymous folio can 2864 * only be mapped writable using exactly one PTE ("exclusive"), so 2865 * there cannot be other mappings. 2866 */ 2867 fw_folio = folio_walk_start(&fw, vma, addr, 0); 2868 if (fw_folio != folio || fw.page != page || 2869 fw.level != FW_LEVEL_PTE || !pte_write(fw.pte)) { 2870 if (fw_folio) 2871 folio_walk_end(&fw, vma); 2872 mmu_notifier_invalidate_range_end(&range); 2873 folio_unlock(folio); 2874 folio_put(folio); 2875 goto retry; 2876 } 2877 2878 /* Nuke the page table entry so we get the uptodate dirty bit. */ 2879 flush_cache_page(vma, addr, page_to_pfn(page)); 2880 fw.pte = ptep_clear_flush(vma, addr, fw.ptep); 2881 2882 /* Set the dirty flag on the folio now the PTE is gone. */ 2883 if (pte_dirty(fw.pte)) 2884 folio_mark_dirty(folio); 2885 2886 /* 2887 * Store the pfn of the page in a special device-exclusive PFN swap PTE. 2888 * do_swap_page() will trigger the conversion back while holding the 2889 * folio lock. 2890 */ 2891 entry = make_device_exclusive_entry(page_to_pfn(page)); 2892 swp_pte = swp_entry_to_pte(entry); 2893 if (pte_soft_dirty(fw.pte)) 2894 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2895 /* The pte is writable, uffd-wp does not apply. */ 2896 set_pte_at(mm, addr, fw.ptep, swp_pte); 2897 2898 folio_walk_end(&fw, vma); 2899 mmu_notifier_invalidate_range_end(&range); 2900 *foliop = folio; 2901 return page; 2902 } 2903 EXPORT_SYMBOL_GPL(make_device_exclusive); 2904 #endif 2905 2906 void __put_anon_vma(struct anon_vma *anon_vma) 2907 { 2908 struct anon_vma *root = anon_vma->root; 2909 2910 anon_vma_free(anon_vma); 2911 if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 2912 anon_vma_free(root); 2913 } 2914 2915 static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio, 2916 struct rmap_walk_control *rwc) 2917 { 2918 struct anon_vma *anon_vma; 2919 2920 if (rwc->anon_lock) 2921 return rwc->anon_lock(folio, rwc); 2922 2923 /* 2924 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() 2925 * because that depends on page_mapped(); but not all its usages 2926 * are holding mmap_lock. 
Users without mmap_lock are required to 2927 * take a reference count to prevent the anon_vma disappearing 2928 */ 2929 anon_vma = folio_anon_vma(folio); 2930 if (!anon_vma) 2931 return NULL; 2932 2933 if (anon_vma_trylock_read(anon_vma)) 2934 goto out; 2935 2936 if (rwc->try_lock) { 2937 anon_vma = NULL; 2938 rwc->contended = true; 2939 goto out; 2940 } 2941 2942 anon_vma_lock_read(anon_vma); 2943 out: 2944 return anon_vma; 2945 } 2946 2947 /* 2948 * rmap_walk_anon - do something to anonymous page using the object-based 2949 * rmap method 2950 * @folio: the folio to be handled 2951 * @rwc: control variable according to each walk type 2952 * @locked: caller holds relevant rmap lock 2953 * 2954 * Find all the mappings of a folio using the mapping pointer and the vma 2955 * chains contained in the anon_vma struct it points to. 2956 */ 2957 static void rmap_walk_anon(struct folio *folio, 2958 struct rmap_walk_control *rwc, bool locked) 2959 { 2960 struct anon_vma *anon_vma; 2961 pgoff_t pgoff_start, pgoff_end; 2962 struct anon_vma_chain *avc; 2963 2964 /* 2965 * The folio lock ensures that folio->mapping can't be changed under us 2966 * to an anon_vma with different root. 2967 */ 2968 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); 2969 2970 if (locked) { 2971 anon_vma = folio_anon_vma(folio); 2972 /* anon_vma disappear under us? */ 2973 VM_BUG_ON_FOLIO(!anon_vma, folio); 2974 } else { 2975 anon_vma = rmap_walk_anon_lock(folio, rwc); 2976 } 2977 if (!anon_vma) 2978 return; 2979 2980 pgoff_start = folio_pgoff(folio); 2981 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; 2982 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, 2983 pgoff_start, pgoff_end) { 2984 struct vm_area_struct *vma = avc->vma; 2985 unsigned long address = vma_address(vma, pgoff_start, 2986 folio_nr_pages(folio)); 2987 2988 VM_BUG_ON_VMA(address == -EFAULT, vma); 2989 cond_resched(); 2990 2991 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2992 continue; 2993 2994 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 2995 break; 2996 if (rwc->done && rwc->done(folio)) 2997 break; 2998 } 2999 3000 if (!locked) 3001 anon_vma_unlock_read(anon_vma); 3002 } 3003 3004 /** 3005 * __rmap_walk_file() - Traverse the reverse mapping for a file-backed mapping 3006 * of a page mapped within a specified page cache object at a specified offset. 3007 * 3008 * @folio: Either the folio whose mappings to traverse, or if NULL, 3009 * the callbacks specified in @rwc will be configured such 3010 * as to be able to look up mappings correctly. 3011 * @mapping: The page cache object whose mapping VMAs we intend to 3012 * traverse. If @folio is non-NULL, this should be equal to 3013 * folio_mapping(folio). 3014 * @pgoff_start: The offset within @mapping of the page which we are 3015 * looking up. If @folio is non-NULL, this should be equal 3016 * to folio_pgoff(folio). 3017 * @nr_pages: The number of pages mapped by the mapping. If @folio is 3018 * non-NULL, this should be equal to folio_nr_pages(folio). 3019 * @rwc: The reverse mapping walk control object describing how 3020 * the traversal should proceed. 3021 * @locked: Is the @mapping already locked? If not, we acquire the 3022 * lock. 
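 *
 * If @locked is false and @rwc->try_lock is set, a contended
 * @mapping->i_mmap_rwsem makes the walk return early with
 * @rwc->contended set, mirroring the anon walk.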
3023 */ 3024 static void __rmap_walk_file(struct folio *folio, struct address_space *mapping, 3025 pgoff_t pgoff_start, unsigned long nr_pages, 3026 struct rmap_walk_control *rwc, bool locked) 3027 { 3028 pgoff_t pgoff_end = pgoff_start + nr_pages - 1; 3029 struct vm_area_struct *vma; 3030 3031 VM_WARN_ON_FOLIO(folio && mapping != folio_mapping(folio), folio); 3032 VM_WARN_ON_FOLIO(folio && pgoff_start != folio_pgoff(folio), folio); 3033 VM_WARN_ON_FOLIO(folio && nr_pages != folio_nr_pages(folio), folio); 3034 3035 if (!locked) { 3036 if (i_mmap_trylock_read(mapping)) 3037 goto lookup; 3038 3039 if (rwc->try_lock) { 3040 rwc->contended = true; 3041 return; 3042 } 3043 3044 i_mmap_lock_read(mapping); 3045 } 3046 lookup: 3047 vma_interval_tree_foreach(vma, &mapping->i_mmap, 3048 pgoff_start, pgoff_end) { 3049 unsigned long address = vma_address(vma, pgoff_start, nr_pages); 3050 3051 VM_BUG_ON_VMA(address == -EFAULT, vma); 3052 cond_resched(); 3053 3054 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 3055 continue; 3056 3057 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 3058 goto done; 3059 if (rwc->done && rwc->done(folio)) 3060 goto done; 3061 } 3062 done: 3063 if (!locked) 3064 i_mmap_unlock_read(mapping); 3065 } 3066 3067 /* 3068 * rmap_walk_file - do something to file page using the object-based rmap method 3069 * @folio: the folio to be handled 3070 * @rwc: control variable according to each walk type 3071 * @locked: caller holds relevant rmap lock 3072 * 3073 * Find all the mappings of a folio using the mapping pointer and the vma chains 3074 * contained in the address_space struct it points to. 3075 */ 3076 static void rmap_walk_file(struct folio *folio, 3077 struct rmap_walk_control *rwc, bool locked) 3078 { 3079 /* 3080 * The folio lock not only makes sure that folio->mapping cannot 3081 * suddenly be NULLified by truncation, it makes sure that the structure 3082 * at mapping cannot be freed and reused yet, so we can safely take 3083 * mapping->i_mmap_rwsem. 3084 */ 3085 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 3086 3087 if (!folio->mapping) 3088 return; 3089 3090 __rmap_walk_file(folio, folio->mapping, folio->index, 3091 folio_nr_pages(folio), rwc, locked); 3092 } 3093 3094 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) 3095 { 3096 if (unlikely(folio_test_ksm(folio))) 3097 rmap_walk_ksm(folio, rwc); 3098 else if (folio_test_anon(folio)) 3099 rmap_walk_anon(folio, rwc, false); 3100 else 3101 rmap_walk_file(folio, rwc, false); 3102 } 3103 3104 /* Like rmap_walk, but caller holds relevant rmap lock */ 3105 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) 3106 { 3107 /* no ksm support for now */ 3108 VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); 3109 if (folio_test_anon(folio)) 3110 rmap_walk_anon(folio, rwc, true); 3111 else 3112 rmap_walk_file(folio, rwc, true); 3113 } 3114 3115 #ifdef CONFIG_HUGETLB_PAGE 3116 /* 3117 * The following two functions are for anonymous (private mapped) hugepages. 3118 * Unlike common anonymous pages, anonymous hugepages have no accounting code 3119 * and no lru code, because we handle hugepages differently from common pages. 
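 * Only the entire and large mapcounts are maintained for hugetlb folios;
 * there is no per-page mapcount or deferred-split handling here.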
3120 */ 3121 void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, 3122 unsigned long address, rmap_t flags) 3123 { 3124 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); 3125 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); 3126 3127 atomic_inc(&folio->_entire_mapcount); 3128 atomic_inc(&folio->_large_mapcount); 3129 if (flags & RMAP_EXCLUSIVE) 3130 SetPageAnonExclusive(&folio->page); 3131 VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 && 3132 PageAnonExclusive(&folio->page), folio); 3133 } 3134 3135 void hugetlb_add_new_anon_rmap(struct folio *folio, 3136 struct vm_area_struct *vma, unsigned long address) 3137 { 3138 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); 3139 3140 BUG_ON(address < vma->vm_start || address >= vma->vm_end); 3141 /* increment count (starts at -1) */ 3142 atomic_set(&folio->_entire_mapcount, 0); 3143 atomic_set(&folio->_large_mapcount, 0); 3144 folio_clear_hugetlb_restore_reserve(folio); 3145 __folio_set_anon(folio, vma, address, true); 3146 SetPageAnonExclusive(&folio->page); 3147 } 3148 #endif /* CONFIG_HUGETLB_PAGE */ 3149