1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * mm/rmap.c - physical to virtual reverse mappings 4 * 5 * Copyright 2001, Rik van Riel <riel@conectiva.com.br> 6 * 7 * Simple, low overhead reverse mapping scheme. 8 * Please try to keep this thing as modular as possible. 9 * 10 * Provides methods for unmapping each kind of mapped page: 11 * the anon methods track anonymous pages, and 12 * the file methods track pages belonging to an inode. 13 * 14 * Original design by Rik van Riel <riel@conectiva.com.br> 2001 15 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004 16 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004 17 * Contributions by Hugh Dickins 2003, 2004 18 */ 19 20 /* 21 * Lock ordering in mm: 22 * 23 * inode->i_rwsem (while writing or truncating, not reading or faulting) 24 * mm->mmap_lock 25 * mapping->invalidate_lock (in filemap_fault) 26 * folio_lock 27 * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below) 28 * vma_start_write 29 * mapping->i_mmap_rwsem 30 * anon_vma->rwsem 31 * mm->page_table_lock or pte_lock 32 * swap_lock (in swap_duplicate, swap_info_get) 33 * mmlist_lock (in mmput, drain_mmlist and others) 34 * mapping->private_lock (in block_dirty_folio) 35 * i_pages lock (widely used) 36 * lruvec->lru_lock (in folio_lruvec_lock_irq) 37 * inode->i_lock (in set_page_dirty's __mark_inode_dirty) 38 * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty) 39 * sb_lock (within inode_lock in fs/fs-writeback.c) 40 * i_pages lock (widely used, in set_page_dirty, 41 * in arch-dependent flush_dcache_mmap_lock, 42 * within bdi.wb->list_lock in __sync_single_inode) 43 * 44 * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon) 45 * ->tasklist_lock 46 * pte map lock 47 * 48 * hugetlbfs PageHuge() take locks in this order: 49 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex) 50 * vma_lock (hugetlb specific lock for pmd_sharing) 51 * mapping->i_mmap_rwsem (also used for hugetlb pmd sharing) 52 * folio_lock 53 */ 54 55 #include <linux/mm.h> 56 #include <linux/sched/mm.h> 57 #include <linux/sched/task.h> 58 #include <linux/pagemap.h> 59 #include <linux/swap.h> 60 #include <linux/leafops.h> 61 #include <linux/slab.h> 62 #include <linux/init.h> 63 #include <linux/ksm.h> 64 #include <linux/rmap.h> 65 #include <linux/rcupdate.h> 66 #include <linux/export.h> 67 #include <linux/memcontrol.h> 68 #include <linux/mmu_notifier.h> 69 #include <linux/migrate.h> 70 #include <linux/hugetlb.h> 71 #include <linux/huge_mm.h> 72 #include <linux/backing-dev.h> 73 #include <linux/page_idle.h> 74 #include <linux/memremap.h> 75 #include <linux/userfaultfd_k.h> 76 #include <linux/mm_inline.h> 77 #include <linux/oom.h> 78 79 #include <asm/tlb.h> 80 81 #define CREATE_TRACE_POINTS 82 #include <trace/events/migrate.h> 83 84 #include "internal.h" 85 #include "swap.h" 86 87 static struct kmem_cache *anon_vma_cachep; 88 static struct kmem_cache *anon_vma_chain_cachep; 89 90 static inline struct anon_vma *anon_vma_alloc(void) 91 { 92 struct anon_vma *anon_vma; 93 94 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); 95 if (anon_vma) { 96 atomic_set(&anon_vma->refcount, 1); 97 anon_vma->num_children = 0; 98 anon_vma->num_active_vmas = 0; 99 anon_vma->parent = anon_vma; 100 /* 101 * Initialise the anon_vma root to point to itself. If called 102 * from fork, the root will be reset to the parents anon_vma. 
103 */ 104 anon_vma->root = anon_vma; 105 } 106 107 return anon_vma; 108 } 109 110 static inline void anon_vma_free(struct anon_vma *anon_vma) 111 { 112 VM_BUG_ON(atomic_read(&anon_vma->refcount)); 113 114 /* 115 * Synchronize against folio_lock_anon_vma_read() such that 116 * we can safely hold the lock without the anon_vma getting 117 * freed. 118 * 119 * Relies on the full mb implied by the atomic_dec_and_test() from 120 * put_anon_vma() against the acquire barrier implied by 121 * down_read_trylock() from folio_lock_anon_vma_read(). This orders: 122 * 123 * folio_lock_anon_vma_read() VS put_anon_vma() 124 * down_read_trylock() atomic_dec_and_test() 125 * LOCK MB 126 * atomic_read() rwsem_is_locked() 127 * 128 * LOCK should suffice since the actual taking of the lock must 129 * happen _before_ what follows. 130 */ 131 might_sleep(); 132 if (rwsem_is_locked(&anon_vma->root->rwsem)) { 133 anon_vma_lock_write(anon_vma); 134 anon_vma_unlock_write(anon_vma); 135 } 136 137 kmem_cache_free(anon_vma_cachep, anon_vma); 138 } 139 140 static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) 141 { 142 return kmem_cache_alloc(anon_vma_chain_cachep, gfp); 143 } 144 145 static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) 146 { 147 kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); 148 } 149 150 static void anon_vma_chain_assign(struct vm_area_struct *vma, 151 struct anon_vma_chain *avc, 152 struct anon_vma *anon_vma) 153 { 154 avc->vma = vma; 155 avc->anon_vma = anon_vma; 156 list_add(&avc->same_vma, &vma->anon_vma_chain); 157 } 158 159 /** 160 * __anon_vma_prepare - attach an anon_vma to a memory region 161 * @vma: the memory region in question 162 * 163 * This makes sure the memory mapping described by 'vma' has 164 * an 'anon_vma' attached to it, so that we can associate the 165 * anonymous pages mapped into it with that anon_vma. 166 * 167 * The common case will be that we already have one, which 168 * is handled inline by anon_vma_prepare(). But if 169 * not we either need to find an adjacent mapping that we 170 * can re-use the anon_vma from (very common when the only 171 * reason for splitting a vma has been mprotect()), or we 172 * allocate a new one. 173 * 174 * Anon-vma allocations are very subtle, because we may have 175 * optimistically looked up an anon_vma in folio_lock_anon_vma_read() 176 * and that may actually touch the rwsem even in the newly 177 * allocated vma (it depends on RCU to make sure that the 178 * anon_vma isn't actually destroyed). 179 * 180 * As a result, we need to do proper anon_vma locking even 181 * for the new allocation. At the same time, we do not want 182 * to do any locking for the common case of already having 183 * an anon_vma. 
184 */ 185 int __anon_vma_prepare(struct vm_area_struct *vma) 186 { 187 struct mm_struct *mm = vma->vm_mm; 188 struct anon_vma *anon_vma, *allocated; 189 struct anon_vma_chain *avc; 190 191 mmap_assert_locked(mm); 192 might_sleep(); 193 194 avc = anon_vma_chain_alloc(GFP_KERNEL); 195 if (!avc) 196 goto out_enomem; 197 198 anon_vma = find_mergeable_anon_vma(vma); 199 allocated = NULL; 200 if (!anon_vma) { 201 anon_vma = anon_vma_alloc(); 202 if (unlikely(!anon_vma)) 203 goto out_enomem_free_avc; 204 anon_vma->num_children++; /* self-parent link for new root */ 205 allocated = anon_vma; 206 } 207 208 anon_vma_lock_write(anon_vma); 209 /* page_table_lock to protect against threads */ 210 spin_lock(&mm->page_table_lock); 211 if (likely(!vma->anon_vma)) { 212 vma->anon_vma = anon_vma; 213 anon_vma_chain_assign(vma, avc, anon_vma); 214 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); 215 anon_vma->num_active_vmas++; 216 allocated = NULL; 217 avc = NULL; 218 } 219 spin_unlock(&mm->page_table_lock); 220 anon_vma_unlock_write(anon_vma); 221 222 if (unlikely(allocated)) 223 put_anon_vma(allocated); 224 if (unlikely(avc)) 225 anon_vma_chain_free(avc); 226 227 return 0; 228 229 out_enomem_free_avc: 230 anon_vma_chain_free(avc); 231 out_enomem: 232 return -ENOMEM; 233 } 234 235 static void check_anon_vma_clone(struct vm_area_struct *dst, 236 struct vm_area_struct *src, 237 enum vma_operation operation) 238 { 239 /* The write lock must be held. */ 240 mmap_assert_write_locked(src->vm_mm); 241 /* If not a fork then must be on same mm. */ 242 VM_WARN_ON_ONCE(operation != VMA_OP_FORK && dst->vm_mm != src->vm_mm); 243 244 /* If we have anything to do src->anon_vma must be provided. */ 245 VM_WARN_ON_ONCE(!src->anon_vma && !list_empty(&src->anon_vma_chain)); 246 VM_WARN_ON_ONCE(!src->anon_vma && dst->anon_vma); 247 /* We are establishing a new anon_vma_chain. */ 248 VM_WARN_ON_ONCE(!list_empty(&dst->anon_vma_chain)); 249 /* 250 * On fork, dst->anon_vma is set NULL (temporarily). Otherwise, anon_vma 251 * must be the same across dst and src. 252 */ 253 VM_WARN_ON_ONCE(dst->anon_vma && dst->anon_vma != src->anon_vma); 254 /* 255 * Essentially equivalent to above - if not a no-op, we should expect 256 * dst->anon_vma to be set for everything except a fork. 257 */ 258 VM_WARN_ON_ONCE(operation != VMA_OP_FORK && src->anon_vma && 259 !dst->anon_vma); 260 /* For the anon_vma to be compatible, it can only be singular. */ 261 VM_WARN_ON_ONCE(operation == VMA_OP_MERGE_UNFAULTED && 262 !list_is_singular(&src->anon_vma_chain)); 263 #ifdef CONFIG_PER_VMA_LOCK 264 /* Only merging an unfaulted VMA leaves the destination attached. */ 265 VM_WARN_ON_ONCE(operation != VMA_OP_MERGE_UNFAULTED && 266 vma_is_attached(dst)); 267 #endif 268 } 269 270 static void maybe_reuse_anon_vma(struct vm_area_struct *dst, 271 struct anon_vma *anon_vma) 272 { 273 /* If already populated, nothing to do.*/ 274 if (dst->anon_vma) 275 return; 276 277 /* 278 * We reuse an anon_vma if any linking VMAs were unmapped and it has 279 * only a single child at most. 280 */ 281 if (anon_vma->num_active_vmas > 0) 282 return; 283 if (anon_vma->num_children > 1) 284 return; 285 286 dst->anon_vma = anon_vma; 287 anon_vma->num_active_vmas++; 288 } 289 290 static void cleanup_partial_anon_vmas(struct vm_area_struct *vma); 291 292 /** 293 * anon_vma_clone - Establishes new anon_vma_chain objects in @dst linking to 294 * all of the anon_vma objects contained within @src anon_vma_chain's. 295 * @dst: The destination VMA with an empty anon_vma_chain. 
296 * @src: The source VMA we wish to duplicate. 297 * @operation: The type of operation which resulted in the clone. 298 * 299 * This is the heart of the VMA side of the anon_vma implementation - we invoke 300 * this function whenever we need to set up a new VMA's anon_vma state. 301 * 302 * This is invoked for: 303 * 304 * - VMA Merge, but only when @dst is unfaulted and @src is faulted - meaning we 305 * clone @src into @dst. 306 * - VMA split. 307 * - VMA (m)remap. 308 * - Fork of faulted VMA. 309 * 310 * In all cases other than fork this is simply a duplication. Fork additionally 311 * adds a new active anon_vma. 312 * 313 * ONLY in the case of fork do we try to 'reuse' existing anon_vma's in an 314 * anon_vma hierarchy, reusing anon_vma's which have no VMA associated with them 315 * but do have a single child. This is to avoid waste of memory when repeatedly 316 * forking. 317 * 318 * Returns: 0 on success, -ENOMEM on failure. 319 */ 320 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src, 321 enum vma_operation operation) 322 { 323 struct anon_vma_chain *avc, *pavc; 324 struct anon_vma *active_anon_vma = src->anon_vma; 325 326 check_anon_vma_clone(dst, src, operation); 327 328 if (!active_anon_vma) 329 return 0; 330 331 /* 332 * Allocate AVCs. We don't need an anon_vma lock for this as we 333 * are not updating the anon_vma rbtree nor are we changing 334 * anon_vma statistics. 335 * 336 * Either src, dst have the same mm for which we hold an exclusive mmap 337 * write lock, or we are forking and we hold it on src->vm_mm and dst is 338 * not yet accessible to other threads so there's no possibliity of the 339 * unlinked AVC's being observed yet. 340 */ 341 list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) { 342 avc = anon_vma_chain_alloc(GFP_KERNEL); 343 if (!avc) 344 goto enomem_failure; 345 346 anon_vma_chain_assign(dst, avc, pavc->anon_vma); 347 } 348 349 /* 350 * Now link the anon_vma's back to the newly inserted AVCs. 351 * Note that all anon_vma's share the same root. 352 */ 353 anon_vma_lock_write(src->anon_vma); 354 list_for_each_entry_reverse(avc, &dst->anon_vma_chain, same_vma) { 355 struct anon_vma *anon_vma = avc->anon_vma; 356 357 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); 358 if (operation == VMA_OP_FORK) 359 maybe_reuse_anon_vma(dst, anon_vma); 360 } 361 362 if (operation != VMA_OP_FORK) 363 dst->anon_vma->num_active_vmas++; 364 365 anon_vma_unlock_write(active_anon_vma); 366 return 0; 367 368 enomem_failure: 369 cleanup_partial_anon_vmas(dst); 370 return -ENOMEM; 371 } 372 373 /* 374 * Attach vma to its own anon_vma, as well as to the anon_vmas that 375 * the corresponding VMA in the parent process is attached to. 376 * Returns 0 on success, non-zero on failure. 377 */ 378 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) 379 { 380 struct anon_vma_chain *avc; 381 struct anon_vma *anon_vma; 382 int rc; 383 384 /* Don't bother if the parent process has no anon_vma here. */ 385 if (!pvma->anon_vma) 386 return 0; 387 388 /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ 389 vma->anon_vma = NULL; 390 391 anon_vma = anon_vma_alloc(); 392 if (!anon_vma) 393 return -ENOMEM; 394 avc = anon_vma_chain_alloc(GFP_KERNEL); 395 if (!avc) { 396 put_anon_vma(anon_vma); 397 return -ENOMEM; 398 } 399 400 /* 401 * First, attach the new VMA to the parent VMA's anon_vmas, 402 * so rmap can find non-COWed pages in child processes. 
403 */ 404 rc = anon_vma_clone(vma, pvma, VMA_OP_FORK); 405 /* An error arose or an existing anon_vma was reused, all done then. */ 406 if (rc || vma->anon_vma) { 407 put_anon_vma(anon_vma); 408 anon_vma_chain_free(avc); 409 return rc; 410 } 411 412 /* 413 * OK no reuse, so add our own anon_vma. 414 * 415 * Since it is not linked anywhere we can safely manipulate anon_vma 416 * fields without a lock. 417 */ 418 419 anon_vma->num_active_vmas = 1; 420 /* 421 * The root anon_vma's rwsem is the lock actually used when we 422 * lock any of the anon_vmas in this anon_vma tree. 423 */ 424 anon_vma->root = pvma->anon_vma->root; 425 anon_vma->parent = pvma->anon_vma; 426 /* 427 * With refcounts, an anon_vma can stay around longer than the 428 * process it belongs to. The root anon_vma needs to be pinned until 429 * this anon_vma is freed, because the lock lives in the root. 430 */ 431 get_anon_vma(anon_vma->root); 432 /* Mark this anon_vma as the one where our new (COWed) pages go. */ 433 vma->anon_vma = anon_vma; 434 anon_vma_chain_assign(vma, avc, anon_vma); 435 /* Now let rmap see it. */ 436 anon_vma_lock_write(anon_vma); 437 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); 438 anon_vma->parent->num_children++; 439 anon_vma_unlock_write(anon_vma); 440 441 return 0; 442 } 443 444 /* 445 * In the unfortunate case of anon_vma_clone() failing to allocate memory we 446 * have to clean things up. 447 * 448 * Since we allocate anon_vma_chain's before we insert them into the interval 449 * trees, we simply have to free up the AVC's and remove the entries from the 450 * VMA's anon_vma_chain. 451 */ 452 static void cleanup_partial_anon_vmas(struct vm_area_struct *vma) 453 { 454 struct anon_vma_chain *avc, *next; 455 456 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 457 list_del(&avc->same_vma); 458 anon_vma_chain_free(avc); 459 } 460 461 /* 462 * The anon_vma assigned to this VMA is no longer valid, as we were not 463 * able to correctly clone AVC state. Avoid inconsistent anon_vma tree 464 * state by resetting. 465 */ 466 vma->anon_vma = NULL; 467 } 468 469 /** 470 * unlink_anon_vmas() - remove all links between a VMA and anon_vma's, freeing 471 * anon_vma_chain objects. 472 * @vma: The VMA whose links to anon_vma objects is to be severed. 473 * 474 * As part of the process anon_vma_chain's are freed, 475 * anon_vma->num_children,num_active_vmas is updated as required and, if the 476 * relevant anon_vma references no further VMAs, its reference count is 477 * decremented. 478 */ 479 void unlink_anon_vmas(struct vm_area_struct *vma) 480 { 481 struct anon_vma_chain *avc, *next; 482 struct anon_vma *active_anon_vma = vma->anon_vma; 483 484 /* Always hold mmap lock, read-lock on unmap possibly. */ 485 mmap_assert_locked(vma->vm_mm); 486 487 /* Unfaulted is a no-op. */ 488 if (!active_anon_vma) { 489 VM_WARN_ON_ONCE(!list_empty(&vma->anon_vma_chain)); 490 return; 491 } 492 493 anon_vma_lock_write(active_anon_vma); 494 495 /* 496 * Unlink each anon_vma chained to the VMA. This list is ordered 497 * from newest to oldest, ensuring the root anon_vma gets freed last. 498 */ 499 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 500 struct anon_vma *anon_vma = avc->anon_vma; 501 502 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); 503 504 /* 505 * Leave empty anon_vmas on the list - we'll need 506 * to free them outside the lock. 
507 */ 508 if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) { 509 anon_vma->parent->num_children--; 510 continue; 511 } 512 513 list_del(&avc->same_vma); 514 anon_vma_chain_free(avc); 515 } 516 517 active_anon_vma->num_active_vmas--; 518 /* 519 * vma would still be needed after unlink, and anon_vma will be prepared 520 * when handle fault. 521 */ 522 vma->anon_vma = NULL; 523 anon_vma_unlock_write(active_anon_vma); 524 525 526 /* 527 * Iterate the list once more, it now only contains empty and unlinked 528 * anon_vmas, destroy them. Could not do before due to __put_anon_vma() 529 * needing to write-acquire the anon_vma->root->rwsem. 530 */ 531 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 532 struct anon_vma *anon_vma = avc->anon_vma; 533 534 VM_WARN_ON(anon_vma->num_children); 535 VM_WARN_ON(anon_vma->num_active_vmas); 536 put_anon_vma(anon_vma); 537 538 list_del(&avc->same_vma); 539 anon_vma_chain_free(avc); 540 } 541 } 542 543 static void anon_vma_ctor(void *data) 544 { 545 struct anon_vma *anon_vma = data; 546 547 init_rwsem(&anon_vma->rwsem); 548 atomic_set(&anon_vma->refcount, 0); 549 anon_vma->rb_root = RB_ROOT_CACHED; 550 } 551 552 void __init anon_vma_init(void) 553 { 554 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), 555 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT, 556 anon_vma_ctor); 557 anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, 558 SLAB_PANIC|SLAB_ACCOUNT); 559 } 560 561 /* 562 * Getting a lock on a stable anon_vma from a page off the LRU is tricky! 563 * 564 * Since there is no serialization what so ever against folio_remove_rmap_*() 565 * the best this function can do is return a refcount increased anon_vma 566 * that might have been relevant to this page. 567 * 568 * The page might have been remapped to a different anon_vma or the anon_vma 569 * returned may already be freed (and even reused). 570 * 571 * In case it was remapped to a different anon_vma, the new anon_vma will be a 572 * child of the old anon_vma, and the anon_vma lifetime rules will therefore 573 * ensure that any anon_vma obtained from the page will still be valid for as 574 * long as we observe page_mapped() [ hence all those page_mapped() tests ]. 575 * 576 * All users of this function must be very careful when walking the anon_vma 577 * chain and verify that the page in question is indeed mapped in it 578 * [ something equivalent to page_mapped_in_vma() ]. 579 * 580 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from 581 * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid 582 * if there is a mapcount, we can dereference the anon_vma after observing 583 * those. 584 * 585 * NOTE: the caller should hold folio lock when calling this. 586 */ 587 struct anon_vma *folio_get_anon_vma(const struct folio *folio) 588 { 589 struct anon_vma *anon_vma = NULL; 590 unsigned long anon_mapping; 591 592 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); 593 594 rcu_read_lock(); 595 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); 596 if ((anon_mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON) 597 goto out; 598 if (!folio_mapped(folio)) 599 goto out; 600 601 anon_vma = (struct anon_vma *) (anon_mapping - FOLIO_MAPPING_ANON); 602 if (!atomic_inc_not_zero(&anon_vma->refcount)) { 603 anon_vma = NULL; 604 goto out; 605 } 606 607 /* 608 * If this folio is still mapped, then its anon_vma cannot have been 609 * freed. 
But if it has been unmapped, we have no security against the 610 * anon_vma structure being freed and reused (for another anon_vma: 611 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero() 612 * above cannot corrupt). 613 */ 614 if (!folio_mapped(folio)) { 615 rcu_read_unlock(); 616 put_anon_vma(anon_vma); 617 return NULL; 618 } 619 out: 620 rcu_read_unlock(); 621 622 return anon_vma; 623 } 624 625 /* 626 * Similar to folio_get_anon_vma() except it locks the anon_vma. 627 * 628 * Its a little more complex as it tries to keep the fast path to a single 629 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a 630 * reference like with folio_get_anon_vma() and then block on the mutex 631 * on !rwc->try_lock case. 632 */ 633 struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio, 634 struct rmap_walk_control *rwc) 635 { 636 struct anon_vma *anon_vma = NULL; 637 struct anon_vma *root_anon_vma; 638 unsigned long anon_mapping; 639 640 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); 641 642 rcu_read_lock(); 643 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); 644 if ((anon_mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON) 645 goto out; 646 if (!folio_mapped(folio)) 647 goto out; 648 649 anon_vma = (struct anon_vma *) (anon_mapping - FOLIO_MAPPING_ANON); 650 root_anon_vma = READ_ONCE(anon_vma->root); 651 if (down_read_trylock(&root_anon_vma->rwsem)) { 652 /* 653 * If the folio is still mapped, then this anon_vma is still 654 * its anon_vma, and holding the mutex ensures that it will 655 * not go away, see anon_vma_free(). 656 */ 657 if (!folio_mapped(folio)) { 658 up_read(&root_anon_vma->rwsem); 659 anon_vma = NULL; 660 } 661 goto out; 662 } 663 664 if (rwc && rwc->try_lock) { 665 anon_vma = NULL; 666 rwc->contended = true; 667 goto out; 668 } 669 670 /* trylock failed, we got to sleep */ 671 if (!atomic_inc_not_zero(&anon_vma->refcount)) { 672 anon_vma = NULL; 673 goto out; 674 } 675 676 if (!folio_mapped(folio)) { 677 rcu_read_unlock(); 678 put_anon_vma(anon_vma); 679 return NULL; 680 } 681 682 /* we pinned the anon_vma, its safe to sleep */ 683 rcu_read_unlock(); 684 anon_vma_lock_read(anon_vma); 685 686 if (atomic_dec_and_test(&anon_vma->refcount)) { 687 /* 688 * Oops, we held the last refcount, release the lock 689 * and bail -- can't simply use put_anon_vma() because 690 * we'll deadlock on the anon_vma_lock_write() recursion. 691 */ 692 anon_vma_unlock_read(anon_vma); 693 __put_anon_vma(anon_vma); 694 anon_vma = NULL; 695 } 696 697 return anon_vma; 698 699 out: 700 rcu_read_unlock(); 701 return anon_vma; 702 } 703 704 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH 705 /* 706 * Flush TLB entries for recently unmapped pages from remote CPUs. It is 707 * important if a PTE was dirty when it was unmapped that it's flushed 708 * before any IO is initiated on the page to prevent lost writes. Similarly, 709 * it must be flushed before freeing to prevent data leakage. 
710 */ 711 void try_to_unmap_flush(void) 712 { 713 struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; 714 715 if (!tlb_ubc->flush_required) 716 return; 717 718 arch_tlbbatch_flush(&tlb_ubc->arch); 719 tlb_ubc->flush_required = false; 720 tlb_ubc->writable = false; 721 } 722 723 /* Flush iff there are potentially writable TLB entries that can race with IO */ 724 void try_to_unmap_flush_dirty(void) 725 { 726 struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; 727 728 if (tlb_ubc->writable) 729 try_to_unmap_flush(); 730 } 731 732 /* 733 * Bits 0-14 of mm->tlb_flush_batched record pending generations. 734 * Bits 16-30 of mm->tlb_flush_batched bit record flushed generations. 735 */ 736 #define TLB_FLUSH_BATCH_FLUSHED_SHIFT 16 737 #define TLB_FLUSH_BATCH_PENDING_MASK \ 738 ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1) 739 #define TLB_FLUSH_BATCH_PENDING_LARGE \ 740 (TLB_FLUSH_BATCH_PENDING_MASK / 2) 741 742 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, 743 unsigned long start, unsigned long end) 744 { 745 struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; 746 int batch; 747 bool writable = pte_dirty(pteval); 748 749 if (!pte_accessible(mm, pteval)) 750 return; 751 752 arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, start, end); 753 tlb_ubc->flush_required = true; 754 755 /* 756 * Ensure compiler does not re-order the setting of tlb_flush_batched 757 * before the PTE is cleared. 758 */ 759 barrier(); 760 batch = atomic_read(&mm->tlb_flush_batched); 761 retry: 762 if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) { 763 /* 764 * Prevent `pending' from catching up with `flushed' because of 765 * overflow. Reset `pending' and `flushed' to be 1 and 0 if 766 * `pending' becomes large. 767 */ 768 if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1)) 769 goto retry; 770 } else { 771 atomic_inc(&mm->tlb_flush_batched); 772 } 773 774 /* 775 * If the PTE was dirty then it's best to assume it's writable. The 776 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() 777 * before the page is queued for IO. 778 */ 779 if (writable) 780 tlb_ubc->writable = true; 781 } 782 783 /* 784 * Returns true if the TLB flush should be deferred to the end of a batch of 785 * unmap operations to reduce IPIs. 786 */ 787 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) 788 { 789 if (!(flags & TTU_BATCH_FLUSH)) 790 return false; 791 792 return arch_tlbbatch_should_defer(mm); 793 } 794 795 /* 796 * Reclaim unmaps pages under the PTL but do not flush the TLB prior to 797 * releasing the PTL if TLB flushes are batched. It's possible for a parallel 798 * operation such as mprotect or munmap to race between reclaim unmapping 799 * the page and flushing the page. If this race occurs, it potentially allows 800 * access to data via a stale TLB entry. Tracking all mm's that have TLB 801 * batching in flight would be expensive during reclaim so instead track 802 * whether TLB batching occurred in the past and if so then do a flush here 803 * if required. This will cost one additional flush per reclaim cycle paid 804 * by the first operation at risk such as mprotect and mumap. 805 * 806 * This must be called under the PTL so that an access to tlb_flush_batched 807 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise 808 * via the PTL. 
809 */ 810 void flush_tlb_batched_pending(struct mm_struct *mm) 811 { 812 int batch = atomic_read(&mm->tlb_flush_batched); 813 int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK; 814 int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT; 815 816 if (pending != flushed) { 817 flush_tlb_mm(mm); 818 /* 819 * If the new TLB flushing is pending during flushing, leave 820 * mm->tlb_flush_batched as is, to avoid losing flushing. 821 */ 822 atomic_cmpxchg(&mm->tlb_flush_batched, batch, 823 pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT)); 824 } 825 } 826 #else 827 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, 828 unsigned long start, unsigned long end) 829 { 830 } 831 832 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) 833 { 834 return false; 835 } 836 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ 837 838 /** 839 * page_address_in_vma - The virtual address of a page in this VMA. 840 * @folio: The folio containing the page. 841 * @page: The page within the folio. 842 * @vma: The VMA we need to know the address in. 843 * 844 * Calculates the user virtual address of this page in the specified VMA. 845 * It is the caller's responsibility to check the page is actually 846 * within the VMA. There may not currently be a PTE pointing at this 847 * page, but if a page fault occurs at this address, this is the page 848 * which will be accessed. 849 * 850 * Context: Caller should hold a reference to the folio. Caller should 851 * hold a lock (eg the i_mmap_lock or the mmap_lock) which keeps the 852 * VMA from being altered. 853 * 854 * Return: The virtual address corresponding to this page in the VMA. 855 */ 856 unsigned long page_address_in_vma(const struct folio *folio, 857 const struct page *page, const struct vm_area_struct *vma) 858 { 859 if (folio_test_anon(folio)) { 860 struct anon_vma *anon_vma = folio_anon_vma(folio); 861 /* 862 * Note: swapoff's unuse_vma() is more efficient with this 863 * check, and needs it to match anon_vma when KSM is active. 864 */ 865 if (!vma->anon_vma || !anon_vma || 866 vma->anon_vma->root != anon_vma->root) 867 return -EFAULT; 868 } else if (!vma->vm_file) { 869 return -EFAULT; 870 } else if (vma->vm_file->f_mapping != folio->mapping) { 871 return -EFAULT; 872 } 873 874 /* KSM folios don't reach here because of the !anon_vma check */ 875 return vma_address(vma, page_pgoff(folio, page), 1); 876 } 877 878 /* 879 * Returns the actual pmd_t* where we expect 'address' to be mapped from, or 880 * NULL if it doesn't exist. No guarantees / checks on what the pmd_t* 881 * represents. 
882 */ 883 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) 884 { 885 pgd_t *pgd; 886 p4d_t *p4d; 887 pud_t *pud; 888 pmd_t *pmd = NULL; 889 890 pgd = pgd_offset(mm, address); 891 if (!pgd_present(*pgd)) 892 goto out; 893 894 p4d = p4d_offset(pgd, address); 895 if (!p4d_present(*p4d)) 896 goto out; 897 898 pud = pud_offset(p4d, address); 899 if (!pud_present(*pud)) 900 goto out; 901 902 pmd = pmd_offset(pud, address); 903 out: 904 return pmd; 905 } 906 907 struct folio_referenced_arg { 908 int mapcount; 909 int referenced; 910 vm_flags_t vm_flags; 911 struct mem_cgroup *memcg; 912 }; 913 914 /* 915 * arg: folio_referenced_arg will be passed 916 */ 917 static bool folio_referenced_one(struct folio *folio, 918 struct vm_area_struct *vma, unsigned long address, void *arg) 919 { 920 struct folio_referenced_arg *pra = arg; 921 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 922 int ptes = 0, referenced = 0; 923 unsigned int nr; 924 925 while (page_vma_mapped_walk(&pvmw)) { 926 address = pvmw.address; 927 nr = 1; 928 929 if (vma->vm_flags & VM_LOCKED) { 930 ptes++; 931 pra->mapcount--; 932 933 /* Only mlock fully mapped pages */ 934 if (pvmw.pte && ptes != pvmw.nr_pages) 935 continue; 936 937 /* 938 * All PTEs must be protected by page table lock in 939 * order to mlock the page. 940 * 941 * If page table boundary has been cross, current ptl 942 * only protect part of ptes. 943 */ 944 if (pvmw.flags & PVMW_PGTABLE_CROSSED) 945 continue; 946 947 /* Restore the mlock which got missed */ 948 mlock_vma_folio(folio, vma); 949 page_vma_mapped_walk_done(&pvmw); 950 pra->vm_flags |= VM_LOCKED; 951 return false; /* To break the loop */ 952 } 953 954 /* 955 * Skip the non-shared swapbacked folio mapped solely by 956 * the exiting or OOM-reaped process. This avoids redundant 957 * swap-out followed by an immediate unmap. 958 */ 959 if ((!atomic_read(&vma->vm_mm->mm_users) || 960 check_stable_address_space(vma->vm_mm)) && 961 folio_test_anon(folio) && folio_test_swapbacked(folio) && 962 !folio_maybe_mapped_shared(folio)) { 963 pra->referenced = -1; 964 page_vma_mapped_walk_done(&pvmw); 965 return false; 966 } 967 968 if (lru_gen_enabled() && pvmw.pte) { 969 if (lru_gen_look_around(&pvmw)) 970 referenced++; 971 } else if (pvmw.pte) { 972 if (folio_test_large(folio)) { 973 unsigned long end_addr = pmd_addr_end(address, vma->vm_end); 974 unsigned int max_nr = (end_addr - address) >> PAGE_SHIFT; 975 pte_t pteval = ptep_get(pvmw.pte); 976 977 nr = folio_pte_batch(folio, pvmw.pte, 978 pteval, max_nr); 979 } 980 981 ptes += nr; 982 if (clear_flush_young_ptes_notify(vma, address, pvmw.pte, nr)) 983 referenced++; 984 /* Skip the batched PTEs */ 985 pvmw.pte += nr - 1; 986 pvmw.address += (nr - 1) * PAGE_SIZE; 987 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { 988 if (pmdp_clear_flush_young_notify(vma, address, 989 pvmw.pmd)) 990 referenced++; 991 } else { 992 /* unexpected pmd-mapped folio? */ 993 WARN_ON_ONCE(1); 994 } 995 996 pra->mapcount -= nr; 997 /* 998 * If we are sure that we batched the entire folio, 999 * we can just optimize and stop right here. 
1000 */ 1001 if (ptes == pvmw.nr_pages) { 1002 page_vma_mapped_walk_done(&pvmw); 1003 break; 1004 } 1005 } 1006 1007 if (referenced) 1008 folio_clear_idle(folio); 1009 if (folio_test_clear_young(folio)) 1010 referenced++; 1011 1012 if (referenced) { 1013 pra->referenced++; 1014 pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; 1015 } 1016 1017 if (!pra->mapcount) 1018 return false; /* To break the loop */ 1019 1020 return true; 1021 } 1022 1023 static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg) 1024 { 1025 struct folio_referenced_arg *pra = arg; 1026 struct mem_cgroup *memcg = pra->memcg; 1027 1028 /* 1029 * Ignore references from this mapping if it has no recency. If the 1030 * folio has been used in another mapping, we will catch it; if this 1031 * other mapping is already gone, the unmap path will have set the 1032 * referenced flag or activated the folio in zap_pte_range(). 1033 */ 1034 if (!vma_has_recency(vma)) 1035 return true; 1036 1037 /* 1038 * If we are reclaiming on behalf of a cgroup, skip counting on behalf 1039 * of references from different cgroups. 1040 */ 1041 if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) 1042 return true; 1043 1044 return false; 1045 } 1046 1047 /** 1048 * folio_referenced() - Test if the folio was referenced. 1049 * @folio: The folio to test. 1050 * @is_locked: Caller holds lock on the folio. 1051 * @memcg: target memory cgroup 1052 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio. 1053 * 1054 * Quick test_and_clear_referenced for all mappings of a folio, 1055 * 1056 * Return: The number of mappings which referenced the folio. Return -1 if 1057 * the function bailed out due to rmap lock contention. 1058 */ 1059 int folio_referenced(struct folio *folio, int is_locked, 1060 struct mem_cgroup *memcg, vm_flags_t *vm_flags) 1061 { 1062 bool we_locked = false; 1063 struct folio_referenced_arg pra = { 1064 .mapcount = folio_mapcount(folio), 1065 .memcg = memcg, 1066 }; 1067 struct rmap_walk_control rwc = { 1068 .rmap_one = folio_referenced_one, 1069 .arg = (void *)&pra, 1070 .anon_lock = folio_lock_anon_vma_read, 1071 .try_lock = true, 1072 .invalid_vma = invalid_folio_referenced_vma, 1073 }; 1074 1075 *vm_flags = 0; 1076 if (!pra.mapcount) 1077 return 0; 1078 1079 if (!folio_raw_mapping(folio)) 1080 return 0; 1081 1082 if (!is_locked) { 1083 we_locked = folio_trylock(folio); 1084 if (!we_locked) 1085 return 1; 1086 } 1087 1088 rmap_walk(folio, &rwc); 1089 *vm_flags = pra.vm_flags; 1090 1091 if (we_locked) 1092 folio_unlock(folio); 1093 1094 return rwc.contended ? -1 : pra.referenced; 1095 } 1096 1097 static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw) 1098 { 1099 int cleaned = 0; 1100 struct vm_area_struct *vma = pvmw->vma; 1101 struct mmu_notifier_range range; 1102 unsigned long address = pvmw->address; 1103 1104 /* 1105 * We have to assume the worse case ie pmd for invalidation. Note that 1106 * the folio can not be freed from this function. 1107 */ 1108 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0, 1109 vma->vm_mm, address, vma_address_end(pvmw)); 1110 mmu_notifier_invalidate_range_start(&range); 1111 1112 while (page_vma_mapped_walk(pvmw)) { 1113 int ret = 0; 1114 1115 address = pvmw->address; 1116 if (pvmw->pte) { 1117 pte_t *pte = pvmw->pte; 1118 pte_t entry = ptep_get(pte); 1119 1120 /* 1121 * PFN swap PTEs, such as device-exclusive ones, that 1122 * actually map pages are clean and not writable from a 1123 * CPU perspective. 
The MMU notifier takes care of any 1124 * device aspects. 1125 */ 1126 if (!pte_present(entry)) 1127 continue; 1128 if (!pte_dirty(entry) && !pte_write(entry)) 1129 continue; 1130 1131 flush_cache_page(vma, address, pte_pfn(entry)); 1132 entry = ptep_clear_flush(vma, address, pte); 1133 entry = pte_wrprotect(entry); 1134 entry = pte_mkclean(entry); 1135 set_pte_at(vma->vm_mm, address, pte, entry); 1136 ret = 1; 1137 } else { 1138 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1139 pmd_t *pmd = pvmw->pmd; 1140 pmd_t entry = pmdp_get(pmd); 1141 1142 /* 1143 * Please see the comment above (!pte_present). 1144 * A non present PMD is not writable from a CPU 1145 * perspective. 1146 */ 1147 if (!pmd_present(entry)) 1148 continue; 1149 if (!pmd_dirty(entry) && !pmd_write(entry)) 1150 continue; 1151 1152 flush_cache_range(vma, address, 1153 address + HPAGE_PMD_SIZE); 1154 entry = pmdp_invalidate(vma, address, pmd); 1155 entry = pmd_wrprotect(entry); 1156 entry = pmd_mkclean(entry); 1157 set_pmd_at(vma->vm_mm, address, pmd, entry); 1158 ret = 1; 1159 #else 1160 /* unexpected pmd-mapped folio? */ 1161 WARN_ON_ONCE(1); 1162 #endif 1163 } 1164 1165 if (ret) 1166 cleaned++; 1167 } 1168 1169 mmu_notifier_invalidate_range_end(&range); 1170 1171 return cleaned; 1172 } 1173 1174 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, 1175 unsigned long address, void *arg) 1176 { 1177 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); 1178 int *cleaned = arg; 1179 1180 *cleaned += page_vma_mkclean_one(&pvmw); 1181 1182 return true; 1183 } 1184 1185 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) 1186 { 1187 if (vma->vm_flags & VM_SHARED) 1188 return false; 1189 1190 return true; 1191 } 1192 1193 int folio_mkclean(struct folio *folio) 1194 { 1195 int cleaned = 0; 1196 struct address_space *mapping; 1197 struct rmap_walk_control rwc = { 1198 .arg = (void *)&cleaned, 1199 .rmap_one = page_mkclean_one, 1200 .invalid_vma = invalid_mkclean_vma, 1201 }; 1202 1203 BUG_ON(!folio_test_locked(folio)); 1204 1205 if (!folio_mapped(folio)) 1206 return 0; 1207 1208 mapping = folio_mapping(folio); 1209 if (!mapping) 1210 return 0; 1211 1212 rmap_walk(folio, &rwc); 1213 1214 return cleaned; 1215 } 1216 EXPORT_SYMBOL_GPL(folio_mkclean); 1217 1218 struct wrprotect_file_state { 1219 int cleaned; 1220 pgoff_t pgoff; 1221 unsigned long pfn; 1222 unsigned long nr_pages; 1223 }; 1224 1225 static bool mapping_wrprotect_range_one(struct folio *folio, 1226 struct vm_area_struct *vma, unsigned long address, void *arg) 1227 { 1228 struct wrprotect_file_state *state = (struct wrprotect_file_state *)arg; 1229 struct page_vma_mapped_walk pvmw = { 1230 .pfn = state->pfn, 1231 .nr_pages = state->nr_pages, 1232 .pgoff = state->pgoff, 1233 .vma = vma, 1234 .address = address, 1235 .flags = PVMW_SYNC, 1236 }; 1237 1238 state->cleaned += page_vma_mkclean_one(&pvmw); 1239 1240 return true; 1241 } 1242 1243 static void __rmap_walk_file(struct folio *folio, struct address_space *mapping, 1244 pgoff_t pgoff_start, unsigned long nr_pages, 1245 struct rmap_walk_control *rwc, bool locked); 1246 1247 /** 1248 * mapping_wrprotect_range() - Write-protect all mappings in a specified range. 1249 * 1250 * @mapping: The mapping whose reverse mapping should be traversed. 1251 * @pgoff: The page offset at which @pfn is mapped within @mapping. 1252 * @pfn: The PFN of the page mapped in @mapping at @pgoff. 1253 * @nr_pages: The number of physically contiguous base pages spanned. 
1254 * 1255 * Traverses the reverse mapping, finding all VMAs which contain a shared 1256 * mapping of the pages in the specified range in @mapping, and write-protects 1257 * them (that is, updates the page tables to mark the mappings read-only such 1258 * that a write protection fault arises when the mappings are written to). 1259 * 1260 * The @pfn value need not refer to a folio, but rather can reference a kernel 1261 * allocation which is mapped into userland. We therefore do not require that 1262 * the page maps to a folio with a valid mapping or index field, rather the 1263 * caller specifies these in @mapping and @pgoff. 1264 * 1265 * Return: the number of write-protected PTEs, or an error. 1266 */ 1267 int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff, 1268 unsigned long pfn, unsigned long nr_pages) 1269 { 1270 struct wrprotect_file_state state = { 1271 .cleaned = 0, 1272 .pgoff = pgoff, 1273 .pfn = pfn, 1274 .nr_pages = nr_pages, 1275 }; 1276 struct rmap_walk_control rwc = { 1277 .arg = (void *)&state, 1278 .rmap_one = mapping_wrprotect_range_one, 1279 .invalid_vma = invalid_mkclean_vma, 1280 }; 1281 1282 if (!mapping) 1283 return 0; 1284 1285 __rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc, 1286 /* locked = */false); 1287 1288 return state.cleaned; 1289 } 1290 EXPORT_SYMBOL_GPL(mapping_wrprotect_range); 1291 1292 /** 1293 * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of 1294 * [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff) 1295 * within the @vma of shared mappings. And since clean PTEs 1296 * should also be readonly, write protects them too. 1297 * @pfn: start pfn. 1298 * @nr_pages: number of physically contiguous pages srarting with @pfn. 1299 * @pgoff: page offset that the @pfn mapped with. 1300 * @vma: vma that @pfn mapped within. 1301 * 1302 * Returns the number of cleaned PTEs (including PMDs). 1303 */ 1304 int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, 1305 struct vm_area_struct *vma) 1306 { 1307 struct page_vma_mapped_walk pvmw = { 1308 .pfn = pfn, 1309 .nr_pages = nr_pages, 1310 .pgoff = pgoff, 1311 .vma = vma, 1312 .flags = PVMW_SYNC, 1313 }; 1314 1315 if (invalid_mkclean_vma(vma, NULL)) 1316 return 0; 1317 1318 pvmw.address = vma_address(vma, pgoff, nr_pages); 1319 VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); 1320 1321 return page_vma_mkclean_one(&pvmw); 1322 } 1323 1324 static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped) 1325 { 1326 int idx; 1327 1328 if (nr) { 1329 idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED; 1330 lruvec_stat_mod_folio(folio, idx, nr); 1331 } 1332 if (nr_pmdmapped) { 1333 if (folio_test_anon(folio)) { 1334 idx = NR_ANON_THPS; 1335 lruvec_stat_mod_folio(folio, idx, nr_pmdmapped); 1336 } else { 1337 /* NR_*_PMDMAPPED are not maintained per-memcg */ 1338 idx = folio_test_swapbacked(folio) ? 
1339 NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED; 1340 __mod_node_page_state(folio_pgdat(folio), idx, 1341 nr_pmdmapped); 1342 } 1343 } 1344 } 1345 1346 static __always_inline void __folio_add_rmap(struct folio *folio, 1347 struct page *page, int nr_pages, struct vm_area_struct *vma, 1348 enum pgtable_level level) 1349 { 1350 atomic_t *mapped = &folio->_nr_pages_mapped; 1351 const int orig_nr_pages = nr_pages; 1352 int first = 0, nr = 0, nr_pmdmapped = 0; 1353 1354 __folio_rmap_sanity_checks(folio, page, nr_pages, level); 1355 1356 switch (level) { 1357 case PGTABLE_LEVEL_PTE: 1358 if (!folio_test_large(folio)) { 1359 nr = atomic_inc_and_test(&folio->_mapcount); 1360 break; 1361 } 1362 1363 if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { 1364 nr = folio_add_return_large_mapcount(folio, orig_nr_pages, vma); 1365 if (nr == orig_nr_pages) 1366 /* Was completely unmapped. */ 1367 nr = folio_large_nr_pages(folio); 1368 else 1369 nr = 0; 1370 break; 1371 } 1372 1373 do { 1374 first += atomic_inc_and_test(&page->_mapcount); 1375 } while (page++, --nr_pages > 0); 1376 1377 if (first && 1378 atomic_add_return_relaxed(first, mapped) < ENTIRELY_MAPPED) 1379 nr = first; 1380 1381 folio_add_large_mapcount(folio, orig_nr_pages, vma); 1382 break; 1383 case PGTABLE_LEVEL_PMD: 1384 case PGTABLE_LEVEL_PUD: 1385 first = atomic_inc_and_test(&folio->_entire_mapcount); 1386 if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { 1387 if (level == PGTABLE_LEVEL_PMD && first) 1388 nr_pmdmapped = folio_large_nr_pages(folio); 1389 nr = folio_inc_return_large_mapcount(folio, vma); 1390 if (nr == 1) 1391 /* Was completely unmapped. */ 1392 nr = folio_large_nr_pages(folio); 1393 else 1394 nr = 0; 1395 break; 1396 } 1397 1398 if (first) { 1399 nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped); 1400 if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) { 1401 nr_pages = folio_large_nr_pages(folio); 1402 /* 1403 * We only track PMD mappings of PMD-sized 1404 * folios separately. 1405 */ 1406 if (level == PGTABLE_LEVEL_PMD) 1407 nr_pmdmapped = nr_pages; 1408 nr = nr_pages - (nr & FOLIO_PAGES_MAPPED); 1409 /* Raced ahead of a remove and another add? */ 1410 if (unlikely(nr < 0)) 1411 nr = 0; 1412 } else { 1413 /* Raced ahead of a remove of ENTIRELY_MAPPED */ 1414 nr = 0; 1415 } 1416 } 1417 folio_inc_large_mapcount(folio, vma); 1418 break; 1419 default: 1420 BUILD_BUG(); 1421 } 1422 __folio_mod_stat(folio, nr, nr_pmdmapped); 1423 } 1424 1425 /** 1426 * folio_move_anon_rmap - move a folio to our anon_vma 1427 * @folio: The folio to move to our anon_vma 1428 * @vma: The vma the folio belongs to 1429 * 1430 * When a folio belongs exclusively to one process after a COW event, 1431 * that folio can be moved into the anon_vma that belongs to just that 1432 * process, so the rmap code will not search the parent or sibling processes. 1433 */ 1434 void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma) 1435 { 1436 void *anon_vma = vma->anon_vma; 1437 1438 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 1439 VM_BUG_ON_VMA(!anon_vma, vma); 1440 1441 anon_vma += FOLIO_MAPPING_ANON; 1442 /* 1443 * Ensure that anon_vma and the FOLIO_MAPPING_ANON bit are written 1444 * simultaneously, so a concurrent reader (eg folio_referenced()'s 1445 * folio_test_anon()) will not see one without the other. 1446 */ 1447 WRITE_ONCE(folio->mapping, anon_vma); 1448 } 1449 1450 /** 1451 * __folio_set_anon - set up a new anonymous rmap for a folio 1452 * @folio: The folio to set up the new anonymous rmap for. 1453 * @vma: VM area to add the folio to. 
1454 * @address: User virtual address of the mapping 1455 * @exclusive: Whether the folio is exclusive to the process. 1456 */ 1457 static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma, 1458 unsigned long address, bool exclusive) 1459 { 1460 struct anon_vma *anon_vma = vma->anon_vma; 1461 1462 BUG_ON(!anon_vma); 1463 1464 /* 1465 * If the folio isn't exclusive to this vma, we must use the _oldest_ 1466 * possible anon_vma for the folio mapping! 1467 */ 1468 if (!exclusive) 1469 anon_vma = anon_vma->root; 1470 1471 /* 1472 * page_idle does a lockless/optimistic rmap scan on folio->mapping. 1473 * Make sure the compiler doesn't split the stores of anon_vma and 1474 * the FOLIO_MAPPING_ANON type identifier, otherwise the rmap code 1475 * could mistake the mapping for a struct address_space and crash. 1476 */ 1477 anon_vma = (void *) anon_vma + FOLIO_MAPPING_ANON; 1478 WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); 1479 folio->index = linear_page_index(vma, address); 1480 } 1481 1482 /** 1483 * __page_check_anon_rmap - sanity check anonymous rmap addition 1484 * @folio: The folio containing @page. 1485 * @page: the page to check the mapping of 1486 * @vma: the vm area in which the mapping is added 1487 * @address: the user virtual address mapped 1488 */ 1489 static void __page_check_anon_rmap(const struct folio *folio, 1490 const struct page *page, struct vm_area_struct *vma, 1491 unsigned long address) 1492 { 1493 /* 1494 * The page's anon-rmap details (mapping and index) are guaranteed to 1495 * be set up correctly at this point. 1496 * 1497 * We have exclusion against folio_add_anon_rmap_*() because the caller 1498 * always holds the page locked. 1499 * 1500 * We have exclusion against folio_add_new_anon_rmap because those pages 1501 * are initially only visible via the pagetables, and the pte is locked 1502 * over the call to folio_add_new_anon_rmap. 1503 */ 1504 VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, 1505 folio); 1506 VM_BUG_ON_PAGE(page_pgoff(folio, page) != linear_page_index(vma, address), 1507 page); 1508 } 1509 1510 static __always_inline void __folio_add_anon_rmap(struct folio *folio, 1511 struct page *page, int nr_pages, struct vm_area_struct *vma, 1512 unsigned long address, rmap_t flags, enum pgtable_level level) 1513 { 1514 int i; 1515 1516 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); 1517 1518 __folio_add_rmap(folio, page, nr_pages, vma, level); 1519 1520 if (likely(!folio_test_ksm(folio))) 1521 __page_check_anon_rmap(folio, page, vma, address); 1522 1523 if (flags & RMAP_EXCLUSIVE) { 1524 switch (level) { 1525 case PGTABLE_LEVEL_PTE: 1526 for (i = 0; i < nr_pages; i++) 1527 SetPageAnonExclusive(page + i); 1528 break; 1529 case PGTABLE_LEVEL_PMD: 1530 SetPageAnonExclusive(page); 1531 break; 1532 case PGTABLE_LEVEL_PUD: 1533 /* 1534 * Keep the compiler happy, we don't support anonymous 1535 * PUD mappings. 1536 */ 1537 WARN_ON_ONCE(1); 1538 break; 1539 default: 1540 BUILD_BUG(); 1541 } 1542 } 1543 1544 VM_WARN_ON_FOLIO(!folio_test_large(folio) && PageAnonExclusive(page) && 1545 atomic_read(&folio->_mapcount) > 0, folio); 1546 for (i = 0; i < nr_pages; i++) { 1547 struct page *cur_page = page + i; 1548 1549 VM_WARN_ON_FOLIO(folio_test_large(folio) && 1550 folio_entire_mapcount(folio) > 1 && 1551 PageAnonExclusive(cur_page), folio); 1552 if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) 1553 continue; 1554 1555 /* 1556 * While PTE-mapping a THP we have a PMD and a PTE 1557 * mapping. 
1558 */ 1559 VM_WARN_ON_FOLIO(atomic_read(&cur_page->_mapcount) > 0 && 1560 PageAnonExclusive(cur_page), folio); 1561 } 1562 1563 /* 1564 * Only mlock it if the folio is fully mapped to the VMA. 1565 * 1566 * Partially mapped folios can be split on reclaim and part outside 1567 * of mlocked VMA can be evicted or freed. 1568 */ 1569 if (folio_nr_pages(folio) == nr_pages) 1570 mlock_vma_folio(folio, vma); 1571 } 1572 1573 /** 1574 * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio 1575 * @folio: The folio to add the mappings to 1576 * @page: The first page to add 1577 * @nr_pages: The number of pages which will be mapped 1578 * @vma: The vm area in which the mappings are added 1579 * @address: The user virtual address of the first page to map 1580 * @flags: The rmap flags 1581 * 1582 * The page range of folio is defined by [first_page, first_page + nr_pages) 1583 * 1584 * The caller needs to hold the page table lock, and the page must be locked in 1585 * the anon_vma case: to serialize mapping,index checking after setting, 1586 * and to ensure that an anon folio is not being upgraded racily to a KSM folio 1587 * (but KSM folios are never downgraded). 1588 */ 1589 void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page, 1590 int nr_pages, struct vm_area_struct *vma, unsigned long address, 1591 rmap_t flags) 1592 { 1593 __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags, 1594 PGTABLE_LEVEL_PTE); 1595 } 1596 1597 /** 1598 * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio 1599 * @folio: The folio to add the mapping to 1600 * @page: The first page to add 1601 * @vma: The vm area in which the mapping is added 1602 * @address: The user virtual address of the first page to map 1603 * @flags: The rmap flags 1604 * 1605 * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR) 1606 * 1607 * The caller needs to hold the page table lock, and the page must be locked in 1608 * the anon_vma case: to serialize mapping,index checking after setting. 1609 */ 1610 void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page, 1611 struct vm_area_struct *vma, unsigned long address, rmap_t flags) 1612 { 1613 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1614 __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags, 1615 PGTABLE_LEVEL_PMD); 1616 #else 1617 WARN_ON_ONCE(true); 1618 #endif 1619 } 1620 1621 /** 1622 * folio_add_new_anon_rmap - Add mapping to a new anonymous folio. 1623 * @folio: The folio to add the mapping to. 1624 * @vma: the vm area in which the mapping is added 1625 * @address: the user virtual address mapped 1626 * @flags: The rmap flags 1627 * 1628 * Like folio_add_anon_rmap_*() but must only be called on *new* folios. 1629 * This means the inc-and-test can be bypassed. 1630 * The folio doesn't necessarily need to be locked while it's exclusive 1631 * unless two threads map it concurrently. However, the folio must be 1632 * locked if it's shared. 1633 * 1634 * If the folio is pmd-mappable, it is accounted as a THP. 1635 */ 1636 void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, 1637 unsigned long address, rmap_t flags) 1638 { 1639 const bool exclusive = flags & RMAP_EXCLUSIVE; 1640 int nr = 1, nr_pmdmapped = 0; 1641 1642 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); 1643 VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio); 1644 1645 /* 1646 * VM_DROPPABLE mappings don't swap; instead they're just dropped when 1647 * under memory pressure. 
1648 */ 1649 if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE)) 1650 __folio_set_swapbacked(folio); 1651 __folio_set_anon(folio, vma, address, exclusive); 1652 1653 if (likely(!folio_test_large(folio))) { 1654 /* increment count (starts at -1) */ 1655 atomic_set(&folio->_mapcount, 0); 1656 if (exclusive) 1657 SetPageAnonExclusive(&folio->page); 1658 } else if (!folio_test_pmd_mappable(folio)) { 1659 int i; 1660 1661 nr = folio_large_nr_pages(folio); 1662 for (i = 0; i < nr; i++) { 1663 struct page *page = folio_page(folio, i); 1664 1665 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) 1666 /* increment count (starts at -1) */ 1667 atomic_set(&page->_mapcount, 0); 1668 if (exclusive) 1669 SetPageAnonExclusive(page); 1670 } 1671 1672 folio_set_large_mapcount(folio, nr, vma); 1673 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) 1674 atomic_set(&folio->_nr_pages_mapped, nr); 1675 } else { 1676 nr = folio_large_nr_pages(folio); 1677 /* increment count (starts at -1) */ 1678 atomic_set(&folio->_entire_mapcount, 0); 1679 folio_set_large_mapcount(folio, 1, vma); 1680 if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) 1681 atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); 1682 if (exclusive) 1683 SetPageAnonExclusive(&folio->page); 1684 nr_pmdmapped = nr; 1685 } 1686 1687 VM_WARN_ON_ONCE(address < vma->vm_start || 1688 address + (nr << PAGE_SHIFT) > vma->vm_end); 1689 1690 __folio_mod_stat(folio, nr, nr_pmdmapped); 1691 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); 1692 } 1693 1694 static __always_inline void __folio_add_file_rmap(struct folio *folio, 1695 struct page *page, int nr_pages, struct vm_area_struct *vma, 1696 enum pgtable_level level) 1697 { 1698 VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); 1699 1700 __folio_add_rmap(folio, page, nr_pages, vma, level); 1701 1702 /* 1703 * Only mlock it if the folio is fully mapped to the VMA. 1704 * 1705 * Partially mapped folios can be split on reclaim and part outside 1706 * of mlocked VMA can be evicted or freed. 1707 */ 1708 if (folio_nr_pages(folio) == nr_pages) 1709 mlock_vma_folio(folio, vma); 1710 } 1711 1712 /** 1713 * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio 1714 * @folio: The folio to add the mappings to 1715 * @page: The first page to add 1716 * @nr_pages: The number of pages that will be mapped using PTEs 1717 * @vma: The vm area in which the mappings are added 1718 * 1719 * The page range of the folio is defined by [page, page + nr_pages) 1720 * 1721 * The caller needs to hold the page table lock. 1722 */ 1723 void folio_add_file_rmap_ptes(struct folio *folio, struct page *page, 1724 int nr_pages, struct vm_area_struct *vma) 1725 { 1726 __folio_add_file_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE); 1727 } 1728 1729 /** 1730 * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio 1731 * @folio: The folio to add the mapping to 1732 * @page: The first page to add 1733 * @vma: The vm area in which the mapping is added 1734 * 1735 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) 1736 * 1737 * The caller needs to hold the page table lock. 
1738 */ 1739 void folio_add_file_rmap_pmd(struct folio *folio, struct page *page, 1740 struct vm_area_struct *vma) 1741 { 1742 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1743 __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD); 1744 #else 1745 WARN_ON_ONCE(true); 1746 #endif 1747 } 1748 1749 /** 1750 * folio_add_file_rmap_pud - add a PUD mapping to a page range of a folio 1751 * @folio: The folio to add the mapping to 1752 * @page: The first page to add 1753 * @vma: The vm area in which the mapping is added 1754 * 1755 * The page range of the folio is defined by [page, page + HPAGE_PUD_NR) 1756 * 1757 * The caller needs to hold the page table lock. 1758 */ 1759 void folio_add_file_rmap_pud(struct folio *folio, struct page *page, 1760 struct vm_area_struct *vma) 1761 { 1762 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ 1763 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 1764 __folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD); 1765 #else 1766 WARN_ON_ONCE(true); 1767 #endif 1768 } 1769 1770 static __always_inline void __folio_remove_rmap(struct folio *folio, 1771 struct page *page, int nr_pages, struct vm_area_struct *vma, 1772 enum pgtable_level level) 1773 { 1774 atomic_t *mapped = &folio->_nr_pages_mapped; 1775 int last = 0, nr = 0, nr_pmdmapped = 0; 1776 bool partially_mapped = false; 1777 1778 __folio_rmap_sanity_checks(folio, page, nr_pages, level); 1779 1780 switch (level) { 1781 case PGTABLE_LEVEL_PTE: 1782 if (!folio_test_large(folio)) { 1783 nr = atomic_add_negative(-1, &folio->_mapcount); 1784 break; 1785 } 1786 1787 if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { 1788 nr = folio_sub_return_large_mapcount(folio, nr_pages, vma); 1789 if (!nr) { 1790 /* Now completely unmapped. */ 1791 nr = folio_large_nr_pages(folio); 1792 } else { 1793 partially_mapped = nr < folio_large_nr_pages(folio) && 1794 !folio_entire_mapcount(folio); 1795 nr = 0; 1796 } 1797 break; 1798 } 1799 1800 folio_sub_large_mapcount(folio, nr_pages, vma); 1801 do { 1802 last += atomic_add_negative(-1, &page->_mapcount); 1803 } while (page++, --nr_pages > 0); 1804 1805 if (last && 1806 atomic_sub_return_relaxed(last, mapped) < ENTIRELY_MAPPED) 1807 nr = last; 1808 1809 partially_mapped = nr && atomic_read(mapped); 1810 break; 1811 case PGTABLE_LEVEL_PMD: 1812 case PGTABLE_LEVEL_PUD: 1813 if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { 1814 last = atomic_add_negative(-1, &folio->_entire_mapcount); 1815 if (level == PGTABLE_LEVEL_PMD && last) 1816 nr_pmdmapped = folio_large_nr_pages(folio); 1817 nr = folio_dec_return_large_mapcount(folio, vma); 1818 if (!nr) { 1819 /* Now completely unmapped. */ 1820 nr = folio_large_nr_pages(folio); 1821 } else { 1822 partially_mapped = last && 1823 nr < folio_large_nr_pages(folio); 1824 nr = 0; 1825 } 1826 break; 1827 } 1828 1829 folio_dec_large_mapcount(folio, vma); 1830 last = atomic_add_negative(-1, &folio->_entire_mapcount); 1831 if (last) { 1832 nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped); 1833 if (likely(nr < ENTIRELY_MAPPED)) { 1834 nr_pages = folio_large_nr_pages(folio); 1835 if (level == PGTABLE_LEVEL_PMD) 1836 nr_pmdmapped = nr_pages; 1837 nr = nr_pages - nr; 1838 /* Raced ahead of another remove and an add? 
*/ 1839 if (unlikely(nr < 0)) 1840 nr = 0; 1841 } else { 1842 /* An add of ENTIRELY_MAPPED raced ahead */ 1843 nr = 0; 1844 } 1845 } 1846 1847 partially_mapped = nr && nr < nr_pmdmapped; 1848 break; 1849 default: 1850 BUILD_BUG(); 1851 } 1852 1853 /* 1854 * Queue anon large folio for deferred split if at least one page of 1855 * the folio is unmapped and at least one page is still mapped. 1856 * 1857 * Check partially_mapped first to ensure it is a large folio. 1858 * 1859 * Device private folios do not support deferred splitting and 1860 * shrinker based scanning of the folios to free. 1861 */ 1862 if (partially_mapped && folio_test_anon(folio) && 1863 !folio_test_partially_mapped(folio) && 1864 !folio_is_device_private(folio)) 1865 deferred_split_folio(folio, true); 1866 1867 __folio_mod_stat(folio, -nr, -nr_pmdmapped); 1868 1869 /* 1870 * It would be tidy to reset folio_test_anon mapping when fully 1871 * unmapped, but that might overwrite a racing folio_add_anon_rmap_*() 1872 * which increments mapcount after us but sets mapping before us: 1873 * so leave the reset to free_pages_prepare, and remember that 1874 * it's only reliable while mapped. 1875 */ 1876 1877 munlock_vma_folio(folio, vma); 1878 } 1879 1880 /** 1881 * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio 1882 * @folio: The folio to remove the mappings from 1883 * @page: The first page to remove 1884 * @nr_pages: The number of pages that will be removed from the mapping 1885 * @vma: The vm area from which the mappings are removed 1886 * 1887 * The page range of the folio is defined by [page, page + nr_pages) 1888 * 1889 * The caller needs to hold the page table lock. 1890 */ 1891 void folio_remove_rmap_ptes(struct folio *folio, struct page *page, 1892 int nr_pages, struct vm_area_struct *vma) 1893 { 1894 __folio_remove_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE); 1895 } 1896 1897 /** 1898 * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio 1899 * @folio: The folio to remove the mapping from 1900 * @page: The first page to remove 1901 * @vma: The vm area from which the mapping is removed 1902 * 1903 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) 1904 * 1905 * The caller needs to hold the page table lock. 1906 */ 1907 void folio_remove_rmap_pmd(struct folio *folio, struct page *page, 1908 struct vm_area_struct *vma) 1909 { 1910 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1911 __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD); 1912 #else 1913 WARN_ON_ONCE(true); 1914 #endif 1915 } 1916 1917 /** 1918 * folio_remove_rmap_pud - remove a PUD mapping from a page range of a folio 1919 * @folio: The folio to remove the mapping from 1920 * @page: The first page to remove 1921 * @vma: The vm area from which the mapping is removed 1922 * 1923 * The page range of the folio is defined by [page, page + HPAGE_PUD_NR) 1924 * 1925 * The caller needs to hold the page table lock. 
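 *
 * A minimal teardown sketch (illustrative only; @addr and @pud are
 * hypothetical locals of a caller zapping a PUD-sized DAX mapping),
 * run while holding the page table lock:
 *
 *	pudp_huge_clear_flush(vma, addr, pud);
 *	folio_remove_rmap_pud(folio, &folio->page, vma);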
1926  */
1927 void folio_remove_rmap_pud(struct folio *folio, struct page *page,
1928 		struct vm_area_struct *vma)
1929 {
1930 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
1931 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1932 	__folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD);
1933 #else
1934 	WARN_ON_ONCE(true);
1935 #endif
1936 }
1937 
1938 static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
1939 			struct page_vma_mapped_walk *pvmw,
1940 			enum ttu_flags flags, pte_t pte)
1941 {
1942 	unsigned long end_addr, addr = pvmw->address;
1943 	struct vm_area_struct *vma = pvmw->vma;
1944 	unsigned int max_nr;
1945 
1946 	if (flags & TTU_HWPOISON)
1947 		return 1;
1948 	if (!folio_test_large(folio))
1949 		return 1;
1950 
1951 	/* We may only batch within a single VMA and a single page table. */
1952 	end_addr = pmd_addr_end(addr, vma->vm_end);
1953 	max_nr = (end_addr - addr) >> PAGE_SHIFT;
1954 
1955 	/* We only support lazyfree or file folios batching for now ... */
1956 	if (folio_test_anon(folio) && folio_test_swapbacked(folio))
1957 		return 1;
1958 
1959 	if (pte_unused(pte))
1960 		return 1;
1961 
1962 	if (userfaultfd_wp(vma))
1963 		return 1;
1964 
1965 	/*
1966 	 * If unmap fails, we need to restore the ptes. To avoid accidentally
1967 	 * upgrading write permissions for ptes that were not originally
1968 	 * writable, and to avoid losing the soft-dirty bit, use the
1969 	 * appropriate FPB flags.
1970 	 */
1971 	return folio_pte_batch_flags(folio, vma, pvmw->pte, &pte, max_nr,
1972 			FPB_RESPECT_WRITE | FPB_RESPECT_SOFT_DIRTY);
1973 }
1974 
1975 /*
1976  * @arg: enum ttu_flags will be passed to this argument
1977  */
1978 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
1979 		     unsigned long address, void *arg)
1980 {
1981 	struct mm_struct *mm = vma->vm_mm;
1982 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1983 	bool anon_exclusive, ret = true;
1984 	pte_t pteval;
1985 	struct page *subpage;
1986 	struct mmu_notifier_range range;
1987 	enum ttu_flags flags = (enum ttu_flags)(long)arg;
1988 	unsigned long nr_pages = 1, end_addr;
1989 	unsigned long pfn;
1990 	unsigned long hsz = 0;
1991 	int ptes = 0;
1992 
1993 	/*
1994 	 * When racing against e.g. zap_pte_range() on another cpu,
1995 	 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
1996 	 * try_to_unmap() may return before page_mapped() has become false,
1997 	 * if page table locking is skipped: use TTU_SYNC to wait for that.
1998 	 */
1999 	if (flags & TTU_SYNC)
2000 		pvmw.flags = PVMW_SYNC;
2001 
2002 	/*
2003 	 * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
2004 	 * For hugetlb, it could be much worse if we need to do pud
2005 	 * invalidation in the case of pmd sharing.
2006 	 *
2007 	 * Note that the folio cannot be freed in this function, as the caller
2008 	 * of try_to_unmap() must hold a reference on the folio.
2009 	 */
2010 	range.end = vma_address_end(&pvmw);
2011 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2012 				address, range.end);
2013 	if (folio_test_hugetlb(folio)) {
2014 		/*
2015 		 * If sharing is possible, start and end will be adjusted
2016 		 * accordingly.
2017 		 */
2018 		adjust_range_if_pmd_sharing_possible(vma, &range.start,
2019 						     &range.end);
2020 
2021 		/* We need the huge page size for set_huge_pte_at() */
2022 		hsz = huge_page_size(hstate_vma(vma));
2023 	}
2024 	mmu_notifier_invalidate_range_start(&range);
2025 
2026 	while (page_vma_mapped_walk(&pvmw)) {
2027 		/*
2028 		 * If the folio is in an mlock()d vma, we must not swap it out.
2029 		 */
2030 		if (!(flags & TTU_IGNORE_MLOCK) &&
2031 		    (vma->vm_flags & VM_LOCKED)) {
2032 			ptes++;
2033 
2034 			/*
2035 			 * Set 'ret' to indicate the page cannot be unmapped.
2036 			 *
2037 			 * Do not jump to walk_abort immediately as additional
2038 			 * iterations might be required to detect a fully
2039 			 * mapped folio and mlock it.
2040 			 */
2041 			ret = false;
2042 
2043 			/* Only mlock fully mapped pages */
2044 			if (pvmw.pte && ptes != pvmw.nr_pages)
2045 				continue;
2046 
2047 			/*
2048 			 * All PTEs must be protected by page table lock in
2049 			 * order to mlock the page.
2050 			 *
2051 			 * If a page table boundary has been crossed, the
2052 			 * current ptl only protects part of the ptes.
2053 			 */
2054 			if (pvmw.flags & PVMW_PGTABLE_CROSSED)
2055 				goto walk_done;
2056 
2057 			/* Restore the mlock which got missed */
2058 			mlock_vma_folio(folio, vma);
2059 			goto walk_done;
2060 		}
2061 
2062 		if (!pvmw.pte) {
2063 			if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
2064 				if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio))
2065 					goto walk_done;
2066 				/*
2067 				 * unmap_huge_pmd_locked has either already marked
2068 				 * the folio as swap-backed or decided to retain it
2069 				 * due to GUP or speculative references.
2070 				 */
2071 				goto walk_abort;
2072 			}
2073 
2074 			if (flags & TTU_SPLIT_HUGE_PMD) {
2075 				/*
2076 				 * We temporarily have to drop the PTL and
2077 				 * restart so we can process the PTE-mapped THP.
2078 				 */
2079 				split_huge_pmd_locked(vma, pvmw.address,
2080 						      pvmw.pmd, false);
2081 				flags &= ~TTU_SPLIT_HUGE_PMD;
2082 				page_vma_mapped_walk_restart(&pvmw);
2083 				continue;
2084 			}
2085 		}
2086 
2087 		/* Unexpected PMD-mapped THP? */
2088 		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2089 
2090 		/*
2091 		 * Handle PFN swap PTEs, such as device-exclusive ones, that
2092 		 * actually map pages.
2093 		 */
2094 		pteval = ptep_get(pvmw.pte);
2095 		if (likely(pte_present(pteval))) {
2096 			pfn = pte_pfn(pteval);
2097 		} else {
2098 			const softleaf_t entry = softleaf_from_pte(pteval);
2099 
2100 			pfn = softleaf_to_pfn(entry);
2101 			VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
2102 		}
2103 
2104 		subpage = folio_page(folio, pfn - folio_pfn(folio));
2105 		address = pvmw.address;
2106 		anon_exclusive = folio_test_anon(folio) &&
2107 				 PageAnonExclusive(subpage);
2108 
2109 		if (folio_test_hugetlb(folio)) {
2110 			bool anon = folio_test_anon(folio);
2111 
2112 			/*
2113 			 * try_to_unmap() is only passed a hugetlb page
2114 			 * in the case where the hugetlb page is poisoned.
2115 			 */
2116 			VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage);
2117 			/*
2118 			 * huge_pmd_unshare may unmap an entire PMD page.
2119 			 * There is no way of knowing exactly which PMDs may
2120 			 * be cached for this mm, so we must flush them all.
2121 			 * start/end were already adjusted above to cover this
2122 			 * range.
2123 			 */
2124 			flush_cache_range(vma, range.start, range.end);
2125 
2126 			/*
2127 			 * To call huge_pmd_unshare, i_mmap_rwsem must be
2128 			 * held in write mode. Caller needs to explicitly
2129 			 * do this outside rmap routines.
2130 			 *
2131 			 * We also must hold hugetlb vma_lock in write mode.
2132 			 * Lock order dictates acquiring vma_lock BEFORE
2133 			 * i_mmap_rwsem. We can only trylock here and fail
2134 			 * if unsuccessful.
2135 */ 2136 if (!anon) { 2137 struct mmu_gather tlb; 2138 2139 VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 2140 if (!hugetlb_vma_trylock_write(vma)) 2141 goto walk_abort; 2142 2143 tlb_gather_mmu_vma(&tlb, vma); 2144 if (huge_pmd_unshare(&tlb, vma, address, pvmw.pte)) { 2145 hugetlb_vma_unlock_write(vma); 2146 huge_pmd_unshare_flush(&tlb, vma); 2147 tlb_finish_mmu(&tlb); 2148 /* 2149 * The PMD table was unmapped, 2150 * consequently unmapping the folio. 2151 */ 2152 goto walk_done; 2153 } 2154 hugetlb_vma_unlock_write(vma); 2155 tlb_finish_mmu(&tlb); 2156 } 2157 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); 2158 if (pte_dirty(pteval)) 2159 folio_mark_dirty(folio); 2160 } else if (likely(pte_present(pteval))) { 2161 nr_pages = folio_unmap_pte_batch(folio, &pvmw, flags, pteval); 2162 end_addr = address + nr_pages * PAGE_SIZE; 2163 flush_cache_range(vma, address, end_addr); 2164 2165 /* Nuke the page table entry. */ 2166 pteval = get_and_clear_ptes(mm, address, pvmw.pte, nr_pages); 2167 /* 2168 * We clear the PTE but do not flush so potentially 2169 * a remote CPU could still be writing to the folio. 2170 * If the entry was previously clean then the 2171 * architecture must guarantee that a clear->dirty 2172 * transition on a cached TLB entry is written through 2173 * and traps if the PTE is unmapped. 2174 */ 2175 if (should_defer_flush(mm, flags)) 2176 set_tlb_ubc_flush_pending(mm, pteval, address, end_addr); 2177 else 2178 flush_tlb_range(vma, address, end_addr); 2179 if (pte_dirty(pteval)) 2180 folio_mark_dirty(folio); 2181 } else { 2182 pte_clear(mm, address, pvmw.pte); 2183 } 2184 2185 /* 2186 * Now the pte is cleared. If this pte was uffd-wp armed, 2187 * we may want to replace a none pte with a marker pte if 2188 * it's file-backed, so we don't lose the tracking info. 2189 */ 2190 pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); 2191 2192 /* Update high watermark before we lower rss */ 2193 update_hiwater_rss(mm); 2194 2195 if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) { 2196 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 2197 if (folio_test_hugetlb(folio)) { 2198 hugetlb_count_sub(folio_nr_pages(folio), mm); 2199 set_huge_pte_at(mm, address, pvmw.pte, pteval, 2200 hsz); 2201 } else { 2202 dec_mm_counter(mm, mm_counter(folio)); 2203 set_pte_at(mm, address, pvmw.pte, pteval); 2204 } 2205 } else if (likely(pte_present(pteval)) && pte_unused(pteval) && 2206 !userfaultfd_armed(vma)) { 2207 /* 2208 * The guest indicated that the page content is of no 2209 * interest anymore. Simply discard the pte, vmscan 2210 * will take care of the rest. 2211 * A future reference will then fault in a new zero 2212 * page. When userfaultfd is active, we must not drop 2213 * this page though, as its main user (postcopy 2214 * migration) will not expect userfaults on already 2215 * copied pages. 2216 */ 2217 dec_mm_counter(mm, mm_counter(folio)); 2218 } else if (folio_test_anon(folio)) { 2219 swp_entry_t entry = page_swap_entry(subpage); 2220 pte_t swp_pte; 2221 /* 2222 * Store the swap location in the pte. 2223 * See handle_pte_fault() ... 
2224 */ 2225 if (unlikely(folio_test_swapbacked(folio) != 2226 folio_test_swapcache(folio))) { 2227 WARN_ON_ONCE(1); 2228 goto walk_abort; 2229 } 2230 2231 /* MADV_FREE page check */ 2232 if (!folio_test_swapbacked(folio)) { 2233 int ref_count, map_count; 2234 2235 /* 2236 * Synchronize with gup_pte_range(): 2237 * - clear PTE; barrier; read refcount 2238 * - inc refcount; barrier; read PTE 2239 */ 2240 smp_mb(); 2241 2242 ref_count = folio_ref_count(folio); 2243 map_count = folio_mapcount(folio); 2244 2245 /* 2246 * Order reads for page refcount and dirty flag 2247 * (see comments in __remove_mapping()). 2248 */ 2249 smp_rmb(); 2250 2251 if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { 2252 /* 2253 * redirtied either using the page table or a previously 2254 * obtained GUP reference. 2255 */ 2256 set_ptes(mm, address, pvmw.pte, pteval, nr_pages); 2257 folio_set_swapbacked(folio); 2258 goto walk_abort; 2259 } else if (ref_count != 1 + map_count) { 2260 /* 2261 * Additional reference. Could be a GUP reference or any 2262 * speculative reference. GUP users must mark the folio 2263 * dirty if there was a modification. This folio cannot be 2264 * reclaimed right now either way, so act just like nothing 2265 * happened. 2266 * We'll come back here later and detect if the folio was 2267 * dirtied when the additional reference is gone. 2268 */ 2269 set_ptes(mm, address, pvmw.pte, pteval, nr_pages); 2270 goto walk_abort; 2271 } 2272 add_mm_counter(mm, MM_ANONPAGES, -nr_pages); 2273 goto discard; 2274 } 2275 2276 if (folio_dup_swap(folio, subpage) < 0) { 2277 set_pte_at(mm, address, pvmw.pte, pteval); 2278 goto walk_abort; 2279 } 2280 2281 /* 2282 * arch_unmap_one() is expected to be a NOP on 2283 * architectures where we could have PFN swap PTEs, 2284 * so we'll not check/care. 2285 */ 2286 if (arch_unmap_one(mm, vma, address, pteval) < 0) { 2287 folio_put_swap(folio, subpage); 2288 set_pte_at(mm, address, pvmw.pte, pteval); 2289 goto walk_abort; 2290 } 2291 2292 /* See folio_try_share_anon_rmap(): clear PTE first. */ 2293 if (anon_exclusive && 2294 folio_try_share_anon_rmap_pte(folio, subpage)) { 2295 folio_put_swap(folio, subpage); 2296 set_pte_at(mm, address, pvmw.pte, pteval); 2297 goto walk_abort; 2298 } 2299 if (list_empty(&mm->mmlist)) { 2300 spin_lock(&mmlist_lock); 2301 if (list_empty(&mm->mmlist)) 2302 list_add(&mm->mmlist, &init_mm.mmlist); 2303 spin_unlock(&mmlist_lock); 2304 } 2305 dec_mm_counter(mm, MM_ANONPAGES); 2306 inc_mm_counter(mm, MM_SWAPENTS); 2307 swp_pte = swp_entry_to_pte(entry); 2308 if (anon_exclusive) 2309 swp_pte = pte_swp_mkexclusive(swp_pte); 2310 if (likely(pte_present(pteval))) { 2311 if (pte_soft_dirty(pteval)) 2312 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2313 if (pte_uffd_wp(pteval)) 2314 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2315 } else { 2316 if (pte_swp_soft_dirty(pteval)) 2317 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2318 if (pte_swp_uffd_wp(pteval)) 2319 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2320 } 2321 set_pte_at(mm, address, pvmw.pte, swp_pte); 2322 } else { 2323 /* 2324 * This is a locked file-backed folio, 2325 * so it cannot be removed from the page 2326 * cache and replaced by a new folio before 2327 * mmu_notifier_invalidate_range_end, so no 2328 * concurrent thread might update its page table 2329 * to point at a new folio while a device is 2330 * still using this folio. 
2331 			 *
2332 			 * See Documentation/mm/mmu_notifier.rst
2333 			 */
2334 			add_mm_counter(mm, mm_counter_file(folio), -nr_pages);
2335 		}
2336 discard:
2337 		if (unlikely(folio_test_hugetlb(folio))) {
2338 			hugetlb_remove_rmap(folio);
2339 		} else {
2340 			folio_remove_rmap_ptes(folio, subpage, nr_pages, vma);
2341 		}
2342 		if (vma->vm_flags & VM_LOCKED)
2343 			mlock_drain_local();
2344 		folio_put_refs(folio, nr_pages);
2345 
2346 		/*
2347 		 * If we are sure that we batched the entire folio and cleared
2348 		 * all PTEs, we can just optimize and stop right here.
2349 		 */
2350 		if (nr_pages == folio_nr_pages(folio))
2351 			goto walk_done;
2352 		continue;
2353 walk_abort:
2354 		ret = false;
2355 walk_done:
2356 		page_vma_mapped_walk_done(&pvmw);
2357 		break;
2358 	}
2359 
2360 	mmu_notifier_invalidate_range_end(&range);
2361 
2362 	return ret;
2363 }
2364 
2365 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
2366 {
2367 	return vma_is_temporary_stack(vma);
2368 }
2369 
2370 static int folio_not_mapped(struct folio *folio)
2371 {
2372 	return !folio_mapped(folio);
2373 }
2374 
2375 /**
2376  * try_to_unmap - Try to remove all page table mappings to a folio.
2377  * @folio: The folio to unmap.
2378  * @flags: action and flags
2379  *
2380  * Tries to remove all the page table entries which are mapping this
2381  * folio. It is the caller's responsibility to check if the folio is
2382  * still mapped if needed (use TTU_SYNC to prevent accounting races).
2383  *
2384  * Context: Caller must hold the folio lock.
2385  */
2386 void try_to_unmap(struct folio *folio, enum ttu_flags flags)
2387 {
2388 	struct rmap_walk_control rwc = {
2389 		.rmap_one = try_to_unmap_one,
2390 		.arg = (void *)flags,
2391 		.done = folio_not_mapped,
2392 		.anon_lock = folio_lock_anon_vma_read,
2393 	};
2394 
2395 	if (flags & TTU_RMAP_LOCKED)
2396 		rmap_walk_locked(folio, &rwc);
2397 	else
2398 		rmap_walk(folio, &rwc);
2399 }
2400 
2401 /*
2402  * @arg: enum ttu_flags will be passed to this argument.
2403  *
2404  * If TTU_SPLIT_HUGE_PMD is specified, any PMD mappings will be split into PTEs
2405  * containing migration entries.
2406  */
2407 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
2408 		     unsigned long address, void *arg)
2409 {
2410 	struct mm_struct *mm = vma->vm_mm;
2411 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
2412 	bool anon_exclusive, writable, ret = true;
2413 	pte_t pteval;
2414 	struct page *subpage;
2415 	struct mmu_notifier_range range;
2416 	enum ttu_flags flags = (enum ttu_flags)(long)arg;
2417 	unsigned long pfn;
2418 	unsigned long hsz = 0;
2419 
2420 	/*
2421 	 * When racing against e.g. zap_pte_range() on another cpu,
2422 	 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
2423 	 * try_to_migrate() may return before page_mapped() has become false,
2424 	 * if page table locking is skipped: use TTU_SYNC to wait for that.
2425 	 */
2426 	if (flags & TTU_SYNC)
2427 		pvmw.flags = PVMW_SYNC;
2428 
2429 	/*
2430 	 * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
2431 	 * For hugetlb, it could be much worse if we need to do pud
2432 	 * invalidation in the case of pmd sharing.
2433 	 *
2434 	 * Note that the folio cannot be freed in this function, as the caller
2435 	 * of try_to_migrate() must hold a reference on the folio.
2436 	 */
2437 	range.end = vma_address_end(&pvmw);
2438 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2439 				address, range.end);
2440 	if (folio_test_hugetlb(folio)) {
2441 		/*
2442 		 * If sharing is possible, start and end will be adjusted
2443 		 * accordingly.
2444 */ 2445 adjust_range_if_pmd_sharing_possible(vma, &range.start, 2446 &range.end); 2447 2448 /* We need the huge page size for set_huge_pte_at() */ 2449 hsz = huge_page_size(hstate_vma(vma)); 2450 } 2451 mmu_notifier_invalidate_range_start(&range); 2452 2453 while (page_vma_mapped_walk(&pvmw)) { 2454 /* PMD-mapped THP migration entry */ 2455 if (!pvmw.pte) { 2456 __maybe_unused unsigned long pfn; 2457 __maybe_unused pmd_t pmdval; 2458 2459 if (flags & TTU_SPLIT_HUGE_PMD) { 2460 /* 2461 * split_huge_pmd_locked() might leave the 2462 * folio mapped through PTEs. Retry the walk 2463 * so we can detect this scenario and properly 2464 * abort the walk. 2465 */ 2466 split_huge_pmd_locked(vma, pvmw.address, 2467 pvmw.pmd, true); 2468 flags &= ~TTU_SPLIT_HUGE_PMD; 2469 page_vma_mapped_walk_restart(&pvmw); 2470 continue; 2471 } 2472 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 2473 pmdval = pmdp_get(pvmw.pmd); 2474 if (likely(pmd_present(pmdval))) 2475 pfn = pmd_pfn(pmdval); 2476 else 2477 pfn = softleaf_to_pfn(softleaf_from_pmd(pmdval)); 2478 2479 subpage = folio_page(folio, pfn - folio_pfn(folio)); 2480 2481 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || 2482 !folio_test_pmd_mappable(folio), folio); 2483 2484 if (set_pmd_migration_entry(&pvmw, subpage)) { 2485 ret = false; 2486 page_vma_mapped_walk_done(&pvmw); 2487 break; 2488 } 2489 continue; 2490 #endif 2491 } 2492 2493 /* Unexpected PMD-mapped THP? */ 2494 VM_BUG_ON_FOLIO(!pvmw.pte, folio); 2495 2496 /* 2497 * Handle PFN swap PTEs, such as device-exclusive ones, that 2498 * actually map pages. 2499 */ 2500 pteval = ptep_get(pvmw.pte); 2501 if (likely(pte_present(pteval))) { 2502 pfn = pte_pfn(pteval); 2503 } else { 2504 const softleaf_t entry = softleaf_from_pte(pteval); 2505 2506 pfn = softleaf_to_pfn(entry); 2507 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); 2508 } 2509 2510 subpage = folio_page(folio, pfn - folio_pfn(folio)); 2511 address = pvmw.address; 2512 anon_exclusive = folio_test_anon(folio) && 2513 PageAnonExclusive(subpage); 2514 2515 if (folio_test_hugetlb(folio)) { 2516 bool anon = folio_test_anon(folio); 2517 2518 /* 2519 * huge_pmd_unshare may unmap an entire PMD page. 2520 * There is no way of knowing exactly which PMDs may 2521 * be cached for this mm, so we must flush them all. 2522 * start/end were already adjusted above to cover this 2523 * range. 2524 */ 2525 flush_cache_range(vma, range.start, range.end); 2526 2527 /* 2528 * To call huge_pmd_unshare, i_mmap_rwsem must be 2529 * held in write mode. Caller needs to explicitly 2530 * do this outside rmap routines. 2531 * 2532 * We also must hold hugetlb vma_lock in write mode. 2533 * Lock order dictates acquiring vma_lock BEFORE 2534 * i_mmap_rwsem. We can only try lock here and 2535 * fail if unsuccessful. 2536 */ 2537 if (!anon) { 2538 struct mmu_gather tlb; 2539 2540 VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 2541 if (!hugetlb_vma_trylock_write(vma)) { 2542 page_vma_mapped_walk_done(&pvmw); 2543 ret = false; 2544 break; 2545 } 2546 2547 tlb_gather_mmu_vma(&tlb, vma); 2548 if (huge_pmd_unshare(&tlb, vma, address, pvmw.pte)) { 2549 hugetlb_vma_unlock_write(vma); 2550 huge_pmd_unshare_flush(&tlb, vma); 2551 tlb_finish_mmu(&tlb); 2552 /* 2553 * The PMD table was unmapped, 2554 * consequently unmapping the folio. 
2555 */ 2556 page_vma_mapped_walk_done(&pvmw); 2557 break; 2558 } 2559 hugetlb_vma_unlock_write(vma); 2560 tlb_finish_mmu(&tlb); 2561 } 2562 /* Nuke the hugetlb page table entry */ 2563 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); 2564 if (pte_dirty(pteval)) 2565 folio_mark_dirty(folio); 2566 writable = pte_write(pteval); 2567 } else if (likely(pte_present(pteval))) { 2568 flush_cache_page(vma, address, pfn); 2569 /* Nuke the page table entry. */ 2570 if (should_defer_flush(mm, flags)) { 2571 /* 2572 * We clear the PTE but do not flush so potentially 2573 * a remote CPU could still be writing to the folio. 2574 * If the entry was previously clean then the 2575 * architecture must guarantee that a clear->dirty 2576 * transition on a cached TLB entry is written through 2577 * and traps if the PTE is unmapped. 2578 */ 2579 pteval = ptep_get_and_clear(mm, address, pvmw.pte); 2580 2581 set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE); 2582 } else { 2583 pteval = ptep_clear_flush(vma, address, pvmw.pte); 2584 } 2585 if (pte_dirty(pteval)) 2586 folio_mark_dirty(folio); 2587 writable = pte_write(pteval); 2588 } else { 2589 const softleaf_t entry = softleaf_from_pte(pteval); 2590 2591 pte_clear(mm, address, pvmw.pte); 2592 2593 writable = softleaf_is_device_private_write(entry); 2594 } 2595 2596 VM_WARN_ON_FOLIO(writable && folio_test_anon(folio) && 2597 !anon_exclusive, folio); 2598 2599 /* Update high watermark before we lower rss */ 2600 update_hiwater_rss(mm); 2601 2602 if (PageHWPoison(subpage)) { 2603 VM_WARN_ON_FOLIO(folio_is_device_private(folio), folio); 2604 2605 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 2606 if (folio_test_hugetlb(folio)) { 2607 hugetlb_count_sub(folio_nr_pages(folio), mm); 2608 set_huge_pte_at(mm, address, pvmw.pte, pteval, 2609 hsz); 2610 } else { 2611 dec_mm_counter(mm, mm_counter(folio)); 2612 set_pte_at(mm, address, pvmw.pte, pteval); 2613 } 2614 } else if (likely(pte_present(pteval)) && pte_unused(pteval) && 2615 !userfaultfd_armed(vma)) { 2616 /* 2617 * The guest indicated that the page content is of no 2618 * interest anymore. Simply discard the pte, vmscan 2619 * will take care of the rest. 2620 * A future reference will then fault in a new zero 2621 * page. When userfaultfd is active, we must not drop 2622 * this page though, as its main user (postcopy 2623 * migration) will not expect userfaults on already 2624 * copied pages. 2625 */ 2626 dec_mm_counter(mm, mm_counter(folio)); 2627 } else { 2628 swp_entry_t entry; 2629 pte_t swp_pte; 2630 2631 /* 2632 * arch_unmap_one() is expected to be a NOP on 2633 * architectures where we could have PFN swap PTEs, 2634 * so we'll not check/care. 2635 */ 2636 if (arch_unmap_one(mm, vma, address, pteval) < 0) { 2637 if (folio_test_hugetlb(folio)) 2638 set_huge_pte_at(mm, address, pvmw.pte, 2639 pteval, hsz); 2640 else 2641 set_pte_at(mm, address, pvmw.pte, pteval); 2642 ret = false; 2643 page_vma_mapped_walk_done(&pvmw); 2644 break; 2645 } 2646 2647 /* See folio_try_share_anon_rmap_pte(): clear PTE first. 
 */
2648 		if (folio_test_hugetlb(folio)) {
2649 			if (anon_exclusive &&
2650 			    hugetlb_try_share_anon_rmap(folio)) {
2651 				set_huge_pte_at(mm, address, pvmw.pte,
2652 						pteval, hsz);
2653 				ret = false;
2654 				page_vma_mapped_walk_done(&pvmw);
2655 				break;
2656 			}
2657 		} else if (anon_exclusive &&
2658 			   folio_try_share_anon_rmap_pte(folio, subpage)) {
2659 			set_pte_at(mm, address, pvmw.pte, pteval);
2660 			ret = false;
2661 			page_vma_mapped_walk_done(&pvmw);
2662 			break;
2663 		}
2664 
2665 		/*
2666 		 * Store the pfn of the page in a special migration
2667 		 * pte. do_swap_page() will wait until the migration
2668 		 * pte is removed and then restart fault handling.
2669 		 */
2670 		if (writable)
2671 			entry = make_writable_migration_entry(
2672 							page_to_pfn(subpage));
2673 		else if (anon_exclusive)
2674 			entry = make_readable_exclusive_migration_entry(
2675 							page_to_pfn(subpage));
2676 		else
2677 			entry = make_readable_migration_entry(
2678 							page_to_pfn(subpage));
2679 		if (likely(pte_present(pteval))) {
2680 			if (pte_young(pteval))
2681 				entry = make_migration_entry_young(entry);
2682 			if (pte_dirty(pteval))
2683 				entry = make_migration_entry_dirty(entry);
2684 			swp_pte = swp_entry_to_pte(entry);
2685 			if (pte_soft_dirty(pteval))
2686 				swp_pte = pte_swp_mksoft_dirty(swp_pte);
2687 			if (pte_uffd_wp(pteval))
2688 				swp_pte = pte_swp_mkuffd_wp(swp_pte);
2689 		} else {
2690 			swp_pte = swp_entry_to_pte(entry);
2691 			if (pte_swp_soft_dirty(pteval))
2692 				swp_pte = pte_swp_mksoft_dirty(swp_pte);
2693 			if (pte_swp_uffd_wp(pteval))
2694 				swp_pte = pte_swp_mkuffd_wp(swp_pte);
2695 		}
2696 		if (folio_test_hugetlb(folio))
2697 			set_huge_pte_at(mm, address, pvmw.pte, swp_pte,
2698 					hsz);
2699 		else
2700 			set_pte_at(mm, address, pvmw.pte, swp_pte);
2701 		trace_set_migration_pte(address, pte_val(swp_pte),
2702 					folio_order(folio));
2703 		/*
2704 		 * No need to invalidate here; it will synchronize
2705 		 * against the special swap migration pte.
2706 		 */
2707 		}
2708 
2709 		if (unlikely(folio_test_hugetlb(folio)))
2710 			hugetlb_remove_rmap(folio);
2711 		else
2712 			folio_remove_rmap_pte(folio, subpage, vma);
2713 		if (vma->vm_flags & VM_LOCKED)
2714 			mlock_drain_local();
2715 		folio_put(folio);
2716 	}
2717 
2718 	mmu_notifier_invalidate_range_end(&range);
2719 
2720 	return ret;
2721 }
2722 
2723 /**
2724  * try_to_migrate - try to replace all page table mappings with swap entries
2725  * @folio: the folio to replace page table entries for
2726  * @flags: action and flags
2727  *
2728  * Tries to remove all the page table entries which are mapping this folio and
2729  * replace them with special swap entries. Caller must hold the folio lock.
2730  */
2731 void try_to_migrate(struct folio *folio, enum ttu_flags flags)
2732 {
2733 	struct rmap_walk_control rwc = {
2734 		.rmap_one = try_to_migrate_one,
2735 		.arg = (void *)flags,
2736 		.done = folio_not_mapped,
2737 		.anon_lock = folio_lock_anon_vma_read,
2738 	};
2739 
2740 	/*
2741 	 * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED,
2742 	 * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags.
2743 	 */
2744 	if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2745 					TTU_SYNC | TTU_BATCH_FLUSH)))
2746 		return;
2747 
2748 	if (folio_is_zone_device(folio) &&
2749 	    (!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
2750 		return;
2751 
2752 	/*
2753 	 * During exec, a temporary VMA is set up and later moved.
2754 	 * The VMA is moved under the anon_vma lock but not the
2755 	 * page tables, leading to a race where migration cannot
2756 	 * find the migration ptes.
Rather than increasing the 2757 * locking requirements of exec(), migration skips 2758 * temporary VMAs until after exec() completes. 2759 */ 2760 if (!folio_test_ksm(folio) && folio_test_anon(folio)) 2761 rwc.invalid_vma = invalid_migration_vma; 2762 2763 if (flags & TTU_RMAP_LOCKED) 2764 rmap_walk_locked(folio, &rwc); 2765 else 2766 rmap_walk(folio, &rwc); 2767 } 2768 2769 #ifdef CONFIG_DEVICE_PRIVATE 2770 /** 2771 * make_device_exclusive() - Mark a page for exclusive use by a device 2772 * @mm: mm_struct of associated target process 2773 * @addr: the virtual address to mark for exclusive device access 2774 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering 2775 * @foliop: folio pointer will be stored here on success. 2776 * 2777 * This function looks up the page mapped at the given address, grabs a 2778 * folio reference, locks the folio and replaces the PTE with special 2779 * device-exclusive PFN swap entry, preventing access through the process 2780 * page tables. The function will return with the folio locked and referenced. 2781 * 2782 * On fault, the device-exclusive entries are replaced with the original PTE 2783 * under folio lock, after calling MMU notifiers. 2784 * 2785 * Only anonymous non-hugetlb folios are supported and the VMA must have 2786 * write permissions such that we can fault in the anonymous page writable 2787 * in order to mark it exclusive. The caller must hold the mmap_lock in read 2788 * mode. 2789 * 2790 * A driver using this to program access from a device must use a mmu notifier 2791 * critical section to hold a device specific lock during programming. Once 2792 * programming is complete it should drop the folio lock and reference after 2793 * which point CPU access to the page will revoke the exclusive access. 2794 * 2795 * Notes: 2796 * #. This function always operates on individual PTEs mapping individual 2797 * pages. PMD-sized THPs are first remapped to be mapped by PTEs before 2798 * the conversion happens on a single PTE corresponding to @addr. 2799 * #. While concurrent access through the process page tables is prevented, 2800 * concurrent access through other page references (e.g., earlier GUP 2801 * invocation) is not handled and not supported. 2802 * #. device-exclusive entries are considered "clean" and "old" by core-mm. 2803 * Device drivers must update the folio state when informed by MMU 2804 * notifiers. 2805 * 2806 * Returns: pointer to mapped page on success, otherwise a negative error. 2807 */ 2808 struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, 2809 void *owner, struct folio **foliop) 2810 { 2811 struct mmu_notifier_range range; 2812 struct folio *folio, *fw_folio; 2813 struct vm_area_struct *vma; 2814 struct folio_walk fw; 2815 struct page *page; 2816 swp_entry_t entry; 2817 pte_t swp_pte; 2818 int ret; 2819 2820 mmap_assert_locked(mm); 2821 addr = PAGE_ALIGN_DOWN(addr); 2822 2823 /* 2824 * Fault in the page writable and try to lock it; note that if the 2825 * address would already be marked for exclusive use by a device, 2826 * the GUP call would undo that first by triggering a fault. 2827 * 2828 * If any other device would already map this page exclusively, the 2829 * fault will trigger a conversion to an ordinary 2830 * (non-device-exclusive) PTE and issue a MMU_NOTIFY_EXCLUSIVE. 
2831 */ 2832 retry: 2833 page = get_user_page_vma_remote(mm, addr, 2834 FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, 2835 &vma); 2836 if (IS_ERR(page)) 2837 return page; 2838 folio = page_folio(page); 2839 2840 if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) { 2841 folio_put(folio); 2842 return ERR_PTR(-EOPNOTSUPP); 2843 } 2844 2845 ret = folio_lock_killable(folio); 2846 if (ret) { 2847 folio_put(folio); 2848 return ERR_PTR(ret); 2849 } 2850 2851 /* 2852 * Inform secondary MMUs that we are going to convert this PTE to 2853 * device-exclusive, such that they unmap it now. Note that the 2854 * caller must filter this event out to prevent livelocks. 2855 */ 2856 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, 2857 mm, addr, addr + PAGE_SIZE, owner); 2858 mmu_notifier_invalidate_range_start(&range); 2859 2860 /* 2861 * Let's do a second walk and make sure we still find the same page 2862 * mapped writable. Note that any page of an anonymous folio can 2863 * only be mapped writable using exactly one PTE ("exclusive"), so 2864 * there cannot be other mappings. 2865 */ 2866 fw_folio = folio_walk_start(&fw, vma, addr, 0); 2867 if (fw_folio != folio || fw.page != page || 2868 fw.level != FW_LEVEL_PTE || !pte_write(fw.pte)) { 2869 if (fw_folio) 2870 folio_walk_end(&fw, vma); 2871 mmu_notifier_invalidate_range_end(&range); 2872 folio_unlock(folio); 2873 folio_put(folio); 2874 goto retry; 2875 } 2876 2877 /* Nuke the page table entry so we get the uptodate dirty bit. */ 2878 flush_cache_page(vma, addr, page_to_pfn(page)); 2879 fw.pte = ptep_clear_flush(vma, addr, fw.ptep); 2880 2881 /* Set the dirty flag on the folio now the PTE is gone. */ 2882 if (pte_dirty(fw.pte)) 2883 folio_mark_dirty(folio); 2884 2885 /* 2886 * Store the pfn of the page in a special device-exclusive PFN swap PTE. 2887 * do_swap_page() will trigger the conversion back while holding the 2888 * folio lock. 2889 */ 2890 entry = make_device_exclusive_entry(page_to_pfn(page)); 2891 swp_pte = swp_entry_to_pte(entry); 2892 if (pte_soft_dirty(fw.pte)) 2893 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2894 /* The pte is writable, uffd-wp does not apply. */ 2895 set_pte_at(mm, addr, fw.ptep, swp_pte); 2896 2897 folio_walk_end(&fw, vma); 2898 mmu_notifier_invalidate_range_end(&range); 2899 *foliop = folio; 2900 return page; 2901 } 2902 EXPORT_SYMBOL_GPL(make_device_exclusive); 2903 #endif 2904 2905 void __put_anon_vma(struct anon_vma *anon_vma) 2906 { 2907 struct anon_vma *root = anon_vma->root; 2908 2909 anon_vma_free(anon_vma); 2910 if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 2911 anon_vma_free(root); 2912 } 2913 2914 static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio, 2915 struct rmap_walk_control *rwc) 2916 { 2917 struct anon_vma *anon_vma; 2918 2919 if (rwc->anon_lock) 2920 return rwc->anon_lock(folio, rwc); 2921 2922 /* 2923 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() 2924 * because that depends on page_mapped(); but not all its usages 2925 * are holding mmap_lock. 
Users without mmap_lock are required to 2926 * take a reference count to prevent the anon_vma disappearing 2927 */ 2928 anon_vma = folio_anon_vma(folio); 2929 if (!anon_vma) 2930 return NULL; 2931 2932 if (anon_vma_trylock_read(anon_vma)) 2933 goto out; 2934 2935 if (rwc->try_lock) { 2936 anon_vma = NULL; 2937 rwc->contended = true; 2938 goto out; 2939 } 2940 2941 anon_vma_lock_read(anon_vma); 2942 out: 2943 return anon_vma; 2944 } 2945 2946 /* 2947 * rmap_walk_anon - do something to anonymous page using the object-based 2948 * rmap method 2949 * @folio: the folio to be handled 2950 * @rwc: control variable according to each walk type 2951 * @locked: caller holds relevant rmap lock 2952 * 2953 * Find all the mappings of a folio using the mapping pointer and the vma 2954 * chains contained in the anon_vma struct it points to. 2955 */ 2956 static void rmap_walk_anon(struct folio *folio, 2957 struct rmap_walk_control *rwc, bool locked) 2958 { 2959 struct anon_vma *anon_vma; 2960 pgoff_t pgoff_start, pgoff_end; 2961 struct anon_vma_chain *avc; 2962 2963 /* 2964 * The folio lock ensures that folio->mapping can't be changed under us 2965 * to an anon_vma with different root. 2966 */ 2967 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); 2968 2969 if (locked) { 2970 anon_vma = folio_anon_vma(folio); 2971 /* anon_vma disappear under us? */ 2972 VM_BUG_ON_FOLIO(!anon_vma, folio); 2973 } else { 2974 anon_vma = rmap_walk_anon_lock(folio, rwc); 2975 } 2976 if (!anon_vma) 2977 return; 2978 2979 pgoff_start = folio_pgoff(folio); 2980 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; 2981 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, 2982 pgoff_start, pgoff_end) { 2983 struct vm_area_struct *vma = avc->vma; 2984 unsigned long address = vma_address(vma, pgoff_start, 2985 folio_nr_pages(folio)); 2986 2987 VM_BUG_ON_VMA(address == -EFAULT, vma); 2988 cond_resched(); 2989 2990 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2991 continue; 2992 2993 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 2994 break; 2995 if (rwc->done && rwc->done(folio)) 2996 break; 2997 } 2998 2999 if (!locked) 3000 anon_vma_unlock_read(anon_vma); 3001 } 3002 3003 /** 3004 * __rmap_walk_file() - Traverse the reverse mapping for a file-backed mapping 3005 * of a page mapped within a specified page cache object at a specified offset. 3006 * 3007 * @folio: Either the folio whose mappings to traverse, or if NULL, 3008 * the callbacks specified in @rwc will be configured such 3009 * as to be able to look up mappings correctly. 3010 * @mapping: The page cache object whose mapping VMAs we intend to 3011 * traverse. If @folio is non-NULL, this should be equal to 3012 * folio_mapping(folio). 3013 * @pgoff_start: The offset within @mapping of the page which we are 3014 * looking up. If @folio is non-NULL, this should be equal 3015 * to folio_pgoff(folio). 3016 * @nr_pages: The number of pages mapped by the mapping. If @folio is 3017 * non-NULL, this should be equal to folio_nr_pages(folio). 3018 * @rwc: The reverse mapping walk control object describing how 3019 * the traversal should proceed. 3020 * @locked: Is the @mapping already locked? If not, we acquire the 3021 * lock. 
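 *
 * A minimal sketch of a non-folio use (illustrative only; my_rmap_one and
 * my_arg are hypothetical), walking every VMA that maps a single page at
 * @pgoff_start of @mapping without having a struct folio at hand:
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one	= my_rmap_one,
 *		.arg		= my_arg,
 *	};
 *
 *	__rmap_walk_file(NULL, mapping, pgoff_start, 1, &rwc, false);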
3022 */ 3023 static void __rmap_walk_file(struct folio *folio, struct address_space *mapping, 3024 pgoff_t pgoff_start, unsigned long nr_pages, 3025 struct rmap_walk_control *rwc, bool locked) 3026 { 3027 pgoff_t pgoff_end = pgoff_start + nr_pages - 1; 3028 struct vm_area_struct *vma; 3029 3030 VM_WARN_ON_FOLIO(folio && mapping != folio_mapping(folio), folio); 3031 VM_WARN_ON_FOLIO(folio && pgoff_start != folio_pgoff(folio), folio); 3032 VM_WARN_ON_FOLIO(folio && nr_pages != folio_nr_pages(folio), folio); 3033 3034 if (!locked) { 3035 if (i_mmap_trylock_read(mapping)) 3036 goto lookup; 3037 3038 if (rwc->try_lock) { 3039 rwc->contended = true; 3040 return; 3041 } 3042 3043 i_mmap_lock_read(mapping); 3044 } 3045 lookup: 3046 vma_interval_tree_foreach(vma, &mapping->i_mmap, 3047 pgoff_start, pgoff_end) { 3048 unsigned long address = vma_address(vma, pgoff_start, nr_pages); 3049 3050 VM_BUG_ON_VMA(address == -EFAULT, vma); 3051 cond_resched(); 3052 3053 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 3054 continue; 3055 3056 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 3057 goto done; 3058 if (rwc->done && rwc->done(folio)) 3059 goto done; 3060 } 3061 done: 3062 if (!locked) 3063 i_mmap_unlock_read(mapping); 3064 } 3065 3066 /* 3067 * rmap_walk_file - do something to file page using the object-based rmap method 3068 * @folio: the folio to be handled 3069 * @rwc: control variable according to each walk type 3070 * @locked: caller holds relevant rmap lock 3071 * 3072 * Find all the mappings of a folio using the mapping pointer and the vma chains 3073 * contained in the address_space struct it points to. 3074 */ 3075 static void rmap_walk_file(struct folio *folio, 3076 struct rmap_walk_control *rwc, bool locked) 3077 { 3078 /* 3079 * The folio lock not only makes sure that folio->mapping cannot 3080 * suddenly be NULLified by truncation, it makes sure that the structure 3081 * at mapping cannot be freed and reused yet, so we can safely take 3082 * mapping->i_mmap_rwsem. 3083 */ 3084 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 3085 3086 if (!folio->mapping) 3087 return; 3088 3089 __rmap_walk_file(folio, folio->mapping, folio->index, 3090 folio_nr_pages(folio), rwc, locked); 3091 } 3092 3093 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) 3094 { 3095 if (unlikely(folio_test_ksm(folio))) 3096 rmap_walk_ksm(folio, rwc); 3097 else if (folio_test_anon(folio)) 3098 rmap_walk_anon(folio, rwc, false); 3099 else 3100 rmap_walk_file(folio, rwc, false); 3101 } 3102 3103 /* Like rmap_walk, but caller holds relevant rmap lock */ 3104 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) 3105 { 3106 /* no ksm support for now */ 3107 VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); 3108 if (folio_test_anon(folio)) 3109 rmap_walk_anon(folio, rwc, true); 3110 else 3111 rmap_walk_file(folio, rwc, true); 3112 } 3113 3114 #ifdef CONFIG_HUGETLB_PAGE 3115 /* 3116 * The following two functions are for anonymous (private mapped) hugepages. 3117 * Unlike common anonymous pages, anonymous hugepages have no accounting code 3118 * and no lru code, because we handle hugepages differently from common pages. 
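 *
 * A minimal fault-path sketch (illustrative only; @ptep and @new_pte are
 * hypothetical locals), pairing hugetlb_add_new_anon_rmap() with the PTE
 * install under the page table lock:
 *
 *	hugetlb_add_new_anon_rmap(folio, vma, address);
 *	set_huge_pte_at(mm, address, ptep, new_pte,
 *			huge_page_size(hstate_vma(vma)));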
3119 */ 3120 void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, 3121 unsigned long address, rmap_t flags) 3122 { 3123 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); 3124 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); 3125 3126 atomic_inc(&folio->_entire_mapcount); 3127 atomic_inc(&folio->_large_mapcount); 3128 if (flags & RMAP_EXCLUSIVE) 3129 SetPageAnonExclusive(&folio->page); 3130 VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 && 3131 PageAnonExclusive(&folio->page), folio); 3132 } 3133 3134 void hugetlb_add_new_anon_rmap(struct folio *folio, 3135 struct vm_area_struct *vma, unsigned long address) 3136 { 3137 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); 3138 3139 BUG_ON(address < vma->vm_start || address >= vma->vm_end); 3140 /* increment count (starts at -1) */ 3141 atomic_set(&folio->_entire_mapcount, 0); 3142 atomic_set(&folio->_large_mapcount, 0); 3143 folio_clear_hugetlb_restore_reserve(folio); 3144 __folio_set_anon(folio, vma, address, true); 3145 SetPageAnonExclusive(&folio->page); 3146 } 3147 #endif /* CONFIG_HUGETLB_PAGE */ 3148