1 /* 2 * mm/rmap.c - physical to virtual reverse mappings 3 * 4 * Copyright 2001, Rik van Riel <riel@conectiva.com.br> 5 * Released under the General Public License (GPL). 6 * 7 * Simple, low overhead reverse mapping scheme. 8 * Please try to keep this thing as modular as possible. 9 * 10 * Provides methods for unmapping each kind of mapped page: 11 * the anon methods track anonymous pages, and 12 * the file methods track pages belonging to an inode. 13 * 14 * Original design by Rik van Riel <riel@conectiva.com.br> 2001 15 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004 16 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004 17 * Contributions by Hugh Dickins 2003, 2004 18 */ 19 20 /* 21 * Lock ordering in mm: 22 * 23 * inode->i_rwsem (while writing or truncating, not reading or faulting) 24 * mm->mmap_lock 25 * mapping->invalidate_lock (in filemap_fault) 26 * folio_lock 27 * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below) 28 * vma_start_write 29 * mapping->i_mmap_rwsem 30 * anon_vma->rwsem 31 * mm->page_table_lock or pte_lock 32 * swap_lock (in swap_duplicate, swap_info_get) 33 * mmlist_lock (in mmput, drain_mmlist and others) 34 * mapping->private_lock (in block_dirty_folio) 35 * folio_lock_memcg move_lock (in block_dirty_folio) 36 * i_pages lock (widely used) 37 * lruvec->lru_lock (in folio_lruvec_lock_irq) 38 * inode->i_lock (in set_page_dirty's __mark_inode_dirty) 39 * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty) 40 * sb_lock (within inode_lock in fs/fs-writeback.c) 41 * i_pages lock (widely used, in set_page_dirty, 42 * in arch-dependent flush_dcache_mmap_lock, 43 * within bdi.wb->list_lock in __sync_single_inode) 44 * 45 * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon) 46 * ->tasklist_lock 47 * pte map lock 48 * 49 * hugetlbfs PageHuge() take locks in this order: 50 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex) 51 * vma_lock (hugetlb specific lock for pmd_sharing) 52 * mapping->i_mmap_rwsem (also used for hugetlb pmd sharing) 53 * folio_lock 54 */ 55 56 #include <linux/mm.h> 57 #include <linux/sched/mm.h> 58 #include <linux/sched/task.h> 59 #include <linux/pagemap.h> 60 #include <linux/swap.h> 61 #include <linux/swapops.h> 62 #include <linux/slab.h> 63 #include <linux/init.h> 64 #include <linux/ksm.h> 65 #include <linux/rmap.h> 66 #include <linux/rcupdate.h> 67 #include <linux/export.h> 68 #include <linux/memcontrol.h> 69 #include <linux/mmu_notifier.h> 70 #include <linux/migrate.h> 71 #include <linux/hugetlb.h> 72 #include <linux/huge_mm.h> 73 #include <linux/backing-dev.h> 74 #include <linux/page_idle.h> 75 #include <linux/memremap.h> 76 #include <linux/userfaultfd_k.h> 77 #include <linux/mm_inline.h> 78 79 #include <asm/tlbflush.h> 80 81 #define CREATE_TRACE_POINTS 82 #include <trace/events/tlb.h> 83 #include <trace/events/migrate.h> 84 85 #include "internal.h" 86 87 static struct kmem_cache *anon_vma_cachep; 88 static struct kmem_cache *anon_vma_chain_cachep; 89 90 static inline struct anon_vma *anon_vma_alloc(void) 91 { 92 struct anon_vma *anon_vma; 93 94 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); 95 if (anon_vma) { 96 atomic_set(&anon_vma->refcount, 1); 97 anon_vma->num_children = 0; 98 anon_vma->num_active_vmas = 0; 99 anon_vma->parent = anon_vma; 100 /* 101 * Initialise the anon_vma root to point to itself. If called 102 * from fork, the root will be reset to the parents anon_vma. 103 */ 104 anon_vma->root = anon_vma; 105 } 106 107 return anon_vma; 108 } 109 110 static inline void anon_vma_free(struct anon_vma *anon_vma) 111 { 112 VM_BUG_ON(atomic_read(&anon_vma->refcount)); 113 114 /* 115 * Synchronize against folio_lock_anon_vma_read() such that 116 * we can safely hold the lock without the anon_vma getting 117 * freed. 118 * 119 * Relies on the full mb implied by the atomic_dec_and_test() from 120 * put_anon_vma() against the acquire barrier implied by 121 * down_read_trylock() from folio_lock_anon_vma_read(). This orders: 122 * 123 * folio_lock_anon_vma_read() VS put_anon_vma() 124 * down_read_trylock() atomic_dec_and_test() 125 * LOCK MB 126 * atomic_read() rwsem_is_locked() 127 * 128 * LOCK should suffice since the actual taking of the lock must 129 * happen _before_ what follows. 130 */ 131 might_sleep(); 132 if (rwsem_is_locked(&anon_vma->root->rwsem)) { 133 anon_vma_lock_write(anon_vma); 134 anon_vma_unlock_write(anon_vma); 135 } 136 137 kmem_cache_free(anon_vma_cachep, anon_vma); 138 } 139 140 static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) 141 { 142 return kmem_cache_alloc(anon_vma_chain_cachep, gfp); 143 } 144 145 static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) 146 { 147 kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); 148 } 149 150 static void anon_vma_chain_link(struct vm_area_struct *vma, 151 struct anon_vma_chain *avc, 152 struct anon_vma *anon_vma) 153 { 154 avc->vma = vma; 155 avc->anon_vma = anon_vma; 156 list_add(&avc->same_vma, &vma->anon_vma_chain); 157 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); 158 } 159 160 /** 161 * __anon_vma_prepare - attach an anon_vma to a memory region 162 * @vma: the memory region in question 163 * 164 * This makes sure the memory mapping described by 'vma' has 165 * an 'anon_vma' attached to it, so that we can associate the 166 * anonymous pages mapped into it with that anon_vma. 167 * 168 * The common case will be that we already have one, which 169 * is handled inline by anon_vma_prepare(). But if 170 * not we either need to find an adjacent mapping that we 171 * can re-use the anon_vma from (very common when the only 172 * reason for splitting a vma has been mprotect()), or we 173 * allocate a new one. 174 * 175 * Anon-vma allocations are very subtle, because we may have 176 * optimistically looked up an anon_vma in folio_lock_anon_vma_read() 177 * and that may actually touch the rwsem even in the newly 178 * allocated vma (it depends on RCU to make sure that the 179 * anon_vma isn't actually destroyed). 180 * 181 * As a result, we need to do proper anon_vma locking even 182 * for the new allocation. At the same time, we do not want 183 * to do any locking for the common case of already having 184 * an anon_vma. 185 */ 186 int __anon_vma_prepare(struct vm_area_struct *vma) 187 { 188 struct mm_struct *mm = vma->vm_mm; 189 struct anon_vma *anon_vma, *allocated; 190 struct anon_vma_chain *avc; 191 192 mmap_assert_locked(mm); 193 might_sleep(); 194 195 avc = anon_vma_chain_alloc(GFP_KERNEL); 196 if (!avc) 197 goto out_enomem; 198 199 anon_vma = find_mergeable_anon_vma(vma); 200 allocated = NULL; 201 if (!anon_vma) { 202 anon_vma = anon_vma_alloc(); 203 if (unlikely(!anon_vma)) 204 goto out_enomem_free_avc; 205 anon_vma->num_children++; /* self-parent link for new root */ 206 allocated = anon_vma; 207 } 208 209 anon_vma_lock_write(anon_vma); 210 /* page_table_lock to protect against threads */ 211 spin_lock(&mm->page_table_lock); 212 if (likely(!vma->anon_vma)) { 213 vma->anon_vma = anon_vma; 214 anon_vma_chain_link(vma, avc, anon_vma); 215 anon_vma->num_active_vmas++; 216 allocated = NULL; 217 avc = NULL; 218 } 219 spin_unlock(&mm->page_table_lock); 220 anon_vma_unlock_write(anon_vma); 221 222 if (unlikely(allocated)) 223 put_anon_vma(allocated); 224 if (unlikely(avc)) 225 anon_vma_chain_free(avc); 226 227 return 0; 228 229 out_enomem_free_avc: 230 anon_vma_chain_free(avc); 231 out_enomem: 232 return -ENOMEM; 233 } 234 235 /* 236 * This is a useful helper function for locking the anon_vma root as 237 * we traverse the vma->anon_vma_chain, looping over anon_vma's that 238 * have the same vma. 239 * 240 * Such anon_vma's should have the same root, so you'd expect to see 241 * just a single mutex_lock for the whole traversal. 242 */ 243 static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma) 244 { 245 struct anon_vma *new_root = anon_vma->root; 246 if (new_root != root) { 247 if (WARN_ON_ONCE(root)) 248 up_write(&root->rwsem); 249 root = new_root; 250 down_write(&root->rwsem); 251 } 252 return root; 253 } 254 255 static inline void unlock_anon_vma_root(struct anon_vma *root) 256 { 257 if (root) 258 up_write(&root->rwsem); 259 } 260 261 /* 262 * Attach the anon_vmas from src to dst. 263 * Returns 0 on success, -ENOMEM on failure. 264 * 265 * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(), 266 * copy_vma() and anon_vma_fork(). The first four want an exact copy of src, 267 * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to 268 * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before 269 * call, we can identify this case by checking (!dst->anon_vma && 270 * src->anon_vma). 271 * 272 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find 273 * and reuse existing anon_vma which has no vmas and only one child anon_vma. 274 * This prevents degradation of anon_vma hierarchy to endless linear chain in 275 * case of constantly forking task. On the other hand, an anon_vma with more 276 * than one child isn't reused even if there was no alive vma, thus rmap 277 * walker has a good chance of avoiding scanning the whole hierarchy when it 278 * searches where page is mapped. 279 */ 280 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) 281 { 282 struct anon_vma_chain *avc, *pavc; 283 struct anon_vma *root = NULL; 284 285 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { 286 struct anon_vma *anon_vma; 287 288 avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); 289 if (unlikely(!avc)) { 290 unlock_anon_vma_root(root); 291 root = NULL; 292 avc = anon_vma_chain_alloc(GFP_KERNEL); 293 if (!avc) 294 goto enomem_failure; 295 } 296 anon_vma = pavc->anon_vma; 297 root = lock_anon_vma_root(root, anon_vma); 298 anon_vma_chain_link(dst, avc, anon_vma); 299 300 /* 301 * Reuse existing anon_vma if it has no vma and only one 302 * anon_vma child. 303 * 304 * Root anon_vma is never reused: 305 * it has self-parent reference and at least one child. 306 */ 307 if (!dst->anon_vma && src->anon_vma && 308 anon_vma->num_children < 2 && 309 anon_vma->num_active_vmas == 0) 310 dst->anon_vma = anon_vma; 311 } 312 if (dst->anon_vma) 313 dst->anon_vma->num_active_vmas++; 314 unlock_anon_vma_root(root); 315 return 0; 316 317 enomem_failure: 318 /* 319 * dst->anon_vma is dropped here otherwise its num_active_vmas can 320 * be incorrectly decremented in unlink_anon_vmas(). 321 * We can safely do this because callers of anon_vma_clone() don't care 322 * about dst->anon_vma if anon_vma_clone() failed. 323 */ 324 dst->anon_vma = NULL; 325 unlink_anon_vmas(dst); 326 return -ENOMEM; 327 } 328 329 /* 330 * Attach vma to its own anon_vma, as well as to the anon_vmas that 331 * the corresponding VMA in the parent process is attached to. 332 * Returns 0 on success, non-zero on failure. 333 */ 334 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) 335 { 336 struct anon_vma_chain *avc; 337 struct anon_vma *anon_vma; 338 int error; 339 340 /* Don't bother if the parent process has no anon_vma here. */ 341 if (!pvma->anon_vma) 342 return 0; 343 344 /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ 345 vma->anon_vma = NULL; 346 347 /* 348 * First, attach the new VMA to the parent VMA's anon_vmas, 349 * so rmap can find non-COWed pages in child processes. 350 */ 351 error = anon_vma_clone(vma, pvma); 352 if (error) 353 return error; 354 355 /* An existing anon_vma has been reused, all done then. */ 356 if (vma->anon_vma) 357 return 0; 358 359 /* Then add our own anon_vma. */ 360 anon_vma = anon_vma_alloc(); 361 if (!anon_vma) 362 goto out_error; 363 anon_vma->num_active_vmas++; 364 avc = anon_vma_chain_alloc(GFP_KERNEL); 365 if (!avc) 366 goto out_error_free_anon_vma; 367 368 /* 369 * The root anon_vma's rwsem is the lock actually used when we 370 * lock any of the anon_vmas in this anon_vma tree. 371 */ 372 anon_vma->root = pvma->anon_vma->root; 373 anon_vma->parent = pvma->anon_vma; 374 /* 375 * With refcounts, an anon_vma can stay around longer than the 376 * process it belongs to. The root anon_vma needs to be pinned until 377 * this anon_vma is freed, because the lock lives in the root. 378 */ 379 get_anon_vma(anon_vma->root); 380 /* Mark this anon_vma as the one where our new (COWed) pages go. */ 381 vma->anon_vma = anon_vma; 382 anon_vma_lock_write(anon_vma); 383 anon_vma_chain_link(vma, avc, anon_vma); 384 anon_vma->parent->num_children++; 385 anon_vma_unlock_write(anon_vma); 386 387 return 0; 388 389 out_error_free_anon_vma: 390 put_anon_vma(anon_vma); 391 out_error: 392 unlink_anon_vmas(vma); 393 return -ENOMEM; 394 } 395 396 void unlink_anon_vmas(struct vm_area_struct *vma) 397 { 398 struct anon_vma_chain *avc, *next; 399 struct anon_vma *root = NULL; 400 401 /* 402 * Unlink each anon_vma chained to the VMA. This list is ordered 403 * from newest to oldest, ensuring the root anon_vma gets freed last. 404 */ 405 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 406 struct anon_vma *anon_vma = avc->anon_vma; 407 408 root = lock_anon_vma_root(root, anon_vma); 409 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); 410 411 /* 412 * Leave empty anon_vmas on the list - we'll need 413 * to free them outside the lock. 414 */ 415 if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) { 416 anon_vma->parent->num_children--; 417 continue; 418 } 419 420 list_del(&avc->same_vma); 421 anon_vma_chain_free(avc); 422 } 423 if (vma->anon_vma) { 424 vma->anon_vma->num_active_vmas--; 425 426 /* 427 * vma would still be needed after unlink, and anon_vma will be prepared 428 * when handle fault. 429 */ 430 vma->anon_vma = NULL; 431 } 432 unlock_anon_vma_root(root); 433 434 /* 435 * Iterate the list once more, it now only contains empty and unlinked 436 * anon_vmas, destroy them. Could not do before due to __put_anon_vma() 437 * needing to write-acquire the anon_vma->root->rwsem. 438 */ 439 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 440 struct anon_vma *anon_vma = avc->anon_vma; 441 442 VM_WARN_ON(anon_vma->num_children); 443 VM_WARN_ON(anon_vma->num_active_vmas); 444 put_anon_vma(anon_vma); 445 446 list_del(&avc->same_vma); 447 anon_vma_chain_free(avc); 448 } 449 } 450 451 static void anon_vma_ctor(void *data) 452 { 453 struct anon_vma *anon_vma = data; 454 455 init_rwsem(&anon_vma->rwsem); 456 atomic_set(&anon_vma->refcount, 0); 457 anon_vma->rb_root = RB_ROOT_CACHED; 458 } 459 460 void __init anon_vma_init(void) 461 { 462 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), 463 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT, 464 anon_vma_ctor); 465 anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, 466 SLAB_PANIC|SLAB_ACCOUNT); 467 } 468 469 /* 470 * Getting a lock on a stable anon_vma from a page off the LRU is tricky! 471 * 472 * Since there is no serialization what so ever against folio_remove_rmap_*() 473 * the best this function can do is return a refcount increased anon_vma 474 * that might have been relevant to this page. 475 * 476 * The page might have been remapped to a different anon_vma or the anon_vma 477 * returned may already be freed (and even reused). 478 * 479 * In case it was remapped to a different anon_vma, the new anon_vma will be a 480 * child of the old anon_vma, and the anon_vma lifetime rules will therefore 481 * ensure that any anon_vma obtained from the page will still be valid for as 482 * long as we observe page_mapped() [ hence all those page_mapped() tests ]. 483 * 484 * All users of this function must be very careful when walking the anon_vma 485 * chain and verify that the page in question is indeed mapped in it 486 * [ something equivalent to page_mapped_in_vma() ]. 487 * 488 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from 489 * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid 490 * if there is a mapcount, we can dereference the anon_vma after observing 491 * those. 492 * 493 * NOTE: the caller should normally hold folio lock when calling this. If 494 * not, the caller needs to double check the anon_vma didn't change after 495 * taking the anon_vma lock for either read or write (UFFDIO_MOVE can modify it 496 * concurrently without folio lock protection). See folio_lock_anon_vma_read() 497 * which has already covered that, and comment above remap_pages(). 498 */ 499 struct anon_vma *folio_get_anon_vma(struct folio *folio) 500 { 501 struct anon_vma *anon_vma = NULL; 502 unsigned long anon_mapping; 503 504 rcu_read_lock(); 505 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); 506 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) 507 goto out; 508 if (!folio_mapped(folio)) 509 goto out; 510 511 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); 512 if (!atomic_inc_not_zero(&anon_vma->refcount)) { 513 anon_vma = NULL; 514 goto out; 515 } 516 517 /* 518 * If this folio is still mapped, then its anon_vma cannot have been 519 * freed. But if it has been unmapped, we have no security against the 520 * anon_vma structure being freed and reused (for another anon_vma: 521 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero() 522 * above cannot corrupt). 523 */ 524 if (!folio_mapped(folio)) { 525 rcu_read_unlock(); 526 put_anon_vma(anon_vma); 527 return NULL; 528 } 529 out: 530 rcu_read_unlock(); 531 532 return anon_vma; 533 } 534 535 /* 536 * Similar to folio_get_anon_vma() except it locks the anon_vma. 537 * 538 * Its a little more complex as it tries to keep the fast path to a single 539 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a 540 * reference like with folio_get_anon_vma() and then block on the mutex 541 * on !rwc->try_lock case. 542 */ 543 struct anon_vma *folio_lock_anon_vma_read(struct folio *folio, 544 struct rmap_walk_control *rwc) 545 { 546 struct anon_vma *anon_vma = NULL; 547 struct anon_vma *root_anon_vma; 548 unsigned long anon_mapping; 549 550 retry: 551 rcu_read_lock(); 552 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); 553 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) 554 goto out; 555 if (!folio_mapped(folio)) 556 goto out; 557 558 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); 559 root_anon_vma = READ_ONCE(anon_vma->root); 560 if (down_read_trylock(&root_anon_vma->rwsem)) { 561 /* 562 * folio_move_anon_rmap() might have changed the anon_vma as we 563 * might not hold the folio lock here. 564 */ 565 if (unlikely((unsigned long)READ_ONCE(folio->mapping) != 566 anon_mapping)) { 567 up_read(&root_anon_vma->rwsem); 568 rcu_read_unlock(); 569 goto retry; 570 } 571 572 /* 573 * If the folio is still mapped, then this anon_vma is still 574 * its anon_vma, and holding the mutex ensures that it will 575 * not go away, see anon_vma_free(). 576 */ 577 if (!folio_mapped(folio)) { 578 up_read(&root_anon_vma->rwsem); 579 anon_vma = NULL; 580 } 581 goto out; 582 } 583 584 if (rwc && rwc->try_lock) { 585 anon_vma = NULL; 586 rwc->contended = true; 587 goto out; 588 } 589 590 /* trylock failed, we got to sleep */ 591 if (!atomic_inc_not_zero(&anon_vma->refcount)) { 592 anon_vma = NULL; 593 goto out; 594 } 595 596 if (!folio_mapped(folio)) { 597 rcu_read_unlock(); 598 put_anon_vma(anon_vma); 599 return NULL; 600 } 601 602 /* we pinned the anon_vma, its safe to sleep */ 603 rcu_read_unlock(); 604 anon_vma_lock_read(anon_vma); 605 606 /* 607 * folio_move_anon_rmap() might have changed the anon_vma as we might 608 * not hold the folio lock here. 609 */ 610 if (unlikely((unsigned long)READ_ONCE(folio->mapping) != 611 anon_mapping)) { 612 anon_vma_unlock_read(anon_vma); 613 put_anon_vma(anon_vma); 614 anon_vma = NULL; 615 goto retry; 616 } 617 618 if (atomic_dec_and_test(&anon_vma->refcount)) { 619 /* 620 * Oops, we held the last refcount, release the lock 621 * and bail -- can't simply use put_anon_vma() because 622 * we'll deadlock on the anon_vma_lock_write() recursion. 623 */ 624 anon_vma_unlock_read(anon_vma); 625 __put_anon_vma(anon_vma); 626 anon_vma = NULL; 627 } 628 629 return anon_vma; 630 631 out: 632 rcu_read_unlock(); 633 return anon_vma; 634 } 635 636 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH 637 /* 638 * Flush TLB entries for recently unmapped pages from remote CPUs. It is 639 * important if a PTE was dirty when it was unmapped that it's flushed 640 * before any IO is initiated on the page to prevent lost writes. Similarly, 641 * it must be flushed before freeing to prevent data leakage. 642 */ 643 void try_to_unmap_flush(void) 644 { 645 struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; 646 647 if (!tlb_ubc->flush_required) 648 return; 649 650 arch_tlbbatch_flush(&tlb_ubc->arch); 651 tlb_ubc->flush_required = false; 652 tlb_ubc->writable = false; 653 } 654 655 /* Flush iff there are potentially writable TLB entries that can race with IO */ 656 void try_to_unmap_flush_dirty(void) 657 { 658 struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; 659 660 if (tlb_ubc->writable) 661 try_to_unmap_flush(); 662 } 663 664 /* 665 * Bits 0-14 of mm->tlb_flush_batched record pending generations. 666 * Bits 16-30 of mm->tlb_flush_batched bit record flushed generations. 667 */ 668 #define TLB_FLUSH_BATCH_FLUSHED_SHIFT 16 669 #define TLB_FLUSH_BATCH_PENDING_MASK \ 670 ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1) 671 #define TLB_FLUSH_BATCH_PENDING_LARGE \ 672 (TLB_FLUSH_BATCH_PENDING_MASK / 2) 673 674 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, 675 unsigned long uaddr) 676 { 677 struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; 678 int batch; 679 bool writable = pte_dirty(pteval); 680 681 if (!pte_accessible(mm, pteval)) 682 return; 683 684 arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr); 685 tlb_ubc->flush_required = true; 686 687 /* 688 * Ensure compiler does not re-order the setting of tlb_flush_batched 689 * before the PTE is cleared. 690 */ 691 barrier(); 692 batch = atomic_read(&mm->tlb_flush_batched); 693 retry: 694 if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) { 695 /* 696 * Prevent `pending' from catching up with `flushed' because of 697 * overflow. Reset `pending' and `flushed' to be 1 and 0 if 698 * `pending' becomes large. 699 */ 700 if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1)) 701 goto retry; 702 } else { 703 atomic_inc(&mm->tlb_flush_batched); 704 } 705 706 /* 707 * If the PTE was dirty then it's best to assume it's writable. The 708 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() 709 * before the page is queued for IO. 710 */ 711 if (writable) 712 tlb_ubc->writable = true; 713 } 714 715 /* 716 * Returns true if the TLB flush should be deferred to the end of a batch of 717 * unmap operations to reduce IPIs. 718 */ 719 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) 720 { 721 if (!(flags & TTU_BATCH_FLUSH)) 722 return false; 723 724 return arch_tlbbatch_should_defer(mm); 725 } 726 727 /* 728 * Reclaim unmaps pages under the PTL but do not flush the TLB prior to 729 * releasing the PTL if TLB flushes are batched. It's possible for a parallel 730 * operation such as mprotect or munmap to race between reclaim unmapping 731 * the page and flushing the page. If this race occurs, it potentially allows 732 * access to data via a stale TLB entry. Tracking all mm's that have TLB 733 * batching in flight would be expensive during reclaim so instead track 734 * whether TLB batching occurred in the past and if so then do a flush here 735 * if required. This will cost one additional flush per reclaim cycle paid 736 * by the first operation at risk such as mprotect and mumap. 737 * 738 * This must be called under the PTL so that an access to tlb_flush_batched 739 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise 740 * via the PTL. 741 */ 742 void flush_tlb_batched_pending(struct mm_struct *mm) 743 { 744 int batch = atomic_read(&mm->tlb_flush_batched); 745 int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK; 746 int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT; 747 748 if (pending != flushed) { 749 arch_flush_tlb_batched_pending(mm); 750 /* 751 * If the new TLB flushing is pending during flushing, leave 752 * mm->tlb_flush_batched as is, to avoid losing flushing. 753 */ 754 atomic_cmpxchg(&mm->tlb_flush_batched, batch, 755 pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT)); 756 } 757 } 758 #else 759 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, 760 unsigned long uaddr) 761 { 762 } 763 764 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) 765 { 766 return false; 767 } 768 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ 769 770 /* 771 * At what user virtual address is page expected in vma? 772 * Caller should check the page is actually part of the vma. 773 */ 774 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 775 { 776 struct folio *folio = page_folio(page); 777 pgoff_t pgoff; 778 779 if (folio_test_anon(folio)) { 780 struct anon_vma *page__anon_vma = folio_anon_vma(folio); 781 /* 782 * Note: swapoff's unuse_vma() is more efficient with this 783 * check, and needs it to match anon_vma when KSM is active. 784 */ 785 if (!vma->anon_vma || !page__anon_vma || 786 vma->anon_vma->root != page__anon_vma->root) 787 return -EFAULT; 788 } else if (!vma->vm_file) { 789 return -EFAULT; 790 } else if (vma->vm_file->f_mapping != folio->mapping) { 791 return -EFAULT; 792 } 793 794 /* The !page__anon_vma above handles KSM folios */ 795 pgoff = folio->index + folio_page_idx(folio, page); 796 return vma_address(vma, pgoff, 1); 797 } 798 799 /* 800 * Returns the actual pmd_t* where we expect 'address' to be mapped from, or 801 * NULL if it doesn't exist. No guarantees / checks on what the pmd_t* 802 * represents. 803 */ 804 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) 805 { 806 pgd_t *pgd; 807 p4d_t *p4d; 808 pud_t *pud; 809 pmd_t *pmd = NULL; 810 811 pgd = pgd_offset(mm, address); 812 if (!pgd_present(*pgd)) 813 goto out; 814 815 p4d = p4d_offset(pgd, address); 816 if (!p4d_present(*p4d)) 817 goto out; 818 819 pud = pud_offset(p4d, address); 820 if (!pud_present(*pud)) 821 goto out; 822 823 pmd = pmd_offset(pud, address); 824 out: 825 return pmd; 826 } 827 828 struct folio_referenced_arg { 829 int mapcount; 830 int referenced; 831 unsigned long vm_flags; 832 struct mem_cgroup *memcg; 833 }; 834 835 /* 836 * arg: folio_referenced_arg will be passed 837 */ 838 static bool folio_referenced_one(struct folio *folio, 839 struct vm_area_struct *vma, unsigned long address, void *arg) 840 { 841 struct folio_referenced_arg *pra = arg; 842 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 843 int referenced = 0; 844 unsigned long start = address, ptes = 0; 845 846 while (page_vma_mapped_walk(&pvmw)) { 847 address = pvmw.address; 848 849 if (vma->vm_flags & VM_LOCKED) { 850 if (!folio_test_large(folio) || !pvmw.pte) { 851 /* Restore the mlock which got missed */ 852 mlock_vma_folio(folio, vma); 853 page_vma_mapped_walk_done(&pvmw); 854 pra->vm_flags |= VM_LOCKED; 855 return false; /* To break the loop */ 856 } 857 /* 858 * For large folio fully mapped to VMA, will 859 * be handled after the pvmw loop. 860 * 861 * For large folio cross VMA boundaries, it's 862 * expected to be picked by page reclaim. But 863 * should skip reference of pages which are in 864 * the range of VM_LOCKED vma. As page reclaim 865 * should just count the reference of pages out 866 * the range of VM_LOCKED vma. 867 */ 868 ptes++; 869 pra->mapcount--; 870 continue; 871 } 872 873 if (pvmw.pte) { 874 if (lru_gen_enabled() && 875 pte_young(ptep_get(pvmw.pte))) { 876 lru_gen_look_around(&pvmw); 877 referenced++; 878 } 879 880 if (ptep_clear_flush_young_notify(vma, address, 881 pvmw.pte)) 882 referenced++; 883 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { 884 if (pmdp_clear_flush_young_notify(vma, address, 885 pvmw.pmd)) 886 referenced++; 887 } else { 888 /* unexpected pmd-mapped folio? */ 889 WARN_ON_ONCE(1); 890 } 891 892 pra->mapcount--; 893 } 894 895 if ((vma->vm_flags & VM_LOCKED) && 896 folio_test_large(folio) && 897 folio_within_vma(folio, vma)) { 898 unsigned long s_align, e_align; 899 900 s_align = ALIGN_DOWN(start, PMD_SIZE); 901 e_align = ALIGN_DOWN(start + folio_size(folio) - 1, PMD_SIZE); 902 903 /* folio doesn't cross page table boundary and fully mapped */ 904 if ((s_align == e_align) && (ptes == folio_nr_pages(folio))) { 905 /* Restore the mlock which got missed */ 906 mlock_vma_folio(folio, vma); 907 pra->vm_flags |= VM_LOCKED; 908 return false; /* To break the loop */ 909 } 910 } 911 912 if (referenced) 913 folio_clear_idle(folio); 914 if (folio_test_clear_young(folio)) 915 referenced++; 916 917 if (referenced) { 918 pra->referenced++; 919 pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; 920 } 921 922 if (!pra->mapcount) 923 return false; /* To break the loop */ 924 925 return true; 926 } 927 928 static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg) 929 { 930 struct folio_referenced_arg *pra = arg; 931 struct mem_cgroup *memcg = pra->memcg; 932 933 /* 934 * Ignore references from this mapping if it has no recency. If the 935 * folio has been used in another mapping, we will catch it; if this 936 * other mapping is already gone, the unmap path will have set the 937 * referenced flag or activated the folio in zap_pte_range(). 938 */ 939 if (!vma_has_recency(vma)) 940 return true; 941 942 /* 943 * If we are reclaiming on behalf of a cgroup, skip counting on behalf 944 * of references from different cgroups. 945 */ 946 if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) 947 return true; 948 949 return false; 950 } 951 952 /** 953 * folio_referenced() - Test if the folio was referenced. 954 * @folio: The folio to test. 955 * @is_locked: Caller holds lock on the folio. 956 * @memcg: target memory cgroup 957 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio. 958 * 959 * Quick test_and_clear_referenced for all mappings of a folio, 960 * 961 * Return: The number of mappings which referenced the folio. Return -1 if 962 * the function bailed out due to rmap lock contention. 963 */ 964 int folio_referenced(struct folio *folio, int is_locked, 965 struct mem_cgroup *memcg, unsigned long *vm_flags) 966 { 967 bool we_locked = false; 968 struct folio_referenced_arg pra = { 969 .mapcount = folio_mapcount(folio), 970 .memcg = memcg, 971 }; 972 struct rmap_walk_control rwc = { 973 .rmap_one = folio_referenced_one, 974 .arg = (void *)&pra, 975 .anon_lock = folio_lock_anon_vma_read, 976 .try_lock = true, 977 .invalid_vma = invalid_folio_referenced_vma, 978 }; 979 980 *vm_flags = 0; 981 if (!pra.mapcount) 982 return 0; 983 984 if (!folio_raw_mapping(folio)) 985 return 0; 986 987 if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) { 988 we_locked = folio_trylock(folio); 989 if (!we_locked) 990 return 1; 991 } 992 993 rmap_walk(folio, &rwc); 994 *vm_flags = pra.vm_flags; 995 996 if (we_locked) 997 folio_unlock(folio); 998 999 return rwc.contended ? -1 : pra.referenced; 1000 } 1001 1002 static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw) 1003 { 1004 int cleaned = 0; 1005 struct vm_area_struct *vma = pvmw->vma; 1006 struct mmu_notifier_range range; 1007 unsigned long address = pvmw->address; 1008 1009 /* 1010 * We have to assume the worse case ie pmd for invalidation. Note that 1011 * the folio can not be freed from this function. 1012 */ 1013 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0, 1014 vma->vm_mm, address, vma_address_end(pvmw)); 1015 mmu_notifier_invalidate_range_start(&range); 1016 1017 while (page_vma_mapped_walk(pvmw)) { 1018 int ret = 0; 1019 1020 address = pvmw->address; 1021 if (pvmw->pte) { 1022 pte_t *pte = pvmw->pte; 1023 pte_t entry = ptep_get(pte); 1024 1025 if (!pte_dirty(entry) && !pte_write(entry)) 1026 continue; 1027 1028 flush_cache_page(vma, address, pte_pfn(entry)); 1029 entry = ptep_clear_flush(vma, address, pte); 1030 entry = pte_wrprotect(entry); 1031 entry = pte_mkclean(entry); 1032 set_pte_at(vma->vm_mm, address, pte, entry); 1033 ret = 1; 1034 } else { 1035 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1036 pmd_t *pmd = pvmw->pmd; 1037 pmd_t entry; 1038 1039 if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) 1040 continue; 1041 1042 flush_cache_range(vma, address, 1043 address + HPAGE_PMD_SIZE); 1044 entry = pmdp_invalidate(vma, address, pmd); 1045 entry = pmd_wrprotect(entry); 1046 entry = pmd_mkclean(entry); 1047 set_pmd_at(vma->vm_mm, address, pmd, entry); 1048 ret = 1; 1049 #else 1050 /* unexpected pmd-mapped folio? */ 1051 WARN_ON_ONCE(1); 1052 #endif 1053 } 1054 1055 if (ret) 1056 cleaned++; 1057 } 1058 1059 mmu_notifier_invalidate_range_end(&range); 1060 1061 return cleaned; 1062 } 1063 1064 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, 1065 unsigned long address, void *arg) 1066 { 1067 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); 1068 int *cleaned = arg; 1069 1070 *cleaned += page_vma_mkclean_one(&pvmw); 1071 1072 return true; 1073 } 1074 1075 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) 1076 { 1077 if (vma->vm_flags & VM_SHARED) 1078 return false; 1079 1080 return true; 1081 } 1082 1083 int folio_mkclean(struct folio *folio) 1084 { 1085 int cleaned = 0; 1086 struct address_space *mapping; 1087 struct rmap_walk_control rwc = { 1088 .arg = (void *)&cleaned, 1089 .rmap_one = page_mkclean_one, 1090 .invalid_vma = invalid_mkclean_vma, 1091 }; 1092 1093 BUG_ON(!folio_test_locked(folio)); 1094 1095 if (!folio_mapped(folio)) 1096 return 0; 1097 1098 mapping = folio_mapping(folio); 1099 if (!mapping) 1100 return 0; 1101 1102 rmap_walk(folio, &rwc); 1103 1104 return cleaned; 1105 } 1106 EXPORT_SYMBOL_GPL(folio_mkclean); 1107 1108 /** 1109 * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of 1110 * [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff) 1111 * within the @vma of shared mappings. And since clean PTEs 1112 * should also be readonly, write protects them too. 1113 * @pfn: start pfn. 1114 * @nr_pages: number of physically contiguous pages srarting with @pfn. 1115 * @pgoff: page offset that the @pfn mapped with. 1116 * @vma: vma that @pfn mapped within. 1117 * 1118 * Returns the number of cleaned PTEs (including PMDs). 1119 */ 1120 int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, 1121 struct vm_area_struct *vma) 1122 { 1123 struct page_vma_mapped_walk pvmw = { 1124 .pfn = pfn, 1125 .nr_pages = nr_pages, 1126 .pgoff = pgoff, 1127 .vma = vma, 1128 .flags = PVMW_SYNC, 1129 }; 1130 1131 if (invalid_mkclean_vma(vma, NULL)) 1132 return 0; 1133 1134 pvmw.address = vma_address(vma, pgoff, nr_pages); 1135 VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); 1136 1137 return page_vma_mkclean_one(&pvmw); 1138 } 1139 1140 static __always_inline unsigned int __folio_add_rmap(struct folio *folio, 1141 struct page *page, int nr_pages, enum rmap_level level, 1142 int *nr_pmdmapped) 1143 { 1144 atomic_t *mapped = &folio->_nr_pages_mapped; 1145 const int orig_nr_pages = nr_pages; 1146 int first, nr = 0; 1147 1148 __folio_rmap_sanity_checks(folio, page, nr_pages, level); 1149 1150 switch (level) { 1151 case RMAP_LEVEL_PTE: 1152 if (!folio_test_large(folio)) { 1153 nr = atomic_inc_and_test(&page->_mapcount); 1154 break; 1155 } 1156 1157 do { 1158 first = atomic_inc_and_test(&page->_mapcount); 1159 if (first) { 1160 first = atomic_inc_return_relaxed(mapped); 1161 if (first < ENTIRELY_MAPPED) 1162 nr++; 1163 } 1164 } while (page++, --nr_pages > 0); 1165 atomic_add(orig_nr_pages, &folio->_large_mapcount); 1166 break; 1167 case RMAP_LEVEL_PMD: 1168 first = atomic_inc_and_test(&folio->_entire_mapcount); 1169 if (first) { 1170 nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped); 1171 if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) { 1172 *nr_pmdmapped = folio_nr_pages(folio); 1173 nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); 1174 /* Raced ahead of a remove and another add? */ 1175 if (unlikely(nr < 0)) 1176 nr = 0; 1177 } else { 1178 /* Raced ahead of a remove of ENTIRELY_MAPPED */ 1179 nr = 0; 1180 } 1181 } 1182 atomic_inc(&folio->_large_mapcount); 1183 break; 1184 } 1185 return nr; 1186 } 1187 1188 /** 1189 * folio_move_anon_rmap - move a folio to our anon_vma 1190 * @folio: The folio to move to our anon_vma 1191 * @vma: The vma the folio belongs to 1192 * 1193 * When a folio belongs exclusively to one process after a COW event, 1194 * that folio can be moved into the anon_vma that belongs to just that 1195 * process, so the rmap code will not search the parent or sibling processes. 1196 */ 1197 void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma) 1198 { 1199 void *anon_vma = vma->anon_vma; 1200 1201 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 1202 VM_BUG_ON_VMA(!anon_vma, vma); 1203 1204 anon_vma += PAGE_MAPPING_ANON; 1205 /* 1206 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written 1207 * simultaneously, so a concurrent reader (eg folio_referenced()'s 1208 * folio_test_anon()) will not see one without the other. 1209 */ 1210 WRITE_ONCE(folio->mapping, anon_vma); 1211 } 1212 1213 /** 1214 * __folio_set_anon - set up a new anonymous rmap for a folio 1215 * @folio: The folio to set up the new anonymous rmap for. 1216 * @vma: VM area to add the folio to. 1217 * @address: User virtual address of the mapping 1218 * @exclusive: Whether the folio is exclusive to the process. 1219 */ 1220 static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma, 1221 unsigned long address, bool exclusive) 1222 { 1223 struct anon_vma *anon_vma = vma->anon_vma; 1224 1225 BUG_ON(!anon_vma); 1226 1227 /* 1228 * If the folio isn't exclusive to this vma, we must use the _oldest_ 1229 * possible anon_vma for the folio mapping! 1230 */ 1231 if (!exclusive) 1232 anon_vma = anon_vma->root; 1233 1234 /* 1235 * page_idle does a lockless/optimistic rmap scan on folio->mapping. 1236 * Make sure the compiler doesn't split the stores of anon_vma and 1237 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code 1238 * could mistake the mapping for a struct address_space and crash. 1239 */ 1240 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 1241 WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); 1242 folio->index = linear_page_index(vma, address); 1243 } 1244 1245 /** 1246 * __page_check_anon_rmap - sanity check anonymous rmap addition 1247 * @folio: The folio containing @page. 1248 * @page: the page to check the mapping of 1249 * @vma: the vm area in which the mapping is added 1250 * @address: the user virtual address mapped 1251 */ 1252 static void __page_check_anon_rmap(struct folio *folio, struct page *page, 1253 struct vm_area_struct *vma, unsigned long address) 1254 { 1255 /* 1256 * The page's anon-rmap details (mapping and index) are guaranteed to 1257 * be set up correctly at this point. 1258 * 1259 * We have exclusion against folio_add_anon_rmap_*() because the caller 1260 * always holds the page locked. 1261 * 1262 * We have exclusion against folio_add_new_anon_rmap because those pages 1263 * are initially only visible via the pagetables, and the pte is locked 1264 * over the call to folio_add_new_anon_rmap. 1265 */ 1266 VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, 1267 folio); 1268 VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), 1269 page); 1270 } 1271 1272 static __always_inline void __folio_add_anon_rmap(struct folio *folio, 1273 struct page *page, int nr_pages, struct vm_area_struct *vma, 1274 unsigned long address, rmap_t flags, enum rmap_level level) 1275 { 1276 int i, nr, nr_pmdmapped = 0; 1277 1278 nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); 1279 if (nr_pmdmapped) 1280 __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped); 1281 if (nr) 1282 __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); 1283 1284 if (unlikely(!folio_test_anon(folio))) { 1285 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); 1286 /* 1287 * For a PTE-mapped large folio, we only know that the single 1288 * PTE is exclusive. Further, __folio_set_anon() might not get 1289 * folio->index right when not given the address of the head 1290 * page. 1291 */ 1292 VM_WARN_ON_FOLIO(folio_test_large(folio) && 1293 level != RMAP_LEVEL_PMD, folio); 1294 __folio_set_anon(folio, vma, address, 1295 !!(flags & RMAP_EXCLUSIVE)); 1296 } else if (likely(!folio_test_ksm(folio))) { 1297 __page_check_anon_rmap(folio, page, vma, address); 1298 } 1299 1300 if (flags & RMAP_EXCLUSIVE) { 1301 switch (level) { 1302 case RMAP_LEVEL_PTE: 1303 for (i = 0; i < nr_pages; i++) 1304 SetPageAnonExclusive(page + i); 1305 break; 1306 case RMAP_LEVEL_PMD: 1307 SetPageAnonExclusive(page); 1308 break; 1309 } 1310 } 1311 for (i = 0; i < nr_pages; i++) { 1312 struct page *cur_page = page + i; 1313 1314 /* While PTE-mapping a THP we have a PMD and a PTE mapping. */ 1315 VM_WARN_ON_FOLIO((atomic_read(&cur_page->_mapcount) > 0 || 1316 (folio_test_large(folio) && 1317 folio_entire_mapcount(folio) > 1)) && 1318 PageAnonExclusive(cur_page), folio); 1319 } 1320 1321 /* 1322 * For large folio, only mlock it if it's fully mapped to VMA. It's 1323 * not easy to check whether the large folio is fully mapped to VMA 1324 * here. Only mlock normal 4K folio and leave page reclaim to handle 1325 * large folio. 1326 */ 1327 if (!folio_test_large(folio)) 1328 mlock_vma_folio(folio, vma); 1329 } 1330 1331 /** 1332 * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio 1333 * @folio: The folio to add the mappings to 1334 * @page: The first page to add 1335 * @nr_pages: The number of pages which will be mapped 1336 * @vma: The vm area in which the mappings are added 1337 * @address: The user virtual address of the first page to map 1338 * @flags: The rmap flags 1339 * 1340 * The page range of folio is defined by [first_page, first_page + nr_pages) 1341 * 1342 * The caller needs to hold the page table lock, and the page must be locked in 1343 * the anon_vma case: to serialize mapping,index checking after setting, 1344 * and to ensure that an anon folio is not being upgraded racily to a KSM folio 1345 * (but KSM folios are never downgraded). 1346 */ 1347 void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page, 1348 int nr_pages, struct vm_area_struct *vma, unsigned long address, 1349 rmap_t flags) 1350 { 1351 __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags, 1352 RMAP_LEVEL_PTE); 1353 } 1354 1355 /** 1356 * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio 1357 * @folio: The folio to add the mapping to 1358 * @page: The first page to add 1359 * @vma: The vm area in which the mapping is added 1360 * @address: The user virtual address of the first page to map 1361 * @flags: The rmap flags 1362 * 1363 * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR) 1364 * 1365 * The caller needs to hold the page table lock, and the page must be locked in 1366 * the anon_vma case: to serialize mapping,index checking after setting. 1367 */ 1368 void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page, 1369 struct vm_area_struct *vma, unsigned long address, rmap_t flags) 1370 { 1371 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1372 __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags, 1373 RMAP_LEVEL_PMD); 1374 #else 1375 WARN_ON_ONCE(true); 1376 #endif 1377 } 1378 1379 /** 1380 * folio_add_new_anon_rmap - Add mapping to a new anonymous folio. 1381 * @folio: The folio to add the mapping to. 1382 * @vma: the vm area in which the mapping is added 1383 * @address: the user virtual address mapped 1384 * 1385 * Like folio_add_anon_rmap_*() but must only be called on *new* folios. 1386 * This means the inc-and-test can be bypassed. 1387 * The folio does not have to be locked. 1388 * 1389 * If the folio is pmd-mappable, it is accounted as a THP. As the folio 1390 * is new, it's assumed to be mapped exclusively by a single process. 1391 */ 1392 void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, 1393 unsigned long address) 1394 { 1395 int nr = folio_nr_pages(folio); 1396 1397 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); 1398 VM_BUG_ON_VMA(address < vma->vm_start || 1399 address + (nr << PAGE_SHIFT) > vma->vm_end, vma); 1400 __folio_set_swapbacked(folio); 1401 __folio_set_anon(folio, vma, address, true); 1402 1403 if (likely(!folio_test_large(folio))) { 1404 /* increment count (starts at -1) */ 1405 atomic_set(&folio->_mapcount, 0); 1406 SetPageAnonExclusive(&folio->page); 1407 } else if (!folio_test_pmd_mappable(folio)) { 1408 int i; 1409 1410 for (i = 0; i < nr; i++) { 1411 struct page *page = folio_page(folio, i); 1412 1413 /* increment count (starts at -1) */ 1414 atomic_set(&page->_mapcount, 0); 1415 SetPageAnonExclusive(page); 1416 } 1417 1418 /* increment count (starts at -1) */ 1419 atomic_set(&folio->_large_mapcount, nr - 1); 1420 atomic_set(&folio->_nr_pages_mapped, nr); 1421 } else { 1422 /* increment count (starts at -1) */ 1423 atomic_set(&folio->_entire_mapcount, 0); 1424 /* increment count (starts at -1) */ 1425 atomic_set(&folio->_large_mapcount, 0); 1426 atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); 1427 SetPageAnonExclusive(&folio->page); 1428 __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr); 1429 } 1430 1431 __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); 1432 } 1433 1434 static __always_inline void __folio_add_file_rmap(struct folio *folio, 1435 struct page *page, int nr_pages, struct vm_area_struct *vma, 1436 enum rmap_level level) 1437 { 1438 pg_data_t *pgdat = folio_pgdat(folio); 1439 int nr, nr_pmdmapped = 0; 1440 1441 VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); 1442 1443 nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); 1444 if (nr_pmdmapped) 1445 __mod_node_page_state(pgdat, folio_test_swapbacked(folio) ? 1446 NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped); 1447 if (nr) 1448 __lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr); 1449 1450 /* See comments in folio_add_anon_rmap_*() */ 1451 if (!folio_test_large(folio)) 1452 mlock_vma_folio(folio, vma); 1453 } 1454 1455 /** 1456 * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio 1457 * @folio: The folio to add the mappings to 1458 * @page: The first page to add 1459 * @nr_pages: The number of pages that will be mapped using PTEs 1460 * @vma: The vm area in which the mappings are added 1461 * 1462 * The page range of the folio is defined by [page, page + nr_pages) 1463 * 1464 * The caller needs to hold the page table lock. 1465 */ 1466 void folio_add_file_rmap_ptes(struct folio *folio, struct page *page, 1467 int nr_pages, struct vm_area_struct *vma) 1468 { 1469 __folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE); 1470 } 1471 1472 /** 1473 * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio 1474 * @folio: The folio to add the mapping to 1475 * @page: The first page to add 1476 * @vma: The vm area in which the mapping is added 1477 * 1478 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) 1479 * 1480 * The caller needs to hold the page table lock. 1481 */ 1482 void folio_add_file_rmap_pmd(struct folio *folio, struct page *page, 1483 struct vm_area_struct *vma) 1484 { 1485 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1486 __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD); 1487 #else 1488 WARN_ON_ONCE(true); 1489 #endif 1490 } 1491 1492 static __always_inline void __folio_remove_rmap(struct folio *folio, 1493 struct page *page, int nr_pages, struct vm_area_struct *vma, 1494 enum rmap_level level) 1495 { 1496 atomic_t *mapped = &folio->_nr_pages_mapped; 1497 pg_data_t *pgdat = folio_pgdat(folio); 1498 int last, nr = 0, nr_pmdmapped = 0; 1499 bool partially_mapped = false; 1500 enum node_stat_item idx; 1501 1502 __folio_rmap_sanity_checks(folio, page, nr_pages, level); 1503 1504 switch (level) { 1505 case RMAP_LEVEL_PTE: 1506 if (!folio_test_large(folio)) { 1507 nr = atomic_add_negative(-1, &page->_mapcount); 1508 break; 1509 } 1510 1511 atomic_sub(nr_pages, &folio->_large_mapcount); 1512 do { 1513 last = atomic_add_negative(-1, &page->_mapcount); 1514 if (last) { 1515 last = atomic_dec_return_relaxed(mapped); 1516 if (last < ENTIRELY_MAPPED) 1517 nr++; 1518 } 1519 } while (page++, --nr_pages > 0); 1520 1521 partially_mapped = nr && atomic_read(mapped); 1522 break; 1523 case RMAP_LEVEL_PMD: 1524 atomic_dec(&folio->_large_mapcount); 1525 last = atomic_add_negative(-1, &folio->_entire_mapcount); 1526 if (last) { 1527 nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped); 1528 if (likely(nr < ENTIRELY_MAPPED)) { 1529 nr_pmdmapped = folio_nr_pages(folio); 1530 nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); 1531 /* Raced ahead of another remove and an add? */ 1532 if (unlikely(nr < 0)) 1533 nr = 0; 1534 } else { 1535 /* An add of ENTIRELY_MAPPED raced ahead */ 1536 nr = 0; 1537 } 1538 } 1539 1540 partially_mapped = nr < nr_pmdmapped; 1541 break; 1542 } 1543 1544 if (nr_pmdmapped) { 1545 /* NR_{FILE/SHMEM}_PMDMAPPED are not maintained per-memcg */ 1546 if (folio_test_anon(folio)) 1547 __lruvec_stat_mod_folio(folio, NR_ANON_THPS, -nr_pmdmapped); 1548 else 1549 __mod_node_page_state(pgdat, 1550 folio_test_swapbacked(folio) ? 1551 NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, 1552 -nr_pmdmapped); 1553 } 1554 if (nr) { 1555 idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED; 1556 __lruvec_stat_mod_folio(folio, idx, -nr); 1557 1558 /* 1559 * Queue anon large folio for deferred split if at least one 1560 * page of the folio is unmapped and at least one page 1561 * is still mapped. 1562 * 1563 * Check partially_mapped first to ensure it is a large folio. 1564 */ 1565 if (folio_test_anon(folio) && partially_mapped && 1566 list_empty(&folio->_deferred_list)) 1567 deferred_split_folio(folio); 1568 } 1569 1570 /* 1571 * It would be tidy to reset folio_test_anon mapping when fully 1572 * unmapped, but that might overwrite a racing folio_add_anon_rmap_*() 1573 * which increments mapcount after us but sets mapping before us: 1574 * so leave the reset to free_pages_prepare, and remember that 1575 * it's only reliable while mapped. 1576 */ 1577 1578 munlock_vma_folio(folio, vma); 1579 } 1580 1581 /** 1582 * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio 1583 * @folio: The folio to remove the mappings from 1584 * @page: The first page to remove 1585 * @nr_pages: The number of pages that will be removed from the mapping 1586 * @vma: The vm area from which the mappings are removed 1587 * 1588 * The page range of the folio is defined by [page, page + nr_pages) 1589 * 1590 * The caller needs to hold the page table lock. 1591 */ 1592 void folio_remove_rmap_ptes(struct folio *folio, struct page *page, 1593 int nr_pages, struct vm_area_struct *vma) 1594 { 1595 __folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE); 1596 } 1597 1598 /** 1599 * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio 1600 * @folio: The folio to remove the mapping from 1601 * @page: The first page to remove 1602 * @vma: The vm area from which the mapping is removed 1603 * 1604 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) 1605 * 1606 * The caller needs to hold the page table lock. 1607 */ 1608 void folio_remove_rmap_pmd(struct folio *folio, struct page *page, 1609 struct vm_area_struct *vma) 1610 { 1611 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1612 __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD); 1613 #else 1614 WARN_ON_ONCE(true); 1615 #endif 1616 } 1617 1618 /* 1619 * @arg: enum ttu_flags will be passed to this argument 1620 */ 1621 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, 1622 unsigned long address, void *arg) 1623 { 1624 struct mm_struct *mm = vma->vm_mm; 1625 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 1626 pte_t pteval; 1627 struct page *subpage; 1628 bool anon_exclusive, ret = true; 1629 struct mmu_notifier_range range; 1630 enum ttu_flags flags = (enum ttu_flags)(long)arg; 1631 unsigned long pfn; 1632 unsigned long hsz = 0; 1633 1634 /* 1635 * When racing against e.g. zap_pte_range() on another cpu, 1636 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(), 1637 * try_to_unmap() may return before page_mapped() has become false, 1638 * if page table locking is skipped: use TTU_SYNC to wait for that. 1639 */ 1640 if (flags & TTU_SYNC) 1641 pvmw.flags = PVMW_SYNC; 1642 1643 if (flags & TTU_SPLIT_HUGE_PMD) 1644 split_huge_pmd_address(vma, address, false, folio); 1645 1646 /* 1647 * For THP, we have to assume the worse case ie pmd for invalidation. 1648 * For hugetlb, it could be much worse if we need to do pud 1649 * invalidation in the case of pmd sharing. 1650 * 1651 * Note that the folio can not be freed in this function as call of 1652 * try_to_unmap() must hold a reference on the folio. 1653 */ 1654 range.end = vma_address_end(&pvmw); 1655 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 1656 address, range.end); 1657 if (folio_test_hugetlb(folio)) { 1658 /* 1659 * If sharing is possible, start and end will be adjusted 1660 * accordingly. 1661 */ 1662 adjust_range_if_pmd_sharing_possible(vma, &range.start, 1663 &range.end); 1664 1665 /* We need the huge page size for set_huge_pte_at() */ 1666 hsz = huge_page_size(hstate_vma(vma)); 1667 } 1668 mmu_notifier_invalidate_range_start(&range); 1669 1670 while (page_vma_mapped_walk(&pvmw)) { 1671 /* Unexpected PMD-mapped THP? */ 1672 VM_BUG_ON_FOLIO(!pvmw.pte, folio); 1673 1674 /* 1675 * If the folio is in an mlock()d vma, we must not swap it out. 1676 */ 1677 if (!(flags & TTU_IGNORE_MLOCK) && 1678 (vma->vm_flags & VM_LOCKED)) { 1679 /* Restore the mlock which got missed */ 1680 if (!folio_test_large(folio)) 1681 mlock_vma_folio(folio, vma); 1682 page_vma_mapped_walk_done(&pvmw); 1683 ret = false; 1684 break; 1685 } 1686 1687 pfn = pte_pfn(ptep_get(pvmw.pte)); 1688 subpage = folio_page(folio, pfn - folio_pfn(folio)); 1689 address = pvmw.address; 1690 anon_exclusive = folio_test_anon(folio) && 1691 PageAnonExclusive(subpage); 1692 1693 if (folio_test_hugetlb(folio)) { 1694 bool anon = folio_test_anon(folio); 1695 1696 /* 1697 * The try_to_unmap() is only passed a hugetlb page 1698 * in the case where the hugetlb page is poisoned. 1699 */ 1700 VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage); 1701 /* 1702 * huge_pmd_unshare may unmap an entire PMD page. 1703 * There is no way of knowing exactly which PMDs may 1704 * be cached for this mm, so we must flush them all. 1705 * start/end were already adjusted above to cover this 1706 * range. 1707 */ 1708 flush_cache_range(vma, range.start, range.end); 1709 1710 /* 1711 * To call huge_pmd_unshare, i_mmap_rwsem must be 1712 * held in write mode. Caller needs to explicitly 1713 * do this outside rmap routines. 1714 * 1715 * We also must hold hugetlb vma_lock in write mode. 1716 * Lock order dictates acquiring vma_lock BEFORE 1717 * i_mmap_rwsem. We can only try lock here and fail 1718 * if unsuccessful. 1719 */ 1720 if (!anon) { 1721 VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 1722 if (!hugetlb_vma_trylock_write(vma)) { 1723 page_vma_mapped_walk_done(&pvmw); 1724 ret = false; 1725 break; 1726 } 1727 if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { 1728 hugetlb_vma_unlock_write(vma); 1729 flush_tlb_range(vma, 1730 range.start, range.end); 1731 /* 1732 * The ref count of the PMD page was 1733 * dropped which is part of the way map 1734 * counting is done for shared PMDs. 1735 * Return 'true' here. When there is 1736 * no other sharing, huge_pmd_unshare 1737 * returns false and we will unmap the 1738 * actual page and drop map count 1739 * to zero. 1740 */ 1741 page_vma_mapped_walk_done(&pvmw); 1742 break; 1743 } 1744 hugetlb_vma_unlock_write(vma); 1745 } 1746 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); 1747 } else { 1748 flush_cache_page(vma, address, pfn); 1749 /* Nuke the page table entry. */ 1750 if (should_defer_flush(mm, flags)) { 1751 /* 1752 * We clear the PTE but do not flush so potentially 1753 * a remote CPU could still be writing to the folio. 1754 * If the entry was previously clean then the 1755 * architecture must guarantee that a clear->dirty 1756 * transition on a cached TLB entry is written through 1757 * and traps if the PTE is unmapped. 1758 */ 1759 pteval = ptep_get_and_clear(mm, address, pvmw.pte); 1760 1761 set_tlb_ubc_flush_pending(mm, pteval, address); 1762 } else { 1763 pteval = ptep_clear_flush(vma, address, pvmw.pte); 1764 } 1765 } 1766 1767 /* 1768 * Now the pte is cleared. If this pte was uffd-wp armed, 1769 * we may want to replace a none pte with a marker pte if 1770 * it's file-backed, so we don't lose the tracking info. 1771 */ 1772 pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); 1773 1774 /* Set the dirty flag on the folio now the pte is gone. */ 1775 if (pte_dirty(pteval)) 1776 folio_mark_dirty(folio); 1777 1778 /* Update high watermark before we lower rss */ 1779 update_hiwater_rss(mm); 1780 1781 if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) { 1782 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 1783 if (folio_test_hugetlb(folio)) { 1784 hugetlb_count_sub(folio_nr_pages(folio), mm); 1785 set_huge_pte_at(mm, address, pvmw.pte, pteval, 1786 hsz); 1787 } else { 1788 dec_mm_counter(mm, mm_counter(folio)); 1789 set_pte_at(mm, address, pvmw.pte, pteval); 1790 } 1791 1792 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 1793 /* 1794 * The guest indicated that the page content is of no 1795 * interest anymore. Simply discard the pte, vmscan 1796 * will take care of the rest. 1797 * A future reference will then fault in a new zero 1798 * page. When userfaultfd is active, we must not drop 1799 * this page though, as its main user (postcopy 1800 * migration) will not expect userfaults on already 1801 * copied pages. 1802 */ 1803 dec_mm_counter(mm, mm_counter(folio)); 1804 } else if (folio_test_anon(folio)) { 1805 swp_entry_t entry = page_swap_entry(subpage); 1806 pte_t swp_pte; 1807 /* 1808 * Store the swap location in the pte. 1809 * See handle_pte_fault() ... 1810 */ 1811 if (unlikely(folio_test_swapbacked(folio) != 1812 folio_test_swapcache(folio))) { 1813 WARN_ON_ONCE(1); 1814 ret = false; 1815 page_vma_mapped_walk_done(&pvmw); 1816 break; 1817 } 1818 1819 /* MADV_FREE page check */ 1820 if (!folio_test_swapbacked(folio)) { 1821 int ref_count, map_count; 1822 1823 /* 1824 * Synchronize with gup_pte_range(): 1825 * - clear PTE; barrier; read refcount 1826 * - inc refcount; barrier; read PTE 1827 */ 1828 smp_mb(); 1829 1830 ref_count = folio_ref_count(folio); 1831 map_count = folio_mapcount(folio); 1832 1833 /* 1834 * Order reads for page refcount and dirty flag 1835 * (see comments in __remove_mapping()). 1836 */ 1837 smp_rmb(); 1838 1839 /* 1840 * The only page refs must be one from isolation 1841 * plus the rmap(s) (dropped by discard:). 1842 */ 1843 if (ref_count == 1 + map_count && 1844 !folio_test_dirty(folio)) { 1845 dec_mm_counter(mm, MM_ANONPAGES); 1846 goto discard; 1847 } 1848 1849 /* 1850 * If the folio was redirtied, it cannot be 1851 * discarded. Remap the page to page table. 1852 */ 1853 set_pte_at(mm, address, pvmw.pte, pteval); 1854 folio_set_swapbacked(folio); 1855 ret = false; 1856 page_vma_mapped_walk_done(&pvmw); 1857 break; 1858 } 1859 1860 if (swap_duplicate(entry) < 0) { 1861 set_pte_at(mm, address, pvmw.pte, pteval); 1862 ret = false; 1863 page_vma_mapped_walk_done(&pvmw); 1864 break; 1865 } 1866 if (arch_unmap_one(mm, vma, address, pteval) < 0) { 1867 swap_free(entry); 1868 set_pte_at(mm, address, pvmw.pte, pteval); 1869 ret = false; 1870 page_vma_mapped_walk_done(&pvmw); 1871 break; 1872 } 1873 1874 /* See folio_try_share_anon_rmap(): clear PTE first. */ 1875 if (anon_exclusive && 1876 folio_try_share_anon_rmap_pte(folio, subpage)) { 1877 swap_free(entry); 1878 set_pte_at(mm, address, pvmw.pte, pteval); 1879 ret = false; 1880 page_vma_mapped_walk_done(&pvmw); 1881 break; 1882 } 1883 if (list_empty(&mm->mmlist)) { 1884 spin_lock(&mmlist_lock); 1885 if (list_empty(&mm->mmlist)) 1886 list_add(&mm->mmlist, &init_mm.mmlist); 1887 spin_unlock(&mmlist_lock); 1888 } 1889 dec_mm_counter(mm, MM_ANONPAGES); 1890 inc_mm_counter(mm, MM_SWAPENTS); 1891 swp_pte = swp_entry_to_pte(entry); 1892 if (anon_exclusive) 1893 swp_pte = pte_swp_mkexclusive(swp_pte); 1894 if (pte_soft_dirty(pteval)) 1895 swp_pte = pte_swp_mksoft_dirty(swp_pte); 1896 if (pte_uffd_wp(pteval)) 1897 swp_pte = pte_swp_mkuffd_wp(swp_pte); 1898 set_pte_at(mm, address, pvmw.pte, swp_pte); 1899 } else { 1900 /* 1901 * This is a locked file-backed folio, 1902 * so it cannot be removed from the page 1903 * cache and replaced by a new folio before 1904 * mmu_notifier_invalidate_range_end, so no 1905 * concurrent thread might update its page table 1906 * to point at a new folio while a device is 1907 * still using this folio. 1908 * 1909 * See Documentation/mm/mmu_notifier.rst 1910 */ 1911 dec_mm_counter(mm, mm_counter_file(folio)); 1912 } 1913 discard: 1914 if (unlikely(folio_test_hugetlb(folio))) 1915 hugetlb_remove_rmap(folio); 1916 else 1917 folio_remove_rmap_pte(folio, subpage, vma); 1918 if (vma->vm_flags & VM_LOCKED) 1919 mlock_drain_local(); 1920 folio_put(folio); 1921 } 1922 1923 mmu_notifier_invalidate_range_end(&range); 1924 1925 return ret; 1926 } 1927 1928 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) 1929 { 1930 return vma_is_temporary_stack(vma); 1931 } 1932 1933 static int folio_not_mapped(struct folio *folio) 1934 { 1935 return !folio_mapped(folio); 1936 } 1937 1938 /** 1939 * try_to_unmap - Try to remove all page table mappings to a folio. 1940 * @folio: The folio to unmap. 1941 * @flags: action and flags 1942 * 1943 * Tries to remove all the page table entries which are mapping this 1944 * folio. It is the caller's responsibility to check if the folio is 1945 * still mapped if needed (use TTU_SYNC to prevent accounting races). 1946 * 1947 * Context: Caller must hold the folio lock. 1948 */ 1949 void try_to_unmap(struct folio *folio, enum ttu_flags flags) 1950 { 1951 struct rmap_walk_control rwc = { 1952 .rmap_one = try_to_unmap_one, 1953 .arg = (void *)flags, 1954 .done = folio_not_mapped, 1955 .anon_lock = folio_lock_anon_vma_read, 1956 }; 1957 1958 if (flags & TTU_RMAP_LOCKED) 1959 rmap_walk_locked(folio, &rwc); 1960 else 1961 rmap_walk(folio, &rwc); 1962 } 1963 1964 /* 1965 * @arg: enum ttu_flags will be passed to this argument. 1966 * 1967 * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs 1968 * containing migration entries. 1969 */ 1970 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, 1971 unsigned long address, void *arg) 1972 { 1973 struct mm_struct *mm = vma->vm_mm; 1974 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 1975 pte_t pteval; 1976 struct page *subpage; 1977 bool anon_exclusive, ret = true; 1978 struct mmu_notifier_range range; 1979 enum ttu_flags flags = (enum ttu_flags)(long)arg; 1980 unsigned long pfn; 1981 unsigned long hsz = 0; 1982 1983 /* 1984 * When racing against e.g. zap_pte_range() on another cpu, 1985 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(), 1986 * try_to_migrate() may return before page_mapped() has become false, 1987 * if page table locking is skipped: use TTU_SYNC to wait for that. 1988 */ 1989 if (flags & TTU_SYNC) 1990 pvmw.flags = PVMW_SYNC; 1991 1992 /* 1993 * unmap_page() in mm/huge_memory.c is the only user of migration with 1994 * TTU_SPLIT_HUGE_PMD and it wants to freeze. 1995 */ 1996 if (flags & TTU_SPLIT_HUGE_PMD) 1997 split_huge_pmd_address(vma, address, true, folio); 1998 1999 /* 2000 * For THP, we have to assume the worse case ie pmd for invalidation. 2001 * For hugetlb, it could be much worse if we need to do pud 2002 * invalidation in the case of pmd sharing. 2003 * 2004 * Note that the page can not be free in this function as call of 2005 * try_to_unmap() must hold a reference on the page. 2006 */ 2007 range.end = vma_address_end(&pvmw); 2008 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 2009 address, range.end); 2010 if (folio_test_hugetlb(folio)) { 2011 /* 2012 * If sharing is possible, start and end will be adjusted 2013 * accordingly. 2014 */ 2015 adjust_range_if_pmd_sharing_possible(vma, &range.start, 2016 &range.end); 2017 2018 /* We need the huge page size for set_huge_pte_at() */ 2019 hsz = huge_page_size(hstate_vma(vma)); 2020 } 2021 mmu_notifier_invalidate_range_start(&range); 2022 2023 while (page_vma_mapped_walk(&pvmw)) { 2024 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 2025 /* PMD-mapped THP migration entry */ 2026 if (!pvmw.pte) { 2027 subpage = folio_page(folio, 2028 pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); 2029 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || 2030 !folio_test_pmd_mappable(folio), folio); 2031 2032 if (set_pmd_migration_entry(&pvmw, subpage)) { 2033 ret = false; 2034 page_vma_mapped_walk_done(&pvmw); 2035 break; 2036 } 2037 continue; 2038 } 2039 #endif 2040 2041 /* Unexpected PMD-mapped THP? */ 2042 VM_BUG_ON_FOLIO(!pvmw.pte, folio); 2043 2044 pfn = pte_pfn(ptep_get(pvmw.pte)); 2045 2046 if (folio_is_zone_device(folio)) { 2047 /* 2048 * Our PTE is a non-present device exclusive entry and 2049 * calculating the subpage as for the common case would 2050 * result in an invalid pointer. 2051 * 2052 * Since only PAGE_SIZE pages can currently be 2053 * migrated, just set it to page. This will need to be 2054 * changed when hugepage migrations to device private 2055 * memory are supported. 2056 */ 2057 VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); 2058 subpage = &folio->page; 2059 } else { 2060 subpage = folio_page(folio, pfn - folio_pfn(folio)); 2061 } 2062 address = pvmw.address; 2063 anon_exclusive = folio_test_anon(folio) && 2064 PageAnonExclusive(subpage); 2065 2066 if (folio_test_hugetlb(folio)) { 2067 bool anon = folio_test_anon(folio); 2068 2069 /* 2070 * huge_pmd_unshare may unmap an entire PMD page. 2071 * There is no way of knowing exactly which PMDs may 2072 * be cached for this mm, so we must flush them all. 2073 * start/end were already adjusted above to cover this 2074 * range. 2075 */ 2076 flush_cache_range(vma, range.start, range.end); 2077 2078 /* 2079 * To call huge_pmd_unshare, i_mmap_rwsem must be 2080 * held in write mode. Caller needs to explicitly 2081 * do this outside rmap routines. 2082 * 2083 * We also must hold hugetlb vma_lock in write mode. 2084 * Lock order dictates acquiring vma_lock BEFORE 2085 * i_mmap_rwsem. We can only try lock here and 2086 * fail if unsuccessful. 2087 */ 2088 if (!anon) { 2089 VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 2090 if (!hugetlb_vma_trylock_write(vma)) { 2091 page_vma_mapped_walk_done(&pvmw); 2092 ret = false; 2093 break; 2094 } 2095 if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { 2096 hugetlb_vma_unlock_write(vma); 2097 flush_tlb_range(vma, 2098 range.start, range.end); 2099 2100 /* 2101 * The ref count of the PMD page was 2102 * dropped which is part of the way map 2103 * counting is done for shared PMDs. 2104 * Return 'true' here. When there is 2105 * no other sharing, huge_pmd_unshare 2106 * returns false and we will unmap the 2107 * actual page and drop map count 2108 * to zero. 2109 */ 2110 page_vma_mapped_walk_done(&pvmw); 2111 break; 2112 } 2113 hugetlb_vma_unlock_write(vma); 2114 } 2115 /* Nuke the hugetlb page table entry */ 2116 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); 2117 } else { 2118 flush_cache_page(vma, address, pfn); 2119 /* Nuke the page table entry. */ 2120 if (should_defer_flush(mm, flags)) { 2121 /* 2122 * We clear the PTE but do not flush so potentially 2123 * a remote CPU could still be writing to the folio. 2124 * If the entry was previously clean then the 2125 * architecture must guarantee that a clear->dirty 2126 * transition on a cached TLB entry is written through 2127 * and traps if the PTE is unmapped. 2128 */ 2129 pteval = ptep_get_and_clear(mm, address, pvmw.pte); 2130 2131 set_tlb_ubc_flush_pending(mm, pteval, address); 2132 } else { 2133 pteval = ptep_clear_flush(vma, address, pvmw.pte); 2134 } 2135 } 2136 2137 /* Set the dirty flag on the folio now the pte is gone. */ 2138 if (pte_dirty(pteval)) 2139 folio_mark_dirty(folio); 2140 2141 /* Update high watermark before we lower rss */ 2142 update_hiwater_rss(mm); 2143 2144 if (folio_is_device_private(folio)) { 2145 unsigned long pfn = folio_pfn(folio); 2146 swp_entry_t entry; 2147 pte_t swp_pte; 2148 2149 if (anon_exclusive) 2150 WARN_ON_ONCE(folio_try_share_anon_rmap_pte(folio, 2151 subpage)); 2152 2153 /* 2154 * Store the pfn of the page in a special migration 2155 * pte. do_swap_page() will wait until the migration 2156 * pte is removed and then restart fault handling. 2157 */ 2158 entry = pte_to_swp_entry(pteval); 2159 if (is_writable_device_private_entry(entry)) 2160 entry = make_writable_migration_entry(pfn); 2161 else if (anon_exclusive) 2162 entry = make_readable_exclusive_migration_entry(pfn); 2163 else 2164 entry = make_readable_migration_entry(pfn); 2165 swp_pte = swp_entry_to_pte(entry); 2166 2167 /* 2168 * pteval maps a zone device page and is therefore 2169 * a swap pte. 2170 */ 2171 if (pte_swp_soft_dirty(pteval)) 2172 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2173 if (pte_swp_uffd_wp(pteval)) 2174 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2175 set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); 2176 trace_set_migration_pte(pvmw.address, pte_val(swp_pte), 2177 folio_order(folio)); 2178 /* 2179 * No need to invalidate here it will synchronize on 2180 * against the special swap migration pte. 2181 */ 2182 } else if (PageHWPoison(subpage)) { 2183 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 2184 if (folio_test_hugetlb(folio)) { 2185 hugetlb_count_sub(folio_nr_pages(folio), mm); 2186 set_huge_pte_at(mm, address, pvmw.pte, pteval, 2187 hsz); 2188 } else { 2189 dec_mm_counter(mm, mm_counter(folio)); 2190 set_pte_at(mm, address, pvmw.pte, pteval); 2191 } 2192 2193 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 2194 /* 2195 * The guest indicated that the page content is of no 2196 * interest anymore. Simply discard the pte, vmscan 2197 * will take care of the rest. 2198 * A future reference will then fault in a new zero 2199 * page. When userfaultfd is active, we must not drop 2200 * this page though, as its main user (postcopy 2201 * migration) will not expect userfaults on already 2202 * copied pages. 2203 */ 2204 dec_mm_counter(mm, mm_counter(folio)); 2205 } else { 2206 swp_entry_t entry; 2207 pte_t swp_pte; 2208 2209 if (arch_unmap_one(mm, vma, address, pteval) < 0) { 2210 if (folio_test_hugetlb(folio)) 2211 set_huge_pte_at(mm, address, pvmw.pte, 2212 pteval, hsz); 2213 else 2214 set_pte_at(mm, address, pvmw.pte, pteval); 2215 ret = false; 2216 page_vma_mapped_walk_done(&pvmw); 2217 break; 2218 } 2219 VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && 2220 !anon_exclusive, subpage); 2221 2222 /* See folio_try_share_anon_rmap_pte(): clear PTE first. */ 2223 if (folio_test_hugetlb(folio)) { 2224 if (anon_exclusive && 2225 hugetlb_try_share_anon_rmap(folio)) { 2226 set_huge_pte_at(mm, address, pvmw.pte, 2227 pteval, hsz); 2228 ret = false; 2229 page_vma_mapped_walk_done(&pvmw); 2230 break; 2231 } 2232 } else if (anon_exclusive && 2233 folio_try_share_anon_rmap_pte(folio, subpage)) { 2234 set_pte_at(mm, address, pvmw.pte, pteval); 2235 ret = false; 2236 page_vma_mapped_walk_done(&pvmw); 2237 break; 2238 } 2239 2240 /* 2241 * Store the pfn of the page in a special migration 2242 * pte. do_swap_page() will wait until the migration 2243 * pte is removed and then restart fault handling. 2244 */ 2245 if (pte_write(pteval)) 2246 entry = make_writable_migration_entry( 2247 page_to_pfn(subpage)); 2248 else if (anon_exclusive) 2249 entry = make_readable_exclusive_migration_entry( 2250 page_to_pfn(subpage)); 2251 else 2252 entry = make_readable_migration_entry( 2253 page_to_pfn(subpage)); 2254 if (pte_young(pteval)) 2255 entry = make_migration_entry_young(entry); 2256 if (pte_dirty(pteval)) 2257 entry = make_migration_entry_dirty(entry); 2258 swp_pte = swp_entry_to_pte(entry); 2259 if (pte_soft_dirty(pteval)) 2260 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2261 if (pte_uffd_wp(pteval)) 2262 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2263 if (folio_test_hugetlb(folio)) 2264 set_huge_pte_at(mm, address, pvmw.pte, swp_pte, 2265 hsz); 2266 else 2267 set_pte_at(mm, address, pvmw.pte, swp_pte); 2268 trace_set_migration_pte(address, pte_val(swp_pte), 2269 folio_order(folio)); 2270 /* 2271 * No need to invalidate here it will synchronize on 2272 * against the special swap migration pte. 2273 */ 2274 } 2275 2276 if (unlikely(folio_test_hugetlb(folio))) 2277 hugetlb_remove_rmap(folio); 2278 else 2279 folio_remove_rmap_pte(folio, subpage, vma); 2280 if (vma->vm_flags & VM_LOCKED) 2281 mlock_drain_local(); 2282 folio_put(folio); 2283 } 2284 2285 mmu_notifier_invalidate_range_end(&range); 2286 2287 return ret; 2288 } 2289 2290 /** 2291 * try_to_migrate - try to replace all page table mappings with swap entries 2292 * @folio: the folio to replace page table entries for 2293 * @flags: action and flags 2294 * 2295 * Tries to remove all the page table entries which are mapping this folio and 2296 * replace them with special swap entries. Caller must hold the folio lock. 2297 */ 2298 void try_to_migrate(struct folio *folio, enum ttu_flags flags) 2299 { 2300 struct rmap_walk_control rwc = { 2301 .rmap_one = try_to_migrate_one, 2302 .arg = (void *)flags, 2303 .done = folio_not_mapped, 2304 .anon_lock = folio_lock_anon_vma_read, 2305 }; 2306 2307 /* 2308 * Migration always ignores mlock and only supports TTU_RMAP_LOCKED and 2309 * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags. 2310 */ 2311 if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | 2312 TTU_SYNC | TTU_BATCH_FLUSH))) 2313 return; 2314 2315 if (folio_is_zone_device(folio) && 2316 (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) 2317 return; 2318 2319 /* 2320 * During exec, a temporary VMA is setup and later moved. 2321 * The VMA is moved under the anon_vma lock but not the 2322 * page tables leading to a race where migration cannot 2323 * find the migration ptes. Rather than increasing the 2324 * locking requirements of exec(), migration skips 2325 * temporary VMAs until after exec() completes. 2326 */ 2327 if (!folio_test_ksm(folio) && folio_test_anon(folio)) 2328 rwc.invalid_vma = invalid_migration_vma; 2329 2330 if (flags & TTU_RMAP_LOCKED) 2331 rmap_walk_locked(folio, &rwc); 2332 else 2333 rmap_walk(folio, &rwc); 2334 } 2335 2336 #ifdef CONFIG_DEVICE_PRIVATE 2337 struct make_exclusive_args { 2338 struct mm_struct *mm; 2339 unsigned long address; 2340 void *owner; 2341 bool valid; 2342 }; 2343 2344 static bool page_make_device_exclusive_one(struct folio *folio, 2345 struct vm_area_struct *vma, unsigned long address, void *priv) 2346 { 2347 struct mm_struct *mm = vma->vm_mm; 2348 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 2349 struct make_exclusive_args *args = priv; 2350 pte_t pteval; 2351 struct page *subpage; 2352 bool ret = true; 2353 struct mmu_notifier_range range; 2354 swp_entry_t entry; 2355 pte_t swp_pte; 2356 pte_t ptent; 2357 2358 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, 2359 vma->vm_mm, address, min(vma->vm_end, 2360 address + folio_size(folio)), 2361 args->owner); 2362 mmu_notifier_invalidate_range_start(&range); 2363 2364 while (page_vma_mapped_walk(&pvmw)) { 2365 /* Unexpected PMD-mapped THP? */ 2366 VM_BUG_ON_FOLIO(!pvmw.pte, folio); 2367 2368 ptent = ptep_get(pvmw.pte); 2369 if (!pte_present(ptent)) { 2370 ret = false; 2371 page_vma_mapped_walk_done(&pvmw); 2372 break; 2373 } 2374 2375 subpage = folio_page(folio, 2376 pte_pfn(ptent) - folio_pfn(folio)); 2377 address = pvmw.address; 2378 2379 /* Nuke the page table entry. */ 2380 flush_cache_page(vma, address, pte_pfn(ptent)); 2381 pteval = ptep_clear_flush(vma, address, pvmw.pte); 2382 2383 /* Set the dirty flag on the folio now the pte is gone. */ 2384 if (pte_dirty(pteval)) 2385 folio_mark_dirty(folio); 2386 2387 /* 2388 * Check that our target page is still mapped at the expected 2389 * address. 2390 */ 2391 if (args->mm == mm && args->address == address && 2392 pte_write(pteval)) 2393 args->valid = true; 2394 2395 /* 2396 * Store the pfn of the page in a special migration 2397 * pte. do_swap_page() will wait until the migration 2398 * pte is removed and then restart fault handling. 2399 */ 2400 if (pte_write(pteval)) 2401 entry = make_writable_device_exclusive_entry( 2402 page_to_pfn(subpage)); 2403 else 2404 entry = make_readable_device_exclusive_entry( 2405 page_to_pfn(subpage)); 2406 swp_pte = swp_entry_to_pte(entry); 2407 if (pte_soft_dirty(pteval)) 2408 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2409 if (pte_uffd_wp(pteval)) 2410 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2411 2412 set_pte_at(mm, address, pvmw.pte, swp_pte); 2413 2414 /* 2415 * There is a reference on the page for the swap entry which has 2416 * been removed, so shouldn't take another. 2417 */ 2418 folio_remove_rmap_pte(folio, subpage, vma); 2419 } 2420 2421 mmu_notifier_invalidate_range_end(&range); 2422 2423 return ret; 2424 } 2425 2426 /** 2427 * folio_make_device_exclusive - Mark the folio exclusively owned by a device. 2428 * @folio: The folio to replace page table entries for. 2429 * @mm: The mm_struct where the folio is expected to be mapped. 2430 * @address: Address where the folio is expected to be mapped. 2431 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks 2432 * 2433 * Tries to remove all the page table entries which are mapping this 2434 * folio and replace them with special device exclusive swap entries to 2435 * grant a device exclusive access to the folio. 2436 * 2437 * Context: Caller must hold the folio lock. 2438 * Return: false if the page is still mapped, or if it could not be unmapped 2439 * from the expected address. Otherwise returns true (success). 2440 */ 2441 static bool folio_make_device_exclusive(struct folio *folio, 2442 struct mm_struct *mm, unsigned long address, void *owner) 2443 { 2444 struct make_exclusive_args args = { 2445 .mm = mm, 2446 .address = address, 2447 .owner = owner, 2448 .valid = false, 2449 }; 2450 struct rmap_walk_control rwc = { 2451 .rmap_one = page_make_device_exclusive_one, 2452 .done = folio_not_mapped, 2453 .anon_lock = folio_lock_anon_vma_read, 2454 .arg = &args, 2455 }; 2456 2457 /* 2458 * Restrict to anonymous folios for now to avoid potential writeback 2459 * issues. 2460 */ 2461 if (!folio_test_anon(folio)) 2462 return false; 2463 2464 rmap_walk(folio, &rwc); 2465 2466 return args.valid && !folio_mapcount(folio); 2467 } 2468 2469 /** 2470 * make_device_exclusive_range() - Mark a range for exclusive use by a device 2471 * @mm: mm_struct of associated target process 2472 * @start: start of the region to mark for exclusive device access 2473 * @end: end address of region 2474 * @pages: returns the pages which were successfully marked for exclusive access 2475 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering 2476 * 2477 * Returns: number of pages found in the range by GUP. A page is marked for 2478 * exclusive access only if the page pointer is non-NULL. 2479 * 2480 * This function finds ptes mapping page(s) to the given address range, locks 2481 * them and replaces mappings with special swap entries preventing userspace CPU 2482 * access. On fault these entries are replaced with the original mapping after 2483 * calling MMU notifiers. 2484 * 2485 * A driver using this to program access from a device must use a mmu notifier 2486 * critical section to hold a device specific lock during programming. Once 2487 * programming is complete it should drop the page lock and reference after 2488 * which point CPU access to the page will revoke the exclusive access. 2489 */ 2490 int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, 2491 unsigned long end, struct page **pages, 2492 void *owner) 2493 { 2494 long npages = (end - start) >> PAGE_SHIFT; 2495 long i; 2496 2497 npages = get_user_pages_remote(mm, start, npages, 2498 FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, 2499 pages, NULL); 2500 if (npages < 0) 2501 return npages; 2502 2503 for (i = 0; i < npages; i++, start += PAGE_SIZE) { 2504 struct folio *folio = page_folio(pages[i]); 2505 if (PageTail(pages[i]) || !folio_trylock(folio)) { 2506 folio_put(folio); 2507 pages[i] = NULL; 2508 continue; 2509 } 2510 2511 if (!folio_make_device_exclusive(folio, mm, start, owner)) { 2512 folio_unlock(folio); 2513 folio_put(folio); 2514 pages[i] = NULL; 2515 } 2516 } 2517 2518 return npages; 2519 } 2520 EXPORT_SYMBOL_GPL(make_device_exclusive_range); 2521 #endif 2522 2523 void __put_anon_vma(struct anon_vma *anon_vma) 2524 { 2525 struct anon_vma *root = anon_vma->root; 2526 2527 anon_vma_free(anon_vma); 2528 if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 2529 anon_vma_free(root); 2530 } 2531 2532 static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, 2533 struct rmap_walk_control *rwc) 2534 { 2535 struct anon_vma *anon_vma; 2536 2537 if (rwc->anon_lock) 2538 return rwc->anon_lock(folio, rwc); 2539 2540 /* 2541 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() 2542 * because that depends on page_mapped(); but not all its usages 2543 * are holding mmap_lock. Users without mmap_lock are required to 2544 * take a reference count to prevent the anon_vma disappearing 2545 */ 2546 anon_vma = folio_anon_vma(folio); 2547 if (!anon_vma) 2548 return NULL; 2549 2550 if (anon_vma_trylock_read(anon_vma)) 2551 goto out; 2552 2553 if (rwc->try_lock) { 2554 anon_vma = NULL; 2555 rwc->contended = true; 2556 goto out; 2557 } 2558 2559 anon_vma_lock_read(anon_vma); 2560 out: 2561 return anon_vma; 2562 } 2563 2564 /* 2565 * rmap_walk_anon - do something to anonymous page using the object-based 2566 * rmap method 2567 * @folio: the folio to be handled 2568 * @rwc: control variable according to each walk type 2569 * @locked: caller holds relevant rmap lock 2570 * 2571 * Find all the mappings of a folio using the mapping pointer and the vma 2572 * chains contained in the anon_vma struct it points to. 2573 */ 2574 static void rmap_walk_anon(struct folio *folio, 2575 struct rmap_walk_control *rwc, bool locked) 2576 { 2577 struct anon_vma *anon_vma; 2578 pgoff_t pgoff_start, pgoff_end; 2579 struct anon_vma_chain *avc; 2580 2581 if (locked) { 2582 anon_vma = folio_anon_vma(folio); 2583 /* anon_vma disappear under us? */ 2584 VM_BUG_ON_FOLIO(!anon_vma, folio); 2585 } else { 2586 anon_vma = rmap_walk_anon_lock(folio, rwc); 2587 } 2588 if (!anon_vma) 2589 return; 2590 2591 pgoff_start = folio_pgoff(folio); 2592 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; 2593 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, 2594 pgoff_start, pgoff_end) { 2595 struct vm_area_struct *vma = avc->vma; 2596 unsigned long address = vma_address(vma, pgoff_start, 2597 folio_nr_pages(folio)); 2598 2599 VM_BUG_ON_VMA(address == -EFAULT, vma); 2600 cond_resched(); 2601 2602 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2603 continue; 2604 2605 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 2606 break; 2607 if (rwc->done && rwc->done(folio)) 2608 break; 2609 } 2610 2611 if (!locked) 2612 anon_vma_unlock_read(anon_vma); 2613 } 2614 2615 /* 2616 * rmap_walk_file - do something to file page using the object-based rmap method 2617 * @folio: the folio to be handled 2618 * @rwc: control variable according to each walk type 2619 * @locked: caller holds relevant rmap lock 2620 * 2621 * Find all the mappings of a folio using the mapping pointer and the vma chains 2622 * contained in the address_space struct it points to. 2623 */ 2624 static void rmap_walk_file(struct folio *folio, 2625 struct rmap_walk_control *rwc, bool locked) 2626 { 2627 struct address_space *mapping = folio_mapping(folio); 2628 pgoff_t pgoff_start, pgoff_end; 2629 struct vm_area_struct *vma; 2630 2631 /* 2632 * The page lock not only makes sure that page->mapping cannot 2633 * suddenly be NULLified by truncation, it makes sure that the 2634 * structure at mapping cannot be freed and reused yet, 2635 * so we can safely take mapping->i_mmap_rwsem. 2636 */ 2637 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 2638 2639 if (!mapping) 2640 return; 2641 2642 pgoff_start = folio_pgoff(folio); 2643 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; 2644 if (!locked) { 2645 if (i_mmap_trylock_read(mapping)) 2646 goto lookup; 2647 2648 if (rwc->try_lock) { 2649 rwc->contended = true; 2650 return; 2651 } 2652 2653 i_mmap_lock_read(mapping); 2654 } 2655 lookup: 2656 vma_interval_tree_foreach(vma, &mapping->i_mmap, 2657 pgoff_start, pgoff_end) { 2658 unsigned long address = vma_address(vma, pgoff_start, 2659 folio_nr_pages(folio)); 2660 2661 VM_BUG_ON_VMA(address == -EFAULT, vma); 2662 cond_resched(); 2663 2664 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2665 continue; 2666 2667 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 2668 goto done; 2669 if (rwc->done && rwc->done(folio)) 2670 goto done; 2671 } 2672 2673 done: 2674 if (!locked) 2675 i_mmap_unlock_read(mapping); 2676 } 2677 2678 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) 2679 { 2680 if (unlikely(folio_test_ksm(folio))) 2681 rmap_walk_ksm(folio, rwc); 2682 else if (folio_test_anon(folio)) 2683 rmap_walk_anon(folio, rwc, false); 2684 else 2685 rmap_walk_file(folio, rwc, false); 2686 } 2687 2688 /* Like rmap_walk, but caller holds relevant rmap lock */ 2689 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) 2690 { 2691 /* no ksm support for now */ 2692 VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); 2693 if (folio_test_anon(folio)) 2694 rmap_walk_anon(folio, rwc, true); 2695 else 2696 rmap_walk_file(folio, rwc, true); 2697 } 2698 2699 #ifdef CONFIG_HUGETLB_PAGE 2700 /* 2701 * The following two functions are for anonymous (private mapped) hugepages. 2702 * Unlike common anonymous pages, anonymous hugepages have no accounting code 2703 * and no lru code, because we handle hugepages differently from common pages. 2704 */ 2705 void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, 2706 unsigned long address, rmap_t flags) 2707 { 2708 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); 2709 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); 2710 2711 atomic_inc(&folio->_entire_mapcount); 2712 atomic_inc(&folio->_large_mapcount); 2713 if (flags & RMAP_EXCLUSIVE) 2714 SetPageAnonExclusive(&folio->page); 2715 VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 && 2716 PageAnonExclusive(&folio->page), folio); 2717 } 2718 2719 void hugetlb_add_new_anon_rmap(struct folio *folio, 2720 struct vm_area_struct *vma, unsigned long address) 2721 { 2722 VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); 2723 2724 BUG_ON(address < vma->vm_start || address >= vma->vm_end); 2725 /* increment count (starts at -1) */ 2726 atomic_set(&folio->_entire_mapcount, 0); 2727 atomic_set(&folio->_large_mapcount, 0); 2728 folio_clear_hugetlb_restore_reserve(folio); 2729 __folio_set_anon(folio, vma, address, true); 2730 SetPageAnonExclusive(&folio->page); 2731 } 2732 #endif /* CONFIG_HUGETLB_PAGE */ 2733