/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_rwsem	(while writing or truncating, not reading or faulting)
 *   mm->mmap_lock
 *     mapping->invalidate_lock (in filemap_fault)
 *       page->flags PG_locked (lock_page)   * (see hugetlbfs below)
 *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 *           mapping->i_mmap_rwsem
 *             hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *             anon_vma->rwsem
 *               mm->page_table_lock or pte_lock
 *                 swap_lock (in swap_duplicate, swap_info_get)
 *                   mmlist_lock (in mmput, drain_mmlist and others)
 *                   mapping->private_lock (in block_dirty_folio)
 *                     folio_lock_memcg move_lock (in block_dirty_folio)
 *                       i_pages lock (widely used)
 *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
 *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                     sb_lock (within inode_lock in fs/fs-writeback.c)
 *                     i_pages lock (widely used, in set_page_dirty,
 *                               in arch-dependent flush_dcache_mmap_lock,
 *                               within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 *
 * * hugetlbfs PageHuge() pages take locks in this order:
 *         mapping->i_mmap_rwsem
 *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *             page->flags PG_locked (lock_page)
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>

#include <asm/tlbflush.h>

#include <trace/events/tlb.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->degree = 1;	/* Reference for first vma */
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against folio_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
	 *
	 * folio_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}
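
/*
 * Illustrative sketch, not part of this file: a fault-path caller goes
 * through the anon_vma_prepare() wrapper with the mmap_lock held for
 * reading, and only reaches __anon_vma_prepare() below when the vma has
 * no anon_vma yet.  Roughly (the caller's error handling is assumed):
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	// vma->anon_vma is now valid; a new anonymous page can be
 *	// installed and added with page_add_new_anon_rmap().
 */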

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
 * and that may actually touch the rwsem even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_lock held for reading.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		/* vma reference or self-parent link for new root */
		anon_vma->degree++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}

/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * anon_vma_clone() is called by __vma_adjust(), __split_vma(), copy_vma() and
 * anon_vma_fork(). The first three want an exact copy of src, while the last
 * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
 * endless growth of anon_vma. Since dst->anon_vma is set to NULL before call,
 * we can identify this case by checking (!dst->anon_vma && src->anon_vma).
 *
 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
 * and reuse existing anon_vma which has no vmas and only one child anon_vma.
 * This prevents degradation of anon_vma hierarchy to endless linear chain in
 * case of constantly forking task. On the other hand, an anon_vma with more
 * than one child isn't reused even if there was no alive vma, thus rmap
 * walker has a good chance of avoiding scanning the whole hierarchy when it
 * searches where page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse existing anon_vma if its degree is lower than two,
		 * which means it has no vma and only one anon_vma child.
		 *
		 * Do not choose parent anon_vma, otherwise first child
		 * will always reuse it. Root anon_vma is never reused:
		 * it has self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && src->anon_vma &&
		    anon_vma != src->anon_vma && anon_vma->degree < 2)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->degree++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
	 * decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->degree++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->degree--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma) {
		vma->anon_vma->degree--;

		/*
		 * vma would still be needed after unlink, and anon_vma will be
		 * prepared when handling a fault.
		 */
		vma->anon_vma = NULL;
	}
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->degree);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a refcount-increased anon_vma
 * that might have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the folio is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!folio_mapped(folio)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

/*
 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
 */
#define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
#define TLB_FLUSH_BATCH_PENDING_MASK	\
	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
#define TLB_FLUSH_BATCH_PENDING_LARGE	\
	(TLB_FLUSH_BATCH_PENDING_MASK / 2)

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	int batch, nbatch;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure compiler does not re-order the setting of tlb_flush_batched
	 * before the PTE is cleared.
	 */
	barrier();
	batch = atomic_read(&mm->tlb_flush_batched);
retry:
	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
		/*
		 * Prevent `pending' from catching up with `flushed' because of
		 * overflow.  Reset `pending' and `flushed' to be 1 and 0 if
		 * `pending' becomes large.
		 */
		nbatch = atomic_cmpxchg(&mm->tlb_flush_batched, batch, 1);
		if (nbatch != batch) {
			batch = nbatch;
			goto retry;
		}
	} else {
		atomic_inc(&mm->tlb_flush_batched);
	}

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer the flush to batch it */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	int batch = atomic_read(&mm->tlb_flush_batched);
	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;

	if (pending != flushed) {
		flush_tlb_mm(mm);
		/*
		 * If the new TLB flushing is pending during flushing, leave
		 * mm->tlb_flush_batched as is, to avoid losing flushing.
		 */
		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct folio *folio = page_folio(page);
	if (folio_test_anon(folio)) {
		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (!vma->vm_file) {
		return -EFAULT;
	} else if (vma->vm_file->f_mapping != folio->mapping) {
		return -EFAULT;
	}

	return vma_address(page, vma);
}

pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
	 * without holding anon_vma lock for write.  So when looking for a
	 * genuine pmde (in which to find pte), test present and !THP together.
	 */
	pmde = *pmd;
	barrier();
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}

struct folio_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: folio_referenced_arg will be passed
 */
static bool folio_referenced_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address, void *arg)
{
	struct folio_referenced_arg *pra = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if ((vma->vm_flags & VM_LOCKED) &&
		    (!folio_test_large(folio) || !pvmw.pte)) {
			/* Restore the mlock which got missed */
			mlock_vma_folio(folio, vma, !pvmw.pte);
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false;	/* To break the loop */
		}

		if (pvmw.pte) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the folio has been used in another mapping,
				 * we will catch it; if this other mapping is
				 * already gone, the unmap path will have set
				 * the referenced flag or activated the folio.
				 */
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped folio? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		folio_clear_idle(folio);
	if (folio_test_clear_young(folio))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
	}

	if (!pra->mapcount)
		return false;	/* To break the loop */

	return true;
}

static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct folio_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}
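
/*
 * Illustrative sketch, not part of this file: a reclaim-style caller of
 * folio_referenced() typically holds a folio reference and consumes both
 * the return value and the accumulated vm_flags, roughly:
 *
 *	unsigned long vm_flags;
 *	int refs = folio_referenced(folio, 1, memcg, &vm_flags);
 *
 *	if (vm_flags & VM_LOCKED)
 *		// keep the folio, an mlocked vma still maps it
 *	else if (refs)
 *		// recently referenced, prefer keeping it on the active list
 */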

/**
 * folio_referenced() - Test if the folio was referenced.
 * @folio: The folio to test.
 * @is_locked: Caller holds lock on the folio.
 * @memcg: target memory cgroup
 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
 *
 * Quick test_and_clear_referenced for all mappings of a folio,
 *
 * Return: The number of mappings which referenced the folio.
 */
int folio_referenced(struct folio *folio, int is_locked,
		     struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	int we_locked = 0;
	struct folio_referenced_arg pra = {
		.mapcount = folio_mapcount(folio),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = folio_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = folio_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!pra.mapcount)
		return 0;

	if (!folio_raw_mapping(folio))
		return 0;

	if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
		we_locked = folio_trylock(folio);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_folio_referenced_vma;
	}

	rmap_walk(folio, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		folio_unlock(folio);

	return pra.referenced;
}

static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
	struct mmu_notifier_range range;
	int *cleaned = arg;

	/*
	 * We have to assume the worst case, i.e. pmd for invalidation.
	 * Note that the folio cannot be freed from this function.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
				0, vma, vma->vm_mm, address,
				vma_address_end(&pvmw));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		int ret = 0;

		address = pvmw.address;
		if (pvmw.pte) {
			pte_t entry;
			pte_t *pte = pvmw.pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			pmd_t *pmd = pvmw.pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_page(vma, address, folio_pfn(folio));
			entry = pmdp_invalidate(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped folio? */
			WARN_ON_ONCE(1);
#endif
		}

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (ret)
			(*cleaned)++;
	}

	mmu_notifier_invalidate_range_end(&range);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int folio_mkclean(struct folio *folio)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!folio_test_locked(folio));

	if (!folio_mapped(folio))
		return 0;

	mapping = folio_mapping(folio);
	if (!mapping)
		return 0;

	rmap_walk(folio, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(folio_mkclean);

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	page = compound_head(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg folio_referenced()'s
	 * folio_test_anon()) will not see one without the other.
	 */
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	Page or Hugepage to add to rmap
 * @vma:	VM area to add page to.
 * @address:	User virtual address of the mapping
 * @exclusive:	the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	/*
	 * page_idle does a lockless/optimistic rmap scan on page->mapping.
	 * Make sure the compiler doesn't split the stores of anon_vma and
	 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
	 * could mistake the mapping for a struct address_space and crash.
	 */
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct folio *folio = page_folio(page);
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
			folio);
	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
		       page);
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @compound:	charge the page as compound or small page
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int flags)
{
	bool compound = flags & RMAP_COMPOUND;
	bool first;

	if (unlikely(PageKsm(page)))
		lock_page_memcg(page);
	else
		VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (compound) {
		atomic_t *mapcount;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		mapcount = compound_mapcount_ptr(page);
		first = atomic_inc_and_test(mapcount);
	} else {
		first = atomic_inc_and_test(&page->_mapcount);
	}

	if (first) {
		int nr = compound ? thp_nr_pages(page) : 1;
		/*
		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
		 * these counters are not modified in interrupt context, and
		 * pte lock(a spinlock) is held, which implies preemption
		 * disabled.
		 */
		if (compound)
			__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
		__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
	}

	if (unlikely(PageKsm(page)))
		unlock_page_memcg(page);

	/* address might be in next vma when migration races vma_adjust */
	else if (first)
		__page_set_anon_rmap(page, vma, address,
				flags & RMAP_EXCLUSIVE);
	else
		__page_check_anon_rmap(page, vma, address);

	mlock_vma_page(page, vma, compound);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @compound:	charge the page as compound or small page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	int nr = compound ? thp_nr_pages(page) : 1;

	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	__SetPageSwapBacked(page);
	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		/* increment count (starts at -1) */
		atomic_set(compound_mapcount_ptr(page), 0);
		atomic_set(compound_pincount_ptr(page), 0);

		__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
	} else {
		/* Anon THP always mapped first with PMD */
		VM_BUG_ON_PAGE(PageTransCompound(page), page);
		/* increment count (starts at -1) */
		atomic_set(&page->_mapcount, 0);
	}
	__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
	__page_set_anon_rmap(page, vma, address, 1);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @compound:	charge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page,
	struct vm_area_struct *vma, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
	lock_page_memcg(page);
	if (compound && PageTransHuge(page)) {
		int nr_pages = thp_nr_pages(page);

		for (i = 0, nr = 0; i < nr_pages; i++) {
			if (atomic_inc_and_test(&page[i]._mapcount))
				nr++;
		}
		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
			goto out;

		/*
		 * It is racy to ClearPageDoubleMap in page_remove_file_rmap();
		 * but page lock is held by all page_add_file_rmap() compound
		 * callers, and SetPageDoubleMap below warns if !PageLocked:
		 * so here is a place that DoubleMap can be safely cleared.
		 */
		VM_WARN_ON_ONCE(!PageLocked(page));
		if (nr == nr_pages && PageDoubleMap(page))
			ClearPageDoubleMap(page);

		if (PageSwapBacked(page))
			__mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED,
						nr_pages);
		else
			__mod_lruvec_page_state(page, NR_FILE_PMDMAPPED,
						nr_pages);
	} else {
		if (PageTransCompound(page) && page_mapping(page)) {
			VM_WARN_ON_ONCE(!PageLocked(page));
			SetPageDoubleMap(compound_head(page));
		}
		if (!atomic_inc_and_test(&page->_mapcount))
			goto out;
	}
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
out:
	unlock_page_memcg(page);

	mlock_vma_page(page, vma, compound);
}

static void page_remove_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageHead(page), page);

	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
	if (unlikely(PageHuge(page))) {
		/* hugetlb pages are always mapped with pmds */
		atomic_dec(compound_mapcount_ptr(page));
		return;
	}

	/* page still mapped by someone else? */
	if (compound && PageTransHuge(page)) {
		int nr_pages = thp_nr_pages(page);

		for (i = 0, nr = 0; i < nr_pages; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
			return;
		if (PageSwapBacked(page))
			__mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED,
						-nr_pages);
		else
			__mod_lruvec_page_state(page, NR_FILE_PMDMAPPED,
						-nr_pages);
	} else {
		if (!atomic_add_negative(-1, &page->_mapcount))
			return;
	}

	/*
	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
	 * these counters are not modified in interrupt context, and
	 * pte lock(a spinlock) is held, which implies preemption disabled.
	 */
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);
}

static void page_remove_anon_compound_rmap(struct page *page)
{
	int i, nr;

	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
		return;

	/* Hugepages are not counted in NR_ANON_PAGES for now. */
	if (unlikely(PageHuge(page)))
		return;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return;

	__mod_lruvec_page_state(page, NR_ANON_THPS, -thp_nr_pages(page));

	if (TestClearPageDoubleMap(page)) {
		/*
		 * Subpages can be mapped with PTEs too. Check how many of
		 * them are still mapped.
		 */
		for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}

		/*
		 * Queue the page for deferred split if at least one small
		 * page of the compound page is unmapped, but at least one
		 * small page is still mapped.
		 */
		if (nr && nr < thp_nr_pages(page))
			deferred_split_huge_page(page);
	} else {
		nr = thp_nr_pages(page);
	}

	if (nr)
		__mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr);
}
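
/*
 * Illustrative sketch, not part of this file: a typical unmap path clears
 * the PTE and drops the reverse mapping while still holding the pte lock,
 * roughly:
 *
 *	ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 *	...
 *	page_remove_rmap(page, vma, false);
 *	// the folio reference is dropped only after the TLB has been flushed
 */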

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page:	page to remove mapping from
 * @vma:	the vm area from which the mapping is removed
 * @compound:	uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page,
	struct vm_area_struct *vma, bool compound)
{
	lock_page_memcg(page);

	if (!PageAnon(page)) {
		page_remove_file_rmap(page, compound);
		goto out;
	}

	if (compound) {
		page_remove_anon_compound_rmap(page);
		goto out;
	}

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		goto out;

	/*
	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
	 * these counters are not modified in interrupt context, and
	 * pte lock(a spinlock) is held, which implies preemption disabled.
	 */
	__dec_lruvec_page_state(page, NR_ANON_MAPPED);

	if (PageTransCompound(page))
		deferred_split_huge_page(compound_head(page));

	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_unref_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
out:
	unlock_page_memcg(page);

	munlock_vma_page(page, vma, compound);
}

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	pte_t pteval;
	struct page *subpage;
	bool ret = true;
	struct mmu_notifier_range range;
	enum ttu_flags flags = (enum ttu_flags)(long)arg;

	/*
	 * When racing against e.g. zap_pte_range() on another cpu,
	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
	 * try_to_unmap() may return before page_mapped() has become false,
	 * if page table locking is skipped: use TTU_SYNC to wait for that.
	 */
	if (flags & TTU_SYNC)
		pvmw.flags = PVMW_SYNC;

	if (flags & TTU_SPLIT_HUGE_PMD)
		split_huge_pmd_address(vma, address, false, folio);

	/*
	 * For THP, we have to assume the worst case, i.e. pmd for
	 * invalidation. For hugetlb, it could be much worse if we need to do
	 * pud invalidation in the case of pmd sharing.
	 *
	 * Note that the folio cannot be freed in this function, as the caller
	 * of try_to_unmap() must hold a reference on the folio.
	 */
	range.end = vma_address_end(&pvmw);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address, range.end);
	if (folio_test_hugetlb(folio)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
		 */
		adjust_range_if_pmd_sharing_possible(vma, &range.start,
						     &range.end);
	}
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_FOLIO(!pvmw.pte, folio);

		/*
		 * If the folio is in an mlock()d vma, we must not swap it out.
		 */
		if (!(flags & TTU_IGNORE_MLOCK) &&
		    (vma->vm_flags & VM_LOCKED)) {
			/* Restore the mlock which got missed */
			mlock_vma_folio(folio, vma, false);
			page_vma_mapped_walk_done(&pvmw);
			ret = false;
			break;
		}

		subpage = folio_page(folio,
					pte_pfn(*pvmw.pte) - folio_pfn(folio));
		address = pvmw.address;

		if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
			/*
			 * To call huge_pmd_unshare, i_mmap_rwsem must be
			 * held in write mode.  Caller needs to explicitly
			 * do this outside rmap routines.
			 */
			VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
			if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
				/*
				 * huge_pmd_unshare unmapped an entire PMD
				 * page.  There is no way of knowing exactly
				 * which PMDs may be cached for this mm, so
				 * we must flush them all.  start/end were
				 * already adjusted above to cover this range.
				 */
				flush_cache_range(vma, range.start, range.end);
				flush_tlb_range(vma, range.start, range.end);
				mmu_notifier_invalidate_range(mm, range.start,
							      range.end);

				/*
				 * The ref count of the PMD page was dropped
				 * which is part of the way map counting
				 * is done for shared PMDs.  Return 'true'
				 * here.  When there is no other sharing,
				 * huge_pmd_unshare returns false and we will
				 * unmap the actual page and drop map count
				 * to zero.
				 */
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
		if (should_defer_flush(mm, flags)) {
			/*
			 * We clear the PTE but do not flush so potentially
			 * a remote CPU could still be writing to the folio.
			 * If the entry was previously clean then the
			 * architecture must guarantee that a clear->dirty
			 * transition on a cached TLB entry is written through
			 * and traps if the PTE is unmapped.
			 */
			pteval = ptep_get_and_clear(mm, address, pvmw.pte);

			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
		} else {
			pteval = ptep_clear_flush(vma, address, pvmw.pte);
		}

		/* Set the dirty flag on the folio now the pte is gone. */
		if (pte_dirty(pteval))
			folio_mark_dirty(folio);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) {
			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			if (folio_test_hugetlb(folio)) {
				hugetlb_count_sub(folio_nr_pages(folio), mm);
				set_huge_swap_pte_at(mm, address,
						     pvmw.pte, pteval,
						     vma_mmu_pagesize(vma));
			} else {
				dec_mm_counter(mm, mm_counter(&folio->page));
				set_pte_at(mm, address, pvmw.pte, pteval);
			}

		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 * A future reference will then fault in a new zero
			 * page. When userfaultfd is active, we must not drop
			 * this page though, as its main user (postcopy
			 * migration) will not expect userfaults on already
			 * copied pages.
			 */
			dec_mm_counter(mm, mm_counter(&folio->page));
			/* We have to invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else if (folio_test_anon(folio)) {
			swp_entry_t entry = { .val = page_private(subpage) };
			pte_t swp_pte;
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (unlikely(folio_test_swapbacked(folio) !=
					folio_test_swapcache(folio))) {
				WARN_ON_ONCE(1);
				ret = false;
				/* We have to invalidate as we cleared the pte */
				mmu_notifier_invalidate_range(mm, address,
							address + PAGE_SIZE);
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/* MADV_FREE page check */
			if (!folio_test_swapbacked(folio)) {
				if (!folio_test_dirty(folio)) {
					/* Invalidate as we cleared the pte */
					mmu_notifier_invalidate_range(mm,
						address, address + PAGE_SIZE);
					dec_mm_counter(mm, MM_ANONPAGES);
					goto discard;
				}

				/*
				 * If the folio was redirtied, it cannot be
				 * discarded. Remap the page to page table.
				 */
				set_pte_at(mm, address, pvmw.pte, pteval);
				folio_set_swapbacked(folio);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			if (pte_uffd_wp(pteval))
				swp_pte = pte_swp_mkuffd_wp(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
			/* Invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else {
			/*
			 * This is a locked file-backed folio,
			 * so it cannot be removed from the page
			 * cache and replaced by a new folio before
			 * mmu_notifier_invalidate_range_end, so no
			 * concurrent thread might update its page table
			 * to point at a new folio while a device is
			 * still using this folio.
			 *
			 * See Documentation/vm/mmu_notifier.rst
			 */
			dec_mm_counter(mm, mm_counter_file(&folio->page));
		}
discard:
		/*
		 * No need to call mmu_notifier_invalidate_range() here, as it
		 * has been done above for all cases requiring it to happen
		 * under page table lock before
		 * mmu_notifier_invalidate_range_end().
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
		if (vma->vm_flags & VM_LOCKED)
			mlock_page_drain(smp_processor_id());
		folio_put(folio);
	}

	mmu_notifier_invalidate_range_end(&range);

	return ret;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return vma_is_temporary_stack(vma);
}

static int page_not_mapped(struct folio *folio)
{
	return !folio_mapped(folio);
}

/**
 * try_to_unmap - Try to remove all page table mappings to a folio.
 * @folio: The folio to unmap.
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * folio.  It is the caller's responsibility to check if the folio is
 * still mapped if needed (use TTU_SYNC to prevent accounting races).
 *
 * Context: Caller must hold the folio lock.
 */
void try_to_unmap(struct folio *folio, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = page_not_mapped,
		.anon_lock = folio_lock_anon_vma_read,
	};

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(folio, &rwc);
	else
		rmap_walk(folio, &rwc);
}

/*
 * @arg: enum ttu_flags will be passed to this argument.
 *
 * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
 * containing migration entries.
 */
static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	pte_t pteval;
	struct page *subpage;
	bool ret = true;
	struct mmu_notifier_range range;
	enum ttu_flags flags = (enum ttu_flags)(long)arg;

	/*
	 * When racing against e.g. zap_pte_range() on another cpu,
	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
	 * try_to_migrate() may return before page_mapped() has become false,
	 * if page table locking is skipped: use TTU_SYNC to wait for that.
	 */
	if (flags & TTU_SYNC)
		pvmw.flags = PVMW_SYNC;

	/*
	 * unmap_page() in mm/huge_memory.c is the only user of migration with
	 * TTU_SPLIT_HUGE_PMD and it wants to freeze.
	 */
	if (flags & TTU_SPLIT_HUGE_PMD)
		split_huge_pmd_address(vma, address, true, folio);

	/*
	 * For THP, we have to assume the worst case, i.e. pmd for
	 * invalidation. For hugetlb, it could be much worse if we need to do
	 * pud invalidation in the case of pmd sharing.
	 *
	 * Note that the page cannot be freed in this function, as the caller
	 * of try_to_unmap() must hold a reference on the page.
	 */
	range.end = vma_address_end(&pvmw);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address, range.end);
	if (folio_test_hugetlb(folio)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
		 */
		adjust_range_if_pmd_sharing_possible(vma, &range.start,
						     &range.end);
	}
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			subpage = folio_page(folio,
				pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);

			set_pmd_migration_entry(&pvmw, subpage);
			continue;
		}
#endif

		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_FOLIO(!pvmw.pte, folio);

		subpage = folio_page(folio,
				pte_pfn(*pvmw.pte) - folio_pfn(folio));
		address = pvmw.address;

		if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
			/*
			 * To call huge_pmd_unshare, i_mmap_rwsem must be
			 * held in write mode.  Caller needs to explicitly
			 * do this outside rmap routines.
			 */
			VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
			if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
				/*
				 * huge_pmd_unshare unmapped an entire PMD
				 * page.  There is no way of knowing exactly
				 * which PMDs may be cached for this mm, so
				 * we must flush them all.  start/end were
				 * already adjusted above to cover this range.
				 */
				flush_cache_range(vma, range.start, range.end);
				flush_tlb_range(vma, range.start, range.end);
				mmu_notifier_invalidate_range(mm, range.start,
							      range.end);

				/*
				 * The ref count of the PMD page was dropped
				 * which is part of the way map counting
				 * is done for shared PMDs.  Return 'true'
				 * here.  When there is no other sharing,
				 * huge_pmd_unshare returns false and we will
				 * unmap the actual page and drop map count
				 * to zero.
				 */
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
		pteval = ptep_clear_flush(vma, address, pvmw.pte);

		/* Set the dirty flag on the folio now the pte is gone. */
		if (pte_dirty(pteval))
			folio_mark_dirty(folio);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (folio_is_zone_device(folio)) {
			unsigned long pfn = folio_pfn(folio);
			swp_entry_t entry;
			pte_t swp_pte;

			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = pte_to_swp_entry(pteval);
			if (is_writable_device_private_entry(entry))
				entry = make_writable_migration_entry(pfn);
			else
				entry = make_readable_migration_entry(pfn);
			swp_pte = swp_entry_to_pte(entry);

			/*
			 * pteval maps a zone device page and is therefore
			 * a swap pte.
			 */
			if (pte_swp_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			if (pte_swp_uffd_wp(pteval))
				swp_pte = pte_swp_mkuffd_wp(swp_pte);
			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
			/*
			 * No need to invalidate here, it will synchronize
			 * against the special swap migration pte.
			 *
			 * The assignment to subpage above was computed from a
			 * swap PTE which results in an invalid pointer.
			 * Since only PAGE_SIZE pages can currently be
			 * migrated, just set it to page. This will need to be
This will need to be 1863 * changed when hugepage migrations to device private 1864 * memory are supported. 1865 */ 1866 subpage = &folio->page; 1867 } else if (PageHWPoison(subpage)) { 1868 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 1869 if (folio_test_hugetlb(folio)) { 1870 hugetlb_count_sub(folio_nr_pages(folio), mm); 1871 set_huge_swap_pte_at(mm, address, 1872 pvmw.pte, pteval, 1873 vma_mmu_pagesize(vma)); 1874 } else { 1875 dec_mm_counter(mm, mm_counter(&folio->page)); 1876 set_pte_at(mm, address, pvmw.pte, pteval); 1877 } 1878 1879 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 1880 /* 1881 * The guest indicated that the page content is of no 1882 * interest anymore. Simply discard the pte, vmscan 1883 * will take care of the rest. 1884 * A future reference will then fault in a new zero 1885 * page. When userfaultfd is active, we must not drop 1886 * this page though, as its main user (postcopy 1887 * migration) will not expect userfaults on already 1888 * copied pages. 1889 */ 1890 dec_mm_counter(mm, mm_counter(&folio->page)); 1891 /* We have to invalidate as we cleared the pte */ 1892 mmu_notifier_invalidate_range(mm, address, 1893 address + PAGE_SIZE); 1894 } else { 1895 swp_entry_t entry; 1896 pte_t swp_pte; 1897 1898 if (arch_unmap_one(mm, vma, address, pteval) < 0) { 1899 set_pte_at(mm, address, pvmw.pte, pteval); 1900 ret = false; 1901 page_vma_mapped_walk_done(&pvmw); 1902 break; 1903 } 1904 1905 /* 1906 * Store the pfn of the page in a special migration 1907 * pte. do_swap_page() will wait until the migration 1908 * pte is removed and then restart fault handling. 1909 */ 1910 if (pte_write(pteval)) 1911 entry = make_writable_migration_entry( 1912 page_to_pfn(subpage)); 1913 else 1914 entry = make_readable_migration_entry( 1915 page_to_pfn(subpage)); 1916 1917 swp_pte = swp_entry_to_pte(entry); 1918 if (pte_soft_dirty(pteval)) 1919 swp_pte = pte_swp_mksoft_dirty(swp_pte); 1920 if (pte_uffd_wp(pteval)) 1921 swp_pte = pte_swp_mkuffd_wp(swp_pte); 1922 set_pte_at(mm, address, pvmw.pte, swp_pte); 1923 /* 1924 * No need to invalidate here it will synchronize on 1925 * against the special swap migration pte. 1926 */ 1927 } 1928 1929 /* 1930 * No need to call mmu_notifier_invalidate_range() it has be 1931 * done above for all cases requiring it to happen under page 1932 * table lock before mmu_notifier_invalidate_range_end() 1933 * 1934 * See Documentation/vm/mmu_notifier.rst 1935 */ 1936 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); 1937 if (vma->vm_flags & VM_LOCKED) 1938 mlock_page_drain(smp_processor_id()); 1939 folio_put(folio); 1940 } 1941 1942 mmu_notifier_invalidate_range_end(&range); 1943 1944 return ret; 1945 } 1946 1947 /** 1948 * try_to_migrate - try to replace all page table mappings with swap entries 1949 * @folio: the folio to replace page table entries for 1950 * @flags: action and flags 1951 * 1952 * Tries to remove all the page table entries which are mapping this folio and 1953 * replace them with special swap entries. Caller must hold the folio lock. 1954 */ 1955 void try_to_migrate(struct folio *folio, enum ttu_flags flags) 1956 { 1957 struct rmap_walk_control rwc = { 1958 .rmap_one = try_to_migrate_one, 1959 .arg = (void *)flags, 1960 .done = page_not_mapped, 1961 .anon_lock = folio_lock_anon_vma_read, 1962 }; 1963 1964 /* 1965 * Migration always ignores mlock and only supports TTU_RMAP_LOCKED and 1966 * TTU_SPLIT_HUGE_PMD and TTU_SYNC flags. 
1967 */ 1968 if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | 1969 TTU_SYNC))) 1970 return; 1971 1972 if (folio_is_zone_device(folio) && !folio_is_device_private(folio)) 1973 return; 1974 1975 /* 1976 * During exec, a temporary VMA is setup and later moved. 1977 * The VMA is moved under the anon_vma lock but not the 1978 * page tables leading to a race where migration cannot 1979 * find the migration ptes. Rather than increasing the 1980 * locking requirements of exec(), migration skips 1981 * temporary VMAs until after exec() completes. 1982 */ 1983 if (!folio_test_ksm(folio) && folio_test_anon(folio)) 1984 rwc.invalid_vma = invalid_migration_vma; 1985 1986 if (flags & TTU_RMAP_LOCKED) 1987 rmap_walk_locked(folio, &rwc); 1988 else 1989 rmap_walk(folio, &rwc); 1990 } 1991 1992 #ifdef CONFIG_DEVICE_PRIVATE 1993 struct make_exclusive_args { 1994 struct mm_struct *mm; 1995 unsigned long address; 1996 void *owner; 1997 bool valid; 1998 }; 1999 2000 static bool page_make_device_exclusive_one(struct folio *folio, 2001 struct vm_area_struct *vma, unsigned long address, void *priv) 2002 { 2003 struct mm_struct *mm = vma->vm_mm; 2004 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 2005 struct make_exclusive_args *args = priv; 2006 pte_t pteval; 2007 struct page *subpage; 2008 bool ret = true; 2009 struct mmu_notifier_range range; 2010 swp_entry_t entry; 2011 pte_t swp_pte; 2012 2013 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma, 2014 vma->vm_mm, address, min(vma->vm_end, 2015 address + folio_size(folio)), 2016 args->owner); 2017 mmu_notifier_invalidate_range_start(&range); 2018 2019 while (page_vma_mapped_walk(&pvmw)) { 2020 /* Unexpected PMD-mapped THP? */ 2021 VM_BUG_ON_FOLIO(!pvmw.pte, folio); 2022 2023 if (!pte_present(*pvmw.pte)) { 2024 ret = false; 2025 page_vma_mapped_walk_done(&pvmw); 2026 break; 2027 } 2028 2029 subpage = folio_page(folio, 2030 pte_pfn(*pvmw.pte) - folio_pfn(folio)); 2031 address = pvmw.address; 2032 2033 /* Nuke the page table entry. */ 2034 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 2035 pteval = ptep_clear_flush(vma, address, pvmw.pte); 2036 2037 /* Set the dirty flag on the folio now the pte is gone. */ 2038 if (pte_dirty(pteval)) 2039 folio_mark_dirty(folio); 2040 2041 /* 2042 * Check that our target page is still mapped at the expected 2043 * address. 2044 */ 2045 if (args->mm == mm && args->address == address && 2046 pte_write(pteval)) 2047 args->valid = true; 2048 2049 /* 2050 * Store the pfn of the page in a special migration 2051 * pte. do_swap_page() will wait until the migration 2052 * pte is removed and then restart fault handling. 2053 */ 2054 if (pte_write(pteval)) 2055 entry = make_writable_device_exclusive_entry( 2056 page_to_pfn(subpage)); 2057 else 2058 entry = make_readable_device_exclusive_entry( 2059 page_to_pfn(subpage)); 2060 swp_pte = swp_entry_to_pte(entry); 2061 if (pte_soft_dirty(pteval)) 2062 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2063 if (pte_uffd_wp(pteval)) 2064 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2065 2066 set_pte_at(mm, address, pvmw.pte, swp_pte); 2067 2068 /* 2069 * There is a reference on the page for the swap entry which has 2070 * been removed, so shouldn't take another. 2071 */ 2072 page_remove_rmap(subpage, vma, false); 2073 } 2074 2075 mmu_notifier_invalidate_range_end(&range); 2076 2077 return ret; 2078 } 2079 2080 /** 2081 * folio_make_device_exclusive - Mark the folio exclusively owned by a device. 2082 * @folio: The folio to replace page table entries for. 
2083  * @mm: The mm_struct where the folio is expected to be mapped.
2084  * @address: Address where the folio is expected to be mapped.
2085  * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks
2086  *
2087  * Tries to remove all the page table entries which are mapping this
2088  * folio and replace them with special device exclusive swap entries to
2089  * grant a device exclusive access to the folio.
2090  *
2091  * Context: Caller must hold the folio lock.
2092  * Return: false if the page is still mapped, or if it could not be unmapped
2093  * from the expected address. Otherwise returns true (success).
2094  */
2095 static bool folio_make_device_exclusive(struct folio *folio,
2096 		struct mm_struct *mm, unsigned long address, void *owner)
2097 {
2098 	struct make_exclusive_args args = {
2099 		.mm = mm,
2100 		.address = address,
2101 		.owner = owner,
2102 		.valid = false,
2103 	};
2104 	struct rmap_walk_control rwc = {
2105 		.rmap_one = page_make_device_exclusive_one,
2106 		.done = page_not_mapped,
2107 		.anon_lock = folio_lock_anon_vma_read,
2108 		.arg = &args,
2109 	};
2110 
2111 	/*
2112 	 * Restrict to anonymous folios for now to avoid potential writeback
2113 	 * issues.
2114 	 */
2115 	if (!folio_test_anon(folio))
2116 		return false;
2117 
2118 	rmap_walk(folio, &rwc);
2119 
2120 	return args.valid && !folio_mapcount(folio);
2121 }
2122 
2123 /**
2124  * make_device_exclusive_range() - Mark a range for exclusive use by a device
2125  * @mm: mm_struct of associated target process
2126  * @start: start of the region to mark for exclusive device access
2127  * @end: end address of region
2128  * @pages: returns the pages which were successfully marked for exclusive access
2129  * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
2130  *
2131  * Returns: number of pages found in the range by GUP. A page is marked for
2132  * exclusive access only if the page pointer is non-NULL.
2133  *
2134  * This function finds ptes mapping page(s) to the given address range, locks
2135  * them and replaces mappings with special swap entries preventing userspace CPU
2136  * access. On fault these entries are replaced with the original mapping after
2137  * calling MMU notifiers.
2138  *
2139  * A driver using this to program access from a device must use an mmu notifier
2140  * critical section to hold a device specific lock during programming. Once
2141  * programming is complete it should drop the page lock and reference, after
2142  * which point CPU access to the page will revoke the exclusive access. A
2143  * hedged usage sketch follows at the end of this file.
2143 */ 2144 int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, 2145 unsigned long end, struct page **pages, 2146 void *owner) 2147 { 2148 long npages = (end - start) >> PAGE_SHIFT; 2149 long i; 2150 2151 npages = get_user_pages_remote(mm, start, npages, 2152 FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, 2153 pages, NULL, NULL); 2154 if (npages < 0) 2155 return npages; 2156 2157 for (i = 0; i < npages; i++, start += PAGE_SIZE) { 2158 struct folio *folio = page_folio(pages[i]); 2159 if (PageTail(pages[i]) || !folio_trylock(folio)) { 2160 folio_put(folio); 2161 pages[i] = NULL; 2162 continue; 2163 } 2164 2165 if (!folio_make_device_exclusive(folio, mm, start, owner)) { 2166 folio_unlock(folio); 2167 folio_put(folio); 2168 pages[i] = NULL; 2169 } 2170 } 2171 2172 return npages; 2173 } 2174 EXPORT_SYMBOL_GPL(make_device_exclusive_range); 2175 #endif 2176 2177 void __put_anon_vma(struct anon_vma *anon_vma) 2178 { 2179 struct anon_vma *root = anon_vma->root; 2180 2181 anon_vma_free(anon_vma); 2182 if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 2183 anon_vma_free(root); 2184 } 2185 2186 static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, 2187 const struct rmap_walk_control *rwc) 2188 { 2189 struct anon_vma *anon_vma; 2190 2191 if (rwc->anon_lock) 2192 return rwc->anon_lock(folio); 2193 2194 /* 2195 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() 2196 * because that depends on page_mapped(); but not all its usages 2197 * are holding mmap_lock. Users without mmap_lock are required to 2198 * take a reference count to prevent the anon_vma disappearing 2199 */ 2200 anon_vma = folio_anon_vma(folio); 2201 if (!anon_vma) 2202 return NULL; 2203 2204 anon_vma_lock_read(anon_vma); 2205 return anon_vma; 2206 } 2207 2208 /* 2209 * rmap_walk_anon - do something to anonymous page using the object-based 2210 * rmap method 2211 * @page: the page to be handled 2212 * @rwc: control variable according to each walk type 2213 * 2214 * Find all the mappings of a page using the mapping pointer and the vma chains 2215 * contained in the anon_vma struct it points to. 2216 */ 2217 static void rmap_walk_anon(struct folio *folio, 2218 const struct rmap_walk_control *rwc, bool locked) 2219 { 2220 struct anon_vma *anon_vma; 2221 pgoff_t pgoff_start, pgoff_end; 2222 struct anon_vma_chain *avc; 2223 2224 if (locked) { 2225 anon_vma = folio_anon_vma(folio); 2226 /* anon_vma disappear under us? 
*/ 2227 VM_BUG_ON_FOLIO(!anon_vma, folio); 2228 } else { 2229 anon_vma = rmap_walk_anon_lock(folio, rwc); 2230 } 2231 if (!anon_vma) 2232 return; 2233 2234 pgoff_start = folio_pgoff(folio); 2235 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; 2236 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, 2237 pgoff_start, pgoff_end) { 2238 struct vm_area_struct *vma = avc->vma; 2239 unsigned long address = vma_address(&folio->page, vma); 2240 2241 VM_BUG_ON_VMA(address == -EFAULT, vma); 2242 cond_resched(); 2243 2244 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2245 continue; 2246 2247 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 2248 break; 2249 if (rwc->done && rwc->done(folio)) 2250 break; 2251 } 2252 2253 if (!locked) 2254 anon_vma_unlock_read(anon_vma); 2255 } 2256 2257 /* 2258 * rmap_walk_file - do something to file page using the object-based rmap method 2259 * @page: the page to be handled 2260 * @rwc: control variable according to each walk type 2261 * 2262 * Find all the mappings of a page using the mapping pointer and the vma chains 2263 * contained in the address_space struct it points to. 2264 */ 2265 static void rmap_walk_file(struct folio *folio, 2266 const struct rmap_walk_control *rwc, bool locked) 2267 { 2268 struct address_space *mapping = folio_mapping(folio); 2269 pgoff_t pgoff_start, pgoff_end; 2270 struct vm_area_struct *vma; 2271 2272 /* 2273 * The page lock not only makes sure that page->mapping cannot 2274 * suddenly be NULLified by truncation, it makes sure that the 2275 * structure at mapping cannot be freed and reused yet, 2276 * so we can safely take mapping->i_mmap_rwsem. 2277 */ 2278 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 2279 2280 if (!mapping) 2281 return; 2282 2283 pgoff_start = folio_pgoff(folio); 2284 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; 2285 if (!locked) 2286 i_mmap_lock_read(mapping); 2287 vma_interval_tree_foreach(vma, &mapping->i_mmap, 2288 pgoff_start, pgoff_end) { 2289 unsigned long address = vma_address(&folio->page, vma); 2290 2291 VM_BUG_ON_VMA(address == -EFAULT, vma); 2292 cond_resched(); 2293 2294 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2295 continue; 2296 2297 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 2298 goto done; 2299 if (rwc->done && rwc->done(folio)) 2300 goto done; 2301 } 2302 2303 done: 2304 if (!locked) 2305 i_mmap_unlock_read(mapping); 2306 } 2307 2308 void rmap_walk(struct folio *folio, const struct rmap_walk_control *rwc) 2309 { 2310 if (unlikely(folio_test_ksm(folio))) 2311 rmap_walk_ksm(folio, rwc); 2312 else if (folio_test_anon(folio)) 2313 rmap_walk_anon(folio, rwc, false); 2314 else 2315 rmap_walk_file(folio, rwc, false); 2316 } 2317 2318 /* Like rmap_walk, but caller holds relevant rmap lock */ 2319 void rmap_walk_locked(struct folio *folio, const struct rmap_walk_control *rwc) 2320 { 2321 /* no ksm support for now */ 2322 VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); 2323 if (folio_test_anon(folio)) 2324 rmap_walk_anon(folio, rwc, true); 2325 else 2326 rmap_walk_file(folio, rwc, true); 2327 } 2328 2329 #ifdef CONFIG_HUGETLB_PAGE 2330 /* 2331 * The following two functions are for anonymous (private mapped) hugepages. 2332 * Unlike common anonymous pages, anonymous hugepages have no accounting code 2333 * and no lru code, because we handle hugepages differently from common pages. 
2334 */ 2335 void hugepage_add_anon_rmap(struct page *page, 2336 struct vm_area_struct *vma, unsigned long address) 2337 { 2338 struct anon_vma *anon_vma = vma->anon_vma; 2339 int first; 2340 2341 BUG_ON(!PageLocked(page)); 2342 BUG_ON(!anon_vma); 2343 /* address might be in next vma when migration races vma_adjust */ 2344 first = atomic_inc_and_test(compound_mapcount_ptr(page)); 2345 if (first) 2346 __page_set_anon_rmap(page, vma, address, 0); 2347 } 2348 2349 void hugepage_add_new_anon_rmap(struct page *page, 2350 struct vm_area_struct *vma, unsigned long address) 2351 { 2352 BUG_ON(address < vma->vm_start || address >= vma->vm_end); 2353 atomic_set(compound_mapcount_ptr(page), 0); 2354 atomic_set(compound_pincount_ptr(page), 0); 2355 2356 __page_set_anon_rmap(page, vma, address, 1); 2357 } 2358 #endif /* CONFIG_HUGETLB_PAGE */ 2359
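
/*
 * Illustrative usage sketch (not part of mm/rmap.c): roughly how a device
 * driver might consume make_device_exclusive_range(), as referenced from its
 * kernel-doc above. The struct my_dev type and the my_dev_map_page() helper
 * are hypothetical placeholders, and the error handling is simplified. A real
 * driver (see the nouveau SVM code) must additionally register an MMU
 * notifier that filters on MMU_NOTIFY_EXCLUSIVE via the @owner argument and
 * hold its device-specific lock across programming, which this sketch omits.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/slab.h>

struct my_dev;					/* hypothetical device state */
int my_dev_map_page(struct my_dev *dev, unsigned long addr,
		    struct page *page);		/* hypothetical helper */

static int my_dev_make_range_exclusive(struct my_dev *dev, struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	long npages = (end - start) >> PAGE_SHIFT;
	struct page **pages;
	long i, found;

	pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* get_user_pages_remote() inside requires the mmap_lock. */
	mmap_read_lock(mm);
	found = make_device_exclusive_range(mm, start, end, pages, dev);
	mmap_read_unlock(mm);
	if (found < 0) {
		kvfree(pages);
		return found;
	}

	for (i = 0; i < found; i++) {
		if (!pages[i])
			continue;	/* not made exclusive; caller may retry */

		/* Program the device while the page is still locked. */
		my_dev_map_page(dev, start + i * PAGE_SIZE, pages[i]);

		/*
		 * Dropping the lock and reference re-enables the CPU fault
		 * path, which revokes the exclusive entry on the next access.
		 */
		unlock_page(pages[i]);
		put_page(pages[i]);
	}

	kvfree(pages);
	return 0;
}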