1 /* 2 * mm/rmap.c - physical to virtual reverse mappings 3 * 4 * Copyright 2001, Rik van Riel <riel@conectiva.com.br> 5 * Released under the General Public License (GPL). 6 * 7 * Simple, low overhead reverse mapping scheme. 8 * Please try to keep this thing as modular as possible. 9 * 10 * Provides methods for unmapping each kind of mapped page: 11 * the anon methods track anonymous pages, and 12 * the file methods track pages belonging to an inode. 13 * 14 * Original design by Rik van Riel <riel@conectiva.com.br> 2001 15 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004 16 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004 17 * Contributions by Hugh Dickins 2003, 2004 18 */ 19 20 /* 21 * Lock ordering in mm: 22 * 23 * inode->i_rwsem (while writing or truncating, not reading or faulting) 24 * mm->mmap_lock 25 * mapping->invalidate_lock (in filemap_fault) 26 * page->flags PG_locked (lock_page) * (see hugetlbfs below) 27 * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share) 28 * mapping->i_mmap_rwsem 29 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex) 30 * anon_vma->rwsem 31 * mm->page_table_lock or pte_lock 32 * swap_lock (in swap_duplicate, swap_info_get) 33 * mmlist_lock (in mmput, drain_mmlist and others) 34 * mapping->private_lock (in block_dirty_folio) 35 * folio_lock_memcg move_lock (in block_dirty_folio) 36 * i_pages lock (widely used) 37 * lruvec->lru_lock (in folio_lruvec_lock_irq) 38 * inode->i_lock (in set_page_dirty's __mark_inode_dirty) 39 * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty) 40 * sb_lock (within inode_lock in fs/fs-writeback.c) 41 * i_pages lock (widely used, in set_page_dirty, 42 * in arch-dependent flush_dcache_mmap_lock, 43 * within bdi.wb->list_lock in __sync_single_inode) 44 * 45 * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon) 46 * ->tasklist_lock 47 * pte map lock 48 * 49 * * hugetlbfs PageHuge() pages take locks in this order: 50 * mapping->i_mmap_rwsem 51 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex) 52 * page->flags PG_locked (lock_page) 53 */ 54 55 #include <linux/mm.h> 56 #include <linux/sched/mm.h> 57 #include <linux/sched/task.h> 58 #include <linux/pagemap.h> 59 #include <linux/swap.h> 60 #include <linux/swapops.h> 61 #include <linux/slab.h> 62 #include <linux/init.h> 63 #include <linux/ksm.h> 64 #include <linux/rmap.h> 65 #include <linux/rcupdate.h> 66 #include <linux/export.h> 67 #include <linux/memcontrol.h> 68 #include <linux/mmu_notifier.h> 69 #include <linux/migrate.h> 70 #include <linux/hugetlb.h> 71 #include <linux/huge_mm.h> 72 #include <linux/backing-dev.h> 73 #include <linux/page_idle.h> 74 #include <linux/memremap.h> 75 #include <linux/userfaultfd_k.h> 76 #include <linux/mm_inline.h> 77 78 #include <asm/tlbflush.h> 79 80 #define CREATE_TRACE_POINTS 81 #include <trace/events/tlb.h> 82 #include <trace/events/migrate.h> 83 84 #include "internal.h" 85 86 static struct kmem_cache *anon_vma_cachep; 87 static struct kmem_cache *anon_vma_chain_cachep; 88 89 static inline struct anon_vma *anon_vma_alloc(void) 90 { 91 struct anon_vma *anon_vma; 92 93 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); 94 if (anon_vma) { 95 atomic_set(&anon_vma->refcount, 1); 96 anon_vma->degree = 1; /* Reference for first vma */ 97 anon_vma->parent = anon_vma; 98 /* 99 * Initialise the anon_vma root to point to itself. If called 100 * from fork, the root will be reset to the parents anon_vma. 101 */ 102 anon_vma->root = anon_vma; 103 } 104 105 return anon_vma; 106 } 107 108 static inline void anon_vma_free(struct anon_vma *anon_vma) 109 { 110 VM_BUG_ON(atomic_read(&anon_vma->refcount)); 111 112 /* 113 * Synchronize against folio_lock_anon_vma_read() such that 114 * we can safely hold the lock without the anon_vma getting 115 * freed. 116 * 117 * Relies on the full mb implied by the atomic_dec_and_test() from 118 * put_anon_vma() against the acquire barrier implied by 119 * down_read_trylock() from folio_lock_anon_vma_read(). This orders: 120 * 121 * folio_lock_anon_vma_read() VS put_anon_vma() 122 * down_read_trylock() atomic_dec_and_test() 123 * LOCK MB 124 * atomic_read() rwsem_is_locked() 125 * 126 * LOCK should suffice since the actual taking of the lock must 127 * happen _before_ what follows. 128 */ 129 might_sleep(); 130 if (rwsem_is_locked(&anon_vma->root->rwsem)) { 131 anon_vma_lock_write(anon_vma); 132 anon_vma_unlock_write(anon_vma); 133 } 134 135 kmem_cache_free(anon_vma_cachep, anon_vma); 136 } 137 138 static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) 139 { 140 return kmem_cache_alloc(anon_vma_chain_cachep, gfp); 141 } 142 143 static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) 144 { 145 kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); 146 } 147 148 static void anon_vma_chain_link(struct vm_area_struct *vma, 149 struct anon_vma_chain *avc, 150 struct anon_vma *anon_vma) 151 { 152 avc->vma = vma; 153 avc->anon_vma = anon_vma; 154 list_add(&avc->same_vma, &vma->anon_vma_chain); 155 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); 156 } 157 158 /** 159 * __anon_vma_prepare - attach an anon_vma to a memory region 160 * @vma: the memory region in question 161 * 162 * This makes sure the memory mapping described by 'vma' has 163 * an 'anon_vma' attached to it, so that we can associate the 164 * anonymous pages mapped into it with that anon_vma. 165 * 166 * The common case will be that we already have one, which 167 * is handled inline by anon_vma_prepare(). But if 168 * not we either need to find an adjacent mapping that we 169 * can re-use the anon_vma from (very common when the only 170 * reason for splitting a vma has been mprotect()), or we 171 * allocate a new one. 172 * 173 * Anon-vma allocations are very subtle, because we may have 174 * optimistically looked up an anon_vma in folio_lock_anon_vma_read() 175 * and that may actually touch the rwsem even in the newly 176 * allocated vma (it depends on RCU to make sure that the 177 * anon_vma isn't actually destroyed). 178 * 179 * As a result, we need to do proper anon_vma locking even 180 * for the new allocation. At the same time, we do not want 181 * to do any locking for the common case of already having 182 * an anon_vma. 183 * 184 * This must be called with the mmap_lock held for reading. 185 */ 186 int __anon_vma_prepare(struct vm_area_struct *vma) 187 { 188 struct mm_struct *mm = vma->vm_mm; 189 struct anon_vma *anon_vma, *allocated; 190 struct anon_vma_chain *avc; 191 192 might_sleep(); 193 194 avc = anon_vma_chain_alloc(GFP_KERNEL); 195 if (!avc) 196 goto out_enomem; 197 198 anon_vma = find_mergeable_anon_vma(vma); 199 allocated = NULL; 200 if (!anon_vma) { 201 anon_vma = anon_vma_alloc(); 202 if (unlikely(!anon_vma)) 203 goto out_enomem_free_avc; 204 allocated = anon_vma; 205 } 206 207 anon_vma_lock_write(anon_vma); 208 /* page_table_lock to protect against threads */ 209 spin_lock(&mm->page_table_lock); 210 if (likely(!vma->anon_vma)) { 211 vma->anon_vma = anon_vma; 212 anon_vma_chain_link(vma, avc, anon_vma); 213 /* vma reference or self-parent link for new root */ 214 anon_vma->degree++; 215 allocated = NULL; 216 avc = NULL; 217 } 218 spin_unlock(&mm->page_table_lock); 219 anon_vma_unlock_write(anon_vma); 220 221 if (unlikely(allocated)) 222 put_anon_vma(allocated); 223 if (unlikely(avc)) 224 anon_vma_chain_free(avc); 225 226 return 0; 227 228 out_enomem_free_avc: 229 anon_vma_chain_free(avc); 230 out_enomem: 231 return -ENOMEM; 232 } 233 234 /* 235 * This is a useful helper function for locking the anon_vma root as 236 * we traverse the vma->anon_vma_chain, looping over anon_vma's that 237 * have the same vma. 238 * 239 * Such anon_vma's should have the same root, so you'd expect to see 240 * just a single mutex_lock for the whole traversal. 241 */ 242 static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma) 243 { 244 struct anon_vma *new_root = anon_vma->root; 245 if (new_root != root) { 246 if (WARN_ON_ONCE(root)) 247 up_write(&root->rwsem); 248 root = new_root; 249 down_write(&root->rwsem); 250 } 251 return root; 252 } 253 254 static inline void unlock_anon_vma_root(struct anon_vma *root) 255 { 256 if (root) 257 up_write(&root->rwsem); 258 } 259 260 /* 261 * Attach the anon_vmas from src to dst. 262 * Returns 0 on success, -ENOMEM on failure. 263 * 264 * anon_vma_clone() is called by __vma_adjust(), __split_vma(), copy_vma() and 265 * anon_vma_fork(). The first three want an exact copy of src, while the last 266 * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent 267 * endless growth of anon_vma. Since dst->anon_vma is set to NULL before call, 268 * we can identify this case by checking (!dst->anon_vma && src->anon_vma). 269 * 270 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find 271 * and reuse existing anon_vma which has no vmas and only one child anon_vma. 272 * This prevents degradation of anon_vma hierarchy to endless linear chain in 273 * case of constantly forking task. On the other hand, an anon_vma with more 274 * than one child isn't reused even if there was no alive vma, thus rmap 275 * walker has a good chance of avoiding scanning the whole hierarchy when it 276 * searches where page is mapped. 277 */ 278 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) 279 { 280 struct anon_vma_chain *avc, *pavc; 281 struct anon_vma *root = NULL; 282 283 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { 284 struct anon_vma *anon_vma; 285 286 avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); 287 if (unlikely(!avc)) { 288 unlock_anon_vma_root(root); 289 root = NULL; 290 avc = anon_vma_chain_alloc(GFP_KERNEL); 291 if (!avc) 292 goto enomem_failure; 293 } 294 anon_vma = pavc->anon_vma; 295 root = lock_anon_vma_root(root, anon_vma); 296 anon_vma_chain_link(dst, avc, anon_vma); 297 298 /* 299 * Reuse existing anon_vma if its degree lower than two, 300 * that means it has no vma and only one anon_vma child. 301 * 302 * Do not choose parent anon_vma, otherwise first child 303 * will always reuse it. Root anon_vma is never reused: 304 * it has self-parent reference and at least one child. 305 */ 306 if (!dst->anon_vma && src->anon_vma && 307 anon_vma != src->anon_vma && anon_vma->degree < 2) 308 dst->anon_vma = anon_vma; 309 } 310 if (dst->anon_vma) 311 dst->anon_vma->degree++; 312 unlock_anon_vma_root(root); 313 return 0; 314 315 enomem_failure: 316 /* 317 * dst->anon_vma is dropped here otherwise its degree can be incorrectly 318 * decremented in unlink_anon_vmas(). 319 * We can safely do this because callers of anon_vma_clone() don't care 320 * about dst->anon_vma if anon_vma_clone() failed. 321 */ 322 dst->anon_vma = NULL; 323 unlink_anon_vmas(dst); 324 return -ENOMEM; 325 } 326 327 /* 328 * Attach vma to its own anon_vma, as well as to the anon_vmas that 329 * the corresponding VMA in the parent process is attached to. 330 * Returns 0 on success, non-zero on failure. 331 */ 332 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) 333 { 334 struct anon_vma_chain *avc; 335 struct anon_vma *anon_vma; 336 int error; 337 338 /* Don't bother if the parent process has no anon_vma here. */ 339 if (!pvma->anon_vma) 340 return 0; 341 342 /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ 343 vma->anon_vma = NULL; 344 345 /* 346 * First, attach the new VMA to the parent VMA's anon_vmas, 347 * so rmap can find non-COWed pages in child processes. 348 */ 349 error = anon_vma_clone(vma, pvma); 350 if (error) 351 return error; 352 353 /* An existing anon_vma has been reused, all done then. */ 354 if (vma->anon_vma) 355 return 0; 356 357 /* Then add our own anon_vma. */ 358 anon_vma = anon_vma_alloc(); 359 if (!anon_vma) 360 goto out_error; 361 avc = anon_vma_chain_alloc(GFP_KERNEL); 362 if (!avc) 363 goto out_error_free_anon_vma; 364 365 /* 366 * The root anon_vma's rwsem is the lock actually used when we 367 * lock any of the anon_vmas in this anon_vma tree. 368 */ 369 anon_vma->root = pvma->anon_vma->root; 370 anon_vma->parent = pvma->anon_vma; 371 /* 372 * With refcounts, an anon_vma can stay around longer than the 373 * process it belongs to. The root anon_vma needs to be pinned until 374 * this anon_vma is freed, because the lock lives in the root. 375 */ 376 get_anon_vma(anon_vma->root); 377 /* Mark this anon_vma as the one where our new (COWed) pages go. */ 378 vma->anon_vma = anon_vma; 379 anon_vma_lock_write(anon_vma); 380 anon_vma_chain_link(vma, avc, anon_vma); 381 anon_vma->parent->degree++; 382 anon_vma_unlock_write(anon_vma); 383 384 return 0; 385 386 out_error_free_anon_vma: 387 put_anon_vma(anon_vma); 388 out_error: 389 unlink_anon_vmas(vma); 390 return -ENOMEM; 391 } 392 393 void unlink_anon_vmas(struct vm_area_struct *vma) 394 { 395 struct anon_vma_chain *avc, *next; 396 struct anon_vma *root = NULL; 397 398 /* 399 * Unlink each anon_vma chained to the VMA. This list is ordered 400 * from newest to oldest, ensuring the root anon_vma gets freed last. 401 */ 402 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 403 struct anon_vma *anon_vma = avc->anon_vma; 404 405 root = lock_anon_vma_root(root, anon_vma); 406 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); 407 408 /* 409 * Leave empty anon_vmas on the list - we'll need 410 * to free them outside the lock. 411 */ 412 if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) { 413 anon_vma->parent->degree--; 414 continue; 415 } 416 417 list_del(&avc->same_vma); 418 anon_vma_chain_free(avc); 419 } 420 if (vma->anon_vma) { 421 vma->anon_vma->degree--; 422 423 /* 424 * vma would still be needed after unlink, and anon_vma will be prepared 425 * when handle fault. 426 */ 427 vma->anon_vma = NULL; 428 } 429 unlock_anon_vma_root(root); 430 431 /* 432 * Iterate the list once more, it now only contains empty and unlinked 433 * anon_vmas, destroy them. Could not do before due to __put_anon_vma() 434 * needing to write-acquire the anon_vma->root->rwsem. 435 */ 436 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 437 struct anon_vma *anon_vma = avc->anon_vma; 438 439 VM_WARN_ON(anon_vma->degree); 440 put_anon_vma(anon_vma); 441 442 list_del(&avc->same_vma); 443 anon_vma_chain_free(avc); 444 } 445 } 446 447 static void anon_vma_ctor(void *data) 448 { 449 struct anon_vma *anon_vma = data; 450 451 init_rwsem(&anon_vma->rwsem); 452 atomic_set(&anon_vma->refcount, 0); 453 anon_vma->rb_root = RB_ROOT_CACHED; 454 } 455 456 void __init anon_vma_init(void) 457 { 458 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), 459 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT, 460 anon_vma_ctor); 461 anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, 462 SLAB_PANIC|SLAB_ACCOUNT); 463 } 464 465 /* 466 * Getting a lock on a stable anon_vma from a page off the LRU is tricky! 467 * 468 * Since there is no serialization what so ever against page_remove_rmap() 469 * the best this function can do is return a refcount increased anon_vma 470 * that might have been relevant to this page. 471 * 472 * The page might have been remapped to a different anon_vma or the anon_vma 473 * returned may already be freed (and even reused). 474 * 475 * In case it was remapped to a different anon_vma, the new anon_vma will be a 476 * child of the old anon_vma, and the anon_vma lifetime rules will therefore 477 * ensure that any anon_vma obtained from the page will still be valid for as 478 * long as we observe page_mapped() [ hence all those page_mapped() tests ]. 479 * 480 * All users of this function must be very careful when walking the anon_vma 481 * chain and verify that the page in question is indeed mapped in it 482 * [ something equivalent to page_mapped_in_vma() ]. 483 * 484 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from 485 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid 486 * if there is a mapcount, we can dereference the anon_vma after observing 487 * those. 488 */ 489 struct anon_vma *page_get_anon_vma(struct page *page) 490 { 491 struct anon_vma *anon_vma = NULL; 492 unsigned long anon_mapping; 493 494 rcu_read_lock(); 495 anon_mapping = (unsigned long)READ_ONCE(page->mapping); 496 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) 497 goto out; 498 if (!page_mapped(page)) 499 goto out; 500 501 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); 502 if (!atomic_inc_not_zero(&anon_vma->refcount)) { 503 anon_vma = NULL; 504 goto out; 505 } 506 507 /* 508 * If this page is still mapped, then its anon_vma cannot have been 509 * freed. But if it has been unmapped, we have no security against the 510 * anon_vma structure being freed and reused (for another anon_vma: 511 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero() 512 * above cannot corrupt). 513 */ 514 if (!page_mapped(page)) { 515 rcu_read_unlock(); 516 put_anon_vma(anon_vma); 517 return NULL; 518 } 519 out: 520 rcu_read_unlock(); 521 522 return anon_vma; 523 } 524 525 /* 526 * Similar to page_get_anon_vma() except it locks the anon_vma. 527 * 528 * Its a little more complex as it tries to keep the fast path to a single 529 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a 530 * reference like with page_get_anon_vma() and then block on the mutex 531 * on !rwc->try_lock case. 532 */ 533 struct anon_vma *folio_lock_anon_vma_read(struct folio *folio, 534 struct rmap_walk_control *rwc) 535 { 536 struct anon_vma *anon_vma = NULL; 537 struct anon_vma *root_anon_vma; 538 unsigned long anon_mapping; 539 540 rcu_read_lock(); 541 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); 542 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) 543 goto out; 544 if (!folio_mapped(folio)) 545 goto out; 546 547 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); 548 root_anon_vma = READ_ONCE(anon_vma->root); 549 if (down_read_trylock(&root_anon_vma->rwsem)) { 550 /* 551 * If the folio is still mapped, then this anon_vma is still 552 * its anon_vma, and holding the mutex ensures that it will 553 * not go away, see anon_vma_free(). 554 */ 555 if (!folio_mapped(folio)) { 556 up_read(&root_anon_vma->rwsem); 557 anon_vma = NULL; 558 } 559 goto out; 560 } 561 562 if (rwc && rwc->try_lock) { 563 anon_vma = NULL; 564 rwc->contended = true; 565 goto out; 566 } 567 568 /* trylock failed, we got to sleep */ 569 if (!atomic_inc_not_zero(&anon_vma->refcount)) { 570 anon_vma = NULL; 571 goto out; 572 } 573 574 if (!folio_mapped(folio)) { 575 rcu_read_unlock(); 576 put_anon_vma(anon_vma); 577 return NULL; 578 } 579 580 /* we pinned the anon_vma, its safe to sleep */ 581 rcu_read_unlock(); 582 anon_vma_lock_read(anon_vma); 583 584 if (atomic_dec_and_test(&anon_vma->refcount)) { 585 /* 586 * Oops, we held the last refcount, release the lock 587 * and bail -- can't simply use put_anon_vma() because 588 * we'll deadlock on the anon_vma_lock_write() recursion. 589 */ 590 anon_vma_unlock_read(anon_vma); 591 __put_anon_vma(anon_vma); 592 anon_vma = NULL; 593 } 594 595 return anon_vma; 596 597 out: 598 rcu_read_unlock(); 599 return anon_vma; 600 } 601 602 void page_unlock_anon_vma_read(struct anon_vma *anon_vma) 603 { 604 anon_vma_unlock_read(anon_vma); 605 } 606 607 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH 608 /* 609 * Flush TLB entries for recently unmapped pages from remote CPUs. It is 610 * important if a PTE was dirty when it was unmapped that it's flushed 611 * before any IO is initiated on the page to prevent lost writes. Similarly, 612 * it must be flushed before freeing to prevent data leakage. 613 */ 614 void try_to_unmap_flush(void) 615 { 616 struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; 617 618 if (!tlb_ubc->flush_required) 619 return; 620 621 arch_tlbbatch_flush(&tlb_ubc->arch); 622 tlb_ubc->flush_required = false; 623 tlb_ubc->writable = false; 624 } 625 626 /* Flush iff there are potentially writable TLB entries that can race with IO */ 627 void try_to_unmap_flush_dirty(void) 628 { 629 struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; 630 631 if (tlb_ubc->writable) 632 try_to_unmap_flush(); 633 } 634 635 /* 636 * Bits 0-14 of mm->tlb_flush_batched record pending generations. 637 * Bits 16-30 of mm->tlb_flush_batched bit record flushed generations. 638 */ 639 #define TLB_FLUSH_BATCH_FLUSHED_SHIFT 16 640 #define TLB_FLUSH_BATCH_PENDING_MASK \ 641 ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1) 642 #define TLB_FLUSH_BATCH_PENDING_LARGE \ 643 (TLB_FLUSH_BATCH_PENDING_MASK / 2) 644 645 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable) 646 { 647 struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; 648 int batch, nbatch; 649 650 arch_tlbbatch_add_mm(&tlb_ubc->arch, mm); 651 tlb_ubc->flush_required = true; 652 653 /* 654 * Ensure compiler does not re-order the setting of tlb_flush_batched 655 * before the PTE is cleared. 656 */ 657 barrier(); 658 batch = atomic_read(&mm->tlb_flush_batched); 659 retry: 660 if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) { 661 /* 662 * Prevent `pending' from catching up with `flushed' because of 663 * overflow. Reset `pending' and `flushed' to be 1 and 0 if 664 * `pending' becomes large. 665 */ 666 nbatch = atomic_cmpxchg(&mm->tlb_flush_batched, batch, 1); 667 if (nbatch != batch) { 668 batch = nbatch; 669 goto retry; 670 } 671 } else { 672 atomic_inc(&mm->tlb_flush_batched); 673 } 674 675 /* 676 * If the PTE was dirty then it's best to assume it's writable. The 677 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() 678 * before the page is queued for IO. 679 */ 680 if (writable) 681 tlb_ubc->writable = true; 682 } 683 684 /* 685 * Returns true if the TLB flush should be deferred to the end of a batch of 686 * unmap operations to reduce IPIs. 687 */ 688 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) 689 { 690 bool should_defer = false; 691 692 if (!(flags & TTU_BATCH_FLUSH)) 693 return false; 694 695 /* If remote CPUs need to be flushed then defer batch the flush */ 696 if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids) 697 should_defer = true; 698 put_cpu(); 699 700 return should_defer; 701 } 702 703 /* 704 * Reclaim unmaps pages under the PTL but do not flush the TLB prior to 705 * releasing the PTL if TLB flushes are batched. It's possible for a parallel 706 * operation such as mprotect or munmap to race between reclaim unmapping 707 * the page and flushing the page. If this race occurs, it potentially allows 708 * access to data via a stale TLB entry. Tracking all mm's that have TLB 709 * batching in flight would be expensive during reclaim so instead track 710 * whether TLB batching occurred in the past and if so then do a flush here 711 * if required. This will cost one additional flush per reclaim cycle paid 712 * by the first operation at risk such as mprotect and mumap. 713 * 714 * This must be called under the PTL so that an access to tlb_flush_batched 715 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise 716 * via the PTL. 717 */ 718 void flush_tlb_batched_pending(struct mm_struct *mm) 719 { 720 int batch = atomic_read(&mm->tlb_flush_batched); 721 int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK; 722 int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT; 723 724 if (pending != flushed) { 725 flush_tlb_mm(mm); 726 /* 727 * If the new TLB flushing is pending during flushing, leave 728 * mm->tlb_flush_batched as is, to avoid losing flushing. 729 */ 730 atomic_cmpxchg(&mm->tlb_flush_batched, batch, 731 pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT)); 732 } 733 } 734 #else 735 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable) 736 { 737 } 738 739 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) 740 { 741 return false; 742 } 743 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ 744 745 /* 746 * At what user virtual address is page expected in vma? 747 * Caller should check the page is actually part of the vma. 748 */ 749 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 750 { 751 struct folio *folio = page_folio(page); 752 if (folio_test_anon(folio)) { 753 struct anon_vma *page__anon_vma = folio_anon_vma(folio); 754 /* 755 * Note: swapoff's unuse_vma() is more efficient with this 756 * check, and needs it to match anon_vma when KSM is active. 757 */ 758 if (!vma->anon_vma || !page__anon_vma || 759 vma->anon_vma->root != page__anon_vma->root) 760 return -EFAULT; 761 } else if (!vma->vm_file) { 762 return -EFAULT; 763 } else if (vma->vm_file->f_mapping != folio->mapping) { 764 return -EFAULT; 765 } 766 767 return vma_address(page, vma); 768 } 769 770 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) 771 { 772 pgd_t *pgd; 773 p4d_t *p4d; 774 pud_t *pud; 775 pmd_t *pmd = NULL; 776 pmd_t pmde; 777 778 pgd = pgd_offset(mm, address); 779 if (!pgd_present(*pgd)) 780 goto out; 781 782 p4d = p4d_offset(pgd, address); 783 if (!p4d_present(*p4d)) 784 goto out; 785 786 pud = pud_offset(p4d, address); 787 if (!pud_present(*pud)) 788 goto out; 789 790 pmd = pmd_offset(pud, address); 791 /* 792 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at() 793 * without holding anon_vma lock for write. So when looking for a 794 * genuine pmde (in which to find pte), test present and !THP together. 795 */ 796 pmde = *pmd; 797 barrier(); 798 if (!pmd_present(pmde) || pmd_trans_huge(pmde)) 799 pmd = NULL; 800 out: 801 return pmd; 802 } 803 804 struct folio_referenced_arg { 805 int mapcount; 806 int referenced; 807 unsigned long vm_flags; 808 struct mem_cgroup *memcg; 809 }; 810 /* 811 * arg: folio_referenced_arg will be passed 812 */ 813 static bool folio_referenced_one(struct folio *folio, 814 struct vm_area_struct *vma, unsigned long address, void *arg) 815 { 816 struct folio_referenced_arg *pra = arg; 817 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 818 int referenced = 0; 819 820 while (page_vma_mapped_walk(&pvmw)) { 821 address = pvmw.address; 822 823 if ((vma->vm_flags & VM_LOCKED) && 824 (!folio_test_large(folio) || !pvmw.pte)) { 825 /* Restore the mlock which got missed */ 826 mlock_vma_folio(folio, vma, !pvmw.pte); 827 page_vma_mapped_walk_done(&pvmw); 828 pra->vm_flags |= VM_LOCKED; 829 return false; /* To break the loop */ 830 } 831 832 if (pvmw.pte) { 833 if (ptep_clear_flush_young_notify(vma, address, 834 pvmw.pte)) { 835 /* 836 * Don't treat a reference through 837 * a sequentially read mapping as such. 838 * If the folio has been used in another mapping, 839 * we will catch it; if this other mapping is 840 * already gone, the unmap path will have set 841 * the referenced flag or activated the folio. 842 */ 843 if (likely(!(vma->vm_flags & VM_SEQ_READ))) 844 referenced++; 845 } 846 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { 847 if (pmdp_clear_flush_young_notify(vma, address, 848 pvmw.pmd)) 849 referenced++; 850 } else { 851 /* unexpected pmd-mapped folio? */ 852 WARN_ON_ONCE(1); 853 } 854 855 pra->mapcount--; 856 } 857 858 if (referenced) 859 folio_clear_idle(folio); 860 if (folio_test_clear_young(folio)) 861 referenced++; 862 863 if (referenced) { 864 pra->referenced++; 865 pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; 866 } 867 868 if (!pra->mapcount) 869 return false; /* To break the loop */ 870 871 return true; 872 } 873 874 static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg) 875 { 876 struct folio_referenced_arg *pra = arg; 877 struct mem_cgroup *memcg = pra->memcg; 878 879 if (!mm_match_cgroup(vma->vm_mm, memcg)) 880 return true; 881 882 return false; 883 } 884 885 /** 886 * folio_referenced() - Test if the folio was referenced. 887 * @folio: The folio to test. 888 * @is_locked: Caller holds lock on the folio. 889 * @memcg: target memory cgroup 890 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio. 891 * 892 * Quick test_and_clear_referenced for all mappings of a folio, 893 * 894 * Return: The number of mappings which referenced the folio. Return -1 if 895 * the function bailed out due to rmap lock contention. 896 */ 897 int folio_referenced(struct folio *folio, int is_locked, 898 struct mem_cgroup *memcg, unsigned long *vm_flags) 899 { 900 int we_locked = 0; 901 struct folio_referenced_arg pra = { 902 .mapcount = folio_mapcount(folio), 903 .memcg = memcg, 904 }; 905 struct rmap_walk_control rwc = { 906 .rmap_one = folio_referenced_one, 907 .arg = (void *)&pra, 908 .anon_lock = folio_lock_anon_vma_read, 909 .try_lock = true, 910 }; 911 912 *vm_flags = 0; 913 if (!pra.mapcount) 914 return 0; 915 916 if (!folio_raw_mapping(folio)) 917 return 0; 918 919 if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) { 920 we_locked = folio_trylock(folio); 921 if (!we_locked) 922 return 1; 923 } 924 925 /* 926 * If we are reclaiming on behalf of a cgroup, skip 927 * counting on behalf of references from different 928 * cgroups 929 */ 930 if (memcg) { 931 rwc.invalid_vma = invalid_folio_referenced_vma; 932 } 933 934 rmap_walk(folio, &rwc); 935 *vm_flags = pra.vm_flags; 936 937 if (we_locked) 938 folio_unlock(folio); 939 940 return rwc.contended ? -1 : pra.referenced; 941 } 942 943 static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw) 944 { 945 int cleaned = 0; 946 struct vm_area_struct *vma = pvmw->vma; 947 struct mmu_notifier_range range; 948 unsigned long address = pvmw->address; 949 950 /* 951 * We have to assume the worse case ie pmd for invalidation. Note that 952 * the folio can not be freed from this function. 953 */ 954 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 955 0, vma, vma->vm_mm, address, 956 vma_address_end(pvmw)); 957 mmu_notifier_invalidate_range_start(&range); 958 959 while (page_vma_mapped_walk(pvmw)) { 960 int ret = 0; 961 962 address = pvmw->address; 963 if (pvmw->pte) { 964 pte_t entry; 965 pte_t *pte = pvmw->pte; 966 967 if (!pte_dirty(*pte) && !pte_write(*pte)) 968 continue; 969 970 flush_cache_page(vma, address, pte_pfn(*pte)); 971 entry = ptep_clear_flush(vma, address, pte); 972 entry = pte_wrprotect(entry); 973 entry = pte_mkclean(entry); 974 set_pte_at(vma->vm_mm, address, pte, entry); 975 ret = 1; 976 } else { 977 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 978 pmd_t *pmd = pvmw->pmd; 979 pmd_t entry; 980 981 if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) 982 continue; 983 984 flush_cache_range(vma, address, 985 address + HPAGE_PMD_SIZE); 986 entry = pmdp_invalidate(vma, address, pmd); 987 entry = pmd_wrprotect(entry); 988 entry = pmd_mkclean(entry); 989 set_pmd_at(vma->vm_mm, address, pmd, entry); 990 ret = 1; 991 #else 992 /* unexpected pmd-mapped folio? */ 993 WARN_ON_ONCE(1); 994 #endif 995 } 996 997 /* 998 * No need to call mmu_notifier_invalidate_range() as we are 999 * downgrading page table protection not changing it to point 1000 * to a new page. 1001 * 1002 * See Documentation/mm/mmu_notifier.rst 1003 */ 1004 if (ret) 1005 cleaned++; 1006 } 1007 1008 mmu_notifier_invalidate_range_end(&range); 1009 1010 return cleaned; 1011 } 1012 1013 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, 1014 unsigned long address, void *arg) 1015 { 1016 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); 1017 int *cleaned = arg; 1018 1019 *cleaned += page_vma_mkclean_one(&pvmw); 1020 1021 return true; 1022 } 1023 1024 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) 1025 { 1026 if (vma->vm_flags & VM_SHARED) 1027 return false; 1028 1029 return true; 1030 } 1031 1032 int folio_mkclean(struct folio *folio) 1033 { 1034 int cleaned = 0; 1035 struct address_space *mapping; 1036 struct rmap_walk_control rwc = { 1037 .arg = (void *)&cleaned, 1038 .rmap_one = page_mkclean_one, 1039 .invalid_vma = invalid_mkclean_vma, 1040 }; 1041 1042 BUG_ON(!folio_test_locked(folio)); 1043 1044 if (!folio_mapped(folio)) 1045 return 0; 1046 1047 mapping = folio_mapping(folio); 1048 if (!mapping) 1049 return 0; 1050 1051 rmap_walk(folio, &rwc); 1052 1053 return cleaned; 1054 } 1055 EXPORT_SYMBOL_GPL(folio_mkclean); 1056 1057 /** 1058 * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of 1059 * [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff) 1060 * within the @vma of shared mappings. And since clean PTEs 1061 * should also be readonly, write protects them too. 1062 * @pfn: start pfn. 1063 * @nr_pages: number of physically contiguous pages srarting with @pfn. 1064 * @pgoff: page offset that the @pfn mapped with. 1065 * @vma: vma that @pfn mapped within. 1066 * 1067 * Returns the number of cleaned PTEs (including PMDs). 1068 */ 1069 int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, 1070 struct vm_area_struct *vma) 1071 { 1072 struct page_vma_mapped_walk pvmw = { 1073 .pfn = pfn, 1074 .nr_pages = nr_pages, 1075 .pgoff = pgoff, 1076 .vma = vma, 1077 .flags = PVMW_SYNC, 1078 }; 1079 1080 if (invalid_mkclean_vma(vma, NULL)) 1081 return 0; 1082 1083 pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma); 1084 VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); 1085 1086 return page_vma_mkclean_one(&pvmw); 1087 } 1088 1089 /** 1090 * page_move_anon_rmap - move a page to our anon_vma 1091 * @page: the page to move to our anon_vma 1092 * @vma: the vma the page belongs to 1093 * 1094 * When a page belongs exclusively to one process after a COW event, 1095 * that page can be moved into the anon_vma that belongs to just that 1096 * process, so the rmap code will not search the parent or sibling 1097 * processes. 1098 */ 1099 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) 1100 { 1101 struct anon_vma *anon_vma = vma->anon_vma; 1102 struct page *subpage = page; 1103 1104 page = compound_head(page); 1105 1106 VM_BUG_ON_PAGE(!PageLocked(page), page); 1107 VM_BUG_ON_VMA(!anon_vma, vma); 1108 1109 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 1110 /* 1111 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written 1112 * simultaneously, so a concurrent reader (eg folio_referenced()'s 1113 * folio_test_anon()) will not see one without the other. 1114 */ 1115 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); 1116 SetPageAnonExclusive(subpage); 1117 } 1118 1119 /** 1120 * __page_set_anon_rmap - set up new anonymous rmap 1121 * @page: Page or Hugepage to add to rmap 1122 * @vma: VM area to add page to. 1123 * @address: User virtual address of the mapping 1124 * @exclusive: the page is exclusively owned by the current process 1125 */ 1126 static void __page_set_anon_rmap(struct page *page, 1127 struct vm_area_struct *vma, unsigned long address, int exclusive) 1128 { 1129 struct anon_vma *anon_vma = vma->anon_vma; 1130 1131 BUG_ON(!anon_vma); 1132 1133 if (PageAnon(page)) 1134 goto out; 1135 1136 /* 1137 * If the page isn't exclusively mapped into this vma, 1138 * we must use the _oldest_ possible anon_vma for the 1139 * page mapping! 1140 */ 1141 if (!exclusive) 1142 anon_vma = anon_vma->root; 1143 1144 /* 1145 * page_idle does a lockless/optimistic rmap scan on page->mapping. 1146 * Make sure the compiler doesn't split the stores of anon_vma and 1147 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code 1148 * could mistake the mapping for a struct address_space and crash. 1149 */ 1150 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 1151 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); 1152 page->index = linear_page_index(vma, address); 1153 out: 1154 if (exclusive) 1155 SetPageAnonExclusive(page); 1156 } 1157 1158 /** 1159 * __page_check_anon_rmap - sanity check anonymous rmap addition 1160 * @page: the page to add the mapping to 1161 * @vma: the vm area in which the mapping is added 1162 * @address: the user virtual address mapped 1163 */ 1164 static void __page_check_anon_rmap(struct page *page, 1165 struct vm_area_struct *vma, unsigned long address) 1166 { 1167 struct folio *folio = page_folio(page); 1168 /* 1169 * The page's anon-rmap details (mapping and index) are guaranteed to 1170 * be set up correctly at this point. 1171 * 1172 * We have exclusion against page_add_anon_rmap because the caller 1173 * always holds the page locked. 1174 * 1175 * We have exclusion against page_add_new_anon_rmap because those pages 1176 * are initially only visible via the pagetables, and the pte is locked 1177 * over the call to page_add_new_anon_rmap. 1178 */ 1179 VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, 1180 folio); 1181 VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), 1182 page); 1183 } 1184 1185 /** 1186 * page_add_anon_rmap - add pte mapping to an anonymous page 1187 * @page: the page to add the mapping to 1188 * @vma: the vm area in which the mapping is added 1189 * @address: the user virtual address mapped 1190 * @flags: the rmap flags 1191 * 1192 * The caller needs to hold the pte lock, and the page must be locked in 1193 * the anon_vma case: to serialize mapping,index checking after setting, 1194 * and to ensure that PageAnon is not being upgraded racily to PageKsm 1195 * (but PageKsm is never downgraded to PageAnon). 1196 */ 1197 void page_add_anon_rmap(struct page *page, 1198 struct vm_area_struct *vma, unsigned long address, rmap_t flags) 1199 { 1200 bool compound = flags & RMAP_COMPOUND; 1201 bool first; 1202 1203 if (unlikely(PageKsm(page))) 1204 lock_page_memcg(page); 1205 else 1206 VM_BUG_ON_PAGE(!PageLocked(page), page); 1207 1208 if (compound) { 1209 atomic_t *mapcount; 1210 VM_BUG_ON_PAGE(!PageLocked(page), page); 1211 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 1212 mapcount = compound_mapcount_ptr(page); 1213 first = atomic_inc_and_test(mapcount); 1214 } else { 1215 first = atomic_inc_and_test(&page->_mapcount); 1216 } 1217 VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); 1218 VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); 1219 1220 if (first) { 1221 int nr = compound ? thp_nr_pages(page) : 1; 1222 /* 1223 * We use the irq-unsafe __{inc|mod}_zone_page_stat because 1224 * these counters are not modified in interrupt context, and 1225 * pte lock(a spinlock) is held, which implies preemption 1226 * disabled. 1227 */ 1228 if (compound) 1229 __mod_lruvec_page_state(page, NR_ANON_THPS, nr); 1230 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); 1231 } 1232 1233 if (unlikely(PageKsm(page))) 1234 unlock_page_memcg(page); 1235 1236 /* address might be in next vma when migration races vma_adjust */ 1237 else if (first) 1238 __page_set_anon_rmap(page, vma, address, 1239 !!(flags & RMAP_EXCLUSIVE)); 1240 else 1241 __page_check_anon_rmap(page, vma, address); 1242 1243 mlock_vma_page(page, vma, compound); 1244 } 1245 1246 /** 1247 * page_add_new_anon_rmap - add mapping to a new anonymous page 1248 * @page: the page to add the mapping to 1249 * @vma: the vm area in which the mapping is added 1250 * @address: the user virtual address mapped 1251 * 1252 * If it's a compound page, it is accounted as a compound page. As the page 1253 * is new, it's assume to get mapped exclusively by a single process. 1254 * 1255 * Same as page_add_anon_rmap but must only be called on *new* pages. 1256 * This means the inc-and-test can be bypassed. 1257 * Page does not have to be locked. 1258 */ 1259 void page_add_new_anon_rmap(struct page *page, 1260 struct vm_area_struct *vma, unsigned long address) 1261 { 1262 const bool compound = PageCompound(page); 1263 int nr = compound ? thp_nr_pages(page) : 1; 1264 1265 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); 1266 __SetPageSwapBacked(page); 1267 if (compound) { 1268 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 1269 /* increment count (starts at -1) */ 1270 atomic_set(compound_mapcount_ptr(page), 0); 1271 atomic_set(compound_pincount_ptr(page), 0); 1272 1273 __mod_lruvec_page_state(page, NR_ANON_THPS, nr); 1274 } else { 1275 /* increment count (starts at -1) */ 1276 atomic_set(&page->_mapcount, 0); 1277 } 1278 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); 1279 __page_set_anon_rmap(page, vma, address, 1); 1280 } 1281 1282 /** 1283 * page_add_file_rmap - add pte mapping to a file page 1284 * @page: the page to add the mapping to 1285 * @vma: the vm area in which the mapping is added 1286 * @compound: charge the page as compound or small page 1287 * 1288 * The caller needs to hold the pte lock. 1289 */ 1290 void page_add_file_rmap(struct page *page, 1291 struct vm_area_struct *vma, bool compound) 1292 { 1293 int i, nr = 0; 1294 1295 VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); 1296 lock_page_memcg(page); 1297 if (compound && PageTransHuge(page)) { 1298 int nr_pages = thp_nr_pages(page); 1299 1300 for (i = 0; i < nr_pages; i++) { 1301 if (atomic_inc_and_test(&page[i]._mapcount)) 1302 nr++; 1303 } 1304 if (!atomic_inc_and_test(compound_mapcount_ptr(page))) 1305 goto out; 1306 1307 /* 1308 * It is racy to ClearPageDoubleMap in page_remove_file_rmap(); 1309 * but page lock is held by all page_add_file_rmap() compound 1310 * callers, and SetPageDoubleMap below warns if !PageLocked: 1311 * so here is a place that DoubleMap can be safely cleared. 1312 */ 1313 VM_WARN_ON_ONCE(!PageLocked(page)); 1314 if (nr == nr_pages && PageDoubleMap(page)) 1315 ClearPageDoubleMap(page); 1316 1317 if (PageSwapBacked(page)) 1318 __mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED, 1319 nr_pages); 1320 else 1321 __mod_lruvec_page_state(page, NR_FILE_PMDMAPPED, 1322 nr_pages); 1323 } else { 1324 if (PageTransCompound(page) && page_mapping(page)) { 1325 VM_WARN_ON_ONCE(!PageLocked(page)); 1326 SetPageDoubleMap(compound_head(page)); 1327 } 1328 if (atomic_inc_and_test(&page->_mapcount)) 1329 nr++; 1330 } 1331 out: 1332 if (nr) 1333 __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr); 1334 unlock_page_memcg(page); 1335 1336 mlock_vma_page(page, vma, compound); 1337 } 1338 1339 static void page_remove_file_rmap(struct page *page, bool compound) 1340 { 1341 int i, nr = 0; 1342 1343 VM_BUG_ON_PAGE(compound && !PageHead(page), page); 1344 1345 /* Hugepages are not counted in NR_FILE_MAPPED for now. */ 1346 if (unlikely(PageHuge(page))) { 1347 /* hugetlb pages are always mapped with pmds */ 1348 atomic_dec(compound_mapcount_ptr(page)); 1349 return; 1350 } 1351 1352 /* page still mapped by someone else? */ 1353 if (compound && PageTransHuge(page)) { 1354 int nr_pages = thp_nr_pages(page); 1355 1356 for (i = 0; i < nr_pages; i++) { 1357 if (atomic_add_negative(-1, &page[i]._mapcount)) 1358 nr++; 1359 } 1360 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) 1361 goto out; 1362 if (PageSwapBacked(page)) 1363 __mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED, 1364 -nr_pages); 1365 else 1366 __mod_lruvec_page_state(page, NR_FILE_PMDMAPPED, 1367 -nr_pages); 1368 } else { 1369 if (atomic_add_negative(-1, &page->_mapcount)) 1370 nr++; 1371 } 1372 out: 1373 if (nr) 1374 __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr); 1375 } 1376 1377 static void page_remove_anon_compound_rmap(struct page *page) 1378 { 1379 int i, nr; 1380 1381 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) 1382 return; 1383 1384 /* Hugepages are not counted in NR_ANON_PAGES for now. */ 1385 if (unlikely(PageHuge(page))) 1386 return; 1387 1388 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 1389 return; 1390 1391 __mod_lruvec_page_state(page, NR_ANON_THPS, -thp_nr_pages(page)); 1392 1393 if (TestClearPageDoubleMap(page)) { 1394 /* 1395 * Subpages can be mapped with PTEs too. Check how many of 1396 * them are still mapped. 1397 */ 1398 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { 1399 if (atomic_add_negative(-1, &page[i]._mapcount)) 1400 nr++; 1401 } 1402 1403 /* 1404 * Queue the page for deferred split if at least one small 1405 * page of the compound page is unmapped, but at least one 1406 * small page is still mapped. 1407 */ 1408 if (nr && nr < thp_nr_pages(page)) 1409 deferred_split_huge_page(page); 1410 } else { 1411 nr = thp_nr_pages(page); 1412 } 1413 1414 if (nr) 1415 __mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr); 1416 } 1417 1418 /** 1419 * page_remove_rmap - take down pte mapping from a page 1420 * @page: page to remove mapping from 1421 * @vma: the vm area from which the mapping is removed 1422 * @compound: uncharge the page as compound or small page 1423 * 1424 * The caller needs to hold the pte lock. 1425 */ 1426 void page_remove_rmap(struct page *page, 1427 struct vm_area_struct *vma, bool compound) 1428 { 1429 lock_page_memcg(page); 1430 1431 if (!PageAnon(page)) { 1432 page_remove_file_rmap(page, compound); 1433 goto out; 1434 } 1435 1436 if (compound) { 1437 page_remove_anon_compound_rmap(page); 1438 goto out; 1439 } 1440 1441 /* page still mapped by someone else? */ 1442 if (!atomic_add_negative(-1, &page->_mapcount)) 1443 goto out; 1444 1445 /* 1446 * We use the irq-unsafe __{inc|mod}_zone_page_stat because 1447 * these counters are not modified in interrupt context, and 1448 * pte lock(a spinlock) is held, which implies preemption disabled. 1449 */ 1450 __dec_lruvec_page_state(page, NR_ANON_MAPPED); 1451 1452 if (PageTransCompound(page)) 1453 deferred_split_huge_page(compound_head(page)); 1454 1455 /* 1456 * It would be tidy to reset the PageAnon mapping here, 1457 * but that might overwrite a racing page_add_anon_rmap 1458 * which increments mapcount after us but sets mapping 1459 * before us: so leave the reset to free_unref_page, 1460 * and remember that it's only reliable while mapped. 1461 * Leaving it set also helps swapoff to reinstate ptes 1462 * faster for those pages still in swapcache. 1463 */ 1464 out: 1465 unlock_page_memcg(page); 1466 1467 munlock_vma_page(page, vma, compound); 1468 } 1469 1470 /* 1471 * @arg: enum ttu_flags will be passed to this argument 1472 */ 1473 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, 1474 unsigned long address, void *arg) 1475 { 1476 struct mm_struct *mm = vma->vm_mm; 1477 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 1478 pte_t pteval; 1479 struct page *subpage; 1480 bool anon_exclusive, ret = true; 1481 struct mmu_notifier_range range; 1482 enum ttu_flags flags = (enum ttu_flags)(long)arg; 1483 1484 /* 1485 * When racing against e.g. zap_pte_range() on another cpu, 1486 * in between its ptep_get_and_clear_full() and page_remove_rmap(), 1487 * try_to_unmap() may return before page_mapped() has become false, 1488 * if page table locking is skipped: use TTU_SYNC to wait for that. 1489 */ 1490 if (flags & TTU_SYNC) 1491 pvmw.flags = PVMW_SYNC; 1492 1493 if (flags & TTU_SPLIT_HUGE_PMD) 1494 split_huge_pmd_address(vma, address, false, folio); 1495 1496 /* 1497 * For THP, we have to assume the worse case ie pmd for invalidation. 1498 * For hugetlb, it could be much worse if we need to do pud 1499 * invalidation in the case of pmd sharing. 1500 * 1501 * Note that the folio can not be freed in this function as call of 1502 * try_to_unmap() must hold a reference on the folio. 1503 */ 1504 range.end = vma_address_end(&pvmw); 1505 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 1506 address, range.end); 1507 if (folio_test_hugetlb(folio)) { 1508 /* 1509 * If sharing is possible, start and end will be adjusted 1510 * accordingly. 1511 */ 1512 adjust_range_if_pmd_sharing_possible(vma, &range.start, 1513 &range.end); 1514 } 1515 mmu_notifier_invalidate_range_start(&range); 1516 1517 while (page_vma_mapped_walk(&pvmw)) { 1518 /* Unexpected PMD-mapped THP? */ 1519 VM_BUG_ON_FOLIO(!pvmw.pte, folio); 1520 1521 /* 1522 * If the folio is in an mlock()d vma, we must not swap it out. 1523 */ 1524 if (!(flags & TTU_IGNORE_MLOCK) && 1525 (vma->vm_flags & VM_LOCKED)) { 1526 /* Restore the mlock which got missed */ 1527 mlock_vma_folio(folio, vma, false); 1528 page_vma_mapped_walk_done(&pvmw); 1529 ret = false; 1530 break; 1531 } 1532 1533 subpage = folio_page(folio, 1534 pte_pfn(*pvmw.pte) - folio_pfn(folio)); 1535 address = pvmw.address; 1536 anon_exclusive = folio_test_anon(folio) && 1537 PageAnonExclusive(subpage); 1538 1539 if (folio_test_hugetlb(folio)) { 1540 bool anon = folio_test_anon(folio); 1541 1542 /* 1543 * The try_to_unmap() is only passed a hugetlb page 1544 * in the case where the hugetlb page is poisoned. 1545 */ 1546 VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage); 1547 /* 1548 * huge_pmd_unshare may unmap an entire PMD page. 1549 * There is no way of knowing exactly which PMDs may 1550 * be cached for this mm, so we must flush them all. 1551 * start/end were already adjusted above to cover this 1552 * range. 1553 */ 1554 flush_cache_range(vma, range.start, range.end); 1555 1556 /* 1557 * To call huge_pmd_unshare, i_mmap_rwsem must be 1558 * held in write mode. Caller needs to explicitly 1559 * do this outside rmap routines. 1560 */ 1561 VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED)); 1562 if (!anon && huge_pmd_unshare(mm, vma, address, pvmw.pte)) { 1563 flush_tlb_range(vma, range.start, range.end); 1564 mmu_notifier_invalidate_range(mm, range.start, 1565 range.end); 1566 1567 /* 1568 * The ref count of the PMD page was dropped 1569 * which is part of the way map counting 1570 * is done for shared PMDs. Return 'true' 1571 * here. When there is no other sharing, 1572 * huge_pmd_unshare returns false and we will 1573 * unmap the actual page and drop map count 1574 * to zero. 1575 */ 1576 page_vma_mapped_walk_done(&pvmw); 1577 break; 1578 } 1579 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); 1580 } else { 1581 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 1582 /* 1583 * Nuke the page table entry. When having to clear 1584 * PageAnonExclusive(), we always have to flush. 1585 */ 1586 if (should_defer_flush(mm, flags) && !anon_exclusive) { 1587 /* 1588 * We clear the PTE but do not flush so potentially 1589 * a remote CPU could still be writing to the folio. 1590 * If the entry was previously clean then the 1591 * architecture must guarantee that a clear->dirty 1592 * transition on a cached TLB entry is written through 1593 * and traps if the PTE is unmapped. 1594 */ 1595 pteval = ptep_get_and_clear(mm, address, pvmw.pte); 1596 1597 set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); 1598 } else { 1599 pteval = ptep_clear_flush(vma, address, pvmw.pte); 1600 } 1601 } 1602 1603 /* 1604 * Now the pte is cleared. If this pte was uffd-wp armed, 1605 * we may want to replace a none pte with a marker pte if 1606 * it's file-backed, so we don't lose the tracking info. 1607 */ 1608 pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); 1609 1610 /* Set the dirty flag on the folio now the pte is gone. */ 1611 if (pte_dirty(pteval)) 1612 folio_mark_dirty(folio); 1613 1614 /* Update high watermark before we lower rss */ 1615 update_hiwater_rss(mm); 1616 1617 if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) { 1618 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 1619 if (folio_test_hugetlb(folio)) { 1620 hugetlb_count_sub(folio_nr_pages(folio), mm); 1621 set_huge_pte_at(mm, address, pvmw.pte, pteval); 1622 } else { 1623 dec_mm_counter(mm, mm_counter(&folio->page)); 1624 set_pte_at(mm, address, pvmw.pte, pteval); 1625 } 1626 1627 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 1628 /* 1629 * The guest indicated that the page content is of no 1630 * interest anymore. Simply discard the pte, vmscan 1631 * will take care of the rest. 1632 * A future reference will then fault in a new zero 1633 * page. When userfaultfd is active, we must not drop 1634 * this page though, as its main user (postcopy 1635 * migration) will not expect userfaults on already 1636 * copied pages. 1637 */ 1638 dec_mm_counter(mm, mm_counter(&folio->page)); 1639 /* We have to invalidate as we cleared the pte */ 1640 mmu_notifier_invalidate_range(mm, address, 1641 address + PAGE_SIZE); 1642 } else if (folio_test_anon(folio)) { 1643 swp_entry_t entry = { .val = page_private(subpage) }; 1644 pte_t swp_pte; 1645 /* 1646 * Store the swap location in the pte. 1647 * See handle_pte_fault() ... 1648 */ 1649 if (unlikely(folio_test_swapbacked(folio) != 1650 folio_test_swapcache(folio))) { 1651 WARN_ON_ONCE(1); 1652 ret = false; 1653 /* We have to invalidate as we cleared the pte */ 1654 mmu_notifier_invalidate_range(mm, address, 1655 address + PAGE_SIZE); 1656 page_vma_mapped_walk_done(&pvmw); 1657 break; 1658 } 1659 1660 /* MADV_FREE page check */ 1661 if (!folio_test_swapbacked(folio)) { 1662 int ref_count, map_count; 1663 1664 /* 1665 * Synchronize with gup_pte_range(): 1666 * - clear PTE; barrier; read refcount 1667 * - inc refcount; barrier; read PTE 1668 */ 1669 smp_mb(); 1670 1671 ref_count = folio_ref_count(folio); 1672 map_count = folio_mapcount(folio); 1673 1674 /* 1675 * Order reads for page refcount and dirty flag 1676 * (see comments in __remove_mapping()). 1677 */ 1678 smp_rmb(); 1679 1680 /* 1681 * The only page refs must be one from isolation 1682 * plus the rmap(s) (dropped by discard:). 1683 */ 1684 if (ref_count == 1 + map_count && 1685 !folio_test_dirty(folio)) { 1686 /* Invalidate as we cleared the pte */ 1687 mmu_notifier_invalidate_range(mm, 1688 address, address + PAGE_SIZE); 1689 dec_mm_counter(mm, MM_ANONPAGES); 1690 goto discard; 1691 } 1692 1693 /* 1694 * If the folio was redirtied, it cannot be 1695 * discarded. Remap the page to page table. 1696 */ 1697 set_pte_at(mm, address, pvmw.pte, pteval); 1698 folio_set_swapbacked(folio); 1699 ret = false; 1700 page_vma_mapped_walk_done(&pvmw); 1701 break; 1702 } 1703 1704 if (swap_duplicate(entry) < 0) { 1705 set_pte_at(mm, address, pvmw.pte, pteval); 1706 ret = false; 1707 page_vma_mapped_walk_done(&pvmw); 1708 break; 1709 } 1710 if (arch_unmap_one(mm, vma, address, pteval) < 0) { 1711 swap_free(entry); 1712 set_pte_at(mm, address, pvmw.pte, pteval); 1713 ret = false; 1714 page_vma_mapped_walk_done(&pvmw); 1715 break; 1716 } 1717 if (anon_exclusive && 1718 page_try_share_anon_rmap(subpage)) { 1719 swap_free(entry); 1720 set_pte_at(mm, address, pvmw.pte, pteval); 1721 ret = false; 1722 page_vma_mapped_walk_done(&pvmw); 1723 break; 1724 } 1725 /* 1726 * Note: We *don't* remember if the page was mapped 1727 * exclusively in the swap pte if the architecture 1728 * doesn't support __HAVE_ARCH_PTE_SWP_EXCLUSIVE. In 1729 * that case, swapin code has to re-determine that 1730 * manually and might detect the page as possibly 1731 * shared, for example, if there are other references on 1732 * the page or if the page is under writeback. We made 1733 * sure that there are no GUP pins on the page that 1734 * would rely on it, so for GUP pins this is fine. 1735 */ 1736 if (list_empty(&mm->mmlist)) { 1737 spin_lock(&mmlist_lock); 1738 if (list_empty(&mm->mmlist)) 1739 list_add(&mm->mmlist, &init_mm.mmlist); 1740 spin_unlock(&mmlist_lock); 1741 } 1742 dec_mm_counter(mm, MM_ANONPAGES); 1743 inc_mm_counter(mm, MM_SWAPENTS); 1744 swp_pte = swp_entry_to_pte(entry); 1745 if (anon_exclusive) 1746 swp_pte = pte_swp_mkexclusive(swp_pte); 1747 if (pte_soft_dirty(pteval)) 1748 swp_pte = pte_swp_mksoft_dirty(swp_pte); 1749 if (pte_uffd_wp(pteval)) 1750 swp_pte = pte_swp_mkuffd_wp(swp_pte); 1751 set_pte_at(mm, address, pvmw.pte, swp_pte); 1752 /* Invalidate as we cleared the pte */ 1753 mmu_notifier_invalidate_range(mm, address, 1754 address + PAGE_SIZE); 1755 } else { 1756 /* 1757 * This is a locked file-backed folio, 1758 * so it cannot be removed from the page 1759 * cache and replaced by a new folio before 1760 * mmu_notifier_invalidate_range_end, so no 1761 * concurrent thread might update its page table 1762 * to point at a new folio while a device is 1763 * still using this folio. 1764 * 1765 * See Documentation/mm/mmu_notifier.rst 1766 */ 1767 dec_mm_counter(mm, mm_counter_file(&folio->page)); 1768 } 1769 discard: 1770 /* 1771 * No need to call mmu_notifier_invalidate_range() it has be 1772 * done above for all cases requiring it to happen under page 1773 * table lock before mmu_notifier_invalidate_range_end() 1774 * 1775 * See Documentation/mm/mmu_notifier.rst 1776 */ 1777 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); 1778 if (vma->vm_flags & VM_LOCKED) 1779 mlock_page_drain_local(); 1780 folio_put(folio); 1781 } 1782 1783 mmu_notifier_invalidate_range_end(&range); 1784 1785 return ret; 1786 } 1787 1788 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) 1789 { 1790 return vma_is_temporary_stack(vma); 1791 } 1792 1793 static int page_not_mapped(struct folio *folio) 1794 { 1795 return !folio_mapped(folio); 1796 } 1797 1798 /** 1799 * try_to_unmap - Try to remove all page table mappings to a folio. 1800 * @folio: The folio to unmap. 1801 * @flags: action and flags 1802 * 1803 * Tries to remove all the page table entries which are mapping this 1804 * folio. It is the caller's responsibility to check if the folio is 1805 * still mapped if needed (use TTU_SYNC to prevent accounting races). 1806 * 1807 * Context: Caller must hold the folio lock. 1808 */ 1809 void try_to_unmap(struct folio *folio, enum ttu_flags flags) 1810 { 1811 struct rmap_walk_control rwc = { 1812 .rmap_one = try_to_unmap_one, 1813 .arg = (void *)flags, 1814 .done = page_not_mapped, 1815 .anon_lock = folio_lock_anon_vma_read, 1816 }; 1817 1818 if (flags & TTU_RMAP_LOCKED) 1819 rmap_walk_locked(folio, &rwc); 1820 else 1821 rmap_walk(folio, &rwc); 1822 } 1823 1824 /* 1825 * @arg: enum ttu_flags will be passed to this argument. 1826 * 1827 * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs 1828 * containing migration entries. 1829 */ 1830 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, 1831 unsigned long address, void *arg) 1832 { 1833 struct mm_struct *mm = vma->vm_mm; 1834 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 1835 pte_t pteval; 1836 struct page *subpage; 1837 bool anon_exclusive, ret = true; 1838 struct mmu_notifier_range range; 1839 enum ttu_flags flags = (enum ttu_flags)(long)arg; 1840 1841 /* 1842 * When racing against e.g. zap_pte_range() on another cpu, 1843 * in between its ptep_get_and_clear_full() and page_remove_rmap(), 1844 * try_to_migrate() may return before page_mapped() has become false, 1845 * if page table locking is skipped: use TTU_SYNC to wait for that. 1846 */ 1847 if (flags & TTU_SYNC) 1848 pvmw.flags = PVMW_SYNC; 1849 1850 /* 1851 * unmap_page() in mm/huge_memory.c is the only user of migration with 1852 * TTU_SPLIT_HUGE_PMD and it wants to freeze. 1853 */ 1854 if (flags & TTU_SPLIT_HUGE_PMD) 1855 split_huge_pmd_address(vma, address, true, folio); 1856 1857 /* 1858 * For THP, we have to assume the worse case ie pmd for invalidation. 1859 * For hugetlb, it could be much worse if we need to do pud 1860 * invalidation in the case of pmd sharing. 1861 * 1862 * Note that the page can not be free in this function as call of 1863 * try_to_unmap() must hold a reference on the page. 1864 */ 1865 range.end = vma_address_end(&pvmw); 1866 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 1867 address, range.end); 1868 if (folio_test_hugetlb(folio)) { 1869 /* 1870 * If sharing is possible, start and end will be adjusted 1871 * accordingly. 1872 */ 1873 adjust_range_if_pmd_sharing_possible(vma, &range.start, 1874 &range.end); 1875 } 1876 mmu_notifier_invalidate_range_start(&range); 1877 1878 while (page_vma_mapped_walk(&pvmw)) { 1879 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 1880 /* PMD-mapped THP migration entry */ 1881 if (!pvmw.pte) { 1882 subpage = folio_page(folio, 1883 pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); 1884 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || 1885 !folio_test_pmd_mappable(folio), folio); 1886 1887 if (set_pmd_migration_entry(&pvmw, subpage)) { 1888 ret = false; 1889 page_vma_mapped_walk_done(&pvmw); 1890 break; 1891 } 1892 continue; 1893 } 1894 #endif 1895 1896 /* Unexpected PMD-mapped THP? */ 1897 VM_BUG_ON_FOLIO(!pvmw.pte, folio); 1898 1899 if (folio_is_zone_device(folio)) { 1900 /* 1901 * Our PTE is a non-present device exclusive entry and 1902 * calculating the subpage as for the common case would 1903 * result in an invalid pointer. 1904 * 1905 * Since only PAGE_SIZE pages can currently be 1906 * migrated, just set it to page. This will need to be 1907 * changed when hugepage migrations to device private 1908 * memory are supported. 1909 */ 1910 VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); 1911 subpage = &folio->page; 1912 } else { 1913 subpage = folio_page(folio, 1914 pte_pfn(*pvmw.pte) - folio_pfn(folio)); 1915 } 1916 address = pvmw.address; 1917 anon_exclusive = folio_test_anon(folio) && 1918 PageAnonExclusive(subpage); 1919 1920 if (folio_test_hugetlb(folio)) { 1921 bool anon = folio_test_anon(folio); 1922 1923 /* 1924 * huge_pmd_unshare may unmap an entire PMD page. 1925 * There is no way of knowing exactly which PMDs may 1926 * be cached for this mm, so we must flush them all. 1927 * start/end were already adjusted above to cover this 1928 * range. 1929 */ 1930 flush_cache_range(vma, range.start, range.end); 1931 1932 /* 1933 * To call huge_pmd_unshare, i_mmap_rwsem must be 1934 * held in write mode. Caller needs to explicitly 1935 * do this outside rmap routines. 1936 */ 1937 VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED)); 1938 if (!anon && huge_pmd_unshare(mm, vma, address, pvmw.pte)) { 1939 flush_tlb_range(vma, range.start, range.end); 1940 mmu_notifier_invalidate_range(mm, range.start, 1941 range.end); 1942 1943 /* 1944 * The ref count of the PMD page was dropped 1945 * which is part of the way map counting 1946 * is done for shared PMDs. Return 'true' 1947 * here. When there is no other sharing, 1948 * huge_pmd_unshare returns false and we will 1949 * unmap the actual page and drop map count 1950 * to zero. 1951 */ 1952 page_vma_mapped_walk_done(&pvmw); 1953 break; 1954 } 1955 1956 /* Nuke the hugetlb page table entry */ 1957 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); 1958 } else { 1959 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 1960 /* Nuke the page table entry. */ 1961 pteval = ptep_clear_flush(vma, address, pvmw.pte); 1962 } 1963 1964 /* Set the dirty flag on the folio now the pte is gone. */ 1965 if (pte_dirty(pteval)) 1966 folio_mark_dirty(folio); 1967 1968 /* Update high watermark before we lower rss */ 1969 update_hiwater_rss(mm); 1970 1971 if (folio_is_device_private(folio)) { 1972 unsigned long pfn = folio_pfn(folio); 1973 swp_entry_t entry; 1974 pte_t swp_pte; 1975 1976 if (anon_exclusive) 1977 BUG_ON(page_try_share_anon_rmap(subpage)); 1978 1979 /* 1980 * Store the pfn of the page in a special migration 1981 * pte. do_swap_page() will wait until the migration 1982 * pte is removed and then restart fault handling. 1983 */ 1984 entry = pte_to_swp_entry(pteval); 1985 if (is_writable_device_private_entry(entry)) 1986 entry = make_writable_migration_entry(pfn); 1987 else if (anon_exclusive) 1988 entry = make_readable_exclusive_migration_entry(pfn); 1989 else 1990 entry = make_readable_migration_entry(pfn); 1991 swp_pte = swp_entry_to_pte(entry); 1992 1993 /* 1994 * pteval maps a zone device page and is therefore 1995 * a swap pte. 1996 */ 1997 if (pte_swp_soft_dirty(pteval)) 1998 swp_pte = pte_swp_mksoft_dirty(swp_pte); 1999 if (pte_swp_uffd_wp(pteval)) 2000 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2001 set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); 2002 trace_set_migration_pte(pvmw.address, pte_val(swp_pte), 2003 compound_order(&folio->page)); 2004 /* 2005 * No need to invalidate here it will synchronize on 2006 * against the special swap migration pte. 2007 */ 2008 } else if (PageHWPoison(subpage)) { 2009 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 2010 if (folio_test_hugetlb(folio)) { 2011 hugetlb_count_sub(folio_nr_pages(folio), mm); 2012 set_huge_pte_at(mm, address, pvmw.pte, pteval); 2013 } else { 2014 dec_mm_counter(mm, mm_counter(&folio->page)); 2015 set_pte_at(mm, address, pvmw.pte, pteval); 2016 } 2017 2018 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 2019 /* 2020 * The guest indicated that the page content is of no 2021 * interest anymore. Simply discard the pte, vmscan 2022 * will take care of the rest. 2023 * A future reference will then fault in a new zero 2024 * page. When userfaultfd is active, we must not drop 2025 * this page though, as its main user (postcopy 2026 * migration) will not expect userfaults on already 2027 * copied pages. 2028 */ 2029 dec_mm_counter(mm, mm_counter(&folio->page)); 2030 /* We have to invalidate as we cleared the pte */ 2031 mmu_notifier_invalidate_range(mm, address, 2032 address + PAGE_SIZE); 2033 } else { 2034 swp_entry_t entry; 2035 pte_t swp_pte; 2036 2037 if (arch_unmap_one(mm, vma, address, pteval) < 0) { 2038 if (folio_test_hugetlb(folio)) 2039 set_huge_pte_at(mm, address, pvmw.pte, pteval); 2040 else 2041 set_pte_at(mm, address, pvmw.pte, pteval); 2042 ret = false; 2043 page_vma_mapped_walk_done(&pvmw); 2044 break; 2045 } 2046 VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && 2047 !anon_exclusive, subpage); 2048 if (anon_exclusive && 2049 page_try_share_anon_rmap(subpage)) { 2050 if (folio_test_hugetlb(folio)) 2051 set_huge_pte_at(mm, address, pvmw.pte, pteval); 2052 else 2053 set_pte_at(mm, address, pvmw.pte, pteval); 2054 ret = false; 2055 page_vma_mapped_walk_done(&pvmw); 2056 break; 2057 } 2058 2059 /* 2060 * Store the pfn of the page in a special migration 2061 * pte. do_swap_page() will wait until the migration 2062 * pte is removed and then restart fault handling. 2063 */ 2064 if (pte_write(pteval)) 2065 entry = make_writable_migration_entry( 2066 page_to_pfn(subpage)); 2067 else if (anon_exclusive) 2068 entry = make_readable_exclusive_migration_entry( 2069 page_to_pfn(subpage)); 2070 else 2071 entry = make_readable_migration_entry( 2072 page_to_pfn(subpage)); 2073 2074 swp_pte = swp_entry_to_pte(entry); 2075 if (pte_soft_dirty(pteval)) 2076 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2077 if (pte_uffd_wp(pteval)) 2078 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2079 if (folio_test_hugetlb(folio)) 2080 set_huge_pte_at(mm, address, pvmw.pte, swp_pte); 2081 else 2082 set_pte_at(mm, address, pvmw.pte, swp_pte); 2083 trace_set_migration_pte(address, pte_val(swp_pte), 2084 compound_order(&folio->page)); 2085 /* 2086 * No need to invalidate here it will synchronize on 2087 * against the special swap migration pte. 2088 */ 2089 } 2090 2091 /* 2092 * No need to call mmu_notifier_invalidate_range() it has be 2093 * done above for all cases requiring it to happen under page 2094 * table lock before mmu_notifier_invalidate_range_end() 2095 * 2096 * See Documentation/mm/mmu_notifier.rst 2097 */ 2098 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); 2099 if (vma->vm_flags & VM_LOCKED) 2100 mlock_page_drain_local(); 2101 folio_put(folio); 2102 } 2103 2104 mmu_notifier_invalidate_range_end(&range); 2105 2106 return ret; 2107 } 2108 2109 /** 2110 * try_to_migrate - try to replace all page table mappings with swap entries 2111 * @folio: the folio to replace page table entries for 2112 * @flags: action and flags 2113 * 2114 * Tries to remove all the page table entries which are mapping this folio and 2115 * replace them with special swap entries. Caller must hold the folio lock. 2116 */ 2117 void try_to_migrate(struct folio *folio, enum ttu_flags flags) 2118 { 2119 struct rmap_walk_control rwc = { 2120 .rmap_one = try_to_migrate_one, 2121 .arg = (void *)flags, 2122 .done = page_not_mapped, 2123 .anon_lock = folio_lock_anon_vma_read, 2124 }; 2125 2126 /* 2127 * Migration always ignores mlock and only supports TTU_RMAP_LOCKED and 2128 * TTU_SPLIT_HUGE_PMD and TTU_SYNC flags. 2129 */ 2130 if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | 2131 TTU_SYNC))) 2132 return; 2133 2134 if (folio_is_zone_device(folio) && 2135 (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) 2136 return; 2137 2138 /* 2139 * During exec, a temporary VMA is setup and later moved. 2140 * The VMA is moved under the anon_vma lock but not the 2141 * page tables leading to a race where migration cannot 2142 * find the migration ptes. Rather than increasing the 2143 * locking requirements of exec(), migration skips 2144 * temporary VMAs until after exec() completes. 2145 */ 2146 if (!folio_test_ksm(folio) && folio_test_anon(folio)) 2147 rwc.invalid_vma = invalid_migration_vma; 2148 2149 if (flags & TTU_RMAP_LOCKED) 2150 rmap_walk_locked(folio, &rwc); 2151 else 2152 rmap_walk(folio, &rwc); 2153 } 2154 2155 #ifdef CONFIG_DEVICE_PRIVATE 2156 struct make_exclusive_args { 2157 struct mm_struct *mm; 2158 unsigned long address; 2159 void *owner; 2160 bool valid; 2161 }; 2162 2163 static bool page_make_device_exclusive_one(struct folio *folio, 2164 struct vm_area_struct *vma, unsigned long address, void *priv) 2165 { 2166 struct mm_struct *mm = vma->vm_mm; 2167 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 2168 struct make_exclusive_args *args = priv; 2169 pte_t pteval; 2170 struct page *subpage; 2171 bool ret = true; 2172 struct mmu_notifier_range range; 2173 swp_entry_t entry; 2174 pte_t swp_pte; 2175 2176 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma, 2177 vma->vm_mm, address, min(vma->vm_end, 2178 address + folio_size(folio)), 2179 args->owner); 2180 mmu_notifier_invalidate_range_start(&range); 2181 2182 while (page_vma_mapped_walk(&pvmw)) { 2183 /* Unexpected PMD-mapped THP? */ 2184 VM_BUG_ON_FOLIO(!pvmw.pte, folio); 2185 2186 if (!pte_present(*pvmw.pte)) { 2187 ret = false; 2188 page_vma_mapped_walk_done(&pvmw); 2189 break; 2190 } 2191 2192 subpage = folio_page(folio, 2193 pte_pfn(*pvmw.pte) - folio_pfn(folio)); 2194 address = pvmw.address; 2195 2196 /* Nuke the page table entry. */ 2197 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 2198 pteval = ptep_clear_flush(vma, address, pvmw.pte); 2199 2200 /* Set the dirty flag on the folio now the pte is gone. */ 2201 if (pte_dirty(pteval)) 2202 folio_mark_dirty(folio); 2203 2204 /* 2205 * Check that our target page is still mapped at the expected 2206 * address. 2207 */ 2208 if (args->mm == mm && args->address == address && 2209 pte_write(pteval)) 2210 args->valid = true; 2211 2212 /* 2213 * Store the pfn of the page in a special migration 2214 * pte. do_swap_page() will wait until the migration 2215 * pte is removed and then restart fault handling. 2216 */ 2217 if (pte_write(pteval)) 2218 entry = make_writable_device_exclusive_entry( 2219 page_to_pfn(subpage)); 2220 else 2221 entry = make_readable_device_exclusive_entry( 2222 page_to_pfn(subpage)); 2223 swp_pte = swp_entry_to_pte(entry); 2224 if (pte_soft_dirty(pteval)) 2225 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2226 if (pte_uffd_wp(pteval)) 2227 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2228 2229 set_pte_at(mm, address, pvmw.pte, swp_pte); 2230 2231 /* 2232 * There is a reference on the page for the swap entry which has 2233 * been removed, so shouldn't take another. 2234 */ 2235 page_remove_rmap(subpage, vma, false); 2236 } 2237 2238 mmu_notifier_invalidate_range_end(&range); 2239 2240 return ret; 2241 } 2242 2243 /** 2244 * folio_make_device_exclusive - Mark the folio exclusively owned by a device. 2245 * @folio: The folio to replace page table entries for. 2246 * @mm: The mm_struct where the folio is expected to be mapped. 2247 * @address: Address where the folio is expected to be mapped. 2248 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks 2249 * 2250 * Tries to remove all the page table entries which are mapping this 2251 * folio and replace them with special device exclusive swap entries to 2252 * grant a device exclusive access to the folio. 2253 * 2254 * Context: Caller must hold the folio lock. 2255 * Return: false if the page is still mapped, or if it could not be unmapped 2256 * from the expected address. Otherwise returns true (success). 2257 */ 2258 static bool folio_make_device_exclusive(struct folio *folio, 2259 struct mm_struct *mm, unsigned long address, void *owner) 2260 { 2261 struct make_exclusive_args args = { 2262 .mm = mm, 2263 .address = address, 2264 .owner = owner, 2265 .valid = false, 2266 }; 2267 struct rmap_walk_control rwc = { 2268 .rmap_one = page_make_device_exclusive_one, 2269 .done = page_not_mapped, 2270 .anon_lock = folio_lock_anon_vma_read, 2271 .arg = &args, 2272 }; 2273 2274 /* 2275 * Restrict to anonymous folios for now to avoid potential writeback 2276 * issues. 2277 */ 2278 if (!folio_test_anon(folio)) 2279 return false; 2280 2281 rmap_walk(folio, &rwc); 2282 2283 return args.valid && !folio_mapcount(folio); 2284 } 2285 2286 /** 2287 * make_device_exclusive_range() - Mark a range for exclusive use by a device 2288 * @mm: mm_struct of associated target process 2289 * @start: start of the region to mark for exclusive device access 2290 * @end: end address of region 2291 * @pages: returns the pages which were successfully marked for exclusive access 2292 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering 2293 * 2294 * Returns: number of pages found in the range by GUP. A page is marked for 2295 * exclusive access only if the page pointer is non-NULL. 2296 * 2297 * This function finds ptes mapping page(s) to the given address range, locks 2298 * them and replaces mappings with special swap entries preventing userspace CPU 2299 * access. On fault these entries are replaced with the original mapping after 2300 * calling MMU notifiers. 2301 * 2302 * A driver using this to program access from a device must use a mmu notifier 2303 * critical section to hold a device specific lock during programming. Once 2304 * programming is complete it should drop the page lock and reference after 2305 * which point CPU access to the page will revoke the exclusive access. 2306 */ 2307 int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, 2308 unsigned long end, struct page **pages, 2309 void *owner) 2310 { 2311 long npages = (end - start) >> PAGE_SHIFT; 2312 long i; 2313 2314 npages = get_user_pages_remote(mm, start, npages, 2315 FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, 2316 pages, NULL, NULL); 2317 if (npages < 0) 2318 return npages; 2319 2320 for (i = 0; i < npages; i++, start += PAGE_SIZE) { 2321 struct folio *folio = page_folio(pages[i]); 2322 if (PageTail(pages[i]) || !folio_trylock(folio)) { 2323 folio_put(folio); 2324 pages[i] = NULL; 2325 continue; 2326 } 2327 2328 if (!folio_make_device_exclusive(folio, mm, start, owner)) { 2329 folio_unlock(folio); 2330 folio_put(folio); 2331 pages[i] = NULL; 2332 } 2333 } 2334 2335 return npages; 2336 } 2337 EXPORT_SYMBOL_GPL(make_device_exclusive_range); 2338 #endif 2339 2340 void __put_anon_vma(struct anon_vma *anon_vma) 2341 { 2342 struct anon_vma *root = anon_vma->root; 2343 2344 anon_vma_free(anon_vma); 2345 if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 2346 anon_vma_free(root); 2347 } 2348 2349 static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, 2350 struct rmap_walk_control *rwc) 2351 { 2352 struct anon_vma *anon_vma; 2353 2354 if (rwc->anon_lock) 2355 return rwc->anon_lock(folio, rwc); 2356 2357 /* 2358 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() 2359 * because that depends on page_mapped(); but not all its usages 2360 * are holding mmap_lock. Users without mmap_lock are required to 2361 * take a reference count to prevent the anon_vma disappearing 2362 */ 2363 anon_vma = folio_anon_vma(folio); 2364 if (!anon_vma) 2365 return NULL; 2366 2367 if (anon_vma_trylock_read(anon_vma)) 2368 goto out; 2369 2370 if (rwc->try_lock) { 2371 anon_vma = NULL; 2372 rwc->contended = true; 2373 goto out; 2374 } 2375 2376 anon_vma_lock_read(anon_vma); 2377 out: 2378 return anon_vma; 2379 } 2380 2381 /* 2382 * rmap_walk_anon - do something to anonymous page using the object-based 2383 * rmap method 2384 * @page: the page to be handled 2385 * @rwc: control variable according to each walk type 2386 * 2387 * Find all the mappings of a page using the mapping pointer and the vma chains 2388 * contained in the anon_vma struct it points to. 2389 */ 2390 static void rmap_walk_anon(struct folio *folio, 2391 struct rmap_walk_control *rwc, bool locked) 2392 { 2393 struct anon_vma *anon_vma; 2394 pgoff_t pgoff_start, pgoff_end; 2395 struct anon_vma_chain *avc; 2396 2397 if (locked) { 2398 anon_vma = folio_anon_vma(folio); 2399 /* anon_vma disappear under us? */ 2400 VM_BUG_ON_FOLIO(!anon_vma, folio); 2401 } else { 2402 anon_vma = rmap_walk_anon_lock(folio, rwc); 2403 } 2404 if (!anon_vma) 2405 return; 2406 2407 pgoff_start = folio_pgoff(folio); 2408 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; 2409 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, 2410 pgoff_start, pgoff_end) { 2411 struct vm_area_struct *vma = avc->vma; 2412 unsigned long address = vma_address(&folio->page, vma); 2413 2414 VM_BUG_ON_VMA(address == -EFAULT, vma); 2415 cond_resched(); 2416 2417 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2418 continue; 2419 2420 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 2421 break; 2422 if (rwc->done && rwc->done(folio)) 2423 break; 2424 } 2425 2426 if (!locked) 2427 anon_vma_unlock_read(anon_vma); 2428 } 2429 2430 /* 2431 * rmap_walk_file - do something to file page using the object-based rmap method 2432 * @page: the page to be handled 2433 * @rwc: control variable according to each walk type 2434 * 2435 * Find all the mappings of a page using the mapping pointer and the vma chains 2436 * contained in the address_space struct it points to. 2437 */ 2438 static void rmap_walk_file(struct folio *folio, 2439 struct rmap_walk_control *rwc, bool locked) 2440 { 2441 struct address_space *mapping = folio_mapping(folio); 2442 pgoff_t pgoff_start, pgoff_end; 2443 struct vm_area_struct *vma; 2444 2445 /* 2446 * The page lock not only makes sure that page->mapping cannot 2447 * suddenly be NULLified by truncation, it makes sure that the 2448 * structure at mapping cannot be freed and reused yet, 2449 * so we can safely take mapping->i_mmap_rwsem. 2450 */ 2451 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 2452 2453 if (!mapping) 2454 return; 2455 2456 pgoff_start = folio_pgoff(folio); 2457 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; 2458 if (!locked) { 2459 if (i_mmap_trylock_read(mapping)) 2460 goto lookup; 2461 2462 if (rwc->try_lock) { 2463 rwc->contended = true; 2464 return; 2465 } 2466 2467 i_mmap_lock_read(mapping); 2468 } 2469 lookup: 2470 vma_interval_tree_foreach(vma, &mapping->i_mmap, 2471 pgoff_start, pgoff_end) { 2472 unsigned long address = vma_address(&folio->page, vma); 2473 2474 VM_BUG_ON_VMA(address == -EFAULT, vma); 2475 cond_resched(); 2476 2477 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2478 continue; 2479 2480 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 2481 goto done; 2482 if (rwc->done && rwc->done(folio)) 2483 goto done; 2484 } 2485 2486 done: 2487 if (!locked) 2488 i_mmap_unlock_read(mapping); 2489 } 2490 2491 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) 2492 { 2493 if (unlikely(folio_test_ksm(folio))) 2494 rmap_walk_ksm(folio, rwc); 2495 else if (folio_test_anon(folio)) 2496 rmap_walk_anon(folio, rwc, false); 2497 else 2498 rmap_walk_file(folio, rwc, false); 2499 } 2500 2501 /* Like rmap_walk, but caller holds relevant rmap lock */ 2502 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) 2503 { 2504 /* no ksm support for now */ 2505 VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); 2506 if (folio_test_anon(folio)) 2507 rmap_walk_anon(folio, rwc, true); 2508 else 2509 rmap_walk_file(folio, rwc, true); 2510 } 2511 2512 #ifdef CONFIG_HUGETLB_PAGE 2513 /* 2514 * The following two functions are for anonymous (private mapped) hugepages. 2515 * Unlike common anonymous pages, anonymous hugepages have no accounting code 2516 * and no lru code, because we handle hugepages differently from common pages. 2517 * 2518 * RMAP_COMPOUND is ignored. 2519 */ 2520 void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, 2521 unsigned long address, rmap_t flags) 2522 { 2523 struct anon_vma *anon_vma = vma->anon_vma; 2524 int first; 2525 2526 BUG_ON(!PageLocked(page)); 2527 BUG_ON(!anon_vma); 2528 /* address might be in next vma when migration races vma_adjust */ 2529 first = atomic_inc_and_test(compound_mapcount_ptr(page)); 2530 VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); 2531 VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); 2532 if (first) 2533 __page_set_anon_rmap(page, vma, address, 2534 !!(flags & RMAP_EXCLUSIVE)); 2535 } 2536 2537 void hugepage_add_new_anon_rmap(struct page *page, 2538 struct vm_area_struct *vma, unsigned long address) 2539 { 2540 BUG_ON(address < vma->vm_start || address >= vma->vm_end); 2541 atomic_set(compound_mapcount_ptr(page), 0); 2542 atomic_set(compound_pincount_ptr(page), 0); 2543 2544 __page_set_anon_rmap(page, vma, address, 1); 2545 } 2546 #endif /* CONFIG_HUGETLB_PAGE */ 2547