1 /* 2 * mm/rmap.c - physical to virtual reverse mappings 3 * 4 * Copyright 2001, Rik van Riel <riel@conectiva.com.br> 5 * Released under the General Public License (GPL). 6 * 7 * Simple, low overhead reverse mapping scheme. 8 * Please try to keep this thing as modular as possible. 9 * 10 * Provides methods for unmapping each kind of mapped page: 11 * the anon methods track anonymous pages, and 12 * the file methods track pages belonging to an inode. 13 * 14 * Original design by Rik van Riel <riel@conectiva.com.br> 2001 15 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004 16 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004 17 * Contributions by Hugh Dickins 2003, 2004 18 */ 19 20 /* 21 * Lock ordering in mm: 22 * 23 * inode->i_mutex (while writing or truncating, not reading or faulting) 24 * mm->mmap_lock 25 * page->flags PG_locked (lock_page) * (see hugetlbfs below) 26 * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share) 27 * mapping->i_mmap_rwsem 28 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex) 29 * anon_vma->rwsem 30 * mm->page_table_lock or pte_lock 31 * swap_lock (in swap_duplicate, swap_info_get) 32 * mmlist_lock (in mmput, drain_mmlist and others) 33 * mapping->private_lock (in __set_page_dirty_buffers) 34 * lock_page_memcg move_lock (in __set_page_dirty_buffers) 35 * i_pages lock (widely used) 36 * lruvec->lru_lock (in lock_page_lruvec_irq) 37 * inode->i_lock (in set_page_dirty's __mark_inode_dirty) 38 * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty) 39 * sb_lock (within inode_lock in fs/fs-writeback.c) 40 * i_pages lock (widely used, in set_page_dirty, 41 * in arch-dependent flush_dcache_mmap_lock, 42 * within bdi.wb->list_lock in __sync_single_inode) 43 * 44 * anon_vma->rwsem,mapping->i_mutex (memory_failure, collect_procs_anon) 45 * ->tasklist_lock 46 * pte map lock 47 * 48 * * hugetlbfs PageHuge() pages take locks in this order: 49 * mapping->i_mmap_rwsem 50 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex) 51 * page->flags PG_locked (lock_page) 52 */ 53 54 #include <linux/mm.h> 55 #include <linux/sched/mm.h> 56 #include <linux/sched/task.h> 57 #include <linux/pagemap.h> 58 #include <linux/swap.h> 59 #include <linux/swapops.h> 60 #include <linux/slab.h> 61 #include <linux/init.h> 62 #include <linux/ksm.h> 63 #include <linux/rmap.h> 64 #include <linux/rcupdate.h> 65 #include <linux/export.h> 66 #include <linux/memcontrol.h> 67 #include <linux/mmu_notifier.h> 68 #include <linux/migrate.h> 69 #include <linux/hugetlb.h> 70 #include <linux/huge_mm.h> 71 #include <linux/backing-dev.h> 72 #include <linux/page_idle.h> 73 #include <linux/memremap.h> 74 #include <linux/userfaultfd_k.h> 75 76 #include <asm/tlbflush.h> 77 78 #include <trace/events/tlb.h> 79 80 #include "internal.h" 81 82 static struct kmem_cache *anon_vma_cachep; 83 static struct kmem_cache *anon_vma_chain_cachep; 84 85 static inline struct anon_vma *anon_vma_alloc(void) 86 { 87 struct anon_vma *anon_vma; 88 89 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); 90 if (anon_vma) { 91 atomic_set(&anon_vma->refcount, 1); 92 anon_vma->degree = 1; /* Reference for first vma */ 93 anon_vma->parent = anon_vma; 94 /* 95 * Initialise the anon_vma root to point to itself. If called 96 * from fork, the root will be reset to the parent's anon_vma. 
97 */ 98 anon_vma->root = anon_vma; 99 } 100 101 return anon_vma; 102 } 103 104 static inline void anon_vma_free(struct anon_vma *anon_vma) 105 { 106 VM_BUG_ON(atomic_read(&anon_vma->refcount)); 107 108 /* 109 * Synchronize against page_lock_anon_vma_read() such that 110 * we can safely hold the lock without the anon_vma getting 111 * freed. 112 * 113 * Relies on the full mb implied by the atomic_dec_and_test() from 114 * put_anon_vma() against the acquire barrier implied by 115 * down_read_trylock() from page_lock_anon_vma_read(). This orders: 116 * 117 * page_lock_anon_vma_read() VS put_anon_vma() 118 * down_read_trylock() atomic_dec_and_test() 119 * LOCK MB 120 * atomic_read() rwsem_is_locked() 121 * 122 * LOCK should suffice since the actual taking of the lock must 123 * happen _before_ what follows. 124 */ 125 might_sleep(); 126 if (rwsem_is_locked(&anon_vma->root->rwsem)) { 127 anon_vma_lock_write(anon_vma); 128 anon_vma_unlock_write(anon_vma); 129 } 130 131 kmem_cache_free(anon_vma_cachep, anon_vma); 132 } 133 134 static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) 135 { 136 return kmem_cache_alloc(anon_vma_chain_cachep, gfp); 137 } 138 139 static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) 140 { 141 kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); 142 } 143 144 static void anon_vma_chain_link(struct vm_area_struct *vma, 145 struct anon_vma_chain *avc, 146 struct anon_vma *anon_vma) 147 { 148 avc->vma = vma; 149 avc->anon_vma = anon_vma; 150 list_add(&avc->same_vma, &vma->anon_vma_chain); 151 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); 152 } 153 154 /** 155 * __anon_vma_prepare - attach an anon_vma to a memory region 156 * @vma: the memory region in question 157 * 158 * This makes sure the memory mapping described by 'vma' has 159 * an 'anon_vma' attached to it, so that we can associate the 160 * anonymous pages mapped into it with that anon_vma. 161 * 162 * The common case will be that we already have one, which 163 * is handled inline by anon_vma_prepare(). But if 164 * not we either need to find an adjacent mapping that we 165 * can re-use the anon_vma from (very common when the only 166 * reason for splitting a vma has been mprotect()), or we 167 * allocate a new one. 168 * 169 * Anon-vma allocations are very subtle, because we may have 170 * optimistically looked up an anon_vma in page_lock_anon_vma_read() 171 * and that may actually touch the rwsem even in the newly 172 * allocated vma (it depends on RCU to make sure that the 173 * anon_vma isn't actually destroyed). 174 * 175 * As a result, we need to do proper anon_vma locking even 176 * for the new allocation. At the same time, we do not want 177 * to do any locking for the common case of already having 178 * an anon_vma. 179 * 180 * This must be called with the mmap_lock held for reading. 
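 *
 * For illustration only (an assumption about the callers, not part of this
 * file): the usual entry point is the anon_vma_prepare() wrapper in
 * <linux/rmap.h>, which fault handlers call before installing the first
 * anonymous pte in a vma, roughly:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *
 * Only when vma->anon_vma is still NULL does that wrapper fall through to
 * __anon_vma_prepare() below.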
181 */ 182 int __anon_vma_prepare(struct vm_area_struct *vma) 183 { 184 struct mm_struct *mm = vma->vm_mm; 185 struct anon_vma *anon_vma, *allocated; 186 struct anon_vma_chain *avc; 187 188 might_sleep(); 189 190 avc = anon_vma_chain_alloc(GFP_KERNEL); 191 if (!avc) 192 goto out_enomem; 193 194 anon_vma = find_mergeable_anon_vma(vma); 195 allocated = NULL; 196 if (!anon_vma) { 197 anon_vma = anon_vma_alloc(); 198 if (unlikely(!anon_vma)) 199 goto out_enomem_free_avc; 200 allocated = anon_vma; 201 } 202 203 anon_vma_lock_write(anon_vma); 204 /* page_table_lock to protect against threads */ 205 spin_lock(&mm->page_table_lock); 206 if (likely(!vma->anon_vma)) { 207 vma->anon_vma = anon_vma; 208 anon_vma_chain_link(vma, avc, anon_vma); 209 /* vma reference or self-parent link for new root */ 210 anon_vma->degree++; 211 allocated = NULL; 212 avc = NULL; 213 } 214 spin_unlock(&mm->page_table_lock); 215 anon_vma_unlock_write(anon_vma); 216 217 if (unlikely(allocated)) 218 put_anon_vma(allocated); 219 if (unlikely(avc)) 220 anon_vma_chain_free(avc); 221 222 return 0; 223 224 out_enomem_free_avc: 225 anon_vma_chain_free(avc); 226 out_enomem: 227 return -ENOMEM; 228 } 229 230 /* 231 * This is a useful helper function for locking the anon_vma root as 232 * we traverse the vma->anon_vma_chain, looping over anon_vma's that 233 * have the same vma. 234 * 235 * Such anon_vma's should have the same root, so you'd expect to see 236 * just a single mutex_lock for the whole traversal. 237 */ 238 static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma) 239 { 240 struct anon_vma *new_root = anon_vma->root; 241 if (new_root != root) { 242 if (WARN_ON_ONCE(root)) 243 up_write(&root->rwsem); 244 root = new_root; 245 down_write(&root->rwsem); 246 } 247 return root; 248 } 249 250 static inline void unlock_anon_vma_root(struct anon_vma *root) 251 { 252 if (root) 253 up_write(&root->rwsem); 254 } 255 256 /* 257 * Attach the anon_vmas from src to dst. 258 * Returns 0 on success, -ENOMEM on failure. 259 * 260 * anon_vma_clone() is called by __vma_adjust(), __split_vma(), copy_vma() and 261 * anon_vma_fork(). The first three want an exact copy of src, while the last 262 * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent 263 * endless growth of anon_vma. Since dst->anon_vma is set to NULL before call, 264 * we can identify this case by checking (!dst->anon_vma && src->anon_vma). 265 * 266 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find 267 * and reuse existing anon_vma which has no vmas and only one child anon_vma. 268 * This prevents degradation of anon_vma hierarchy to endless linear chain in 269 * case of constantly forking task. On the other hand, an anon_vma with more 270 * than one child isn't reused even if there was no alive vma, thus rmap 271 * walker has a good chance of avoiding scanning the whole hierarchy when it 272 * searches where page is mapped. 
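 *
 * Worked example (illustrative): an anon_vma whose own vma has already been
 * unmapped but which still has exactly one child anon_vma is left with
 * degree == 1. A later anon_vma_fork() walking this chain may adopt it as
 * the child's anon_vma instead of allocating a fresh leaf, which is what the
 * degree < 2 test below implements; src->anon_vma itself is excluded so the
 * first child does not always capture its parent.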
273 */ 274 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) 275 { 276 struct anon_vma_chain *avc, *pavc; 277 struct anon_vma *root = NULL; 278 279 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { 280 struct anon_vma *anon_vma; 281 282 avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); 283 if (unlikely(!avc)) { 284 unlock_anon_vma_root(root); 285 root = NULL; 286 avc = anon_vma_chain_alloc(GFP_KERNEL); 287 if (!avc) 288 goto enomem_failure; 289 } 290 anon_vma = pavc->anon_vma; 291 root = lock_anon_vma_root(root, anon_vma); 292 anon_vma_chain_link(dst, avc, anon_vma); 293 294 /* 295 * Reuse existing anon_vma if its degree lower than two, 296 * that means it has no vma and only one anon_vma child. 297 * 298 * Do not chose parent anon_vma, otherwise first child 299 * will always reuse it. Root anon_vma is never reused: 300 * it has self-parent reference and at least one child. 301 */ 302 if (!dst->anon_vma && src->anon_vma && 303 anon_vma != src->anon_vma && anon_vma->degree < 2) 304 dst->anon_vma = anon_vma; 305 } 306 if (dst->anon_vma) 307 dst->anon_vma->degree++; 308 unlock_anon_vma_root(root); 309 return 0; 310 311 enomem_failure: 312 /* 313 * dst->anon_vma is dropped here otherwise its degree can be incorrectly 314 * decremented in unlink_anon_vmas(). 315 * We can safely do this because callers of anon_vma_clone() don't care 316 * about dst->anon_vma if anon_vma_clone() failed. 317 */ 318 dst->anon_vma = NULL; 319 unlink_anon_vmas(dst); 320 return -ENOMEM; 321 } 322 323 /* 324 * Attach vma to its own anon_vma, as well as to the anon_vmas that 325 * the corresponding VMA in the parent process is attached to. 326 * Returns 0 on success, non-zero on failure. 327 */ 328 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) 329 { 330 struct anon_vma_chain *avc; 331 struct anon_vma *anon_vma; 332 int error; 333 334 /* Don't bother if the parent process has no anon_vma here. */ 335 if (!pvma->anon_vma) 336 return 0; 337 338 /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ 339 vma->anon_vma = NULL; 340 341 /* 342 * First, attach the new VMA to the parent VMA's anon_vmas, 343 * so rmap can find non-COWed pages in child processes. 344 */ 345 error = anon_vma_clone(vma, pvma); 346 if (error) 347 return error; 348 349 /* An existing anon_vma has been reused, all done then. */ 350 if (vma->anon_vma) 351 return 0; 352 353 /* Then add our own anon_vma. */ 354 anon_vma = anon_vma_alloc(); 355 if (!anon_vma) 356 goto out_error; 357 avc = anon_vma_chain_alloc(GFP_KERNEL); 358 if (!avc) 359 goto out_error_free_anon_vma; 360 361 /* 362 * The root anon_vma's rwsem is the lock actually used when we 363 * lock any of the anon_vmas in this anon_vma tree. 364 */ 365 anon_vma->root = pvma->anon_vma->root; 366 anon_vma->parent = pvma->anon_vma; 367 /* 368 * With refcounts, an anon_vma can stay around longer than the 369 * process it belongs to. The root anon_vma needs to be pinned until 370 * this anon_vma is freed, because the lock lives in the root. 371 */ 372 get_anon_vma(anon_vma->root); 373 /* Mark this anon_vma as the one where our new (COWed) pages go. 
*/ 374 vma->anon_vma = anon_vma; 375 anon_vma_lock_write(anon_vma); 376 anon_vma_chain_link(vma, avc, anon_vma); 377 anon_vma->parent->degree++; 378 anon_vma_unlock_write(anon_vma); 379 380 return 0; 381 382 out_error_free_anon_vma: 383 put_anon_vma(anon_vma); 384 out_error: 385 unlink_anon_vmas(vma); 386 return -ENOMEM; 387 } 388 389 void unlink_anon_vmas(struct vm_area_struct *vma) 390 { 391 struct anon_vma_chain *avc, *next; 392 struct anon_vma *root = NULL; 393 394 /* 395 * Unlink each anon_vma chained to the VMA. This list is ordered 396 * from newest to oldest, ensuring the root anon_vma gets freed last. 397 */ 398 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 399 struct anon_vma *anon_vma = avc->anon_vma; 400 401 root = lock_anon_vma_root(root, anon_vma); 402 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); 403 404 /* 405 * Leave empty anon_vmas on the list - we'll need 406 * to free them outside the lock. 407 */ 408 if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) { 409 anon_vma->parent->degree--; 410 continue; 411 } 412 413 list_del(&avc->same_vma); 414 anon_vma_chain_free(avc); 415 } 416 if (vma->anon_vma) { 417 vma->anon_vma->degree--; 418 419 /* 420 * vma would still be needed after unlink, and anon_vma will be prepared 421 * when handle fault. 422 */ 423 vma->anon_vma = NULL; 424 } 425 unlock_anon_vma_root(root); 426 427 /* 428 * Iterate the list once more, it now only contains empty and unlinked 429 * anon_vmas, destroy them. Could not do before due to __put_anon_vma() 430 * needing to write-acquire the anon_vma->root->rwsem. 431 */ 432 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 433 struct anon_vma *anon_vma = avc->anon_vma; 434 435 VM_WARN_ON(anon_vma->degree); 436 put_anon_vma(anon_vma); 437 438 list_del(&avc->same_vma); 439 anon_vma_chain_free(avc); 440 } 441 } 442 443 static void anon_vma_ctor(void *data) 444 { 445 struct anon_vma *anon_vma = data; 446 447 init_rwsem(&anon_vma->rwsem); 448 atomic_set(&anon_vma->refcount, 0); 449 anon_vma->rb_root = RB_ROOT_CACHED; 450 } 451 452 void __init anon_vma_init(void) 453 { 454 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), 455 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT, 456 anon_vma_ctor); 457 anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, 458 SLAB_PANIC|SLAB_ACCOUNT); 459 } 460 461 /* 462 * Getting a lock on a stable anon_vma from a page off the LRU is tricky! 463 * 464 * Since there is no serialization what so ever against page_remove_rmap() 465 * the best this function can do is return a refcount increased anon_vma 466 * that might have been relevant to this page. 467 * 468 * The page might have been remapped to a different anon_vma or the anon_vma 469 * returned may already be freed (and even reused). 470 * 471 * In case it was remapped to a different anon_vma, the new anon_vma will be a 472 * child of the old anon_vma, and the anon_vma lifetime rules will therefore 473 * ensure that any anon_vma obtained from the page will still be valid for as 474 * long as we observe page_mapped() [ hence all those page_mapped() tests ]. 475 * 476 * All users of this function must be very careful when walking the anon_vma 477 * chain and verify that the page in question is indeed mapped in it 478 * [ something equivalent to page_mapped_in_vma() ]. 
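 *
 * Illustrative caller pattern (an assumption, not lifted from a particular
 * call site): migration-style users typically pin the anon_vma and then take
 * its read lock for the walk, e.g.
 *
 *	anon_vma = page_get_anon_vma(page);
 *	if (anon_vma) {
 *		anon_vma_lock_read(anon_vma);
 *		... walk anon_vma->rb_root while page_mapped(page) ...
 *		anon_vma_unlock_read(anon_vma);
 *		put_anon_vma(anon_vma);
 *	}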
479 * 480 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from 481 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid 482 * if there is a mapcount, we can dereference the anon_vma after observing 483 * those. 484 */ 485 struct anon_vma *page_get_anon_vma(struct page *page) 486 { 487 struct anon_vma *anon_vma = NULL; 488 unsigned long anon_mapping; 489 490 rcu_read_lock(); 491 anon_mapping = (unsigned long)READ_ONCE(page->mapping); 492 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) 493 goto out; 494 if (!page_mapped(page)) 495 goto out; 496 497 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); 498 if (!atomic_inc_not_zero(&anon_vma->refcount)) { 499 anon_vma = NULL; 500 goto out; 501 } 502 503 /* 504 * If this page is still mapped, then its anon_vma cannot have been 505 * freed. But if it has been unmapped, we have no security against the 506 * anon_vma structure being freed and reused (for another anon_vma: 507 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero() 508 * above cannot corrupt). 509 */ 510 if (!page_mapped(page)) { 511 rcu_read_unlock(); 512 put_anon_vma(anon_vma); 513 return NULL; 514 } 515 out: 516 rcu_read_unlock(); 517 518 return anon_vma; 519 } 520 521 /* 522 * Similar to page_get_anon_vma() except it locks the anon_vma. 523 * 524 * Its a little more complex as it tries to keep the fast path to a single 525 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a 526 * reference like with page_get_anon_vma() and then block on the mutex. 527 */ 528 struct anon_vma *page_lock_anon_vma_read(struct page *page) 529 { 530 struct anon_vma *anon_vma = NULL; 531 struct anon_vma *root_anon_vma; 532 unsigned long anon_mapping; 533 534 rcu_read_lock(); 535 anon_mapping = (unsigned long)READ_ONCE(page->mapping); 536 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) 537 goto out; 538 if (!page_mapped(page)) 539 goto out; 540 541 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); 542 root_anon_vma = READ_ONCE(anon_vma->root); 543 if (down_read_trylock(&root_anon_vma->rwsem)) { 544 /* 545 * If the page is still mapped, then this anon_vma is still 546 * its anon_vma, and holding the mutex ensures that it will 547 * not go away, see anon_vma_free(). 548 */ 549 if (!page_mapped(page)) { 550 up_read(&root_anon_vma->rwsem); 551 anon_vma = NULL; 552 } 553 goto out; 554 } 555 556 /* trylock failed, we got to sleep */ 557 if (!atomic_inc_not_zero(&anon_vma->refcount)) { 558 anon_vma = NULL; 559 goto out; 560 } 561 562 if (!page_mapped(page)) { 563 rcu_read_unlock(); 564 put_anon_vma(anon_vma); 565 return NULL; 566 } 567 568 /* we pinned the anon_vma, its safe to sleep */ 569 rcu_read_unlock(); 570 anon_vma_lock_read(anon_vma); 571 572 if (atomic_dec_and_test(&anon_vma->refcount)) { 573 /* 574 * Oops, we held the last refcount, release the lock 575 * and bail -- can't simply use put_anon_vma() because 576 * we'll deadlock on the anon_vma_lock_write() recursion. 577 */ 578 anon_vma_unlock_read(anon_vma); 579 __put_anon_vma(anon_vma); 580 anon_vma = NULL; 581 } 582 583 return anon_vma; 584 585 out: 586 rcu_read_unlock(); 587 return anon_vma; 588 } 589 590 void page_unlock_anon_vma_read(struct anon_vma *anon_vma) 591 { 592 anon_vma_unlock_read(anon_vma); 593 } 594 595 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH 596 /* 597 * Flush TLB entries for recently unmapped pages from remote CPUs. 
It is 598 * important if a PTE was dirty when it was unmapped that it's flushed 599 * before any IO is initiated on the page to prevent lost writes. Similarly, 600 * it must be flushed before freeing to prevent data leakage. 601 */ 602 void try_to_unmap_flush(void) 603 { 604 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; 605 606 if (!tlb_ubc->flush_required) 607 return; 608 609 arch_tlbbatch_flush(&tlb_ubc->arch); 610 tlb_ubc->flush_required = false; 611 tlb_ubc->writable = false; 612 } 613 614 /* Flush iff there are potentially writable TLB entries that can race with IO */ 615 void try_to_unmap_flush_dirty(void) 616 { 617 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; 618 619 if (tlb_ubc->writable) 620 try_to_unmap_flush(); 621 } 622 623 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable) 624 { 625 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; 626 627 arch_tlbbatch_add_mm(&tlb_ubc->arch, mm); 628 tlb_ubc->flush_required = true; 629 630 /* 631 * Ensure compiler does not re-order the setting of tlb_flush_batched 632 * before the PTE is cleared. 633 */ 634 barrier(); 635 mm->tlb_flush_batched = true; 636 637 /* 638 * If the PTE was dirty then it's best to assume it's writable. The 639 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() 640 * before the page is queued for IO. 641 */ 642 if (writable) 643 tlb_ubc->writable = true; 644 } 645 646 /* 647 * Returns true if the TLB flush should be deferred to the end of a batch of 648 * unmap operations to reduce IPIs. 649 */ 650 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) 651 { 652 bool should_defer = false; 653 654 if (!(flags & TTU_BATCH_FLUSH)) 655 return false; 656 657 /* If remote CPUs need to be flushed then defer the flush to batch it */ 658 if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids) 659 should_defer = true; 660 put_cpu(); 661 662 return should_defer; 663 } 664 665 /* 666 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to 667 * releasing the PTL if TLB flushes are batched. It's possible for a parallel 668 * operation such as mprotect or munmap to race between reclaim unmapping 669 * the page and flushing the page. If this race occurs, it potentially allows 670 * access to data via a stale TLB entry. Tracking all mm's that have TLB 671 * batching in flight would be expensive during reclaim so instead track 672 * whether TLB batching occurred in the past and if so then do a flush here 673 * if required. This will cost one additional flush per reclaim cycle paid 674 * by the first operation at risk such as mprotect and munmap. 675 * 676 * This must be called under the PTL so that an access to tlb_flush_batched 677 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise 678 * via the PTL. 679 */ 680 void flush_tlb_batched_pending(struct mm_struct *mm) 681 { 682 if (data_race(mm->tlb_flush_batched)) { 683 flush_tlb_mm(mm); 684 685 /* 686 * Do not allow the compiler to re-order the clearing of 687 * tlb_flush_batched before the tlb is flushed. 688 */ 689 barrier(); 690 mm->tlb_flush_batched = false; 691 } 692 } 693 #else 694 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable) 695 { 696 } 697 698 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) 699 { 700 return false; 701 } 702 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ 703 704 /* 705 * At what user virtual address is page expected in vma? 
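 * For a linearly mapped page this is essentially (an illustrative sketch;
 * vma_address() in mm/internal.h is the authoritative version and also
 * handles compound pages):
 *
 *	vma->vm_start + ((page_to_pgoff(page) - vma->vm_pgoff) << PAGE_SHIFT)
 *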
706 * Caller should check the page is actually part of the vma. 707 */ 708 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 709 { 710 if (PageAnon(page)) { 711 struct anon_vma *page__anon_vma = page_anon_vma(page); 712 /* 713 * Note: swapoff's unuse_vma() is more efficient with this 714 * check, and needs it to match anon_vma when KSM is active. 715 */ 716 if (!vma->anon_vma || !page__anon_vma || 717 vma->anon_vma->root != page__anon_vma->root) 718 return -EFAULT; 719 } else if (!vma->vm_file) { 720 return -EFAULT; 721 } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) { 722 return -EFAULT; 723 } 724 725 return vma_address(page, vma); 726 } 727 728 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) 729 { 730 pgd_t *pgd; 731 p4d_t *p4d; 732 pud_t *pud; 733 pmd_t *pmd = NULL; 734 pmd_t pmde; 735 736 pgd = pgd_offset(mm, address); 737 if (!pgd_present(*pgd)) 738 goto out; 739 740 p4d = p4d_offset(pgd, address); 741 if (!p4d_present(*p4d)) 742 goto out; 743 744 pud = pud_offset(p4d, address); 745 if (!pud_present(*pud)) 746 goto out; 747 748 pmd = pmd_offset(pud, address); 749 /* 750 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at() 751 * without holding anon_vma lock for write. So when looking for a 752 * genuine pmde (in which to find pte), test present and !THP together. 753 */ 754 pmde = *pmd; 755 barrier(); 756 if (!pmd_present(pmde) || pmd_trans_huge(pmde)) 757 pmd = NULL; 758 out: 759 return pmd; 760 } 761 762 struct page_referenced_arg { 763 int mapcount; 764 int referenced; 765 unsigned long vm_flags; 766 struct mem_cgroup *memcg; 767 }; 768 /* 769 * arg: page_referenced_arg will be passed 770 */ 771 static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, 772 unsigned long address, void *arg) 773 { 774 struct page_referenced_arg *pra = arg; 775 struct page_vma_mapped_walk pvmw = { 776 .page = page, 777 .vma = vma, 778 .address = address, 779 }; 780 int referenced = 0; 781 782 while (page_vma_mapped_walk(&pvmw)) { 783 address = pvmw.address; 784 785 if (vma->vm_flags & VM_LOCKED) { 786 page_vma_mapped_walk_done(&pvmw); 787 pra->vm_flags |= VM_LOCKED; 788 return false; /* To break the loop */ 789 } 790 791 if (pvmw.pte) { 792 if (ptep_clear_flush_young_notify(vma, address, 793 pvmw.pte)) { 794 /* 795 * Don't treat a reference through 796 * a sequentially read mapping as such. 797 * If the page has been used in another mapping, 798 * we will catch it; if this other mapping is 799 * already gone, the unmap path will have set 800 * PG_referenced or activated the page. 801 */ 802 if (likely(!(vma->vm_flags & VM_SEQ_READ))) 803 referenced++; 804 } 805 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { 806 if (pmdp_clear_flush_young_notify(vma, address, 807 pvmw.pmd)) 808 referenced++; 809 } else { 810 /* unexpected pmd-mapped page? 
*/ 811 WARN_ON_ONCE(1); 812 } 813 814 pra->mapcount--; 815 } 816 817 if (referenced) 818 clear_page_idle(page); 819 if (test_and_clear_page_young(page)) 820 referenced++; 821 822 if (referenced) { 823 pra->referenced++; 824 pra->vm_flags |= vma->vm_flags; 825 } 826 827 if (!pra->mapcount) 828 return false; /* To break the loop */ 829 830 return true; 831 } 832 833 static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg) 834 { 835 struct page_referenced_arg *pra = arg; 836 struct mem_cgroup *memcg = pra->memcg; 837 838 if (!mm_match_cgroup(vma->vm_mm, memcg)) 839 return true; 840 841 return false; 842 } 843 844 /** 845 * page_referenced - test if the page was referenced 846 * @page: the page to test 847 * @is_locked: caller holds lock on the page 848 * @memcg: target memory cgroup 849 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page 850 * 851 * Quick test_and_clear_referenced for all mappings to a page, 852 * returns the number of ptes which referenced the page. 853 */ 854 int page_referenced(struct page *page, 855 int is_locked, 856 struct mem_cgroup *memcg, 857 unsigned long *vm_flags) 858 { 859 int we_locked = 0; 860 struct page_referenced_arg pra = { 861 .mapcount = total_mapcount(page), 862 .memcg = memcg, 863 }; 864 struct rmap_walk_control rwc = { 865 .rmap_one = page_referenced_one, 866 .arg = (void *)&pra, 867 .anon_lock = page_lock_anon_vma_read, 868 }; 869 870 *vm_flags = 0; 871 if (!pra.mapcount) 872 return 0; 873 874 if (!page_rmapping(page)) 875 return 0; 876 877 if (!is_locked && (!PageAnon(page) || PageKsm(page))) { 878 we_locked = trylock_page(page); 879 if (!we_locked) 880 return 1; 881 } 882 883 /* 884 * If we are reclaiming on behalf of a cgroup, skip 885 * counting on behalf of references from different 886 * cgroups 887 */ 888 if (memcg) { 889 rwc.invalid_vma = invalid_page_referenced_vma; 890 } 891 892 rmap_walk(page, &rwc); 893 *vm_flags = pra.vm_flags; 894 895 if (we_locked) 896 unlock_page(page); 897 898 return pra.referenced; 899 } 900 901 static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, 902 unsigned long address, void *arg) 903 { 904 struct page_vma_mapped_walk pvmw = { 905 .page = page, 906 .vma = vma, 907 .address = address, 908 .flags = PVMW_SYNC, 909 }; 910 struct mmu_notifier_range range; 911 int *cleaned = arg; 912 913 /* 914 * We have to assume the worse case ie pmd for invalidation. Note that 915 * the page can not be free from this function. 
916 */ 917 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 918 0, vma, vma->vm_mm, address, 919 vma_address_end(page, vma)); 920 mmu_notifier_invalidate_range_start(&range); 921 922 while (page_vma_mapped_walk(&pvmw)) { 923 int ret = 0; 924 925 address = pvmw.address; 926 if (pvmw.pte) { 927 pte_t entry; 928 pte_t *pte = pvmw.pte; 929 930 if (!pte_dirty(*pte) && !pte_write(*pte)) 931 continue; 932 933 flush_cache_page(vma, address, pte_pfn(*pte)); 934 entry = ptep_clear_flush(vma, address, pte); 935 entry = pte_wrprotect(entry); 936 entry = pte_mkclean(entry); 937 set_pte_at(vma->vm_mm, address, pte, entry); 938 ret = 1; 939 } else { 940 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 941 pmd_t *pmd = pvmw.pmd; 942 pmd_t entry; 943 944 if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) 945 continue; 946 947 flush_cache_page(vma, address, page_to_pfn(page)); 948 entry = pmdp_invalidate(vma, address, pmd); 949 entry = pmd_wrprotect(entry); 950 entry = pmd_mkclean(entry); 951 set_pmd_at(vma->vm_mm, address, pmd, entry); 952 ret = 1; 953 #else 954 /* unexpected pmd-mapped page? */ 955 WARN_ON_ONCE(1); 956 #endif 957 } 958 959 /* 960 * No need to call mmu_notifier_invalidate_range() as we are 961 * downgrading page table protection not changing it to point 962 * to a new page. 963 * 964 * See Documentation/vm/mmu_notifier.rst 965 */ 966 if (ret) 967 (*cleaned)++; 968 } 969 970 mmu_notifier_invalidate_range_end(&range); 971 972 return true; 973 } 974 975 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) 976 { 977 if (vma->vm_flags & VM_SHARED) 978 return false; 979 980 return true; 981 } 982 983 int page_mkclean(struct page *page) 984 { 985 int cleaned = 0; 986 struct address_space *mapping; 987 struct rmap_walk_control rwc = { 988 .arg = (void *)&cleaned, 989 .rmap_one = page_mkclean_one, 990 .invalid_vma = invalid_mkclean_vma, 991 }; 992 993 BUG_ON(!PageLocked(page)); 994 995 if (!page_mapped(page)) 996 return 0; 997 998 mapping = page_mapping(page); 999 if (!mapping) 1000 return 0; 1001 1002 rmap_walk(page, &rwc); 1003 1004 return cleaned; 1005 } 1006 EXPORT_SYMBOL_GPL(page_mkclean); 1007 1008 /** 1009 * page_move_anon_rmap - move a page to our anon_vma 1010 * @page: the page to move to our anon_vma 1011 * @vma: the vma the page belongs to 1012 * 1013 * When a page belongs exclusively to one process after a COW event, 1014 * that page can be moved into the anon_vma that belongs to just that 1015 * process, so the rmap code will not search the parent or sibling 1016 * processes. 1017 */ 1018 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) 1019 { 1020 struct anon_vma *anon_vma = vma->anon_vma; 1021 1022 page = compound_head(page); 1023 1024 VM_BUG_ON_PAGE(!PageLocked(page), page); 1025 VM_BUG_ON_VMA(!anon_vma, vma); 1026 1027 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 1028 /* 1029 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written 1030 * simultaneously, so a concurrent reader (eg page_referenced()'s 1031 * PageAnon()) will not see one without the other. 1032 */ 1033 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); 1034 } 1035 1036 /** 1037 * __page_set_anon_rmap - set up new anonymous rmap 1038 * @page: Page or Hugepage to add to rmap 1039 * @vma: VM area to add page to. 
1040 * @address: User virtual address of the mapping 1041 * @exclusive: the page is exclusively owned by the current process 1042 */ 1043 static void __page_set_anon_rmap(struct page *page, 1044 struct vm_area_struct *vma, unsigned long address, int exclusive) 1045 { 1046 struct anon_vma *anon_vma = vma->anon_vma; 1047 1048 BUG_ON(!anon_vma); 1049 1050 if (PageAnon(page)) 1051 return; 1052 1053 /* 1054 * If the page isn't exclusively mapped into this vma, 1055 * we must use the _oldest_ possible anon_vma for the 1056 * page mapping! 1057 */ 1058 if (!exclusive) 1059 anon_vma = anon_vma->root; 1060 1061 /* 1062 * page_idle does a lockless/optimistic rmap scan on page->mapping. 1063 * Make sure the compiler doesn't split the stores of anon_vma and 1064 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code 1065 * could mistake the mapping for a struct address_space and crash. 1066 */ 1067 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 1068 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); 1069 page->index = linear_page_index(vma, address); 1070 } 1071 1072 /** 1073 * __page_check_anon_rmap - sanity check anonymous rmap addition 1074 * @page: the page to add the mapping to 1075 * @vma: the vm area in which the mapping is added 1076 * @address: the user virtual address mapped 1077 */ 1078 static void __page_check_anon_rmap(struct page *page, 1079 struct vm_area_struct *vma, unsigned long address) 1080 { 1081 /* 1082 * The page's anon-rmap details (mapping and index) are guaranteed to 1083 * be set up correctly at this point. 1084 * 1085 * We have exclusion against page_add_anon_rmap because the caller 1086 * always holds the page locked. 1087 * 1088 * We have exclusion against page_add_new_anon_rmap because those pages 1089 * are initially only visible via the pagetables, and the pte is locked 1090 * over the call to page_add_new_anon_rmap. 1091 */ 1092 VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page); 1093 VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), 1094 page); 1095 } 1096 1097 /** 1098 * page_add_anon_rmap - add pte mapping to an anonymous page 1099 * @page: the page to add the mapping to 1100 * @vma: the vm area in which the mapping is added 1101 * @address: the user virtual address mapped 1102 * @compound: charge the page as compound or small page 1103 * 1104 * The caller needs to hold the pte lock, and the page must be locked in 1105 * the anon_vma case: to serialize mapping,index checking after setting, 1106 * and to ensure that PageAnon is not being upgraded racily to PageKsm 1107 * (but PageKsm is never downgraded to PageAnon). 1108 */ 1109 void page_add_anon_rmap(struct page *page, 1110 struct vm_area_struct *vma, unsigned long address, bool compound) 1111 { 1112 do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0); 1113 } 1114 1115 /* 1116 * Special version of the above for do_swap_page, which often runs 1117 * into pages that are exclusively owned by the current process. 1118 * Everybody else should continue to use page_add_anon_rmap above. 
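 *
 * Illustrative call (an assumption about the swap-in path, not quoted from
 * it): with the pte lock and the page lock held, do_swap_page() does roughly
 *
 *	do_page_add_anon_rmap(page, vma, vmf->address,
 *			      exclusive ? RMAP_EXCLUSIVE : 0);
 *
 * where RMAP_EXCLUSIVE lets __page_set_anon_rmap() keep the vma's own
 * anon_vma rather than falling back to the root.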
1119 */ 1120 void do_page_add_anon_rmap(struct page *page, 1121 struct vm_area_struct *vma, unsigned long address, int flags) 1122 { 1123 bool compound = flags & RMAP_COMPOUND; 1124 bool first; 1125 1126 if (unlikely(PageKsm(page))) 1127 lock_page_memcg(page); 1128 else 1129 VM_BUG_ON_PAGE(!PageLocked(page), page); 1130 1131 if (compound) { 1132 atomic_t *mapcount; 1133 VM_BUG_ON_PAGE(!PageLocked(page), page); 1134 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 1135 mapcount = compound_mapcount_ptr(page); 1136 first = atomic_inc_and_test(mapcount); 1137 } else { 1138 first = atomic_inc_and_test(&page->_mapcount); 1139 } 1140 1141 if (first) { 1142 int nr = compound ? thp_nr_pages(page) : 1; 1143 /* 1144 * We use the irq-unsafe __{inc|mod}_zone_page_stat because 1145 * these counters are not modified in interrupt context, and 1146 * pte lock(a spinlock) is held, which implies preemption 1147 * disabled. 1148 */ 1149 if (compound) 1150 __mod_lruvec_page_state(page, NR_ANON_THPS, nr); 1151 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); 1152 } 1153 1154 if (unlikely(PageKsm(page))) { 1155 unlock_page_memcg(page); 1156 return; 1157 } 1158 1159 /* address might be in next vma when migration races vma_adjust */ 1160 if (first) 1161 __page_set_anon_rmap(page, vma, address, 1162 flags & RMAP_EXCLUSIVE); 1163 else 1164 __page_check_anon_rmap(page, vma, address); 1165 } 1166 1167 /** 1168 * page_add_new_anon_rmap - add pte mapping to a new anonymous page 1169 * @page: the page to add the mapping to 1170 * @vma: the vm area in which the mapping is added 1171 * @address: the user virtual address mapped 1172 * @compound: charge the page as compound or small page 1173 * 1174 * Same as page_add_anon_rmap but must only be called on *new* pages. 1175 * This means the inc-and-test can be bypassed. 1176 * Page does not have to be locked. 1177 */ 1178 void page_add_new_anon_rmap(struct page *page, 1179 struct vm_area_struct *vma, unsigned long address, bool compound) 1180 { 1181 int nr = compound ? thp_nr_pages(page) : 1; 1182 1183 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); 1184 __SetPageSwapBacked(page); 1185 if (compound) { 1186 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 1187 /* increment count (starts at -1) */ 1188 atomic_set(compound_mapcount_ptr(page), 0); 1189 if (hpage_pincount_available(page)) 1190 atomic_set(compound_pincount_ptr(page), 0); 1191 1192 __mod_lruvec_page_state(page, NR_ANON_THPS, nr); 1193 } else { 1194 /* Anon THP always mapped first with PMD */ 1195 VM_BUG_ON_PAGE(PageTransCompound(page), page); 1196 /* increment count (starts at -1) */ 1197 atomic_set(&page->_mapcount, 0); 1198 } 1199 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); 1200 __page_set_anon_rmap(page, vma, address, 1); 1201 } 1202 1203 /** 1204 * page_add_file_rmap - add pte mapping to a file page 1205 * @page: the page to add the mapping to 1206 * @compound: charge the page as compound or small page 1207 * 1208 * The caller needs to hold the pte lock. 
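 *
 * Illustrative caller (an assumption about the file fault path, not quoted
 * from it): the rmap and the pte are installed together under the pte lock,
 * roughly
 *
 *	page_add_file_rmap(page, false);
 *	set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, vma->vm_page_prot));
 *
 * with compound == true used only when mapping a THP with a single PMD.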
1209 */ 1210 void page_add_file_rmap(struct page *page, bool compound) 1211 { 1212 int i, nr = 1; 1213 1214 VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); 1215 lock_page_memcg(page); 1216 if (compound && PageTransHuge(page)) { 1217 int nr_pages = thp_nr_pages(page); 1218 1219 for (i = 0, nr = 0; i < nr_pages; i++) { 1220 if (atomic_inc_and_test(&page[i]._mapcount)) 1221 nr++; 1222 } 1223 if (!atomic_inc_and_test(compound_mapcount_ptr(page))) 1224 goto out; 1225 if (PageSwapBacked(page)) 1226 __mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED, 1227 nr_pages); 1228 else 1229 __mod_lruvec_page_state(page, NR_FILE_PMDMAPPED, 1230 nr_pages); 1231 } else { 1232 if (PageTransCompound(page) && page_mapping(page)) { 1233 struct page *head = compound_head(page); 1234 1235 VM_WARN_ON_ONCE(!PageLocked(page)); 1236 1237 SetPageDoubleMap(head); 1238 if (PageMlocked(page)) 1239 clear_page_mlock(head); 1240 } 1241 if (!atomic_inc_and_test(&page->_mapcount)) 1242 goto out; 1243 } 1244 __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr); 1245 out: 1246 unlock_page_memcg(page); 1247 } 1248 1249 static void page_remove_file_rmap(struct page *page, bool compound) 1250 { 1251 int i, nr = 1; 1252 1253 VM_BUG_ON_PAGE(compound && !PageHead(page), page); 1254 1255 /* Hugepages are not counted in NR_FILE_MAPPED for now. */ 1256 if (unlikely(PageHuge(page))) { 1257 /* hugetlb pages are always mapped with pmds */ 1258 atomic_dec(compound_mapcount_ptr(page)); 1259 return; 1260 } 1261 1262 /* page still mapped by someone else? */ 1263 if (compound && PageTransHuge(page)) { 1264 int nr_pages = thp_nr_pages(page); 1265 1266 for (i = 0, nr = 0; i < nr_pages; i++) { 1267 if (atomic_add_negative(-1, &page[i]._mapcount)) 1268 nr++; 1269 } 1270 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) 1271 return; 1272 if (PageSwapBacked(page)) 1273 __mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED, 1274 -nr_pages); 1275 else 1276 __mod_lruvec_page_state(page, NR_FILE_PMDMAPPED, 1277 -nr_pages); 1278 } else { 1279 if (!atomic_add_negative(-1, &page->_mapcount)) 1280 return; 1281 } 1282 1283 /* 1284 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because 1285 * these counters are not modified in interrupt context, and 1286 * pte lock(a spinlock) is held, which implies preemption disabled. 1287 */ 1288 __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr); 1289 1290 if (unlikely(PageMlocked(page))) 1291 clear_page_mlock(page); 1292 } 1293 1294 static void page_remove_anon_compound_rmap(struct page *page) 1295 { 1296 int i, nr; 1297 1298 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) 1299 return; 1300 1301 /* Hugepages are not counted in NR_ANON_PAGES for now. */ 1302 if (unlikely(PageHuge(page))) 1303 return; 1304 1305 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 1306 return; 1307 1308 __mod_lruvec_page_state(page, NR_ANON_THPS, -thp_nr_pages(page)); 1309 1310 if (TestClearPageDoubleMap(page)) { 1311 /* 1312 * Subpages can be mapped with PTEs too. Check how many of 1313 * them are still mapped. 1314 */ 1315 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { 1316 if (atomic_add_negative(-1, &page[i]._mapcount)) 1317 nr++; 1318 } 1319 1320 /* 1321 * Queue the page for deferred split if at least one small 1322 * page of the compound page is unmapped, but at least one 1323 * small page is still mapped. 
1324 */ 1325 if (nr && nr < thp_nr_pages(page)) 1326 deferred_split_huge_page(page); 1327 } else { 1328 nr = thp_nr_pages(page); 1329 } 1330 1331 if (unlikely(PageMlocked(page))) 1332 clear_page_mlock(page); 1333 1334 if (nr) 1335 __mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr); 1336 } 1337 1338 /** 1339 * page_remove_rmap - take down pte mapping from a page 1340 * @page: page to remove mapping from 1341 * @compound: uncharge the page as compound or small page 1342 * 1343 * The caller needs to hold the pte lock. 1344 */ 1345 void page_remove_rmap(struct page *page, bool compound) 1346 { 1347 lock_page_memcg(page); 1348 1349 if (!PageAnon(page)) { 1350 page_remove_file_rmap(page, compound); 1351 goto out; 1352 } 1353 1354 if (compound) { 1355 page_remove_anon_compound_rmap(page); 1356 goto out; 1357 } 1358 1359 /* page still mapped by someone else? */ 1360 if (!atomic_add_negative(-1, &page->_mapcount)) 1361 goto out; 1362 1363 /* 1364 * We use the irq-unsafe __{inc|mod}_zone_page_stat because 1365 * these counters are not modified in interrupt context, and 1366 * pte lock(a spinlock) is held, which implies preemption disabled. 1367 */ 1368 __dec_lruvec_page_state(page, NR_ANON_MAPPED); 1369 1370 if (unlikely(PageMlocked(page))) 1371 clear_page_mlock(page); 1372 1373 if (PageTransCompound(page)) 1374 deferred_split_huge_page(compound_head(page)); 1375 1376 /* 1377 * It would be tidy to reset the PageAnon mapping here, 1378 * but that might overwrite a racing page_add_anon_rmap 1379 * which increments mapcount after us but sets mapping 1380 * before us: so leave the reset to free_unref_page, 1381 * and remember that it's only reliable while mapped. 1382 * Leaving it set also helps swapoff to reinstate ptes 1383 * faster for those pages still in swapcache. 1384 */ 1385 out: 1386 unlock_page_memcg(page); 1387 } 1388 1389 /* 1390 * @arg: enum ttu_flags will be passed to this argument 1391 */ 1392 static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, 1393 unsigned long address, void *arg) 1394 { 1395 struct mm_struct *mm = vma->vm_mm; 1396 struct page_vma_mapped_walk pvmw = { 1397 .page = page, 1398 .vma = vma, 1399 .address = address, 1400 }; 1401 pte_t pteval; 1402 struct page *subpage; 1403 bool ret = true; 1404 struct mmu_notifier_range range; 1405 enum ttu_flags flags = (enum ttu_flags)(long)arg; 1406 1407 /* 1408 * When racing against e.g. zap_pte_range() on another cpu, 1409 * in between its ptep_get_and_clear_full() and page_remove_rmap(), 1410 * try_to_unmap() may return before page_mapped() has become false, 1411 * if page table locking is skipped: use TTU_SYNC to wait for that. 1412 */ 1413 if (flags & TTU_SYNC) 1414 pvmw.flags = PVMW_SYNC; 1415 1416 if (flags & TTU_SPLIT_HUGE_PMD) 1417 split_huge_pmd_address(vma, address, false, page); 1418 1419 /* 1420 * For THP, we have to assume the worse case ie pmd for invalidation. 1421 * For hugetlb, it could be much worse if we need to do pud 1422 * invalidation in the case of pmd sharing. 1423 * 1424 * Note that the page can not be free in this function as call of 1425 * try_to_unmap() must hold a reference on the page. 1426 */ 1427 range.end = PageKsm(page) ? 1428 address + PAGE_SIZE : vma_address_end(page, vma); 1429 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 1430 address, range.end); 1431 if (PageHuge(page)) { 1432 /* 1433 * If sharing is possible, start and end will be adjusted 1434 * accordingly. 
1435 */ 1436 adjust_range_if_pmd_sharing_possible(vma, &range.start, 1437 &range.end); 1438 } 1439 mmu_notifier_invalidate_range_start(&range); 1440 1441 while (page_vma_mapped_walk(&pvmw)) { 1442 /* 1443 * If the page is mlock()d, we cannot swap it out. 1444 */ 1445 if (!(flags & TTU_IGNORE_MLOCK) && 1446 (vma->vm_flags & VM_LOCKED)) { 1447 /* 1448 * PTE-mapped THP are never marked as mlocked: so do 1449 * not set it on a DoubleMap THP, nor on an Anon THP 1450 * (which may still be PTE-mapped after DoubleMap was 1451 * cleared). But stop unmapping even in those cases. 1452 */ 1453 if (!PageTransCompound(page) || (PageHead(page) && 1454 !PageDoubleMap(page) && !PageAnon(page))) 1455 mlock_vma_page(page); 1456 page_vma_mapped_walk_done(&pvmw); 1457 ret = false; 1458 break; 1459 } 1460 1461 /* Unexpected PMD-mapped THP? */ 1462 VM_BUG_ON_PAGE(!pvmw.pte, page); 1463 1464 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); 1465 address = pvmw.address; 1466 1467 if (PageHuge(page) && !PageAnon(page)) { 1468 /* 1469 * To call huge_pmd_unshare, i_mmap_rwsem must be 1470 * held in write mode. Caller needs to explicitly 1471 * do this outside rmap routines. 1472 */ 1473 VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 1474 if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) { 1475 /* 1476 * huge_pmd_unshare unmapped an entire PMD 1477 * page. There is no way of knowing exactly 1478 * which PMDs may be cached for this mm, so 1479 * we must flush them all. start/end were 1480 * already adjusted above to cover this range. 1481 */ 1482 flush_cache_range(vma, range.start, range.end); 1483 flush_tlb_range(vma, range.start, range.end); 1484 mmu_notifier_invalidate_range(mm, range.start, 1485 range.end); 1486 1487 /* 1488 * The ref count of the PMD page was dropped 1489 * which is part of the way map counting 1490 * is done for shared PMDs. Return 'true' 1491 * here. When there is no other sharing, 1492 * huge_pmd_unshare returns false and we will 1493 * unmap the actual page and drop map count 1494 * to zero. 1495 */ 1496 page_vma_mapped_walk_done(&pvmw); 1497 break; 1498 } 1499 } 1500 1501 /* Nuke the page table entry. */ 1502 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 1503 if (should_defer_flush(mm, flags)) { 1504 /* 1505 * We clear the PTE but do not flush so potentially 1506 * a remote CPU could still be writing to the page. 1507 * If the entry was previously clean then the 1508 * architecture must guarantee that a clear->dirty 1509 * transition on a cached TLB entry is written through 1510 * and traps if the PTE is unmapped. 1511 */ 1512 pteval = ptep_get_and_clear(mm, address, pvmw.pte); 1513 1514 set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); 1515 } else { 1516 pteval = ptep_clear_flush(vma, address, pvmw.pte); 1517 } 1518 1519 /* Move the dirty bit to the page. Now the pte is gone. */ 1520 if (pte_dirty(pteval)) 1521 set_page_dirty(page); 1522 1523 /* Update high watermark before we lower rss */ 1524 update_hiwater_rss(mm); 1525 1526 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { 1527 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 1528 if (PageHuge(page)) { 1529 hugetlb_count_sub(compound_nr(page), mm); 1530 set_huge_swap_pte_at(mm, address, 1531 pvmw.pte, pteval, 1532 vma_mmu_pagesize(vma)); 1533 } else { 1534 dec_mm_counter(mm, mm_counter(page)); 1535 set_pte_at(mm, address, pvmw.pte, pteval); 1536 } 1537 1538 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 1539 /* 1540 * The guest indicated that the page content is of no 1541 * interest anymore. 
Simply discard the pte, vmscan 1542 * will take care of the rest. 1543 * A future reference will then fault in a new zero 1544 * page. When userfaultfd is active, we must not drop 1545 * this page though, as its main user (postcopy 1546 * migration) will not expect userfaults on already 1547 * copied pages. 1548 */ 1549 dec_mm_counter(mm, mm_counter(page)); 1550 /* We have to invalidate as we cleared the pte */ 1551 mmu_notifier_invalidate_range(mm, address, 1552 address + PAGE_SIZE); 1553 } else if (PageAnon(page)) { 1554 swp_entry_t entry = { .val = page_private(subpage) }; 1555 pte_t swp_pte; 1556 /* 1557 * Store the swap location in the pte. 1558 * See handle_pte_fault() ... 1559 */ 1560 if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) { 1561 WARN_ON_ONCE(1); 1562 ret = false; 1563 /* We have to invalidate as we cleared the pte */ 1564 mmu_notifier_invalidate_range(mm, address, 1565 address + PAGE_SIZE); 1566 page_vma_mapped_walk_done(&pvmw); 1567 break; 1568 } 1569 1570 /* MADV_FREE page check */ 1571 if (!PageSwapBacked(page)) { 1572 if (!PageDirty(page)) { 1573 /* Invalidate as we cleared the pte */ 1574 mmu_notifier_invalidate_range(mm, 1575 address, address + PAGE_SIZE); 1576 dec_mm_counter(mm, MM_ANONPAGES); 1577 goto discard; 1578 } 1579 1580 /* 1581 * If the page was redirtied, it cannot be 1582 * discarded. Remap the page to page table. 1583 */ 1584 set_pte_at(mm, address, pvmw.pte, pteval); 1585 SetPageSwapBacked(page); 1586 ret = false; 1587 page_vma_mapped_walk_done(&pvmw); 1588 break; 1589 } 1590 1591 if (swap_duplicate(entry) < 0) { 1592 set_pte_at(mm, address, pvmw.pte, pteval); 1593 ret = false; 1594 page_vma_mapped_walk_done(&pvmw); 1595 break; 1596 } 1597 if (arch_unmap_one(mm, vma, address, pteval) < 0) { 1598 set_pte_at(mm, address, pvmw.pte, pteval); 1599 ret = false; 1600 page_vma_mapped_walk_done(&pvmw); 1601 break; 1602 } 1603 if (list_empty(&mm->mmlist)) { 1604 spin_lock(&mmlist_lock); 1605 if (list_empty(&mm->mmlist)) 1606 list_add(&mm->mmlist, &init_mm.mmlist); 1607 spin_unlock(&mmlist_lock); 1608 } 1609 dec_mm_counter(mm, MM_ANONPAGES); 1610 inc_mm_counter(mm, MM_SWAPENTS); 1611 swp_pte = swp_entry_to_pte(entry); 1612 if (pte_soft_dirty(pteval)) 1613 swp_pte = pte_swp_mksoft_dirty(swp_pte); 1614 if (pte_uffd_wp(pteval)) 1615 swp_pte = pte_swp_mkuffd_wp(swp_pte); 1616 set_pte_at(mm, address, pvmw.pte, swp_pte); 1617 /* Invalidate as we cleared the pte */ 1618 mmu_notifier_invalidate_range(mm, address, 1619 address + PAGE_SIZE); 1620 } else { 1621 /* 1622 * This is a locked file-backed page, thus it cannot 1623 * be removed from the page cache and replaced by a new 1624 * page before mmu_notifier_invalidate_range_end, so no 1625 * concurrent thread might update its page table to 1626 * point at new page while a device still is using this 1627 * page. 
1628 * 1629 * See Documentation/vm/mmu_notifier.rst 1630 */ 1631 dec_mm_counter(mm, mm_counter_file(page)); 1632 } 1633 discard: 1634 /* 1635 * No need to call mmu_notifier_invalidate_range() it has be 1636 * done above for all cases requiring it to happen under page 1637 * table lock before mmu_notifier_invalidate_range_end() 1638 * 1639 * See Documentation/vm/mmu_notifier.rst 1640 */ 1641 page_remove_rmap(subpage, PageHuge(page)); 1642 put_page(page); 1643 } 1644 1645 mmu_notifier_invalidate_range_end(&range); 1646 1647 return ret; 1648 } 1649 1650 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) 1651 { 1652 return vma_is_temporary_stack(vma); 1653 } 1654 1655 static int page_not_mapped(struct page *page) 1656 { 1657 return !page_mapped(page); 1658 } 1659 1660 /** 1661 * try_to_unmap - try to remove all page table mappings to a page 1662 * @page: the page to get unmapped 1663 * @flags: action and flags 1664 * 1665 * Tries to remove all the page table entries which are mapping this 1666 * page, used in the pageout path. Caller must hold the page lock. 1667 * 1668 * It is the caller's responsibility to check if the page is still 1669 * mapped when needed (use TTU_SYNC to prevent accounting races). 1670 */ 1671 void try_to_unmap(struct page *page, enum ttu_flags flags) 1672 { 1673 struct rmap_walk_control rwc = { 1674 .rmap_one = try_to_unmap_one, 1675 .arg = (void *)flags, 1676 .done = page_not_mapped, 1677 .anon_lock = page_lock_anon_vma_read, 1678 }; 1679 1680 if (flags & TTU_RMAP_LOCKED) 1681 rmap_walk_locked(page, &rwc); 1682 else 1683 rmap_walk(page, &rwc); 1684 } 1685 1686 /* 1687 * @arg: enum ttu_flags will be passed to this argument. 1688 * 1689 * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs 1690 * containing migration entries. 1691 */ 1692 static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma, 1693 unsigned long address, void *arg) 1694 { 1695 struct mm_struct *mm = vma->vm_mm; 1696 struct page_vma_mapped_walk pvmw = { 1697 .page = page, 1698 .vma = vma, 1699 .address = address, 1700 }; 1701 pte_t pteval; 1702 struct page *subpage; 1703 bool ret = true; 1704 struct mmu_notifier_range range; 1705 enum ttu_flags flags = (enum ttu_flags)(long)arg; 1706 1707 /* 1708 * When racing against e.g. zap_pte_range() on another cpu, 1709 * in between its ptep_get_and_clear_full() and page_remove_rmap(), 1710 * try_to_migrate() may return before page_mapped() has become false, 1711 * if page table locking is skipped: use TTU_SYNC to wait for that. 1712 */ 1713 if (flags & TTU_SYNC) 1714 pvmw.flags = PVMW_SYNC; 1715 1716 /* 1717 * unmap_page() in mm/huge_memory.c is the only user of migration with 1718 * TTU_SPLIT_HUGE_PMD and it wants to freeze. 1719 */ 1720 if (flags & TTU_SPLIT_HUGE_PMD) 1721 split_huge_pmd_address(vma, address, true, page); 1722 1723 /* 1724 * For THP, we have to assume the worse case ie pmd for invalidation. 1725 * For hugetlb, it could be much worse if we need to do pud 1726 * invalidation in the case of pmd sharing. 1727 * 1728 * Note that the page can not be free in this function as call of 1729 * try_to_unmap() must hold a reference on the page. 1730 */ 1731 range.end = PageKsm(page) ? 1732 address + PAGE_SIZE : vma_address_end(page, vma); 1733 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 1734 address, range.end); 1735 if (PageHuge(page)) { 1736 /* 1737 * If sharing is possible, start and end will be adjusted 1738 * accordingly. 
1739 */ 1740 adjust_range_if_pmd_sharing_possible(vma, &range.start, 1741 &range.end); 1742 } 1743 mmu_notifier_invalidate_range_start(&range); 1744 1745 while (page_vma_mapped_walk(&pvmw)) { 1746 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 1747 /* PMD-mapped THP migration entry */ 1748 if (!pvmw.pte) { 1749 VM_BUG_ON_PAGE(PageHuge(page) || 1750 !PageTransCompound(page), page); 1751 1752 set_pmd_migration_entry(&pvmw, page); 1753 continue; 1754 } 1755 #endif 1756 1757 /* Unexpected PMD-mapped THP? */ 1758 VM_BUG_ON_PAGE(!pvmw.pte, page); 1759 1760 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); 1761 address = pvmw.address; 1762 1763 if (PageHuge(page) && !PageAnon(page)) { 1764 /* 1765 * To call huge_pmd_unshare, i_mmap_rwsem must be 1766 * held in write mode. Caller needs to explicitly 1767 * do this outside rmap routines. 1768 */ 1769 VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 1770 if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) { 1771 /* 1772 * huge_pmd_unshare unmapped an entire PMD 1773 * page. There is no way of knowing exactly 1774 * which PMDs may be cached for this mm, so 1775 * we must flush them all. start/end were 1776 * already adjusted above to cover this range. 1777 */ 1778 flush_cache_range(vma, range.start, range.end); 1779 flush_tlb_range(vma, range.start, range.end); 1780 mmu_notifier_invalidate_range(mm, range.start, 1781 range.end); 1782 1783 /* 1784 * The ref count of the PMD page was dropped 1785 * which is part of the way map counting 1786 * is done for shared PMDs. Return 'true' 1787 * here. When there is no other sharing, 1788 * huge_pmd_unshare returns false and we will 1789 * unmap the actual page and drop map count 1790 * to zero. 1791 */ 1792 page_vma_mapped_walk_done(&pvmw); 1793 break; 1794 } 1795 } 1796 1797 /* Nuke the page table entry. */ 1798 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 1799 pteval = ptep_clear_flush(vma, address, pvmw.pte); 1800 1801 /* Move the dirty bit to the page. Now the pte is gone. */ 1802 if (pte_dirty(pteval)) 1803 set_page_dirty(page); 1804 1805 /* Update high watermark before we lower rss */ 1806 update_hiwater_rss(mm); 1807 1808 if (is_zone_device_page(page)) { 1809 swp_entry_t entry; 1810 pte_t swp_pte; 1811 1812 /* 1813 * Store the pfn of the page in a special migration 1814 * pte. do_swap_page() will wait until the migration 1815 * pte is removed and then restart fault handling. 1816 */ 1817 entry = make_readable_migration_entry( 1818 page_to_pfn(page)); 1819 swp_pte = swp_entry_to_pte(entry); 1820 1821 /* 1822 * pteval maps a zone device page and is therefore 1823 * a swap pte. 1824 */ 1825 if (pte_swp_soft_dirty(pteval)) 1826 swp_pte = pte_swp_mksoft_dirty(swp_pte); 1827 if (pte_swp_uffd_wp(pteval)) 1828 swp_pte = pte_swp_mkuffd_wp(swp_pte); 1829 set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); 1830 /* 1831 * No need to invalidate here it will synchronize on 1832 * against the special swap migration pte. 1833 * 1834 * The assignment to subpage above was computed from a 1835 * swap PTE which results in an invalid pointer. 1836 * Since only PAGE_SIZE pages can currently be 1837 * migrated, just set it to page. This will need to be 1838 * changed when hugepage migrations to device private 1839 * memory are supported. 
1840 */ 1841 subpage = page; 1842 } else if (PageHWPoison(page)) { 1843 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 1844 if (PageHuge(page)) { 1845 hugetlb_count_sub(compound_nr(page), mm); 1846 set_huge_swap_pte_at(mm, address, 1847 pvmw.pte, pteval, 1848 vma_mmu_pagesize(vma)); 1849 } else { 1850 dec_mm_counter(mm, mm_counter(page)); 1851 set_pte_at(mm, address, pvmw.pte, pteval); 1852 } 1853 1854 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 1855 /* 1856 * The guest indicated that the page content is of no 1857 * interest anymore. Simply discard the pte; vmscan 1858 * will take care of the rest. 1859 * A future reference will then fault in a new zero 1860 * page. When userfaultfd is active, we must not drop 1861 * this page though, as its main user (postcopy 1862 * migration) will not expect userfaults on already 1863 * copied pages. 1864 */ 1865 dec_mm_counter(mm, mm_counter(page)); 1866 /* We have to invalidate as we cleared the pte */ 1867 mmu_notifier_invalidate_range(mm, address, 1868 address + PAGE_SIZE); 1869 } else { 1870 swp_entry_t entry; 1871 pte_t swp_pte; 1872 1873 if (arch_unmap_one(mm, vma, address, pteval) < 0) { 1874 set_pte_at(mm, address, pvmw.pte, pteval); 1875 ret = false; 1876 page_vma_mapped_walk_done(&pvmw); 1877 break; 1878 } 1879 1880 /* 1881 * Store the pfn of the page in a special migration 1882 * pte. do_swap_page() will wait until the migration 1883 * pte is removed and then restart fault handling. 1884 */ 1885 if (pte_write(pteval)) 1886 entry = make_writable_migration_entry( 1887 page_to_pfn(subpage)); 1888 else 1889 entry = make_readable_migration_entry( 1890 page_to_pfn(subpage)); 1891 1892 swp_pte = swp_entry_to_pte(entry); 1893 if (pte_soft_dirty(pteval)) 1894 swp_pte = pte_swp_mksoft_dirty(swp_pte); 1895 if (pte_uffd_wp(pteval)) 1896 swp_pte = pte_swp_mkuffd_wp(swp_pte); 1897 set_pte_at(mm, address, pvmw.pte, swp_pte); 1898 /* 1899 * No need to invalidate here; it will be synchronized 1900 * against the special swap migration pte. 1901 */ 1902 } 1903 1904 /* 1905 * No need to call mmu_notifier_invalidate_range(); it has been 1906 * done above for all cases requiring it to happen under the page 1907 * table lock, before mmu_notifier_invalidate_range_end() 1908 * 1909 * See Documentation/vm/mmu_notifier.rst 1910 */ 1911 page_remove_rmap(subpage, PageHuge(page)); 1912 put_page(page); 1913 } 1914 1915 mmu_notifier_invalidate_range_end(&range); 1916 1917 return ret; 1918 } 1919 1920 /** 1921 * try_to_migrate - try to replace all page table mappings with swap entries 1922 * @page: the page to replace page table entries for 1923 * @flags: action and flags 1924 * 1925 * Tries to remove all the page table entries which are mapping this page and 1926 * replace them with special swap entries. Caller must hold the page lock. 1927 */ 1928 void try_to_migrate(struct page *page, enum ttu_flags flags) 1929 { 1930 struct rmap_walk_control rwc = { 1931 .rmap_one = try_to_migrate_one, 1932 .arg = (void *)flags, 1933 .done = page_not_mapped, 1934 .anon_lock = page_lock_anon_vma_read, 1935 }; 1936 1937 /* 1938 * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED, 1939 * TTU_SPLIT_HUGE_PMD and TTU_SYNC flags. 1940 */ 1941 if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | 1942 TTU_SYNC))) 1943 return; 1944 1945 if (is_zone_device_page(page) && !is_device_private_page(page)) 1946 return; 1947 1948 /* 1949 * During exec, a temporary VMA is set up and later moved.
1950 * The VMA is moved under the anon_vma lock but not the 1951 * page tables, leading to a race where migration cannot 1952 * find the migration ptes. Rather than increasing the 1953 * locking requirements of exec(), migration skips 1954 * temporary VMAs until after exec() completes. 1955 */ 1956 if (!PageKsm(page) && PageAnon(page)) 1957 rwc.invalid_vma = invalid_migration_vma; 1958 1959 if (flags & TTU_RMAP_LOCKED) 1960 rmap_walk_locked(page, &rwc); 1961 else 1962 rmap_walk(page, &rwc); 1963 } 1964 1965 /* 1966 * Walks the VMAs mapping a page and mlocks the page if any locked VMAs are 1967 * found. Once one is found, the page is mlocked and the scan can be terminated. 1968 */ 1969 static bool page_mlock_one(struct page *page, struct vm_area_struct *vma, 1970 unsigned long address, void *unused) 1971 { 1972 struct page_vma_mapped_walk pvmw = { 1973 .page = page, 1974 .vma = vma, 1975 .address = address, 1976 }; 1977 1978 /* An unlocked vma doesn't have any pages to mlock, so continue the scan */ 1979 if (!(vma->vm_flags & VM_LOCKED)) 1980 return true; 1981 1982 while (page_vma_mapped_walk(&pvmw)) { 1983 /* 1984 * Need to recheck under the ptl to serialise with 1985 * __munlock_pagevec_fill() after VM_LOCKED is cleared in 1986 * munlock_vma_pages_range(). 1987 */ 1988 if (vma->vm_flags & VM_LOCKED) { 1989 /* 1990 * PTE-mapped THP are never marked as mlocked; but 1991 * this function is never called on a DoubleMap THP, 1992 * nor on an Anon THP (which may still be PTE-mapped 1993 * after DoubleMap was cleared). 1994 */ 1995 mlock_vma_page(page); 1996 /* 1997 * No need to scan further once the page is marked 1998 * as mlocked. 1999 */ 2000 page_vma_mapped_walk_done(&pvmw); 2001 return false; 2002 } 2003 } 2004 2005 return true; 2006 } 2007 2008 /** 2009 * page_mlock - try to mlock a page 2010 * @page: the page to be mlocked 2011 * 2012 * Called from munlock code. Checks all of the VMAs mapping the page and mlocks 2013 * the page if any are found. The page will be returned with PG_mlocked cleared 2014 * if it is not mapped by any locked VMAs.
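 *
 * Illustrative (hypothetical) usage by a munlock-style caller, with the
 * page locked and already isolated from the LRU as the VM_BUG_ON below
 * requires; not verbatim from mm/mlock.c:
 *
 *	lock_page(page);
 *	if (TestClearPageMlocked(page) && page_mapcount(page) > 1)
 *		page_mlock(page);
 *	unlock_page(page);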
2015 */ 2016 void page_mlock(struct page *page) 2017 { 2018 struct rmap_walk_control rwc = { 2019 .rmap_one = page_mlock_one, 2020 .done = page_not_mapped, 2021 .anon_lock = page_lock_anon_vma_read, 2022 2023 }; 2024 2025 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); 2026 VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); 2027 2028 /* Anon THP are only marked as mlocked when singly mapped */ 2029 if (PageTransCompound(page) && PageAnon(page)) 2030 return; 2031 2032 rmap_walk(page, &rwc); 2033 } 2034 2035 #ifdef CONFIG_DEVICE_PRIVATE 2036 struct make_exclusive_args { 2037 struct mm_struct *mm; 2038 unsigned long address; 2039 void *owner; 2040 bool valid; 2041 }; 2042 2043 static bool page_make_device_exclusive_one(struct page *page, 2044 struct vm_area_struct *vma, unsigned long address, void *priv) 2045 { 2046 struct mm_struct *mm = vma->vm_mm; 2047 struct page_vma_mapped_walk pvmw = { 2048 .page = page, 2049 .vma = vma, 2050 .address = address, 2051 }; 2052 struct make_exclusive_args *args = priv; 2053 pte_t pteval; 2054 struct page *subpage; 2055 bool ret = true; 2056 struct mmu_notifier_range range; 2057 swp_entry_t entry; 2058 pte_t swp_pte; 2059 2060 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma, 2061 vma->vm_mm, address, min(vma->vm_end, 2062 address + page_size(page)), args->owner); 2063 mmu_notifier_invalidate_range_start(&range); 2064 2065 while (page_vma_mapped_walk(&pvmw)) { 2066 /* Unexpected PMD-mapped THP? */ 2067 VM_BUG_ON_PAGE(!pvmw.pte, page); 2068 2069 if (!pte_present(*pvmw.pte)) { 2070 ret = false; 2071 page_vma_mapped_walk_done(&pvmw); 2072 break; 2073 } 2074 2075 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); 2076 address = pvmw.address; 2077 2078 /* Nuke the page table entry. */ 2079 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 2080 pteval = ptep_clear_flush(vma, address, pvmw.pte); 2081 2082 /* Move the dirty bit to the page. Now the pte is gone. */ 2083 if (pte_dirty(pteval)) 2084 set_page_dirty(page); 2085 2086 /* 2087 * Check that our target page is still mapped at the expected 2088 * address. 2089 */ 2090 if (args->mm == mm && args->address == address && 2091 pte_write(pteval)) 2092 args->valid = true; 2093 2094 /* 2095 * Store the pfn of the page in a special migration 2096 * pte. do_swap_page() will wait until the migration 2097 * pte is removed and then restart fault handling. 2098 */ 2099 if (pte_write(pteval)) 2100 entry = make_writable_device_exclusive_entry( 2101 page_to_pfn(subpage)); 2102 else 2103 entry = make_readable_device_exclusive_entry( 2104 page_to_pfn(subpage)); 2105 swp_pte = swp_entry_to_pte(entry); 2106 if (pte_soft_dirty(pteval)) 2107 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2108 if (pte_uffd_wp(pteval)) 2109 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2110 2111 set_pte_at(mm, address, pvmw.pte, swp_pte); 2112 2113 /* 2114 * There is a reference on the page for the swap entry which has 2115 * been removed, so shouldn't take another. 
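 * In other words, the reference that backed the now-cleared pte is kept
 * for the device exclusive entry, so unlike try_to_migrate_one() above
 * there is no put_page() here; page_remove_rmap() only drops the
 * mapcount.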
2116 */ 2117 page_remove_rmap(subpage, false); 2118 } 2119 2120 mmu_notifier_invalidate_range_end(&range); 2121 2122 return ret; 2123 } 2124 2125 /** 2126 * page_make_device_exclusive - mark the page exclusively owned by a device 2127 * @page: the page to replace page table entries for 2128 * @mm: the mm_struct where the page is expected to be mapped 2129 * @address: address where the page is expected to be mapped 2130 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks 2131 * 2132 * Tries to remove all the page table entries which are mapping this page and 2133 * replace them with special device exclusive swap entries to grant a device 2134 * exclusive access to the page. Caller must hold the page lock. 2135 * 2136 * Returns false if the page is still mapped, or if it could not be unmapped 2137 * from the expected address. Otherwise returns true (success). 2138 */ 2139 static bool page_make_device_exclusive(struct page *page, struct mm_struct *mm, 2140 unsigned long address, void *owner) 2141 { 2142 struct make_exclusive_args args = { 2143 .mm = mm, 2144 .address = address, 2145 .owner = owner, 2146 .valid = false, 2147 }; 2148 struct rmap_walk_control rwc = { 2149 .rmap_one = page_make_device_exclusive_one, 2150 .done = page_not_mapped, 2151 .anon_lock = page_lock_anon_vma_read, 2152 .arg = &args, 2153 }; 2154 2155 /* 2156 * Restrict to anonymous pages for now to avoid potential writeback 2157 * issues. Also tail pages shouldn't be passed to rmap_walk, so skip 2158 * those. 2159 */ 2160 if (!PageAnon(page) || PageTail(page)) 2161 return false; 2162 2163 rmap_walk(page, &rwc); 2164 2165 return args.valid && !page_mapcount(page); 2166 } 2167 2168 /** 2169 * make_device_exclusive_range() - Mark a range for exclusive use by a device 2170 * @mm: mm_struct of the associated target process 2171 * @start: start of the region to mark for exclusive device access 2172 * @end: end address of region 2173 * @pages: returns the pages which were successfully marked for exclusive access 2174 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering 2175 * 2176 * Returns: number of pages found in the range by GUP. A page is marked for 2177 * exclusive access only if the page pointer is non-NULL. 2178 * 2179 * This function finds the ptes mapping page(s) in the given address range, locks 2180 * them and replaces the mappings with special swap entries preventing userspace CPU 2181 * access. On fault these entries are replaced with the original mapping after 2182 * calling MMU notifiers. 2183 * 2184 * A driver using this to program access from a device must use an mmu notifier 2185 * critical section to hold a device-specific lock during programming. Once 2186 * programming is complete it should drop the page lock and reference, after 2187 * which point CPU access to the page will revoke the exclusive access.
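 *
 * Illustrative driver-side sketch (hypothetical names, only meant to show
 * the expected calling pattern): with an interval notifier registered for
 * the range and the device-specific lock held,
 *
 *	struct page *page = NULL;
 *	long npages;
 *
 *	npages = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
 *					     &page, my_driver_tag);
 *	if (npages == 1 && page) {
 *		my_program_device_pte(addr, page);
 *		unlock_page(page);
 *		put_page(page);
 *	}
 *
 * where my_driver_tag is what the driver's MMU_NOTIFY_EXCLUSIVE callback
 * filters on via @owner, and my_program_device_pte() stands in for the
 * device page table update.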
2188 */ 2189 int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, 2190 unsigned long end, struct page **pages, 2191 void *owner) 2192 { 2193 long npages = (end - start) >> PAGE_SHIFT; 2194 long i; 2195 2196 npages = get_user_pages_remote(mm, start, npages, 2197 FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, 2198 pages, NULL, NULL); 2199 if (npages < 0) 2200 return npages; 2201 2202 for (i = 0; i < npages; i++, start += PAGE_SIZE) { 2203 if (!trylock_page(pages[i])) { 2204 put_page(pages[i]); 2205 pages[i] = NULL; 2206 continue; 2207 } 2208 2209 if (!page_make_device_exclusive(pages[i], mm, start, owner)) { 2210 unlock_page(pages[i]); 2211 put_page(pages[i]); 2212 pages[i] = NULL; 2213 } 2214 } 2215 2216 return npages; 2217 } 2218 EXPORT_SYMBOL_GPL(make_device_exclusive_range); 2219 #endif 2220 2221 void __put_anon_vma(struct anon_vma *anon_vma) 2222 { 2223 struct anon_vma *root = anon_vma->root; 2224 2225 anon_vma_free(anon_vma); 2226 if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 2227 anon_vma_free(root); 2228 } 2229 2230 static struct anon_vma *rmap_walk_anon_lock(struct page *page, 2231 struct rmap_walk_control *rwc) 2232 { 2233 struct anon_vma *anon_vma; 2234 2235 if (rwc->anon_lock) 2236 return rwc->anon_lock(page); 2237 2238 /* 2239 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read() 2240 * because that depends on page_mapped(); but not all its usages 2241 * are holding mmap_lock. Users without mmap_lock are required to 2242 * take a reference count to prevent the anon_vma disappearing 2243 */ 2244 anon_vma = page_anon_vma(page); 2245 if (!anon_vma) 2246 return NULL; 2247 2248 anon_vma_lock_read(anon_vma); 2249 return anon_vma; 2250 } 2251 2252 /* 2253 * rmap_walk_anon - do something to anonymous page using the object-based 2254 * rmap method 2255 * @page: the page to be handled 2256 * @rwc: control variable according to each walk type 2257 * 2258 * Find all the mappings of a page using the mapping pointer and the vma chains 2259 * contained in the anon_vma struct it points to. 2260 * 2261 * When called from page_mlock(), the mmap_lock of the mm containing the vma 2262 * where the page was found will be held for write. So, we won't recheck 2263 * vm_flags for that VMA. That should be OK, because that vma shouldn't be 2264 * LOCKED. 2265 */ 2266 static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, 2267 bool locked) 2268 { 2269 struct anon_vma *anon_vma; 2270 pgoff_t pgoff_start, pgoff_end; 2271 struct anon_vma_chain *avc; 2272 2273 if (locked) { 2274 anon_vma = page_anon_vma(page); 2275 /* anon_vma disappear under us? 
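 * It should not: with 'locked' the caller already holds the relevant rmap
 * lock (see rmap_walk_locked()), so the anon_vma is expected to still be
 * there; hence the VM_BUG_ON below rather than a graceful return.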
*/ 2276 VM_BUG_ON_PAGE(!anon_vma, page); 2277 } else { 2278 anon_vma = rmap_walk_anon_lock(page, rwc); 2279 } 2280 if (!anon_vma) 2281 return; 2282 2283 pgoff_start = page_to_pgoff(page); 2284 pgoff_end = pgoff_start + thp_nr_pages(page) - 1; 2285 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, 2286 pgoff_start, pgoff_end) { 2287 struct vm_area_struct *vma = avc->vma; 2288 unsigned long address = vma_address(page, vma); 2289 2290 VM_BUG_ON_VMA(address == -EFAULT, vma); 2291 cond_resched(); 2292 2293 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2294 continue; 2295 2296 if (!rwc->rmap_one(page, vma, address, rwc->arg)) 2297 break; 2298 if (rwc->done && rwc->done(page)) 2299 break; 2300 } 2301 2302 if (!locked) 2303 anon_vma_unlock_read(anon_vma); 2304 } 2305 2306 /* 2307 * rmap_walk_file - do something to file page using the object-based rmap method 2308 * @page: the page to be handled 2309 * @rwc: control variable according to each walk type 2310 * 2311 * Find all the mappings of a page using the mapping pointer and the vma chains 2312 * contained in the address_space struct it points to. 2313 * 2314 * When called from page_mlock(), the mmap_lock of the mm containing the vma 2315 * where the page was found will be held for write. So, we won't recheck 2316 * vm_flags for that VMA. That should be OK, because that vma shouldn't be 2317 * LOCKED. 2318 */ 2319 static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, 2320 bool locked) 2321 { 2322 struct address_space *mapping = page_mapping(page); 2323 pgoff_t pgoff_start, pgoff_end; 2324 struct vm_area_struct *vma; 2325 2326 /* 2327 * The page lock not only makes sure that page->mapping cannot 2328 * suddenly be NULLified by truncation, it makes sure that the 2329 * structure at mapping cannot be freed and reused yet, 2330 * so we can safely take mapping->i_mmap_rwsem. 2331 */ 2332 VM_BUG_ON_PAGE(!PageLocked(page), page); 2333 2334 if (!mapping) 2335 return; 2336 2337 pgoff_start = page_to_pgoff(page); 2338 pgoff_end = pgoff_start + thp_nr_pages(page) - 1; 2339 if (!locked) 2340 i_mmap_lock_read(mapping); 2341 vma_interval_tree_foreach(vma, &mapping->i_mmap, 2342 pgoff_start, pgoff_end) { 2343 unsigned long address = vma_address(page, vma); 2344 2345 VM_BUG_ON_VMA(address == -EFAULT, vma); 2346 cond_resched(); 2347 2348 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2349 continue; 2350 2351 if (!rwc->rmap_one(page, vma, address, rwc->arg)) 2352 goto done; 2353 if (rwc->done && rwc->done(page)) 2354 goto done; 2355 } 2356 2357 done: 2358 if (!locked) 2359 i_mmap_unlock_read(mapping); 2360 } 2361 2362 void rmap_walk(struct page *page, struct rmap_walk_control *rwc) 2363 { 2364 if (unlikely(PageKsm(page))) 2365 rmap_walk_ksm(page, rwc); 2366 else if (PageAnon(page)) 2367 rmap_walk_anon(page, rwc, false); 2368 else 2369 rmap_walk_file(page, rwc, false); 2370 } 2371 2372 /* Like rmap_walk, but caller holds relevant rmap lock */ 2373 void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc) 2374 { 2375 /* no ksm support for now */ 2376 VM_BUG_ON_PAGE(PageKsm(page), page); 2377 if (PageAnon(page)) 2378 rmap_walk_anon(page, rwc, true); 2379 else 2380 rmap_walk_file(page, rwc, true); 2381 } 2382 2383 #ifdef CONFIG_HUGETLB_PAGE 2384 /* 2385 * The following two functions are for anonymous (private mapped) hugepages. 2386 * Unlike common anonymous pages, anonymous hugepages have no accounting code 2387 * and no lru code, because we handle hugepages differently from common pages. 
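 * In particular, the mapcount of an anonymous hugepage is tracked only in
 * the compound mapcount, which is what the compound_mapcount_ptr()
 * updates below manipulate; there is no per-subpage accounting.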
2388 */ 2389 void hugepage_add_anon_rmap(struct page *page, 2390 struct vm_area_struct *vma, unsigned long address) 2391 { 2392 struct anon_vma *anon_vma = vma->anon_vma; 2393 int first; 2394 2395 BUG_ON(!PageLocked(page)); 2396 BUG_ON(!anon_vma); 2397 /* address might be in next vma when migration races vma_adjust */ 2398 first = atomic_inc_and_test(compound_mapcount_ptr(page)); 2399 if (first) 2400 __page_set_anon_rmap(page, vma, address, 0); 2401 } 2402 2403 void hugepage_add_new_anon_rmap(struct page *page, 2404 struct vm_area_struct *vma, unsigned long address) 2405 { 2406 BUG_ON(address < vma->vm_start || address >= vma->vm_end); 2407 atomic_set(compound_mapcount_ptr(page), 0); 2408 if (hpage_pincount_available(page)) 2409 atomic_set(compound_pincount_ptr(page), 0); 2410 2411 __page_set_anon_rmap(page, vma, address, 1); 2412 } 2413 #endif /* CONFIG_HUGETLB_PAGE */ 2414
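/*
 * Illustrative sketch (hypothetical walker, not part of this file): every
 * rmap walker above follows the same pattern of filling in a struct
 * rmap_walk_control and handing it to rmap_walk(), which dispatches to the
 * KSM, anon or file variant:
 *
 *	static bool my_rmap_one(struct page *page, struct vm_area_struct *vma,
 *				unsigned long address, void *arg)
 *	{
 *		... act on one mapping; return false to stop the walk ...
 *		return true;
 *	}
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one  = my_rmap_one,
 *		.done      = page_not_mapped,
 *		.anon_lock = page_lock_anon_vma_read,
 *	};
 *
 *	rmap_walk(page, &rwc);
 */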