/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   mm->mmap_lock
 *     page->flags PG_locked (lock_page)	* (see hugetlbfs below)
 *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 *         mapping->i_mmap_rwsem
 *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *           anon_vma->rwsem
 *             mm->page_table_lock or pte_lock
 *               swap_lock (in swap_duplicate, swap_info_get)
 *                 mmlist_lock (in mmput, drain_mmlist and others)
 *                 mapping->private_lock (in __set_page_dirty_buffers)
 *                   lock_page_memcg move_lock (in __set_page_dirty_buffers)
 *                     i_pages lock (widely used)
 *                       lruvec->lru_lock (in lock_page_lruvec_irq)
 *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                   sb_lock (within inode_lock in fs/fs-writeback.c)
 *                   i_pages lock (widely used, in set_page_dirty,
 *                             in arch-dependent flush_dcache_mmap_lock,
 *                             within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mutex	(memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 *
 * * hugetlbfs PageHuge() pages take locks in this order:
 *         mapping->i_mmap_rwsem
 *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *             page->flags PG_locked (lock_page)
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>

#include <asm/tlbflush.h>

#include <trace/events/tlb.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->degree = 1;	/* Reference for first vma */
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the rwsem even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_lock held for reading.
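 *
 * Returns 0 on success, -ENOMEM on failure.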
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		/* vma reference or self-parent link for new root */
		anon_vma->degree++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}

/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;

	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * anon_vma_clone() is called by __vma_adjust(), __split_vma(), copy_vma() and
 * anon_vma_fork(). The first three want an exact copy of src, while the last
 * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
 * endless growth of anon_vma. Since dst->anon_vma is set to NULL before the
 * call, we can identify this case by checking (!dst->anon_vma &&
 * src->anon_vma).
 *
 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
 * and reuse an existing anon_vma which has no vmas and only one child
 * anon_vma. This prevents degradation of the anon_vma hierarchy into an
 * endless linear chain in the case of a constantly forking task. On the other
 * hand, an anon_vma with more than one child isn't reused even if there is no
 * live vma, so the rmap walker has a good chance of avoiding scanning the
 * whole hierarchy when it searches where the page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse an existing anon_vma if its degree is lower than two,
		 * which means it has no vma and only one anon_vma child.
		 *
		 * Do not choose the parent anon_vma, otherwise the first child
		 * will always reuse it. The root anon_vma is never reused:
		 * it has a self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && src->anon_vma &&
		    anon_vma != src->anon_vma && anon_vma->degree < 2)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->degree++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here, otherwise its degree can be
	 * incorrectly decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->degree++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->degree--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma) {
		vma->anon_vma->degree--;

		/*
		 * vma would still be needed after unlink, and anon_vma will be
		 * prepared when handling a fault.
		 */
		vma->anon_vma = NULL;
	}
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do so before due to
	 * __put_anon_vma() needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->degree);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap(),
 * the best this function can do is return a refcount-increased anon_vma
 * that might have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we've got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs.
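 * The pending batch is accumulated in current->tlb_ubc by
 * set_tlb_ubc_flush_pending() below.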
 * It is important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure the compiler does not re-order the setting of
	 * tlb_flush_batched before the PTE is cleared.
	 */
	barrier();
	mm->tlb_flush_batched = true;

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer the flush in a batch */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB before
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim, so instead track
 * whether TLB batching occurred in the past and, if so, do a flush here
 * if required. This will cost one additional flush per reclaim cycle, paid
 * by the first operation at risk such as mprotect or munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	if (data_race(mm->tlb_flush_batched)) {
		flush_tlb_mm(mm);

		/*
		 * Do not allow the compiler to re-order the clearing of
		 * tlb_flush_batched before the TLB is flushed.
		 */
		barrier();
		mm->tlb_flush_batched = false;
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

/*
 * At what user virtual address is page expected in vma?
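 * Returns that address, or -EFAULT if the page does not appear to belong to
 * the vma.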
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (!vma->vm_file) {
		return -EFAULT;
	} else if (vma->vm_file->f_mapping != compound_head(page)->mapping) {
		return -EFAULT;
	}

	return vma_address(page, vma);
}

/* Look up the pmd covering @address; NULL if it is not present or is a huge pmd. */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(),
	 * set_pmd_at() without holding the anon_vma lock for write. So when
	 * looking for a genuine pmde (in which to find a pte), test present
	 * and !THP together.
	 */
	pmde = *pmd;
	barrier();
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}

struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: page_referenced_arg will be passed
 */
static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if (vma->vm_flags & VM_LOCKED) {
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false; /* To break the loop */
		}

		if (pvmw.pte) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the page has been used in another mapping,
				 * we will catch it; if this other mapping is
				 * already gone, the unmap path will have set
				 * PG_referenced or activated the page.
				 */
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		clear_page_idle(page);
	if (test_and_clear_page_young(page))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	if (!pra->mapcount)
		return false; /* To break the loop */

	return true;
}

static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect the encountered vma->vm_flags that actually referenced
 *            the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = total_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!pra.mapcount)
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups.
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

	rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}

static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,
	};
	struct mmu_notifier_range range;
	int *cleaned = arg;

	/*
	 * We have to assume the worst case, i.e. a pmd, for invalidation.
	 * Note that the page cannot be freed from this function.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
				0, vma, vma->vm_mm, address,
				vma_address_end(page, vma));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		int ret = 0;

		address = pvmw.address;
		if (pvmw.pte) {
			pte_t entry;
			pte_t *pte = pvmw.pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			pmd_t *pmd = pvmw.pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_page(vma, address, page_to_pfn(page));
			entry = pmdp_invalidate(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
#endif
		}

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (ret)
			(*cleaned)++;
	}

	mmu_notifier_invalidate_range_end(&range);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	page = compound_head(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg page_referenced()'s
	 * PageAnon()) will not see one without the other.
	 */
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	Page or Hugepage to add to rmap
 * @vma:	VM area to add page to.
 * @address:	User virtual address of the mapping
 * @exclusive:	the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	/*
	 * page_idle does a lockless/optimistic rmap scan on page->mapping.
	 * Make sure the compiler doesn't split the stores of anon_vma and
	 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
	 * could mistake the mapping for a struct address_space and crash.
	 */
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page);
	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
		       page);
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @compound:	charge the page as compound or small page
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int flags)
{
	bool compound = flags & RMAP_COMPOUND;
	bool first;

	if (unlikely(PageKsm(page)))
		lock_page_memcg(page);
	else
		VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (compound) {
		atomic_t *mapcount;

		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		mapcount = compound_mapcount_ptr(page);
		first = atomic_inc_and_test(mapcount);
	} else {
		first = atomic_inc_and_test(&page->_mapcount);
	}

	if (first) {
		int nr = compound ? thp_nr_pages(page) : 1;
		/*
		 * We use the irq-unsafe __{inc|mod}_zone_page_state because
		 * these counters are not modified in interrupt context, and
		 * the pte lock (a spinlock) is held, which implies preemption
		 * is disabled.
		 */
		if (compound)
			__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
		__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
	}

	if (unlikely(PageKsm(page))) {
		unlock_page_memcg(page);
		return;
	}

	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address,
				flags & RMAP_EXCLUSIVE);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @compound:	charge the page as compound or small page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	int nr = compound ? thp_nr_pages(page) : 1;

	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	__SetPageSwapBacked(page);
	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		/* increment count (starts at -1) */
		atomic_set(compound_mapcount_ptr(page), 0);
		if (hpage_pincount_available(page))
			atomic_set(compound_pincount_ptr(page), 0);

		__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
	} else {
		/* An anon THP is always mapped first with a PMD */
		VM_BUG_ON_PAGE(PageTransCompound(page), page);
		/* increment count (starts at -1) */
		atomic_set(&page->_mapcount, 0);
	}
	__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
	__page_set_anon_rmap(page, vma, address, 1);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page:	the page to add the mapping to
 * @compound:	charge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
	lock_page_memcg(page);
	if (compound && PageTransHuge(page)) {
		int nr_pages = thp_nr_pages(page);

		for (i = 0, nr = 0; i < nr_pages; i++) {
			if (atomic_inc_and_test(&page[i]._mapcount))
				nr++;
		}
		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
			goto out;
		if (PageSwapBacked(page))
			__mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED,
						nr_pages);
		else
			__mod_lruvec_page_state(page, NR_FILE_PMDMAPPED,
						nr_pages);
	} else {
		if (PageTransCompound(page) && page_mapping(page)) {
			VM_WARN_ON_ONCE(!PageLocked(page));

			SetPageDoubleMap(compound_head(page));
			if (PageMlocked(page))
				clear_page_mlock(compound_head(page));
		}
		if (!atomic_inc_and_test(&page->_mapcount))
			goto out;
	}
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
out:
	unlock_page_memcg(page);
}

static void page_remove_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageHead(page), page);

	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
	if (unlikely(PageHuge(page))) {
		/* hugetlb pages are always mapped with pmds */
		atomic_dec(compound_mapcount_ptr(page));
		return;
	}

	/* page still mapped by someone else? */
	if (compound && PageTransHuge(page)) {
		int nr_pages = thp_nr_pages(page);

		for (i = 0, nr = 0; i < nr_pages; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
			return;
		if (PageSwapBacked(page))
			__mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED,
						-nr_pages);
		else
			__mod_lruvec_page_state(page, NR_FILE_PMDMAPPED,
						-nr_pages);
	} else {
		if (!atomic_add_negative(-1, &page->_mapcount))
			return;
	}

	/*
	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
	 * these counters are not modified in interrupt context, and
	 * the pte lock (a spinlock) is held, which implies preemption
	 * is disabled.
	 */
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
}

static void page_remove_anon_compound_rmap(struct page *page)
{
	int i, nr;

	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
		return;

	/* Hugepages are not counted in NR_ANON_PAGES for now. */
	if (unlikely(PageHuge(page)))
		return;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return;

	__mod_lruvec_page_state(page, NR_ANON_THPS, -thp_nr_pages(page));

	if (TestClearPageDoubleMap(page)) {
		/*
		 * Subpages can be mapped with PTEs too. Check how many of
		 * them are still mapped.
		 */
		for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}

		/*
		 * Queue the page for deferred split if at least one small
		 * page of the compound page is unmapped, but at least one
		 * small page is still mapped.
		 */
		if (nr && nr < thp_nr_pages(page))
			deferred_split_huge_page(page);
	} else {
		nr = thp_nr_pages(page);
	}

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (nr)
		__mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page:	page to remove mapping from
 * @compound:	uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, bool compound)
{
	lock_page_memcg(page);

	if (!PageAnon(page)) {
		page_remove_file_rmap(page, compound);
		goto out;
	}

	if (compound) {
		page_remove_anon_compound_rmap(page);
		goto out;
	}

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		goto out;

	/*
	 * We use the irq-unsafe __{inc|mod}_zone_page_state because
	 * these counters are not modified in interrupt context, and
	 * the pte lock (a spinlock) is held, which implies preemption
	 * is disabled.
	 */
	__dec_lruvec_page_state(page, NR_ANON_MAPPED);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (PageTransCompound(page))
		deferred_split_huge_page(compound_head(page));

	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_unref_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
out:
	unlock_page_memcg(page);
}

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	pte_t pteval;
	struct page *subpage;
	bool ret = true;
	struct mmu_notifier_range range;
	enum ttu_flags flags = (enum ttu_flags)(long)arg;

	/*
	 * When racing against e.g. zap_pte_range() on another cpu,
	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
	 * try_to_unmap() may return before page_mapped() has become false,
	 * if page table locking is skipped: use TTU_SYNC to wait for that.
	 */
	if (flags & TTU_SYNC)
		pvmw.flags = PVMW_SYNC;

	if (flags & TTU_SPLIT_HUGE_PMD)
		split_huge_pmd_address(vma, address, false, page);

	/*
	 * For THP, we have to assume the worst case, i.e. a pmd, for
	 * invalidation. For hugetlb, it could be much worse if we need to do
	 * pud invalidation in the case of pmd sharing.
	 *
	 * Note that the page cannot be freed in this function as the call to
	 * try_to_unmap() must hold a reference on the page.
	 */
	range.end = PageKsm(page) ?
			address + PAGE_SIZE : vma_address_end(page, vma);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address, range.end);
	if (PageHuge(page)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
		 */
		adjust_range_if_pmd_sharing_possible(vma, &range.start,
						     &range.end);
	}
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		/*
		 * If the page is mlock()d, we cannot swap it out.
		 */
		if (!(flags & TTU_IGNORE_MLOCK) &&
		    (vma->vm_flags & VM_LOCKED)) {
			/*
			 * PTE-mapped THP are never marked as mlocked: so do
			 * not set it on a DoubleMap THP, nor on an Anon THP
			 * (which may still be PTE-mapped after DoubleMap was
			 * cleared). But stop unmapping even in those cases.
			 */
			if (!PageTransCompound(page) || (PageHead(page) &&
			     !PageDoubleMap(page) && !PageAnon(page)))
				mlock_vma_page(page);
			page_vma_mapped_walk_done(&pvmw);
			ret = false;
			break;
		}

		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_PAGE(!pvmw.pte, page);

		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
		address = pvmw.address;

		if (PageHuge(page) && !PageAnon(page)) {
			/*
			 * To call huge_pmd_unshare, i_mmap_rwsem must be
			 * held in write mode.  Caller needs to explicitly
			 * do this outside rmap routines.
			 */
			VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
			if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
				/*
				 * huge_pmd_unshare unmapped an entire PMD
				 * page.  There is no way of knowing exactly
				 * which PMDs may be cached for this mm, so
				 * we must flush them all.  start/end were
				 * already adjusted above to cover this range.
				 */
				flush_cache_range(vma, range.start, range.end);
				flush_tlb_range(vma, range.start, range.end);
				mmu_notifier_invalidate_range(mm, range.start,
							      range.end);

				/*
				 * The ref count of the PMD page was dropped
				 * which is part of the way map counting
				 * is done for shared PMDs.  Return 'true'
				 * here.  When there is no other sharing,
				 * huge_pmd_unshare returns false and we will
				 * unmap the actual page and drop map count
				 * to zero.
				 */
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
		if (should_defer_flush(mm, flags)) {
			/*
			 * We clear the PTE but do not flush so potentially
			 * a remote CPU could still be writing to the page.
			 * If the entry was previously clean then the
			 * architecture must guarantee that a clear->dirty
			 * transition on a cached TLB entry is written through
			 * and traps if the PTE is unmapped.
			 */
			pteval = ptep_get_and_clear(mm, address, pvmw.pte);

			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
		} else {
			pteval = ptep_clear_flush(vma, address, pvmw.pte);
		}

		/* Move the dirty bit to the page. Now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			if (PageHuge(page)) {
				hugetlb_count_sub(compound_nr(page), mm);
				set_huge_swap_pte_at(mm, address,
						     pvmw.pte, pteval,
						     vma_mmu_pagesize(vma));
			} else {
				dec_mm_counter(mm, mm_counter(page));
				set_pte_at(mm, address, pvmw.pte, pteval);
			}

		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore.
			 * Simply discard the pte, vmscan
			 * will take care of the rest.
			 * A future reference will then fault in a new zero
			 * page. When userfaultfd is active, we must not drop
			 * this page though, as its main user (postcopy
			 * migration) will not expect userfaults on already
			 * copied pages.
			 */
			dec_mm_counter(mm, mm_counter(page));
			/* We have to invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else if (PageAnon(page)) {
			swp_entry_t entry = { .val = page_private(subpage) };
			pte_t swp_pte;
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (unlikely(PageSwapBacked(page) !=
				     PageSwapCache(page))) {
				WARN_ON_ONCE(1);
				ret = false;
				/* We have to invalidate as we cleared the pte */
				mmu_notifier_invalidate_range(mm, address,
							address + PAGE_SIZE);
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/* MADV_FREE page check */
			if (!PageSwapBacked(page)) {
				if (!PageDirty(page)) {
					/* Invalidate as we cleared the pte */
					mmu_notifier_invalidate_range(mm,
						address, address + PAGE_SIZE);
					dec_mm_counter(mm, MM_ANONPAGES);
					goto discard;
				}

				/*
				 * If the page was redirtied, it cannot be
				 * discarded. Remap the page to page table.
				 */
				set_pte_at(mm, address, pvmw.pte, pteval);
				SetPageSwapBacked(page);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			if (pte_uffd_wp(pteval))
				swp_pte = pte_swp_mkuffd_wp(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
			/* Invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else {
			/*
			 * This is a locked file-backed page, thus it cannot
			 * be removed from the page cache and replaced by a new
			 * page before mmu_notifier_invalidate_range_end, so no
			 * concurrent thread might update its page table to
			 * point at the new page while a device is still using
			 * this page.
			 *
			 * See Documentation/vm/mmu_notifier.rst
			 */
			dec_mm_counter(mm, mm_counter_file(page));
		}
discard:
		/*
		 * No need to call mmu_notifier_invalidate_range() as it has
		 * been done above for all cases requiring it to happen under
		 * the page table lock before
		 * mmu_notifier_invalidate_range_end().
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		page_remove_rmap(subpage, PageHuge(page));
		put_page(page);
	}

	mmu_notifier_invalidate_range_end(&range);

	return ret;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return vma_is_temporary_stack(vma);
}

static int page_not_mapped(struct page *page)
{
	return !page_mapped(page);
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 *
 * It is the caller's responsibility to check if the page is still
 * mapped when needed (use TTU_SYNC to prevent accounting races).
 */
void try_to_unmap(struct page *page, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = page_not_mapped,
		.anon_lock = page_lock_anon_vma_read,
	};

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(page, &rwc);
	else
		rmap_walk(page, &rwc);
}

/*
 * @arg: enum ttu_flags will be passed to this argument.
 *
 * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
 * containing migration entries.
 */
static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
			       unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	pte_t pteval;
	struct page *subpage;
	bool ret = true;
	struct mmu_notifier_range range;
	enum ttu_flags flags = (enum ttu_flags)(long)arg;

	/*
	 * When racing against e.g. zap_pte_range() on another cpu,
	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
	 * try_to_migrate() may return before page_mapped() has become false,
	 * if page table locking is skipped: use TTU_SYNC to wait for that.
	 */
	if (flags & TTU_SYNC)
		pvmw.flags = PVMW_SYNC;

	/*
	 * unmap_page() in mm/huge_memory.c is the only user of migration with
	 * TTU_SPLIT_HUGE_PMD and it wants to freeze.
	 */
	if (flags & TTU_SPLIT_HUGE_PMD)
		split_huge_pmd_address(vma, address, true, page);

	/*
	 * For THP, we have to assume the worst case, i.e. a pmd, for
	 * invalidation. For hugetlb, it could be much worse if we need to do
	 * pud invalidation in the case of pmd sharing.
	 *
	 * Note that the page cannot be freed in this function as the call to
	 * try_to_migrate() must hold a reference on the page.
	 */
	range.end = PageKsm(page) ?
			address + PAGE_SIZE : vma_address_end(page, vma);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address, range.end);
	if (PageHuge(page)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
		 */
		adjust_range_if_pmd_sharing_possible(vma, &range.start,
						     &range.end);
	}
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_PAGE(PageHuge(page) ||
				       !PageTransCompound(page), page);

			set_pmd_migration_entry(&pvmw, page);
			continue;
		}
#endif

		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_PAGE(!pvmw.pte, page);

		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
		address = pvmw.address;

		if (PageHuge(page) && !PageAnon(page)) {
			/*
			 * To call huge_pmd_unshare, i_mmap_rwsem must be
			 * held in write mode.  Caller needs to explicitly
			 * do this outside rmap routines.
			 */
			VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
			if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
				/*
				 * huge_pmd_unshare unmapped an entire PMD
				 * page.  There is no way of knowing exactly
				 * which PMDs may be cached for this mm, so
				 * we must flush them all.  start/end were
				 * already adjusted above to cover this range.
				 */
				flush_cache_range(vma, range.start, range.end);
				flush_tlb_range(vma, range.start, range.end);
				mmu_notifier_invalidate_range(mm, range.start,
							      range.end);

				/*
				 * The ref count of the PMD page was dropped
				 * which is part of the way map counting
				 * is done for shared PMDs.  Return 'true'
				 * here.  When there is no other sharing,
				 * huge_pmd_unshare returns false and we will
				 * unmap the actual page and drop map count
				 * to zero.
				 */
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
		pteval = ptep_clear_flush(vma, address, pvmw.pte);

		/* Move the dirty bit to the page. Now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (is_zone_device_page(page)) {
			swp_entry_t entry;
			pte_t swp_pte;

			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_readable_migration_entry(
							page_to_pfn(page));
			swp_pte = swp_entry_to_pte(entry);

			/*
			 * pteval maps a zone device page and is therefore
			 * a swap pte.
			 */
			if (pte_swp_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			if (pte_swp_uffd_wp(pteval))
				swp_pte = pte_swp_mkuffd_wp(swp_pte);
			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
			/*
			 * No need to invalidate here: it will synchronize
			 * against the special swap migration pte.
			 *
			 * The assignment to subpage above was computed from a
			 * swap PTE which results in an invalid pointer.
			 * Since only PAGE_SIZE pages can currently be
			 * migrated, just set it to page. This will need to be
			 * changed when hugepage migrations to device private
			 * memory are supported.
1838 */ 1839 subpage = page; 1840 } else if (PageHWPoison(page)) { 1841 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 1842 if (PageHuge(page)) { 1843 hugetlb_count_sub(compound_nr(page), mm); 1844 set_huge_swap_pte_at(mm, address, 1845 pvmw.pte, pteval, 1846 vma_mmu_pagesize(vma)); 1847 } else { 1848 dec_mm_counter(mm, mm_counter(page)); 1849 set_pte_at(mm, address, pvmw.pte, pteval); 1850 } 1851 1852 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 1853 /* 1854 * The guest indicated that the page content is of no 1855 * interest anymore. Simply discard the pte, vmscan 1856 * will take care of the rest. 1857 * A future reference will then fault in a new zero 1858 * page. When userfaultfd is active, we must not drop 1859 * this page though, as its main user (postcopy 1860 * migration) will not expect userfaults on already 1861 * copied pages. 1862 */ 1863 dec_mm_counter(mm, mm_counter(page)); 1864 /* We have to invalidate as we cleared the pte */ 1865 mmu_notifier_invalidate_range(mm, address, 1866 address + PAGE_SIZE); 1867 } else { 1868 swp_entry_t entry; 1869 pte_t swp_pte; 1870 1871 if (arch_unmap_one(mm, vma, address, pteval) < 0) { 1872 set_pte_at(mm, address, pvmw.pte, pteval); 1873 ret = false; 1874 page_vma_mapped_walk_done(&pvmw); 1875 break; 1876 } 1877 1878 /* 1879 * Store the pfn of the page in a special migration 1880 * pte. do_swap_page() will wait until the migration 1881 * pte is removed and then restart fault handling. 1882 */ 1883 if (pte_write(pteval)) 1884 entry = make_writable_migration_entry( 1885 page_to_pfn(subpage)); 1886 else 1887 entry = make_readable_migration_entry( 1888 page_to_pfn(subpage)); 1889 1890 swp_pte = swp_entry_to_pte(entry); 1891 if (pte_soft_dirty(pteval)) 1892 swp_pte = pte_swp_mksoft_dirty(swp_pte); 1893 if (pte_uffd_wp(pteval)) 1894 swp_pte = pte_swp_mkuffd_wp(swp_pte); 1895 set_pte_at(mm, address, pvmw.pte, swp_pte); 1896 /* 1897 * No need to invalidate here it will synchronize on 1898 * against the special swap migration pte. 1899 */ 1900 } 1901 1902 /* 1903 * No need to call mmu_notifier_invalidate_range() it has be 1904 * done above for all cases requiring it to happen under page 1905 * table lock before mmu_notifier_invalidate_range_end() 1906 * 1907 * See Documentation/vm/mmu_notifier.rst 1908 */ 1909 page_remove_rmap(subpage, PageHuge(page)); 1910 put_page(page); 1911 } 1912 1913 mmu_notifier_invalidate_range_end(&range); 1914 1915 return ret; 1916 } 1917 1918 /** 1919 * try_to_migrate - try to replace all page table mappings with swap entries 1920 * @page: the page to replace page table entries for 1921 * @flags: action and flags 1922 * 1923 * Tries to remove all the page table entries which are mapping this page and 1924 * replace them with special swap entries. Caller must hold the page lock. 1925 */ 1926 void try_to_migrate(struct page *page, enum ttu_flags flags) 1927 { 1928 struct rmap_walk_control rwc = { 1929 .rmap_one = try_to_migrate_one, 1930 .arg = (void *)flags, 1931 .done = page_not_mapped, 1932 .anon_lock = page_lock_anon_vma_read, 1933 }; 1934 1935 /* 1936 * Migration always ignores mlock and only supports TTU_RMAP_LOCKED and 1937 * TTU_SPLIT_HUGE_PMD and TTU_SYNC flags. 1938 */ 1939 if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | 1940 TTU_SYNC))) 1941 return; 1942 1943 if (is_zone_device_page(page) && !is_device_private_page(page)) 1944 return; 1945 1946 /* 1947 * During exec, a temporary VMA is setup and later moved. 
1948 * The VMA is moved under the anon_vma lock but not the 1949 * page tables leading to a race where migration cannot 1950 * find the migration ptes. Rather than increasing the 1951 * locking requirements of exec(), migration skips 1952 * temporary VMAs until after exec() completes. 1953 */ 1954 if (!PageKsm(page) && PageAnon(page)) 1955 rwc.invalid_vma = invalid_migration_vma; 1956 1957 if (flags & TTU_RMAP_LOCKED) 1958 rmap_walk_locked(page, &rwc); 1959 else 1960 rmap_walk(page, &rwc); 1961 } 1962 1963 /* 1964 * Walks the VMAs mapping a page and mlocks the page if any locked VMAs are 1965 * found. Once one is found, the page is mlocked and the scan can be terminated. 1966 */ 1967 static bool page_mlock_one(struct page *page, struct vm_area_struct *vma, 1968 unsigned long address, void *unused) 1969 { 1970 struct page_vma_mapped_walk pvmw = { 1971 .page = page, 1972 .vma = vma, 1973 .address = address, 1974 }; 1975 1976 /* An un-locked vma doesn't have any pages to lock, so continue the scan */ 1977 if (!(vma->vm_flags & VM_LOCKED)) 1978 return true; 1979 1980 while (page_vma_mapped_walk(&pvmw)) { 1981 /* 1982 * Need to recheck under the ptl to serialise with 1983 * __munlock_pagevec_fill() after VM_LOCKED is cleared in 1984 * munlock_vma_pages_range(). 1985 */ 1986 if (vma->vm_flags & VM_LOCKED) { 1987 /* 1988 * PTE-mapped THP are never marked as mlocked; but 1989 * this function is never called on a DoubleMap THP, 1990 * nor on an Anon THP (which may still be PTE-mapped 1991 * after DoubleMap was cleared). 1992 */ 1993 mlock_vma_page(page); 1994 /* 1995 * No need to scan further once the page is marked 1996 * as mlocked. 1997 */ 1998 page_vma_mapped_walk_done(&pvmw); 1999 return false; 2000 } 2001 } 2002 2003 return true; 2004 } 2005 2006 /** 2007 * page_mlock - try to mlock a page 2008 * @page: the page to be mlocked 2009 * 2010 * Called from munlock code. Checks all of the VMAs mapping the page and mlocks 2011 * the page if any locked VMAs are found. The page will be returned with PG_mlocked cleared 2012 * if it is not mapped by any locked VMAs.
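 *
 * A minimal usage sketch (simplified from the munlock path,
 * __munlock_isolated_page()): the page is re-checked against the other
 * VMAs only when some other mapping may still be mlocked:
 *
 *	if (page_mapcount(page) > 1)
 *		page_mlock(page);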
2013 */ 2014 void page_mlock(struct page *page) 2015 { 2016 struct rmap_walk_control rwc = { 2017 .rmap_one = page_mlock_one, 2018 .done = page_not_mapped, 2019 .anon_lock = page_lock_anon_vma_read, 2020 2021 }; 2022 2023 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); 2024 VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); 2025 2026 /* Anon THP are only marked as mlocked when singly mapped */ 2027 if (PageTransCompound(page) && PageAnon(page)) 2028 return; 2029 2030 rmap_walk(page, &rwc); 2031 } 2032 2033 #ifdef CONFIG_DEVICE_PRIVATE 2034 struct make_exclusive_args { 2035 struct mm_struct *mm; 2036 unsigned long address; 2037 void *owner; 2038 bool valid; 2039 }; 2040 2041 static bool page_make_device_exclusive_one(struct page *page, 2042 struct vm_area_struct *vma, unsigned long address, void *priv) 2043 { 2044 struct mm_struct *mm = vma->vm_mm; 2045 struct page_vma_mapped_walk pvmw = { 2046 .page = page, 2047 .vma = vma, 2048 .address = address, 2049 }; 2050 struct make_exclusive_args *args = priv; 2051 pte_t pteval; 2052 struct page *subpage; 2053 bool ret = true; 2054 struct mmu_notifier_range range; 2055 swp_entry_t entry; 2056 pte_t swp_pte; 2057 2058 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma, 2059 vma->vm_mm, address, min(vma->vm_end, 2060 address + page_size(page)), args->owner); 2061 mmu_notifier_invalidate_range_start(&range); 2062 2063 while (page_vma_mapped_walk(&pvmw)) { 2064 /* Unexpected PMD-mapped THP? */ 2065 VM_BUG_ON_PAGE(!pvmw.pte, page); 2066 2067 if (!pte_present(*pvmw.pte)) { 2068 ret = false; 2069 page_vma_mapped_walk_done(&pvmw); 2070 break; 2071 } 2072 2073 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); 2074 address = pvmw.address; 2075 2076 /* Nuke the page table entry. */ 2077 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 2078 pteval = ptep_clear_flush(vma, address, pvmw.pte); 2079 2080 /* Move the dirty bit to the page. Now the pte is gone. */ 2081 if (pte_dirty(pteval)) 2082 set_page_dirty(page); 2083 2084 /* 2085 * Check that our target page is still mapped at the expected 2086 * address. 2087 */ 2088 if (args->mm == mm && args->address == address && 2089 pte_write(pteval)) 2090 args->valid = true; 2091 2092 /* 2093 * Store the pfn of the page in a special migration 2094 * pte. do_swap_page() will wait until the migration 2095 * pte is removed and then restart fault handling. 2096 */ 2097 if (pte_write(pteval)) 2098 entry = make_writable_device_exclusive_entry( 2099 page_to_pfn(subpage)); 2100 else 2101 entry = make_readable_device_exclusive_entry( 2102 page_to_pfn(subpage)); 2103 swp_pte = swp_entry_to_pte(entry); 2104 if (pte_soft_dirty(pteval)) 2105 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2106 if (pte_uffd_wp(pteval)) 2107 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2108 2109 set_pte_at(mm, address, pvmw.pte, swp_pte); 2110 2111 /* 2112 * There is a reference on the page for the swap entry which has 2113 * been removed, so shouldn't take another. 
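 *
 * That reference is inherited by the device exclusive entry; on the
 * next CPU access the fault path restores the original pte, roughly
 * (sketch, simplified from do_swap_page()):
 *
 *	if (is_device_exclusive_entry(entry)) {
 *		vmf->page = pfn_swap_entry_to_page(entry);
 *		ret = remove_device_exclusive_entry(vmf);
 *	}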
2114 */ 2115 page_remove_rmap(subpage, false); 2116 } 2117 2118 mmu_notifier_invalidate_range_end(&range); 2119 2120 return ret; 2121 } 2122 2123 /** 2124 * page_make_device_exclusive - mark the page exclusively owned by a device 2125 * @page: the page to replace page table entries for 2126 * @mm: the mm_struct where the page is expected to be mapped 2127 * @address: address where the page is expected to be mapped 2128 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks 2129 * 2130 * Tries to remove all the page table entries which are mapping this page and 2131 * replace them with special device exclusive swap entries to grant a device 2132 * exclusive access to the page. Caller must hold the page lock. 2133 * 2134 * Returns false if the page is still mapped, or if it could not be unmapped 2135 * from the expected address. Otherwise returns true (success). 2136 */ 2137 static bool page_make_device_exclusive(struct page *page, struct mm_struct *mm, 2138 unsigned long address, void *owner) 2139 { 2140 struct make_exclusive_args args = { 2141 .mm = mm, 2142 .address = address, 2143 .owner = owner, 2144 .valid = false, 2145 }; 2146 struct rmap_walk_control rwc = { 2147 .rmap_one = page_make_device_exclusive_one, 2148 .done = page_not_mapped, 2149 .anon_lock = page_lock_anon_vma_read, 2150 .arg = &args, 2151 }; 2152 2153 /* 2154 * Restrict to anonymous pages for now to avoid potential writeback 2155 * issues. Also tail pages shouldn't be passed to rmap_walk so skip 2156 * those. 2157 */ 2158 if (!PageAnon(page) || PageTail(page)) 2159 return false; 2160 2161 rmap_walk(page, &rwc); 2162 2163 return args.valid && !page_mapcount(page); 2164 } 2165 2166 /** 2167 * make_device_exclusive_range() - Mark a range for exclusive use by a device 2168 * @mm: mm_struct of associated target process 2169 * @start: start of the region to mark for exclusive device access 2170 * @end: end address of region 2171 * @pages: returns the pages which were successfully marked for exclusive access 2172 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering 2173 * 2174 * Returns: number of pages found in the range by GUP. A page is marked for 2175 * exclusive access only if the page pointer is non-NULL. 2176 * 2177 * This function finds ptes mapping page(s) in the given address range, locks 2178 * them and replaces mappings with special swap entries preventing userspace CPU 2179 * access. On fault these entries are replaced with the original mapping after 2180 * calling MMU notifiers. 2181 * 2182 * A driver using this to program access from a device must use an mmu notifier 2183 * critical section to hold a device specific lock during programming. Once 2184 * programming is complete it should drop the page lock and reference, after 2185 * which point CPU access to the page will revoke the exclusive access.
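 *
 * A minimal caller sketch (hypothetical driver: svm_lock, svm_map_page()
 * and driver_data are made-up names, and retry-on-invalidate handling
 * is omitted):
 *
 *	struct page *page = NULL;
 *	int npages;
 *
 *	mutex_lock(&svm_lock);
 *	npages = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
 *					     &page, driver_data);
 *	if (npages == 1 && page) {
 *		svm_map_page(page, addr);
 *		unlock_page(page);
 *		put_page(page);
 *	}
 *	mutex_unlock(&svm_lock);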
2186 */ 2187 int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, 2188 unsigned long end, struct page **pages, 2189 void *owner) 2190 { 2191 long npages = (end - start) >> PAGE_SHIFT; 2192 long i; 2193 2194 npages = get_user_pages_remote(mm, start, npages, 2195 FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, 2196 pages, NULL, NULL); 2197 if (npages < 0) 2198 return npages; 2199 2200 for (i = 0; i < npages; i++, start += PAGE_SIZE) { 2201 if (!trylock_page(pages[i])) { 2202 put_page(pages[i]); 2203 pages[i] = NULL; 2204 continue; 2205 } 2206 2207 if (!page_make_device_exclusive(pages[i], mm, start, owner)) { 2208 unlock_page(pages[i]); 2209 put_page(pages[i]); 2210 pages[i] = NULL; 2211 } 2212 } 2213 2214 return npages; 2215 } 2216 EXPORT_SYMBOL_GPL(make_device_exclusive_range); 2217 #endif 2218 2219 void __put_anon_vma(struct anon_vma *anon_vma) 2220 { 2221 struct anon_vma *root = anon_vma->root; 2222 2223 anon_vma_free(anon_vma); 2224 if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 2225 anon_vma_free(root); 2226 } 2227 2228 static struct anon_vma *rmap_walk_anon_lock(struct page *page, 2229 struct rmap_walk_control *rwc) 2230 { 2231 struct anon_vma *anon_vma; 2232 2233 if (rwc->anon_lock) 2234 return rwc->anon_lock(page); 2235 2236 /* 2237 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read() 2238 * because that depends on page_mapped(); but not all its usages 2239 * are holding mmap_lock. Users without mmap_lock are required to 2240 * take a reference count to prevent the anon_vma disappearing 2241 */ 2242 anon_vma = page_anon_vma(page); 2243 if (!anon_vma) 2244 return NULL; 2245 2246 anon_vma_lock_read(anon_vma); 2247 return anon_vma; 2248 } 2249 2250 /* 2251 * rmap_walk_anon - do something to anonymous page using the object-based 2252 * rmap method 2253 * @page: the page to be handled 2254 * @rwc: control variable according to each walk type 2255 * 2256 * Find all the mappings of a page using the mapping pointer and the vma chains 2257 * contained in the anon_vma struct it points to. 2258 * 2259 * When called from page_mlock(), the mmap_lock of the mm containing the vma 2260 * where the page was found will be held for write. So, we won't recheck 2261 * vm_flags for that VMA. That should be OK, because that vma shouldn't be 2262 * LOCKED. 2263 */ 2264 static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, 2265 bool locked) 2266 { 2267 struct anon_vma *anon_vma; 2268 pgoff_t pgoff_start, pgoff_end; 2269 struct anon_vma_chain *avc; 2270 2271 if (locked) { 2272 anon_vma = page_anon_vma(page); 2273 /* anon_vma disappear under us? 
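 * No - in the locked case the caller must already hold the anon_vma
 * lock, which keeps it alive here.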
*/ 2274 VM_BUG_ON_PAGE(!anon_vma, page); 2275 } else { 2276 anon_vma = rmap_walk_anon_lock(page, rwc); 2277 } 2278 if (!anon_vma) 2279 return; 2280 2281 pgoff_start = page_to_pgoff(page); 2282 pgoff_end = pgoff_start + thp_nr_pages(page) - 1; 2283 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, 2284 pgoff_start, pgoff_end) { 2285 struct vm_area_struct *vma = avc->vma; 2286 unsigned long address = vma_address(page, vma); 2287 2288 VM_BUG_ON_VMA(address == -EFAULT, vma); 2289 cond_resched(); 2290 2291 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2292 continue; 2293 2294 if (!rwc->rmap_one(page, vma, address, rwc->arg)) 2295 break; 2296 if (rwc->done && rwc->done(page)) 2297 break; 2298 } 2299 2300 if (!locked) 2301 anon_vma_unlock_read(anon_vma); 2302 } 2303 2304 /* 2305 * rmap_walk_file - do something to file page using the object-based rmap method 2306 * @page: the page to be handled 2307 * @rwc: control variable according to each walk type 2308 * 2309 * Find all the mappings of a page using the mapping pointer and the vma chains 2310 * contained in the address_space struct it points to. 2311 * 2312 * When called from page_mlock(), the mmap_lock of the mm containing the vma 2313 * where the page was found will be held for write. So, we won't recheck 2314 * vm_flags for that VMA. That should be OK, because that vma shouldn't be 2315 * LOCKED. 2316 */ 2317 static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, 2318 bool locked) 2319 { 2320 struct address_space *mapping = page_mapping(page); 2321 pgoff_t pgoff_start, pgoff_end; 2322 struct vm_area_struct *vma; 2323 2324 /* 2325 * The page lock not only makes sure that page->mapping cannot 2326 * suddenly be NULLified by truncation, it makes sure that the 2327 * structure at mapping cannot be freed and reused yet, 2328 * so we can safely take mapping->i_mmap_rwsem. 2329 */ 2330 VM_BUG_ON_PAGE(!PageLocked(page), page); 2331 2332 if (!mapping) 2333 return; 2334 2335 pgoff_start = page_to_pgoff(page); 2336 pgoff_end = pgoff_start + thp_nr_pages(page) - 1; 2337 if (!locked) 2338 i_mmap_lock_read(mapping); 2339 vma_interval_tree_foreach(vma, &mapping->i_mmap, 2340 pgoff_start, pgoff_end) { 2341 unsigned long address = vma_address(page, vma); 2342 2343 VM_BUG_ON_VMA(address == -EFAULT, vma); 2344 cond_resched(); 2345 2346 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2347 continue; 2348 2349 if (!rwc->rmap_one(page, vma, address, rwc->arg)) 2350 goto done; 2351 if (rwc->done && rwc->done(page)) 2352 goto done; 2353 } 2354 2355 done: 2356 if (!locked) 2357 i_mmap_unlock_read(mapping); 2358 } 2359 2360 void rmap_walk(struct page *page, struct rmap_walk_control *rwc) 2361 { 2362 if (unlikely(PageKsm(page))) 2363 rmap_walk_ksm(page, rwc); 2364 else if (PageAnon(page)) 2365 rmap_walk_anon(page, rwc, false); 2366 else 2367 rmap_walk_file(page, rwc, false); 2368 } 2369 2370 /* Like rmap_walk, but caller holds relevant rmap lock */ 2371 void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc) 2372 { 2373 /* no ksm support for now */ 2374 VM_BUG_ON_PAGE(PageKsm(page), page); 2375 if (PageAnon(page)) 2376 rmap_walk_anon(page, rwc, true); 2377 else 2378 rmap_walk_file(page, rwc, true); 2379 } 2380 2381 #ifdef CONFIG_HUGETLB_PAGE 2382 /* 2383 * The following two functions are for anonymous (private mapped) hugepages. 2384 * Unlike common anonymous pages, anonymous hugepages have no accounting code 2385 * and no lru code, because we handle hugepages differently from common pages. 
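 *
 * For example, the hugetlb COW path installs a new anon hugepage
 * roughly like this (sketch, simplified from hugetlb_cow()):
 *
 *	huge_ptep_clear_flush(vma, haddr, ptep);
 *	set_huge_pte_at(mm, haddr, ptep,
 *			make_huge_pte(vma, new_page, 1));
 *	page_remove_rmap(old_page, true);
 *	hugepage_add_new_anon_rmap(new_page, vma, haddr);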
2386 */ 2387 void hugepage_add_anon_rmap(struct page *page, 2388 struct vm_area_struct *vma, unsigned long address) 2389 { 2390 struct anon_vma *anon_vma = vma->anon_vma; 2391 int first; 2392 2393 BUG_ON(!PageLocked(page)); 2394 BUG_ON(!anon_vma); 2395 /* address might be in next vma when migration races vma_adjust */ 2396 first = atomic_inc_and_test(compound_mapcount_ptr(page)); 2397 if (first) 2398 __page_set_anon_rmap(page, vma, address, 0); 2399 } 2400 2401 void hugepage_add_new_anon_rmap(struct page *page, 2402 struct vm_area_struct *vma, unsigned long address) 2403 { 2404 BUG_ON(address < vma->vm_start || address >= vma->vm_end); 2405 atomic_set(compound_mapcount_ptr(page), 0); 2406 if (hpage_pincount_available(page)) 2407 atomic_set(compound_pincount_ptr(page), 0); 2408 2409 __page_set_anon_rmap(page, vma, address, 1); 2410 } 2411 #endif /* CONFIG_HUGETLB_PAGE */ 2412