/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 *
 * (code doesn't rely on that order so it could be switched around)
 * ->tasklist_lock
 *   anon_vma->lock      (memory_failure, collect_procs_anon)
 *     pte map lock
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}

void anon_vma_free(struct anon_vma *anon_vma)
{
	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
}

void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

/**
 * anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, but if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
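 *
 * A typical call site (illustrative, not taken verbatim from this file)
 * is the anonymous fault path, which must have an anon_vma in place
 * before installing a new pte:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;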
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	struct anon_vma_chain *avc;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated;

		avc = anon_vma_chain_alloc();
		if (!avc)
			goto out_enomem;

		anon_vma = find_mergeable_anon_vma(vma);
		allocated = NULL;
		if (!anon_vma) {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				goto out_enomem_free_avc;
			allocated = anon_vma;
			/*
			 * This VMA had no anon_vma yet. This anon_vma is
			 * the root of any anon_vma tree that might form.
			 */
			anon_vma->root = anon_vma;
		}

		anon_vma_lock(anon_vma);
		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			avc->anon_vma = anon_vma;
			avc->vma = vma;
			list_add(&avc->same_vma, &vma->anon_vma_chain);
			list_add_tail(&avc->same_anon_vma, &anon_vma->head);
			allocated = NULL;
			avc = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		anon_vma_unlock(anon_vma);

		if (unlikely(allocated))
			anon_vma_free(allocated);
		if (unlikely(avc))
			anon_vma_chain_free(avc);
	}
	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);

	anon_vma_lock(anon_vma);
	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
	anon_vma_unlock(anon_vma);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		avc = anon_vma_chain_alloc();
		if (!avc)
			goto enomem_failure;
		anon_vma_chain_link(dst, avc, pavc->anon_vma);
	}
	return 0;

 enomem_failure:
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
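	 * Pages COWed in the child afterwards will instead go to the
	 * child's own anon_vma, which is set up second, below.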
	 */
	if (anon_vma_clone(vma, pvma))
		return -ENOMEM;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc();
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's spinlock is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	/*
	 * With KSM refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned
	 * until this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_chain_link(vma, avc, anon_vma);

	return 0;

 out_error_free_anon_vma:
	anon_vma_free(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
{
	struct anon_vma *anon_vma = anon_vma_chain->anon_vma;
	int empty;

	/* If anon_vma_fork fails, we can get an empty anon_vma_chain. */
	if (!anon_vma)
		return;

	anon_vma_lock(anon_vma);
	list_del(&anon_vma_chain->same_anon_vma);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head) && !anonvma_external_refcount(anon_vma);
	anon_vma_unlock(anon_vma);

	if (empty) {
		/* We no longer need the root anon_vma */
		if (anon_vma->root != anon_vma)
			drop_anon_vma(anon_vma->root);
		anon_vma_free(anon_vma);
	}
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		anon_vma_unlink(avc);
		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	spin_lock_init(&anon_vma->lock);
	anonvma_external_refcount_init(anon_vma);
	INIT_LIST_HEAD(&anon_vma->head);
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	anon_vma_lock(anon_vma);
	return anon_vma;
out:
	rcu_read_unlock();
	return NULL;
}

void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	anon_vma_unlock(anon_vma);
	rcu_read_unlock();
}

/*
 * At what user virtual address is page expected in @vma?
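 * For example (illustrative numbers, assuming 4K pages): a page with
 * ->index 0x110, mapped by a vma with vm_start 0x40000000 and vm_pgoff
 * 0x100, is expected at 0x40000000 + ((0x110 - 0x100) << PAGE_SHIFT),
 * i.e. 0x40010000.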
 * Returns virtual address or -EFAULT if page's index/offset is not
 * within the range mapped by the @vma.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within @vma mapping range */
		return -EFAULT;
	}
	return address;
}

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if (vma->anon_vma->root != page_anon_vma(page)->root)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, page_check_address may perform a racy check to avoid
 * the page table lock when the pte is not present (helpful when reclaiming
 * highly shared pages).
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp, int sync)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;

	address = vma_address(page, vma);
	if (address == -EFAULT)		/* out of vma range */
		return 0;
	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
	if (!pte)			/* the page is not in this mm */
		return 0;
	pte_unmap_unlock(pte, ptl);

	return 1;
}

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
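 *
 * Returns the number of references found at this address (0 or more),
 * and decrements *mapcount when a mapping was actually examined, so
 * callers can stop walking once every mapping has been accounted for.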
 */
int page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, unsigned int *mapcount,
			unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * Don't want to elevate referenced for mlocked page that gets this far,
	 * in order that it progresses to try_to_unmap and is moved to the
	 * unevictable list.
	 */
	if (vma->vm_flags & VM_LOCKED) {
		*mapcount = 1;	/* break early from loop */
		*vm_flags |= VM_LOCKED;
		goto out_unmap;
	}

	if (ptep_clear_flush_young_notify(vma, address, pte)) {
		/*
		 * Don't treat a reference through a sequentially read
		 * mapping as such.  If the page has been used in
		 * another mapping, we will catch it; if this other
		 * mapping is already gone, the unmap path will have
		 * set PG_referenced or activated the page.
		 */
		if (likely(!VM_SequentialReadHint(vma)))
			referenced++;
	}

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

out_unmap:
	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);

	if (referenced)
		*vm_flags |= vma->vm_flags;
out:
	return referenced;
}

static int page_referenced_anon(struct page *page,
				struct mem_cgroup *mem_cont,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, address,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 * @mem_cont: target memory controller
 * @vm_flags: collect encountered vma->vm_flags that actually referenced the page
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page,
				struct mem_cgroup *mem_cont,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, address,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @mem_cont: target memory controller
 * @vm_flags: collect encountered vma->vm_flags that actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
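 *
 * (Illustrative note: page reclaim typically treats a non-zero return as
 * a sign of recent use, and VM_LOCKED reported back through *vm_flags as
 * telling it the page is mlocked.)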
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *mem_cont,
		    unsigned long *vm_flags)
{
	int referenced = 0;
	int we_locked = 0;

	*vm_flags = 0;
	if (page_mapped(page) && page_rmapping(page)) {
		if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
			we_locked = trylock_page(page);
			if (!we_locked) {
				referenced++;
				goto out;
			}
		}
		if (unlikely(PageKsm(page)))
			referenced += page_referenced_ksm(page, mem_cont,
								vm_flags);
		else if (PageAnon(page))
			referenced += page_referenced_anon(page, mem_cont,
								vm_flags);
		else if (page->mapping)
			referenced += page_referenced_file(page, mem_cont,
								vm_flags);
		if (we_locked)
			unlock_page(page);
	}
out:
	if (page_test_and_clear_young(page))
		referenced++;

	return referenced;
}

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush_notify(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED) {
			unsigned long address = vma_address(page, vma);
			if (address == -EFAULT)
				continue;
			ret += page_mkclean_one(page, vma, address);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping) {
			ret = page_mkclean_file(mapping, page);
			if (page_test_dirty(page)) {
				page_clear_dirty(page);
				ret = 1;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page: the page to move to our anon_vma
 * @vma: the vma the page belongs to
 * @address: the user virtual address mapped
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
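 *
 * (Illustrative: the write-fault path is expected to call this when it
 * finds it can reuse an existing anonymous page instead of copying it.)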
 */
void page_move_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!anon_vma);
	VM_BUG_ON(page->index != linear_page_index(vma, address));

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
}

/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @exclusive: the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive) {
		if (PageAnon(page))
			return;
		anon_vma = anon_vma->root;
	} else {
		/*
		 * In this case a swapped-out-but-not-discarded swap-cache
		 * page is being remapped, so there is no need to update
		 * page->mapping here: the anon_vma it points to cannot be
		 * stale, because vma->anon_vma must belong to the same
		 * anon_vma tree.
		 */
		if (PageAnon(page))
			return;
	}

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	do_page_add_anon_rmap(page, vma, address, 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
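 * The "exclusive" argument carries that knowledge down to
 * __page_set_anon_rmap(), which can then use vma->anon_vma directly
 * instead of falling back to the root anon_vma.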
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	int first = atomic_inc_and_test(&page->_mapcount);
	if (first)
		__inc_zone_page_state(page, NR_ANON_PAGES);
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	if (first)
		__page_set_anon_rmap(page, vma, address, exclusive);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	SetPageSwapBacked(page);
	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
	__inc_zone_page_state(page, NR_ANON_PAGES);
	__page_set_anon_rmap(page, vma, address, 1);
	if (page_evictable(page, vma))
		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(page);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount)) {
		__inc_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_update_file_mapped(page, 1);
	}
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/*
	 * Now that the last pte has gone, s390 must transfer dirty
	 * flag from storage key to struct page.  We can usually skip
	 * this if the page is anon, so about to be freed; but perhaps
	 * not if it's in swapcache - there might be another pte slot
	 * containing the swap entry, but page not yet written to swap.
	 */
	if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
		page_clear_dirty(page);
		set_page_dirty(page);
	}
	if (PageAnon(page)) {
		mem_cgroup_uncharge_page(page);
		__dec_zone_page_state(page, NR_ANON_PAGES);
	} else {
		__dec_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_update_file_mapped(page, -1);
	}
	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
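 *
 * Returns SWAP_AGAIN if the pte was not found or was successfully
 * unmapped, SWAP_FAIL if the page was recently referenced or a swap
 * entry could not be allocated, and SWAP_MLOCK if the vma turns out
 * to be mlocked (see out_mlock below).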
 */
int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, enum ttu_flags flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!(flags & TTU_IGNORE_MLOCK)) {
		if (vma->vm_flags & VM_LOCKED)
			goto out_mlock;

		if (TTU_ACTION(flags) == TTU_MUNLOCK)
			goto out_unmap;
	}
	if (!(flags & TTU_IGNORE_ACCESS)) {
		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			ret = SWAP_FAIL;
			goto out_unmap;
		}
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush_notify(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
		if (PageAnon(page))
			dec_mm_counter(mm, MM_ANONPAGES);
		else
			dec_mm_counter(mm, MM_FILEPAGES);
		set_pte_at(mm, address, pte,
				swp_entry_to_pte(make_hwpoison_entry(page)));
	} else if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pte, pteval);
				ret = SWAP_FAIL;
				goto out_unmap;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
		} else if (PAGE_MIGRATION) {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
			entry = make_migration_entry(page, pte_write(pteval));
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
		dec_mm_counter(mm, MM_FILEPAGES);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;

out_mlock:
	pte_unmap_unlock(pte, ptl);

	/*
	 * We need mmap_sem locking, otherwise the VM_LOCKED check is racy
	 * and gives an unstable result.  We also can't block here, because
	 * we already hold anon_vma->lock or mapping->i_mmap_lock.
	 * If the trylock fails, the page stays on the evictable LRU and
	 * vmscan can later retry moving it to the unevictable LRU if it
	 * is in fact mlocked.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		if (vma->vm_flags & VM_LOCKED) {
			mlock_vma_page(page);
			ret = SWAP_MLOCK;
		}
		up_read(&vma->vm_mm->mmap_sem);
	}
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and be eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 *
 * Mlocked pages:  check VM_LOCKED under mmap_sem held for read, if we can
 * acquire it without blocking.  If vma locked, mlock the pages in the cluster,
 * rather than unmapping them.  If we encounter the "check_page" that vmscan is
 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
		struct vm_area_struct *vma, struct page *check_page)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;
	int ret = SWAP_AGAIN;
	int locked_vma = 0;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return ret;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return ret;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return ret;

	/*
	 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
	 * keep the sem while scanning the cluster for mlocking pages.
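	 *
	 * (The cluster scanned below is the window computed above;
	 * illustratively, with 4K pages and a 128K CLUSTER_SIZE, a cursor
	 * of 0x21000 into a vma starting at 0x40000000 gives the aligned
	 * window 0x40020000..0x40040000, clamped to the vma bounds.)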
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		locked_vma = (vma->vm_flags & VM_LOCKED);
		if (!locked_vma)
			up_read(&vma->vm_mm->mmap_sem); /* don't need it */
	}

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (locked_vma) {
			mlock_vma_page(page);	/* no-op if already mlocked */
			if (page == check_page)
				ret = SWAP_MLOCK;
			continue;	/* don't unmap */
		}

		if (ptep_clear_flush_young_notify(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush_notify(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, MM_FILEPAGES);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
	if (locked_vma)
		up_read(&vma->vm_mm->mmap_sem);
	return ret;
}

static bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

/**
 * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
 * rmap method
 * @page: the page to unmap/unlock
 * @flags: action and flags
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * anonymous pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
{
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address;

		/*
		 * During exec, a temporary VMA is set up and later moved.
		 * The VMA is moved under the anon_vma lock but not the
		 * page tables leading to a race where migration cannot
		 * find the migration ptes. Rather than increasing the
		 * locking requirements of exec(), migration skips
		 * temporary VMAs until after exec() completes.
		 */
		if (PAGE_MIGRATION && (flags & TTU_MIGRATION) &&
				is_vma_temporary_stack(vma))
			continue;

		address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = try_to_unmap_one(page, vma, address, flags);
		if (ret != SWAP_AGAIN || !page_mapped(page))
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return ret;
}

/**
 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
 * @page: the page to unmap/unlock
 * @flags: action and flags
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * object-based pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = try_to_unmap_one(page, vma, address, flags);
		if (ret != SWAP_AGAIN || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	/*
	 * We don't bother to try to find the munlocked page in nonlinears.
	 * It's costly. Instead, later, page reclaim logic may call
	 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
	 */
	if (TTU_ACTION(flags) == TTU_MUNLOCK)
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved ? */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			cursor = (unsigned long) vma->vm_private_data;
			while ( cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				if (try_to_unmap_cluster(cursor, &mapcount,
						vma, page) == SWAP_MLOCK)
					ret = SWAP_MLOCK;
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (unlikely(PageKsm(page)))
		ret = try_to_unmap_ksm(page, flags);
	else if (PageAnon(page))
		ret = try_to_unmap_anon(page, flags);
	else
		ret = try_to_unmap_file(page, flags);
	if (ret != SWAP_MLOCK && !page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_AGAIN	- no vma is holding page mlocked, or,
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_FAIL	- page cannot be located at present
 * SWAP_MLOCK	- page is now mlocked.
 */
int try_to_munlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page) || PageLRU(page));

	if (unlikely(PageKsm(page)))
		return try_to_unmap_ksm(page, TTU_MUNLOCK);
	else if (PageAnon(page))
		return try_to_unmap_anon(page, TTU_MUNLOCK);
	else
		return try_to_unmap_file(page, TTU_MUNLOCK);
}

#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
/*
 * Drop an anon_vma refcount, freeing the anon_vma and anon_vma->root
 * if necessary.  Be careful to do all the tests under the lock.
 * Once we know we are the last user, nobody else can get a reference
 * and we can do the freeing without the lock.
 */
void drop_anon_vma(struct anon_vma *anon_vma)
{
	BUG_ON(atomic_read(&anon_vma->external_refcount) <= 0);
	if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->root->lock)) {
		struct anon_vma *root = anon_vma->root;
		int empty = list_empty(&anon_vma->head);
		int last_root_user = 0;
		int root_empty = 0;

		/*
		 * The refcount on a non-root anon_vma got dropped.  Drop
		 * the refcount on the root and check if we need to free it.
		 */
		if (empty && anon_vma != root) {
			BUG_ON(atomic_read(&root->external_refcount) <= 0);
			last_root_user = atomic_dec_and_test(&root->external_refcount);
			root_empty = list_empty(&root->head);
		}
		anon_vma_unlock(anon_vma);

		if (empty) {
			anon_vma_free(anon_vma);
			if (root_empty && last_root_user)
				anon_vma_free(root);
		}
	}
}
#endif

#ifdef CONFIG_MIGRATION
/*
 * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem.  Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma disappearing.
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return ret;
	anon_vma_lock(anon_vma);
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = rmap_one(page, vma, address, arg);
		if (ret != SWAP_AGAIN)
			break;
	}
	anon_vma_unlock(anon_vma);
	return ret;
}

static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;

	if (!mapping)
		return ret;
	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = rmap_one(page, vma, address, arg);
		if (ret != SWAP_AGAIN)
			break;
	}
	/*
	 * No nonlinear handling: being always shared, nonlinear vmas
	 * never contain migration ptes.  Decide what to do about this
	 * limitation to linear when we need rmap_walk() on nonlinear.
	 */
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	VM_BUG_ON(!PageLocked(page));

	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rmap_one, arg);
	else if (PageAnon(page))
		return rmap_walk_anon(page, rmap_one, arg);
	else
		return rmap_walk_file(page, rmap_one, arg);
}
#endif /* CONFIG_MIGRATION */