1 /* 2 * mm/rmap.c - physical to virtual reverse mappings 3 * 4 * Copyright 2001, Rik van Riel <riel@conectiva.com.br> 5 * Released under the General Public License (GPL). 6 * 7 * Simple, low overhead reverse mapping scheme. 8 * Please try to keep this thing as modular as possible. 9 * 10 * Provides methods for unmapping each kind of mapped page: 11 * the anon methods track anonymous pages, and 12 * the file methods track pages belonging to an inode. 13 * 14 * Original design by Rik van Riel <riel@conectiva.com.br> 2001 15 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004 16 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004 17 * Contributions by Hugh Dickins 2003, 2004 18 */ 19 20 /* 21 * Lock ordering in mm: 22 * 23 * inode->i_mutex (while writing or truncating, not reading or faulting) 24 * inode->i_alloc_sem (vmtruncate_range) 25 * mm->mmap_sem 26 * page->flags PG_locked (lock_page) 27 * mapping->i_mmap_lock 28 * anon_vma->lock 29 * mm->page_table_lock or pte_lock 30 * zone->lru_lock (in mark_page_accessed, isolate_lru_page) 31 * swap_lock (in swap_duplicate, swap_info_get) 32 * mmlist_lock (in mmput, drain_mmlist and others) 33 * mapping->private_lock (in __set_page_dirty_buffers) 34 * inode_lock (in set_page_dirty's __mark_inode_dirty) 35 * sb_lock (within inode_lock in fs/fs-writeback.c) 36 * mapping->tree_lock (widely used, in set_page_dirty, 37 * in arch-dependent flush_dcache_mmap_lock, 38 * within inode_lock in __sync_single_inode) 39 * 40 * (code doesn't rely on that order so it could be switched around) 41 * ->tasklist_lock 42 * anon_vma->lock (memory_failure, collect_procs_anon) 43 * pte map lock 44 */ 45 46 #include <linux/mm.h> 47 #include <linux/pagemap.h> 48 #include <linux/swap.h> 49 #include <linux/swapops.h> 50 #include <linux/slab.h> 51 #include <linux/init.h> 52 #include <linux/ksm.h> 53 #include <linux/rmap.h> 54 #include <linux/rcupdate.h> 55 #include <linux/module.h> 56 #include <linux/memcontrol.h> 57 #include <linux/mmu_notifier.h> 58 #include <linux/migrate.h> 59 60 #include <asm/tlbflush.h> 61 62 #include "internal.h" 63 64 static struct kmem_cache *anon_vma_cachep; 65 static struct kmem_cache *anon_vma_chain_cachep; 66 67 static inline struct anon_vma *anon_vma_alloc(void) 68 { 69 return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); 70 } 71 72 void anon_vma_free(struct anon_vma *anon_vma) 73 { 74 kmem_cache_free(anon_vma_cachep, anon_vma); 75 } 76 77 static inline struct anon_vma_chain *anon_vma_chain_alloc(void) 78 { 79 return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL); 80 } 81 82 void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) 83 { 84 kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); 85 } 86 87 /** 88 * anon_vma_prepare - attach an anon_vma to a memory region 89 * @vma: the memory region in question 90 * 91 * This makes sure the memory mapping described by 'vma' has 92 * an 'anon_vma' attached to it, so that we can associate the 93 * anonymous pages mapped into it with that anon_vma. 94 * 95 * The common case will be that we already have one, but if 96 * if not we either need to find an adjacent mapping that we 97 * can re-use the anon_vma from (very common when the only 98 * reason for splitting a vma has been mprotect()), or we 99 * allocate a new one. 100 * 101 * Anon-vma allocations are very subtle, because we may have 102 * optimistically looked up an anon_vma in page_lock_anon_vma() 103 * and that may actually touch the spinlock even in the newly 104 * allocated vma (it depends on RCU to make sure that the 105 * anon_vma isn't actually destroyed). 106 * 107 * As a result, we need to do proper anon_vma locking even 108 * for the new allocation. At the same time, we do not want 109 * to do any locking for the common case of already having 110 * an anon_vma. 111 * 112 * This must be called with the mmap_sem held for reading. 113 */ 114 int anon_vma_prepare(struct vm_area_struct *vma) 115 { 116 struct anon_vma *anon_vma = vma->anon_vma; 117 struct anon_vma_chain *avc; 118 119 might_sleep(); 120 if (unlikely(!anon_vma)) { 121 struct mm_struct *mm = vma->vm_mm; 122 struct anon_vma *allocated; 123 124 avc = anon_vma_chain_alloc(); 125 if (!avc) 126 goto out_enomem; 127 128 anon_vma = find_mergeable_anon_vma(vma); 129 allocated = NULL; 130 if (!anon_vma) { 131 anon_vma = anon_vma_alloc(); 132 if (unlikely(!anon_vma)) 133 goto out_enomem_free_avc; 134 allocated = anon_vma; 135 } 136 spin_lock(&anon_vma->lock); 137 138 /* page_table_lock to protect against threads */ 139 spin_lock(&mm->page_table_lock); 140 if (likely(!vma->anon_vma)) { 141 vma->anon_vma = anon_vma; 142 avc->anon_vma = anon_vma; 143 avc->vma = vma; 144 list_add(&avc->same_vma, &vma->anon_vma_chain); 145 list_add(&avc->same_anon_vma, &anon_vma->head); 146 allocated = NULL; 147 } 148 spin_unlock(&mm->page_table_lock); 149 150 spin_unlock(&anon_vma->lock); 151 if (unlikely(allocated)) { 152 anon_vma_free(allocated); 153 anon_vma_chain_free(avc); 154 } 155 } 156 return 0; 157 158 out_enomem_free_avc: 159 anon_vma_chain_free(avc); 160 out_enomem: 161 return -ENOMEM; 162 } 163 164 static void anon_vma_chain_link(struct vm_area_struct *vma, 165 struct anon_vma_chain *avc, 166 struct anon_vma *anon_vma) 167 { 168 avc->vma = vma; 169 avc->anon_vma = anon_vma; 170 list_add(&avc->same_vma, &vma->anon_vma_chain); 171 172 spin_lock(&anon_vma->lock); 173 list_add_tail(&avc->same_anon_vma, &anon_vma->head); 174 spin_unlock(&anon_vma->lock); 175 } 176 177 /* 178 * Attach the anon_vmas from src to dst. 179 * Returns 0 on success, -ENOMEM on failure. 180 */ 181 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) 182 { 183 struct anon_vma_chain *avc, *pavc; 184 185 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { 186 avc = anon_vma_chain_alloc(); 187 if (!avc) 188 goto enomem_failure; 189 anon_vma_chain_link(dst, avc, pavc->anon_vma); 190 } 191 return 0; 192 193 enomem_failure: 194 unlink_anon_vmas(dst); 195 return -ENOMEM; 196 } 197 198 /* 199 * Attach vma to its own anon_vma, as well as to the anon_vmas that 200 * the corresponding VMA in the parent process is attached to. 201 * Returns 0 on success, non-zero on failure. 202 */ 203 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) 204 { 205 struct anon_vma_chain *avc; 206 struct anon_vma *anon_vma; 207 208 /* Don't bother if the parent process has no anon_vma here. */ 209 if (!pvma->anon_vma) 210 return 0; 211 212 /* 213 * First, attach the new VMA to the parent VMA's anon_vmas, 214 * so rmap can find non-COWed pages in child processes. 215 */ 216 if (anon_vma_clone(vma, pvma)) 217 return -ENOMEM; 218 219 /* Then add our own anon_vma. */ 220 anon_vma = anon_vma_alloc(); 221 if (!anon_vma) 222 goto out_error; 223 avc = anon_vma_chain_alloc(); 224 if (!avc) 225 goto out_error_free_anon_vma; 226 anon_vma_chain_link(vma, avc, anon_vma); 227 /* Mark this anon_vma as the one where our new (COWed) pages go. */ 228 vma->anon_vma = anon_vma; 229 230 return 0; 231 232 out_error_free_anon_vma: 233 anon_vma_free(anon_vma); 234 out_error: 235 unlink_anon_vmas(vma); 236 return -ENOMEM; 237 } 238 239 static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain) 240 { 241 struct anon_vma *anon_vma = anon_vma_chain->anon_vma; 242 int empty; 243 244 /* If anon_vma_fork fails, we can get an empty anon_vma_chain. */ 245 if (!anon_vma) 246 return; 247 248 spin_lock(&anon_vma->lock); 249 list_del(&anon_vma_chain->same_anon_vma); 250 251 /* We must garbage collect the anon_vma if it's empty */ 252 empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma); 253 spin_unlock(&anon_vma->lock); 254 255 if (empty) 256 anon_vma_free(anon_vma); 257 } 258 259 void unlink_anon_vmas(struct vm_area_struct *vma) 260 { 261 struct anon_vma_chain *avc, *next; 262 263 /* Unlink each anon_vma chained to the VMA. */ 264 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 265 anon_vma_unlink(avc); 266 list_del(&avc->same_vma); 267 anon_vma_chain_free(avc); 268 } 269 } 270 271 static void anon_vma_ctor(void *data) 272 { 273 struct anon_vma *anon_vma = data; 274 275 spin_lock_init(&anon_vma->lock); 276 ksm_refcount_init(anon_vma); 277 INIT_LIST_HEAD(&anon_vma->head); 278 } 279 280 void __init anon_vma_init(void) 281 { 282 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), 283 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor); 284 anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC); 285 } 286 287 /* 288 * Getting a lock on a stable anon_vma from a page off the LRU is 289 * tricky: page_lock_anon_vma rely on RCU to guard against the races. 290 */ 291 struct anon_vma *page_lock_anon_vma(struct page *page) 292 { 293 struct anon_vma *anon_vma; 294 unsigned long anon_mapping; 295 296 rcu_read_lock(); 297 anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping); 298 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) 299 goto out; 300 if (!page_mapped(page)) 301 goto out; 302 303 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); 304 spin_lock(&anon_vma->lock); 305 return anon_vma; 306 out: 307 rcu_read_unlock(); 308 return NULL; 309 } 310 311 void page_unlock_anon_vma(struct anon_vma *anon_vma) 312 { 313 spin_unlock(&anon_vma->lock); 314 rcu_read_unlock(); 315 } 316 317 /* 318 * At what user virtual address is page expected in @vma? 319 * Returns virtual address or -EFAULT if page's index/offset is not 320 * within the range mapped the @vma. 321 */ 322 static inline unsigned long 323 vma_address(struct page *page, struct vm_area_struct *vma) 324 { 325 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 326 unsigned long address; 327 328 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 329 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) { 330 /* page should be within @vma mapping range */ 331 return -EFAULT; 332 } 333 return address; 334 } 335 336 /* 337 * At what user virtual address is page expected in vma? 338 * checking that the page matches the vma. 339 */ 340 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 341 { 342 if (PageAnon(page)) { 343 if (vma->anon_vma != page_anon_vma(page)) 344 return -EFAULT; 345 } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { 346 if (!vma->vm_file || 347 vma->vm_file->f_mapping != page->mapping) 348 return -EFAULT; 349 } else 350 return -EFAULT; 351 return vma_address(page, vma); 352 } 353 354 /* 355 * Check that @page is mapped at @address into @mm. 356 * 357 * If @sync is false, page_check_address may perform a racy check to avoid 358 * the page table lock when the pte is not present (helpful when reclaiming 359 * highly shared pages). 360 * 361 * On success returns with pte mapped and locked. 362 */ 363 pte_t *page_check_address(struct page *page, struct mm_struct *mm, 364 unsigned long address, spinlock_t **ptlp, int sync) 365 { 366 pgd_t *pgd; 367 pud_t *pud; 368 pmd_t *pmd; 369 pte_t *pte; 370 spinlock_t *ptl; 371 372 pgd = pgd_offset(mm, address); 373 if (!pgd_present(*pgd)) 374 return NULL; 375 376 pud = pud_offset(pgd, address); 377 if (!pud_present(*pud)) 378 return NULL; 379 380 pmd = pmd_offset(pud, address); 381 if (!pmd_present(*pmd)) 382 return NULL; 383 384 pte = pte_offset_map(pmd, address); 385 /* Make a quick check before getting the lock */ 386 if (!sync && !pte_present(*pte)) { 387 pte_unmap(pte); 388 return NULL; 389 } 390 391 ptl = pte_lockptr(mm, pmd); 392 spin_lock(ptl); 393 if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) { 394 *ptlp = ptl; 395 return pte; 396 } 397 pte_unmap_unlock(pte, ptl); 398 return NULL; 399 } 400 401 /** 402 * page_mapped_in_vma - check whether a page is really mapped in a VMA 403 * @page: the page to test 404 * @vma: the VMA to test 405 * 406 * Returns 1 if the page is mapped into the page tables of the VMA, 0 407 * if the page is not mapped into the page tables of this VMA. Only 408 * valid for normal file or anonymous VMAs. 409 */ 410 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) 411 { 412 unsigned long address; 413 pte_t *pte; 414 spinlock_t *ptl; 415 416 address = vma_address(page, vma); 417 if (address == -EFAULT) /* out of vma range */ 418 return 0; 419 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); 420 if (!pte) /* the page is not in this mm */ 421 return 0; 422 pte_unmap_unlock(pte, ptl); 423 424 return 1; 425 } 426 427 /* 428 * Subfunctions of page_referenced: page_referenced_one called 429 * repeatedly from either page_referenced_anon or page_referenced_file. 430 */ 431 int page_referenced_one(struct page *page, struct vm_area_struct *vma, 432 unsigned long address, unsigned int *mapcount, 433 unsigned long *vm_flags) 434 { 435 struct mm_struct *mm = vma->vm_mm; 436 pte_t *pte; 437 spinlock_t *ptl; 438 int referenced = 0; 439 440 pte = page_check_address(page, mm, address, &ptl, 0); 441 if (!pte) 442 goto out; 443 444 /* 445 * Don't want to elevate referenced for mlocked page that gets this far, 446 * in order that it progresses to try_to_unmap and is moved to the 447 * unevictable list. 448 */ 449 if (vma->vm_flags & VM_LOCKED) { 450 *mapcount = 1; /* break early from loop */ 451 *vm_flags |= VM_LOCKED; 452 goto out_unmap; 453 } 454 455 if (ptep_clear_flush_young_notify(vma, address, pte)) { 456 /* 457 * Don't treat a reference through a sequentially read 458 * mapping as such. If the page has been used in 459 * another mapping, we will catch it; if this other 460 * mapping is already gone, the unmap path will have 461 * set PG_referenced or activated the page. 462 */ 463 if (likely(!VM_SequentialReadHint(vma))) 464 referenced++; 465 } 466 467 /* Pretend the page is referenced if the task has the 468 swap token and is in the middle of a page fault. */ 469 if (mm != current->mm && has_swap_token(mm) && 470 rwsem_is_locked(&mm->mmap_sem)) 471 referenced++; 472 473 out_unmap: 474 (*mapcount)--; 475 pte_unmap_unlock(pte, ptl); 476 477 if (referenced) 478 *vm_flags |= vma->vm_flags; 479 out: 480 return referenced; 481 } 482 483 static int page_referenced_anon(struct page *page, 484 struct mem_cgroup *mem_cont, 485 unsigned long *vm_flags) 486 { 487 unsigned int mapcount; 488 struct anon_vma *anon_vma; 489 struct anon_vma_chain *avc; 490 int referenced = 0; 491 492 anon_vma = page_lock_anon_vma(page); 493 if (!anon_vma) 494 return referenced; 495 496 mapcount = page_mapcount(page); 497 list_for_each_entry(avc, &anon_vma->head, same_anon_vma) { 498 struct vm_area_struct *vma = avc->vma; 499 unsigned long address = vma_address(page, vma); 500 if (address == -EFAULT) 501 continue; 502 /* 503 * If we are reclaiming on behalf of a cgroup, skip 504 * counting on behalf of references from different 505 * cgroups 506 */ 507 if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) 508 continue; 509 referenced += page_referenced_one(page, vma, address, 510 &mapcount, vm_flags); 511 if (!mapcount) 512 break; 513 } 514 515 page_unlock_anon_vma(anon_vma); 516 return referenced; 517 } 518 519 /** 520 * page_referenced_file - referenced check for object-based rmap 521 * @page: the page we're checking references on. 522 * @mem_cont: target memory controller 523 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page 524 * 525 * For an object-based mapped page, find all the places it is mapped and 526 * check/clear the referenced flag. This is done by following the page->mapping 527 * pointer, then walking the chain of vmas it holds. It returns the number 528 * of references it found. 529 * 530 * This function is only called from page_referenced for object-based pages. 531 */ 532 static int page_referenced_file(struct page *page, 533 struct mem_cgroup *mem_cont, 534 unsigned long *vm_flags) 535 { 536 unsigned int mapcount; 537 struct address_space *mapping = page->mapping; 538 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 539 struct vm_area_struct *vma; 540 struct prio_tree_iter iter; 541 int referenced = 0; 542 543 /* 544 * The caller's checks on page->mapping and !PageAnon have made 545 * sure that this is a file page: the check for page->mapping 546 * excludes the case just before it gets set on an anon page. 547 */ 548 BUG_ON(PageAnon(page)); 549 550 /* 551 * The page lock not only makes sure that page->mapping cannot 552 * suddenly be NULLified by truncation, it makes sure that the 553 * structure at mapping cannot be freed and reused yet, 554 * so we can safely take mapping->i_mmap_lock. 555 */ 556 BUG_ON(!PageLocked(page)); 557 558 spin_lock(&mapping->i_mmap_lock); 559 560 /* 561 * i_mmap_lock does not stabilize mapcount at all, but mapcount 562 * is more likely to be accurate if we note it after spinning. 563 */ 564 mapcount = page_mapcount(page); 565 566 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 567 unsigned long address = vma_address(page, vma); 568 if (address == -EFAULT) 569 continue; 570 /* 571 * If we are reclaiming on behalf of a cgroup, skip 572 * counting on behalf of references from different 573 * cgroups 574 */ 575 if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) 576 continue; 577 referenced += page_referenced_one(page, vma, address, 578 &mapcount, vm_flags); 579 if (!mapcount) 580 break; 581 } 582 583 spin_unlock(&mapping->i_mmap_lock); 584 return referenced; 585 } 586 587 /** 588 * page_referenced - test if the page was referenced 589 * @page: the page to test 590 * @is_locked: caller holds lock on the page 591 * @mem_cont: target memory controller 592 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page 593 * 594 * Quick test_and_clear_referenced for all mappings to a page, 595 * returns the number of ptes which referenced the page. 596 */ 597 int page_referenced(struct page *page, 598 int is_locked, 599 struct mem_cgroup *mem_cont, 600 unsigned long *vm_flags) 601 { 602 int referenced = 0; 603 int we_locked = 0; 604 605 *vm_flags = 0; 606 if (page_mapped(page) && page_rmapping(page)) { 607 if (!is_locked && (!PageAnon(page) || PageKsm(page))) { 608 we_locked = trylock_page(page); 609 if (!we_locked) { 610 referenced++; 611 goto out; 612 } 613 } 614 if (unlikely(PageKsm(page))) 615 referenced += page_referenced_ksm(page, mem_cont, 616 vm_flags); 617 else if (PageAnon(page)) 618 referenced += page_referenced_anon(page, mem_cont, 619 vm_flags); 620 else if (page->mapping) 621 referenced += page_referenced_file(page, mem_cont, 622 vm_flags); 623 if (we_locked) 624 unlock_page(page); 625 } 626 out: 627 if (page_test_and_clear_young(page)) 628 referenced++; 629 630 return referenced; 631 } 632 633 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, 634 unsigned long address) 635 { 636 struct mm_struct *mm = vma->vm_mm; 637 pte_t *pte; 638 spinlock_t *ptl; 639 int ret = 0; 640 641 pte = page_check_address(page, mm, address, &ptl, 1); 642 if (!pte) 643 goto out; 644 645 if (pte_dirty(*pte) || pte_write(*pte)) { 646 pte_t entry; 647 648 flush_cache_page(vma, address, pte_pfn(*pte)); 649 entry = ptep_clear_flush_notify(vma, address, pte); 650 entry = pte_wrprotect(entry); 651 entry = pte_mkclean(entry); 652 set_pte_at(mm, address, pte, entry); 653 ret = 1; 654 } 655 656 pte_unmap_unlock(pte, ptl); 657 out: 658 return ret; 659 } 660 661 static int page_mkclean_file(struct address_space *mapping, struct page *page) 662 { 663 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 664 struct vm_area_struct *vma; 665 struct prio_tree_iter iter; 666 int ret = 0; 667 668 BUG_ON(PageAnon(page)); 669 670 spin_lock(&mapping->i_mmap_lock); 671 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 672 if (vma->vm_flags & VM_SHARED) { 673 unsigned long address = vma_address(page, vma); 674 if (address == -EFAULT) 675 continue; 676 ret += page_mkclean_one(page, vma, address); 677 } 678 } 679 spin_unlock(&mapping->i_mmap_lock); 680 return ret; 681 } 682 683 int page_mkclean(struct page *page) 684 { 685 int ret = 0; 686 687 BUG_ON(!PageLocked(page)); 688 689 if (page_mapped(page)) { 690 struct address_space *mapping = page_mapping(page); 691 if (mapping) { 692 ret = page_mkclean_file(mapping, page); 693 if (page_test_dirty(page)) { 694 page_clear_dirty(page); 695 ret = 1; 696 } 697 } 698 } 699 700 return ret; 701 } 702 EXPORT_SYMBOL_GPL(page_mkclean); 703 704 /** 705 * page_move_anon_rmap - move a page to our anon_vma 706 * @page: the page to move to our anon_vma 707 * @vma: the vma the page belongs to 708 * @address: the user virtual address mapped 709 * 710 * When a page belongs exclusively to one process after a COW event, 711 * that page can be moved into the anon_vma that belongs to just that 712 * process, so the rmap code will not search the parent or sibling 713 * processes. 714 */ 715 void page_move_anon_rmap(struct page *page, 716 struct vm_area_struct *vma, unsigned long address) 717 { 718 struct anon_vma *anon_vma = vma->anon_vma; 719 720 VM_BUG_ON(!PageLocked(page)); 721 VM_BUG_ON(!anon_vma); 722 VM_BUG_ON(page->index != linear_page_index(vma, address)); 723 724 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 725 page->mapping = (struct address_space *) anon_vma; 726 } 727 728 /** 729 * __page_set_anon_rmap - setup new anonymous rmap 730 * @page: the page to add the mapping to 731 * @vma: the vm area in which the mapping is added 732 * @address: the user virtual address mapped 733 * @exclusive: the page is exclusively owned by the current process 734 */ 735 static void __page_set_anon_rmap(struct page *page, 736 struct vm_area_struct *vma, unsigned long address, int exclusive) 737 { 738 struct anon_vma *anon_vma = vma->anon_vma; 739 740 BUG_ON(!anon_vma); 741 742 /* 743 * If the page isn't exclusively mapped into this vma, 744 * we must use the _oldest_ possible anon_vma for the 745 * page mapping! 746 * 747 * So take the last AVC chain entry in the vma, which is 748 * the deepest ancestor, and use the anon_vma from that. 749 */ 750 if (!exclusive) { 751 struct anon_vma_chain *avc; 752 avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma); 753 anon_vma = avc->anon_vma; 754 } 755 756 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 757 page->mapping = (struct address_space *) anon_vma; 758 page->index = linear_page_index(vma, address); 759 } 760 761 /** 762 * __page_check_anon_rmap - sanity check anonymous rmap addition 763 * @page: the page to add the mapping to 764 * @vma: the vm area in which the mapping is added 765 * @address: the user virtual address mapped 766 */ 767 static void __page_check_anon_rmap(struct page *page, 768 struct vm_area_struct *vma, unsigned long address) 769 { 770 #ifdef CONFIG_DEBUG_VM 771 /* 772 * The page's anon-rmap details (mapping and index) are guaranteed to 773 * be set up correctly at this point. 774 * 775 * We have exclusion against page_add_anon_rmap because the caller 776 * always holds the page locked, except if called from page_dup_rmap, 777 * in which case the page is already known to be setup. 778 * 779 * We have exclusion against page_add_new_anon_rmap because those pages 780 * are initially only visible via the pagetables, and the pte is locked 781 * over the call to page_add_new_anon_rmap. 782 */ 783 BUG_ON(page->index != linear_page_index(vma, address)); 784 #endif 785 } 786 787 /** 788 * page_add_anon_rmap - add pte mapping to an anonymous page 789 * @page: the page to add the mapping to 790 * @vma: the vm area in which the mapping is added 791 * @address: the user virtual address mapped 792 * 793 * The caller needs to hold the pte lock, and the page must be locked in 794 * the anon_vma case: to serialize mapping,index checking after setting, 795 * and to ensure that PageAnon is not being upgraded racily to PageKsm 796 * (but PageKsm is never downgraded to PageAnon). 797 */ 798 void page_add_anon_rmap(struct page *page, 799 struct vm_area_struct *vma, unsigned long address) 800 { 801 int first = atomic_inc_and_test(&page->_mapcount); 802 if (first) 803 __inc_zone_page_state(page, NR_ANON_PAGES); 804 if (unlikely(PageKsm(page))) 805 return; 806 807 VM_BUG_ON(!PageLocked(page)); 808 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); 809 if (first) 810 __page_set_anon_rmap(page, vma, address, 0); 811 else 812 __page_check_anon_rmap(page, vma, address); 813 } 814 815 /** 816 * page_add_new_anon_rmap - add pte mapping to a new anonymous page 817 * @page: the page to add the mapping to 818 * @vma: the vm area in which the mapping is added 819 * @address: the user virtual address mapped 820 * 821 * Same as page_add_anon_rmap but must only be called on *new* pages. 822 * This means the inc-and-test can be bypassed. 823 * Page does not have to be locked. 824 */ 825 void page_add_new_anon_rmap(struct page *page, 826 struct vm_area_struct *vma, unsigned long address) 827 { 828 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); 829 SetPageSwapBacked(page); 830 atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ 831 __inc_zone_page_state(page, NR_ANON_PAGES); 832 __page_set_anon_rmap(page, vma, address, 1); 833 if (page_evictable(page, vma)) 834 lru_cache_add_lru(page, LRU_ACTIVE_ANON); 835 else 836 add_page_to_unevictable_list(page); 837 } 838 839 /** 840 * page_add_file_rmap - add pte mapping to a file page 841 * @page: the page to add the mapping to 842 * 843 * The caller needs to hold the pte lock. 844 */ 845 void page_add_file_rmap(struct page *page) 846 { 847 if (atomic_inc_and_test(&page->_mapcount)) { 848 __inc_zone_page_state(page, NR_FILE_MAPPED); 849 mem_cgroup_update_file_mapped(page, 1); 850 } 851 } 852 853 /** 854 * page_remove_rmap - take down pte mapping from a page 855 * @page: page to remove mapping from 856 * 857 * The caller needs to hold the pte lock. 858 */ 859 void page_remove_rmap(struct page *page) 860 { 861 /* page still mapped by someone else? */ 862 if (!atomic_add_negative(-1, &page->_mapcount)) 863 return; 864 865 /* 866 * Now that the last pte has gone, s390 must transfer dirty 867 * flag from storage key to struct page. We can usually skip 868 * this if the page is anon, so about to be freed; but perhaps 869 * not if it's in swapcache - there might be another pte slot 870 * containing the swap entry, but page not yet written to swap. 871 */ 872 if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) { 873 page_clear_dirty(page); 874 set_page_dirty(page); 875 } 876 if (PageAnon(page)) { 877 mem_cgroup_uncharge_page(page); 878 __dec_zone_page_state(page, NR_ANON_PAGES); 879 } else { 880 __dec_zone_page_state(page, NR_FILE_MAPPED); 881 mem_cgroup_update_file_mapped(page, -1); 882 } 883 /* 884 * It would be tidy to reset the PageAnon mapping here, 885 * but that might overwrite a racing page_add_anon_rmap 886 * which increments mapcount after us but sets mapping 887 * before us: so leave the reset to free_hot_cold_page, 888 * and remember that it's only reliable while mapped. 889 * Leaving it set also helps swapoff to reinstate ptes 890 * faster for those pages still in swapcache. 891 */ 892 } 893 894 /* 895 * Subfunctions of try_to_unmap: try_to_unmap_one called 896 * repeatedly from either try_to_unmap_anon or try_to_unmap_file. 897 */ 898 int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, 899 unsigned long address, enum ttu_flags flags) 900 { 901 struct mm_struct *mm = vma->vm_mm; 902 pte_t *pte; 903 pte_t pteval; 904 spinlock_t *ptl; 905 int ret = SWAP_AGAIN; 906 907 pte = page_check_address(page, mm, address, &ptl, 0); 908 if (!pte) 909 goto out; 910 911 /* 912 * If the page is mlock()d, we cannot swap it out. 913 * If it's recently referenced (perhaps page_referenced 914 * skipped over this mm) then we should reactivate it. 915 */ 916 if (!(flags & TTU_IGNORE_MLOCK)) { 917 if (vma->vm_flags & VM_LOCKED) 918 goto out_mlock; 919 920 if (TTU_ACTION(flags) == TTU_MUNLOCK) 921 goto out_unmap; 922 } 923 if (!(flags & TTU_IGNORE_ACCESS)) { 924 if (ptep_clear_flush_young_notify(vma, address, pte)) { 925 ret = SWAP_FAIL; 926 goto out_unmap; 927 } 928 } 929 930 /* Nuke the page table entry. */ 931 flush_cache_page(vma, address, page_to_pfn(page)); 932 pteval = ptep_clear_flush_notify(vma, address, pte); 933 934 /* Move the dirty bit to the physical page now the pte is gone. */ 935 if (pte_dirty(pteval)) 936 set_page_dirty(page); 937 938 /* Update high watermark before we lower rss */ 939 update_hiwater_rss(mm); 940 941 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { 942 if (PageAnon(page)) 943 dec_mm_counter(mm, MM_ANONPAGES); 944 else 945 dec_mm_counter(mm, MM_FILEPAGES); 946 set_pte_at(mm, address, pte, 947 swp_entry_to_pte(make_hwpoison_entry(page))); 948 } else if (PageAnon(page)) { 949 swp_entry_t entry = { .val = page_private(page) }; 950 951 if (PageSwapCache(page)) { 952 /* 953 * Store the swap location in the pte. 954 * See handle_pte_fault() ... 955 */ 956 if (swap_duplicate(entry) < 0) { 957 set_pte_at(mm, address, pte, pteval); 958 ret = SWAP_FAIL; 959 goto out_unmap; 960 } 961 if (list_empty(&mm->mmlist)) { 962 spin_lock(&mmlist_lock); 963 if (list_empty(&mm->mmlist)) 964 list_add(&mm->mmlist, &init_mm.mmlist); 965 spin_unlock(&mmlist_lock); 966 } 967 dec_mm_counter(mm, MM_ANONPAGES); 968 inc_mm_counter(mm, MM_SWAPENTS); 969 } else if (PAGE_MIGRATION) { 970 /* 971 * Store the pfn of the page in a special migration 972 * pte. do_swap_page() will wait until the migration 973 * pte is removed and then restart fault handling. 974 */ 975 BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); 976 entry = make_migration_entry(page, pte_write(pteval)); 977 } 978 set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); 979 BUG_ON(pte_file(*pte)); 980 } else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) { 981 /* Establish migration entry for a file page */ 982 swp_entry_t entry; 983 entry = make_migration_entry(page, pte_write(pteval)); 984 set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); 985 } else 986 dec_mm_counter(mm, MM_FILEPAGES); 987 988 page_remove_rmap(page); 989 page_cache_release(page); 990 991 out_unmap: 992 pte_unmap_unlock(pte, ptl); 993 out: 994 return ret; 995 996 out_mlock: 997 pte_unmap_unlock(pte, ptl); 998 999 1000 /* 1001 * We need mmap_sem locking, Otherwise VM_LOCKED check makes 1002 * unstable result and race. Plus, We can't wait here because 1003 * we now hold anon_vma->lock or mapping->i_mmap_lock. 1004 * if trylock failed, the page remain in evictable lru and later 1005 * vmscan could retry to move the page to unevictable lru if the 1006 * page is actually mlocked. 1007 */ 1008 if (down_read_trylock(&vma->vm_mm->mmap_sem)) { 1009 if (vma->vm_flags & VM_LOCKED) { 1010 mlock_vma_page(page); 1011 ret = SWAP_MLOCK; 1012 } 1013 up_read(&vma->vm_mm->mmap_sem); 1014 } 1015 return ret; 1016 } 1017 1018 /* 1019 * objrmap doesn't work for nonlinear VMAs because the assumption that 1020 * offset-into-file correlates with offset-into-virtual-addresses does not hold. 1021 * Consequently, given a particular page and its ->index, we cannot locate the 1022 * ptes which are mapping that page without an exhaustive linear search. 1023 * 1024 * So what this code does is a mini "virtual scan" of each nonlinear VMA which 1025 * maps the file to which the target page belongs. The ->vm_private_data field 1026 * holds the current cursor into that scan. Successive searches will circulate 1027 * around the vma's virtual address space. 1028 * 1029 * So as more replacement pressure is applied to the pages in a nonlinear VMA, 1030 * more scanning pressure is placed against them as well. Eventually pages 1031 * will become fully unmapped and are eligible for eviction. 1032 * 1033 * For very sparsely populated VMAs this is a little inefficient - chances are 1034 * there there won't be many ptes located within the scan cluster. In this case 1035 * maybe we could scan further - to the end of the pte page, perhaps. 1036 * 1037 * Mlocked pages: check VM_LOCKED under mmap_sem held for read, if we can 1038 * acquire it without blocking. If vma locked, mlock the pages in the cluster, 1039 * rather than unmapping them. If we encounter the "check_page" that vmscan is 1040 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN. 1041 */ 1042 #define CLUSTER_SIZE min(32*PAGE_SIZE, PMD_SIZE) 1043 #define CLUSTER_MASK (~(CLUSTER_SIZE - 1)) 1044 1045 static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, 1046 struct vm_area_struct *vma, struct page *check_page) 1047 { 1048 struct mm_struct *mm = vma->vm_mm; 1049 pgd_t *pgd; 1050 pud_t *pud; 1051 pmd_t *pmd; 1052 pte_t *pte; 1053 pte_t pteval; 1054 spinlock_t *ptl; 1055 struct page *page; 1056 unsigned long address; 1057 unsigned long end; 1058 int ret = SWAP_AGAIN; 1059 int locked_vma = 0; 1060 1061 address = (vma->vm_start + cursor) & CLUSTER_MASK; 1062 end = address + CLUSTER_SIZE; 1063 if (address < vma->vm_start) 1064 address = vma->vm_start; 1065 if (end > vma->vm_end) 1066 end = vma->vm_end; 1067 1068 pgd = pgd_offset(mm, address); 1069 if (!pgd_present(*pgd)) 1070 return ret; 1071 1072 pud = pud_offset(pgd, address); 1073 if (!pud_present(*pud)) 1074 return ret; 1075 1076 pmd = pmd_offset(pud, address); 1077 if (!pmd_present(*pmd)) 1078 return ret; 1079 1080 /* 1081 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED, 1082 * keep the sem while scanning the cluster for mlocking pages. 1083 */ 1084 if (down_read_trylock(&vma->vm_mm->mmap_sem)) { 1085 locked_vma = (vma->vm_flags & VM_LOCKED); 1086 if (!locked_vma) 1087 up_read(&vma->vm_mm->mmap_sem); /* don't need it */ 1088 } 1089 1090 pte = pte_offset_map_lock(mm, pmd, address, &ptl); 1091 1092 /* Update high watermark before we lower rss */ 1093 update_hiwater_rss(mm); 1094 1095 for (; address < end; pte++, address += PAGE_SIZE) { 1096 if (!pte_present(*pte)) 1097 continue; 1098 page = vm_normal_page(vma, address, *pte); 1099 BUG_ON(!page || PageAnon(page)); 1100 1101 if (locked_vma) { 1102 mlock_vma_page(page); /* no-op if already mlocked */ 1103 if (page == check_page) 1104 ret = SWAP_MLOCK; 1105 continue; /* don't unmap */ 1106 } 1107 1108 if (ptep_clear_flush_young_notify(vma, address, pte)) 1109 continue; 1110 1111 /* Nuke the page table entry. */ 1112 flush_cache_page(vma, address, pte_pfn(*pte)); 1113 pteval = ptep_clear_flush_notify(vma, address, pte); 1114 1115 /* If nonlinear, store the file page offset in the pte. */ 1116 if (page->index != linear_page_index(vma, address)) 1117 set_pte_at(mm, address, pte, pgoff_to_pte(page->index)); 1118 1119 /* Move the dirty bit to the physical page now the pte is gone. */ 1120 if (pte_dirty(pteval)) 1121 set_page_dirty(page); 1122 1123 page_remove_rmap(page); 1124 page_cache_release(page); 1125 dec_mm_counter(mm, MM_FILEPAGES); 1126 (*mapcount)--; 1127 } 1128 pte_unmap_unlock(pte - 1, ptl); 1129 if (locked_vma) 1130 up_read(&vma->vm_mm->mmap_sem); 1131 return ret; 1132 } 1133 1134 /** 1135 * try_to_unmap_anon - unmap or unlock anonymous page using the object-based 1136 * rmap method 1137 * @page: the page to unmap/unlock 1138 * @flags: action and flags 1139 * 1140 * Find all the mappings of a page using the mapping pointer and the vma chains 1141 * contained in the anon_vma struct it points to. 1142 * 1143 * This function is only called from try_to_unmap/try_to_munlock for 1144 * anonymous pages. 1145 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1146 * where the page was found will be held for write. So, we won't recheck 1147 * vm_flags for that VMA. That should be OK, because that vma shouldn't be 1148 * 'LOCKED. 1149 */ 1150 static int try_to_unmap_anon(struct page *page, enum ttu_flags flags) 1151 { 1152 struct anon_vma *anon_vma; 1153 struct anon_vma_chain *avc; 1154 int ret = SWAP_AGAIN; 1155 1156 anon_vma = page_lock_anon_vma(page); 1157 if (!anon_vma) 1158 return ret; 1159 1160 list_for_each_entry(avc, &anon_vma->head, same_anon_vma) { 1161 struct vm_area_struct *vma = avc->vma; 1162 unsigned long address = vma_address(page, vma); 1163 if (address == -EFAULT) 1164 continue; 1165 ret = try_to_unmap_one(page, vma, address, flags); 1166 if (ret != SWAP_AGAIN || !page_mapped(page)) 1167 break; 1168 } 1169 1170 page_unlock_anon_vma(anon_vma); 1171 return ret; 1172 } 1173 1174 /** 1175 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method 1176 * @page: the page to unmap/unlock 1177 * @flags: action and flags 1178 * 1179 * Find all the mappings of a page using the mapping pointer and the vma chains 1180 * contained in the address_space struct it points to. 1181 * 1182 * This function is only called from try_to_unmap/try_to_munlock for 1183 * object-based pages. 1184 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1185 * where the page was found will be held for write. So, we won't recheck 1186 * vm_flags for that VMA. That should be OK, because that vma shouldn't be 1187 * 'LOCKED. 1188 */ 1189 static int try_to_unmap_file(struct page *page, enum ttu_flags flags) 1190 { 1191 struct address_space *mapping = page->mapping; 1192 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 1193 struct vm_area_struct *vma; 1194 struct prio_tree_iter iter; 1195 int ret = SWAP_AGAIN; 1196 unsigned long cursor; 1197 unsigned long max_nl_cursor = 0; 1198 unsigned long max_nl_size = 0; 1199 unsigned int mapcount; 1200 1201 spin_lock(&mapping->i_mmap_lock); 1202 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 1203 unsigned long address = vma_address(page, vma); 1204 if (address == -EFAULT) 1205 continue; 1206 ret = try_to_unmap_one(page, vma, address, flags); 1207 if (ret != SWAP_AGAIN || !page_mapped(page)) 1208 goto out; 1209 } 1210 1211 if (list_empty(&mapping->i_mmap_nonlinear)) 1212 goto out; 1213 1214 /* 1215 * We don't bother to try to find the munlocked page in nonlinears. 1216 * It's costly. Instead, later, page reclaim logic may call 1217 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily. 1218 */ 1219 if (TTU_ACTION(flags) == TTU_MUNLOCK) 1220 goto out; 1221 1222 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, 1223 shared.vm_set.list) { 1224 cursor = (unsigned long) vma->vm_private_data; 1225 if (cursor > max_nl_cursor) 1226 max_nl_cursor = cursor; 1227 cursor = vma->vm_end - vma->vm_start; 1228 if (cursor > max_nl_size) 1229 max_nl_size = cursor; 1230 } 1231 1232 if (max_nl_size == 0) { /* all nonlinears locked or reserved ? */ 1233 ret = SWAP_FAIL; 1234 goto out; 1235 } 1236 1237 /* 1238 * We don't try to search for this page in the nonlinear vmas, 1239 * and page_referenced wouldn't have found it anyway. Instead 1240 * just walk the nonlinear vmas trying to age and unmap some. 1241 * The mapcount of the page we came in with is irrelevant, 1242 * but even so use it as a guide to how hard we should try? 1243 */ 1244 mapcount = page_mapcount(page); 1245 if (!mapcount) 1246 goto out; 1247 cond_resched_lock(&mapping->i_mmap_lock); 1248 1249 max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK; 1250 if (max_nl_cursor == 0) 1251 max_nl_cursor = CLUSTER_SIZE; 1252 1253 do { 1254 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, 1255 shared.vm_set.list) { 1256 cursor = (unsigned long) vma->vm_private_data; 1257 while ( cursor < max_nl_cursor && 1258 cursor < vma->vm_end - vma->vm_start) { 1259 if (try_to_unmap_cluster(cursor, &mapcount, 1260 vma, page) == SWAP_MLOCK) 1261 ret = SWAP_MLOCK; 1262 cursor += CLUSTER_SIZE; 1263 vma->vm_private_data = (void *) cursor; 1264 if ((int)mapcount <= 0) 1265 goto out; 1266 } 1267 vma->vm_private_data = (void *) max_nl_cursor; 1268 } 1269 cond_resched_lock(&mapping->i_mmap_lock); 1270 max_nl_cursor += CLUSTER_SIZE; 1271 } while (max_nl_cursor <= max_nl_size); 1272 1273 /* 1274 * Don't loop forever (perhaps all the remaining pages are 1275 * in locked vmas). Reset cursor on all unreserved nonlinear 1276 * vmas, now forgetting on which ones it had fallen behind. 1277 */ 1278 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) 1279 vma->vm_private_data = NULL; 1280 out: 1281 spin_unlock(&mapping->i_mmap_lock); 1282 return ret; 1283 } 1284 1285 /** 1286 * try_to_unmap - try to remove all page table mappings to a page 1287 * @page: the page to get unmapped 1288 * @flags: action and flags 1289 * 1290 * Tries to remove all the page table entries which are mapping this 1291 * page, used in the pageout path. Caller must hold the page lock. 1292 * Return values are: 1293 * 1294 * SWAP_SUCCESS - we succeeded in removing all mappings 1295 * SWAP_AGAIN - we missed a mapping, try again later 1296 * SWAP_FAIL - the page is unswappable 1297 * SWAP_MLOCK - page is mlocked. 1298 */ 1299 int try_to_unmap(struct page *page, enum ttu_flags flags) 1300 { 1301 int ret; 1302 1303 BUG_ON(!PageLocked(page)); 1304 1305 if (unlikely(PageKsm(page))) 1306 ret = try_to_unmap_ksm(page, flags); 1307 else if (PageAnon(page)) 1308 ret = try_to_unmap_anon(page, flags); 1309 else 1310 ret = try_to_unmap_file(page, flags); 1311 if (ret != SWAP_MLOCK && !page_mapped(page)) 1312 ret = SWAP_SUCCESS; 1313 return ret; 1314 } 1315 1316 /** 1317 * try_to_munlock - try to munlock a page 1318 * @page: the page to be munlocked 1319 * 1320 * Called from munlock code. Checks all of the VMAs mapping the page 1321 * to make sure nobody else has this page mlocked. The page will be 1322 * returned with PG_mlocked cleared if no other vmas have it mlocked. 1323 * 1324 * Return values are: 1325 * 1326 * SWAP_AGAIN - no vma is holding page mlocked, or, 1327 * SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem 1328 * SWAP_FAIL - page cannot be located at present 1329 * SWAP_MLOCK - page is now mlocked. 1330 */ 1331 int try_to_munlock(struct page *page) 1332 { 1333 VM_BUG_ON(!PageLocked(page) || PageLRU(page)); 1334 1335 if (unlikely(PageKsm(page))) 1336 return try_to_unmap_ksm(page, TTU_MUNLOCK); 1337 else if (PageAnon(page)) 1338 return try_to_unmap_anon(page, TTU_MUNLOCK); 1339 else 1340 return try_to_unmap_file(page, TTU_MUNLOCK); 1341 } 1342 1343 #ifdef CONFIG_MIGRATION 1344 /* 1345 * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file(): 1346 * Called by migrate.c to remove migration ptes, but might be used more later. 1347 */ 1348 static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *, 1349 struct vm_area_struct *, unsigned long, void *), void *arg) 1350 { 1351 struct anon_vma *anon_vma; 1352 struct anon_vma_chain *avc; 1353 int ret = SWAP_AGAIN; 1354 1355 /* 1356 * Note: remove_migration_ptes() cannot use page_lock_anon_vma() 1357 * because that depends on page_mapped(); but not all its usages 1358 * are holding mmap_sem, which also gave the necessary guarantee 1359 * (that this anon_vma's slab has not already been destroyed). 1360 * This needs to be reviewed later: avoiding page_lock_anon_vma() 1361 * is risky, and currently limits the usefulness of rmap_walk(). 1362 */ 1363 anon_vma = page_anon_vma(page); 1364 if (!anon_vma) 1365 return ret; 1366 spin_lock(&anon_vma->lock); 1367 list_for_each_entry(avc, &anon_vma->head, same_anon_vma) { 1368 struct vm_area_struct *vma = avc->vma; 1369 unsigned long address = vma_address(page, vma); 1370 if (address == -EFAULT) 1371 continue; 1372 ret = rmap_one(page, vma, address, arg); 1373 if (ret != SWAP_AGAIN) 1374 break; 1375 } 1376 spin_unlock(&anon_vma->lock); 1377 return ret; 1378 } 1379 1380 static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *, 1381 struct vm_area_struct *, unsigned long, void *), void *arg) 1382 { 1383 struct address_space *mapping = page->mapping; 1384 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 1385 struct vm_area_struct *vma; 1386 struct prio_tree_iter iter; 1387 int ret = SWAP_AGAIN; 1388 1389 if (!mapping) 1390 return ret; 1391 spin_lock(&mapping->i_mmap_lock); 1392 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 1393 unsigned long address = vma_address(page, vma); 1394 if (address == -EFAULT) 1395 continue; 1396 ret = rmap_one(page, vma, address, arg); 1397 if (ret != SWAP_AGAIN) 1398 break; 1399 } 1400 /* 1401 * No nonlinear handling: being always shared, nonlinear vmas 1402 * never contain migration ptes. Decide what to do about this 1403 * limitation to linear when we need rmap_walk() on nonlinear. 1404 */ 1405 spin_unlock(&mapping->i_mmap_lock); 1406 return ret; 1407 } 1408 1409 int rmap_walk(struct page *page, int (*rmap_one)(struct page *, 1410 struct vm_area_struct *, unsigned long, void *), void *arg) 1411 { 1412 VM_BUG_ON(!PageLocked(page)); 1413 1414 if (unlikely(PageKsm(page))) 1415 return rmap_walk_ksm(page, rmap_one, arg); 1416 else if (PageAnon(page)) 1417 return rmap_walk_anon(page, rmap_one, arg); 1418 else 1419 return rmap_walk_file(page, rmap_one, arg); 1420 } 1421 #endif /* CONFIG_MIGRATION */ 1422