1 /* 2 * mm/rmap.c - physical to virtual reverse mappings 3 * 4 * Copyright 2001, Rik van Riel <riel@conectiva.com.br> 5 * Released under the General Public License (GPL). 6 * 7 * Simple, low overhead reverse mapping scheme. 8 * Please try to keep this thing as modular as possible. 9 * 10 * Provides methods for unmapping each kind of mapped page: 11 * the anon methods track anonymous pages, and 12 * the file methods track pages belonging to an inode. 13 * 14 * Original design by Rik van Riel <riel@conectiva.com.br> 2001 15 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004 16 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004 17 * Contributions by Hugh Dickins 2003, 2004 18 */ 19 20 /* 21 * Lock ordering in mm: 22 * 23 * inode->i_mutex (while writing or truncating, not reading or faulting) 24 * inode->i_alloc_sem (vmtruncate_range) 25 * mm->mmap_sem 26 * page->flags PG_locked (lock_page) 27 * mapping->i_mmap_lock 28 * anon_vma->lock 29 * mm->page_table_lock or pte_lock 30 * zone->lru_lock (in mark_page_accessed, isolate_lru_page) 31 * swap_lock (in swap_duplicate, swap_info_get) 32 * mmlist_lock (in mmput, drain_mmlist and others) 33 * mapping->private_lock (in __set_page_dirty_buffers) 34 * inode_lock (in set_page_dirty's __mark_inode_dirty) 35 * sb_lock (within inode_lock in fs/fs-writeback.c) 36 * mapping->tree_lock (widely used, in set_page_dirty, 37 * in arch-dependent flush_dcache_mmap_lock, 38 * within inode_lock in __sync_single_inode) 39 * 40 * (code doesn't rely on that order so it could be switched around) 41 * ->tasklist_lock 42 * anon_vma->lock (memory_failure, collect_procs_anon) 43 * pte map lock 44 */ 45 46 #include <linux/mm.h> 47 #include <linux/pagemap.h> 48 #include <linux/swap.h> 49 #include <linux/swapops.h> 50 #include <linux/slab.h> 51 #include <linux/init.h> 52 #include <linux/ksm.h> 53 #include <linux/rmap.h> 54 #include <linux/rcupdate.h> 55 #include <linux/module.h> 56 #include <linux/memcontrol.h> 57 #include <linux/mmu_notifier.h> 58 #include <linux/migrate.h> 59 60 #include <asm/tlbflush.h> 61 62 #include "internal.h" 63 64 static struct kmem_cache *anon_vma_cachep; 65 static struct kmem_cache *anon_vma_chain_cachep; 66 67 static inline struct anon_vma *anon_vma_alloc(void) 68 { 69 return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); 70 } 71 72 void anon_vma_free(struct anon_vma *anon_vma) 73 { 74 kmem_cache_free(anon_vma_cachep, anon_vma); 75 } 76 77 static inline struct anon_vma_chain *anon_vma_chain_alloc(void) 78 { 79 return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL); 80 } 81 82 void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) 83 { 84 kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); 85 } 86 87 /** 88 * anon_vma_prepare - attach an anon_vma to a memory region 89 * @vma: the memory region in question 90 * 91 * This makes sure the memory mapping described by 'vma' has 92 * an 'anon_vma' attached to it, so that we can associate the 93 * anonymous pages mapped into it with that anon_vma. 94 * 95 * The common case will be that we already have one, but if 96 * if not we either need to find an adjacent mapping that we 97 * can re-use the anon_vma from (very common when the only 98 * reason for splitting a vma has been mprotect()), or we 99 * allocate a new one. 100 * 101 * Anon-vma allocations are very subtle, because we may have 102 * optimistically looked up an anon_vma in page_lock_anon_vma() 103 * and that may actually touch the spinlock even in the newly 104 * allocated vma (it depends on RCU to make sure that the 105 * anon_vma isn't actually destroyed). 106 * 107 * As a result, we need to do proper anon_vma locking even 108 * for the new allocation. At the same time, we do not want 109 * to do any locking for the common case of already having 110 * an anon_vma. 111 * 112 * This must be called with the mmap_sem held for reading. 113 */ 114 int anon_vma_prepare(struct vm_area_struct *vma) 115 { 116 struct anon_vma *anon_vma = vma->anon_vma; 117 struct anon_vma_chain *avc; 118 119 might_sleep(); 120 if (unlikely(!anon_vma)) { 121 struct mm_struct *mm = vma->vm_mm; 122 struct anon_vma *allocated; 123 124 avc = anon_vma_chain_alloc(); 125 if (!avc) 126 goto out_enomem; 127 128 anon_vma = find_mergeable_anon_vma(vma); 129 allocated = NULL; 130 if (!anon_vma) { 131 anon_vma = anon_vma_alloc(); 132 if (unlikely(!anon_vma)) 133 goto out_enomem_free_avc; 134 allocated = anon_vma; 135 } 136 spin_lock(&anon_vma->lock); 137 138 /* page_table_lock to protect against threads */ 139 spin_lock(&mm->page_table_lock); 140 if (likely(!vma->anon_vma)) { 141 vma->anon_vma = anon_vma; 142 avc->anon_vma = anon_vma; 143 avc->vma = vma; 144 list_add(&avc->same_vma, &vma->anon_vma_chain); 145 list_add(&avc->same_anon_vma, &anon_vma->head); 146 allocated = NULL; 147 } 148 spin_unlock(&mm->page_table_lock); 149 150 spin_unlock(&anon_vma->lock); 151 if (unlikely(allocated)) { 152 anon_vma_free(allocated); 153 anon_vma_chain_free(avc); 154 } 155 } 156 return 0; 157 158 out_enomem_free_avc: 159 anon_vma_chain_free(avc); 160 out_enomem: 161 return -ENOMEM; 162 } 163 164 static void anon_vma_chain_link(struct vm_area_struct *vma, 165 struct anon_vma_chain *avc, 166 struct anon_vma *anon_vma) 167 { 168 avc->vma = vma; 169 avc->anon_vma = anon_vma; 170 list_add(&avc->same_vma, &vma->anon_vma_chain); 171 172 spin_lock(&anon_vma->lock); 173 list_add_tail(&avc->same_anon_vma, &anon_vma->head); 174 spin_unlock(&anon_vma->lock); 175 } 176 177 /* 178 * Attach the anon_vmas from src to dst. 179 * Returns 0 on success, -ENOMEM on failure. 180 */ 181 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) 182 { 183 struct anon_vma_chain *avc, *pavc; 184 185 list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) { 186 avc = anon_vma_chain_alloc(); 187 if (!avc) 188 goto enomem_failure; 189 anon_vma_chain_link(dst, avc, pavc->anon_vma); 190 } 191 return 0; 192 193 enomem_failure: 194 unlink_anon_vmas(dst); 195 return -ENOMEM; 196 } 197 198 /* 199 * Attach vma to its own anon_vma, as well as to the anon_vmas that 200 * the corresponding VMA in the parent process is attached to. 201 * Returns 0 on success, non-zero on failure. 202 */ 203 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) 204 { 205 struct anon_vma_chain *avc; 206 struct anon_vma *anon_vma; 207 208 /* Don't bother if the parent process has no anon_vma here. */ 209 if (!pvma->anon_vma) 210 return 0; 211 212 /* 213 * First, attach the new VMA to the parent VMA's anon_vmas, 214 * so rmap can find non-COWed pages in child processes. 215 */ 216 if (anon_vma_clone(vma, pvma)) 217 return -ENOMEM; 218 219 /* Then add our own anon_vma. */ 220 anon_vma = anon_vma_alloc(); 221 if (!anon_vma) 222 goto out_error; 223 avc = anon_vma_chain_alloc(); 224 if (!avc) 225 goto out_error_free_anon_vma; 226 anon_vma_chain_link(vma, avc, anon_vma); 227 /* Mark this anon_vma as the one where our new (COWed) pages go. */ 228 vma->anon_vma = anon_vma; 229 230 return 0; 231 232 out_error_free_anon_vma: 233 anon_vma_free(anon_vma); 234 out_error: 235 unlink_anon_vmas(vma); 236 return -ENOMEM; 237 } 238 239 static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain) 240 { 241 struct anon_vma *anon_vma = anon_vma_chain->anon_vma; 242 int empty; 243 244 /* If anon_vma_fork fails, we can get an empty anon_vma_chain. */ 245 if (!anon_vma) 246 return; 247 248 spin_lock(&anon_vma->lock); 249 list_del(&anon_vma_chain->same_anon_vma); 250 251 /* We must garbage collect the anon_vma if it's empty */ 252 empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma); 253 spin_unlock(&anon_vma->lock); 254 255 if (empty) 256 anon_vma_free(anon_vma); 257 } 258 259 void unlink_anon_vmas(struct vm_area_struct *vma) 260 { 261 struct anon_vma_chain *avc, *next; 262 263 /* Unlink each anon_vma chained to the VMA. */ 264 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 265 anon_vma_unlink(avc); 266 list_del(&avc->same_vma); 267 anon_vma_chain_free(avc); 268 } 269 } 270 271 static void anon_vma_ctor(void *data) 272 { 273 struct anon_vma *anon_vma = data; 274 275 spin_lock_init(&anon_vma->lock); 276 ksm_refcount_init(anon_vma); 277 INIT_LIST_HEAD(&anon_vma->head); 278 } 279 280 void __init anon_vma_init(void) 281 { 282 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), 283 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor); 284 anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC); 285 } 286 287 /* 288 * Getting a lock on a stable anon_vma from a page off the LRU is 289 * tricky: page_lock_anon_vma rely on RCU to guard against the races. 290 */ 291 struct anon_vma *page_lock_anon_vma(struct page *page) 292 { 293 struct anon_vma *anon_vma; 294 unsigned long anon_mapping; 295 296 rcu_read_lock(); 297 anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping); 298 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) 299 goto out; 300 if (!page_mapped(page)) 301 goto out; 302 303 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); 304 spin_lock(&anon_vma->lock); 305 return anon_vma; 306 out: 307 rcu_read_unlock(); 308 return NULL; 309 } 310 311 void page_unlock_anon_vma(struct anon_vma *anon_vma) 312 { 313 spin_unlock(&anon_vma->lock); 314 rcu_read_unlock(); 315 } 316 317 /* 318 * At what user virtual address is page expected in @vma? 319 * Returns virtual address or -EFAULT if page's index/offset is not 320 * within the range mapped the @vma. 321 */ 322 static inline unsigned long 323 vma_address(struct page *page, struct vm_area_struct *vma) 324 { 325 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 326 unsigned long address; 327 328 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 329 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) { 330 /* page should be within @vma mapping range */ 331 return -EFAULT; 332 } 333 return address; 334 } 335 336 /* 337 * At what user virtual address is page expected in vma? 338 * checking that the page matches the vma. 339 */ 340 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 341 { 342 if (PageAnon(page)) { 343 if (vma->anon_vma != page_anon_vma(page)) 344 return -EFAULT; 345 } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { 346 if (!vma->vm_file || 347 vma->vm_file->f_mapping != page->mapping) 348 return -EFAULT; 349 } else 350 return -EFAULT; 351 return vma_address(page, vma); 352 } 353 354 /* 355 * Check that @page is mapped at @address into @mm. 356 * 357 * If @sync is false, page_check_address may perform a racy check to avoid 358 * the page table lock when the pte is not present (helpful when reclaiming 359 * highly shared pages). 360 * 361 * On success returns with pte mapped and locked. 362 */ 363 pte_t *page_check_address(struct page *page, struct mm_struct *mm, 364 unsigned long address, spinlock_t **ptlp, int sync) 365 { 366 pgd_t *pgd; 367 pud_t *pud; 368 pmd_t *pmd; 369 pte_t *pte; 370 spinlock_t *ptl; 371 372 pgd = pgd_offset(mm, address); 373 if (!pgd_present(*pgd)) 374 return NULL; 375 376 pud = pud_offset(pgd, address); 377 if (!pud_present(*pud)) 378 return NULL; 379 380 pmd = pmd_offset(pud, address); 381 if (!pmd_present(*pmd)) 382 return NULL; 383 384 pte = pte_offset_map(pmd, address); 385 /* Make a quick check before getting the lock */ 386 if (!sync && !pte_present(*pte)) { 387 pte_unmap(pte); 388 return NULL; 389 } 390 391 ptl = pte_lockptr(mm, pmd); 392 spin_lock(ptl); 393 if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) { 394 *ptlp = ptl; 395 return pte; 396 } 397 pte_unmap_unlock(pte, ptl); 398 return NULL; 399 } 400 401 /** 402 * page_mapped_in_vma - check whether a page is really mapped in a VMA 403 * @page: the page to test 404 * @vma: the VMA to test 405 * 406 * Returns 1 if the page is mapped into the page tables of the VMA, 0 407 * if the page is not mapped into the page tables of this VMA. Only 408 * valid for normal file or anonymous VMAs. 409 */ 410 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) 411 { 412 unsigned long address; 413 pte_t *pte; 414 spinlock_t *ptl; 415 416 address = vma_address(page, vma); 417 if (address == -EFAULT) /* out of vma range */ 418 return 0; 419 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); 420 if (!pte) /* the page is not in this mm */ 421 return 0; 422 pte_unmap_unlock(pte, ptl); 423 424 return 1; 425 } 426 427 /* 428 * Subfunctions of page_referenced: page_referenced_one called 429 * repeatedly from either page_referenced_anon or page_referenced_file. 430 */ 431 int page_referenced_one(struct page *page, struct vm_area_struct *vma, 432 unsigned long address, unsigned int *mapcount, 433 unsigned long *vm_flags) 434 { 435 struct mm_struct *mm = vma->vm_mm; 436 pte_t *pte; 437 spinlock_t *ptl; 438 int referenced = 0; 439 440 pte = page_check_address(page, mm, address, &ptl, 0); 441 if (!pte) 442 goto out; 443 444 /* 445 * Don't want to elevate referenced for mlocked page that gets this far, 446 * in order that it progresses to try_to_unmap and is moved to the 447 * unevictable list. 448 */ 449 if (vma->vm_flags & VM_LOCKED) { 450 *mapcount = 1; /* break early from loop */ 451 *vm_flags |= VM_LOCKED; 452 goto out_unmap; 453 } 454 455 if (ptep_clear_flush_young_notify(vma, address, pte)) { 456 /* 457 * Don't treat a reference through a sequentially read 458 * mapping as such. If the page has been used in 459 * another mapping, we will catch it; if this other 460 * mapping is already gone, the unmap path will have 461 * set PG_referenced or activated the page. 462 */ 463 if (likely(!VM_SequentialReadHint(vma))) 464 referenced++; 465 } 466 467 /* Pretend the page is referenced if the task has the 468 swap token and is in the middle of a page fault. */ 469 if (mm != current->mm && has_swap_token(mm) && 470 rwsem_is_locked(&mm->mmap_sem)) 471 referenced++; 472 473 out_unmap: 474 (*mapcount)--; 475 pte_unmap_unlock(pte, ptl); 476 477 if (referenced) 478 *vm_flags |= vma->vm_flags; 479 out: 480 return referenced; 481 } 482 483 static int page_referenced_anon(struct page *page, 484 struct mem_cgroup *mem_cont, 485 unsigned long *vm_flags) 486 { 487 unsigned int mapcount; 488 struct anon_vma *anon_vma; 489 struct anon_vma_chain *avc; 490 int referenced = 0; 491 492 anon_vma = page_lock_anon_vma(page); 493 if (!anon_vma) 494 return referenced; 495 496 mapcount = page_mapcount(page); 497 list_for_each_entry(avc, &anon_vma->head, same_anon_vma) { 498 struct vm_area_struct *vma = avc->vma; 499 unsigned long address = vma_address(page, vma); 500 if (address == -EFAULT) 501 continue; 502 /* 503 * If we are reclaiming on behalf of a cgroup, skip 504 * counting on behalf of references from different 505 * cgroups 506 */ 507 if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) 508 continue; 509 referenced += page_referenced_one(page, vma, address, 510 &mapcount, vm_flags); 511 if (!mapcount) 512 break; 513 } 514 515 page_unlock_anon_vma(anon_vma); 516 return referenced; 517 } 518 519 /** 520 * page_referenced_file - referenced check for object-based rmap 521 * @page: the page we're checking references on. 522 * @mem_cont: target memory controller 523 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page 524 * 525 * For an object-based mapped page, find all the places it is mapped and 526 * check/clear the referenced flag. This is done by following the page->mapping 527 * pointer, then walking the chain of vmas it holds. It returns the number 528 * of references it found. 529 * 530 * This function is only called from page_referenced for object-based pages. 531 */ 532 static int page_referenced_file(struct page *page, 533 struct mem_cgroup *mem_cont, 534 unsigned long *vm_flags) 535 { 536 unsigned int mapcount; 537 struct address_space *mapping = page->mapping; 538 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 539 struct vm_area_struct *vma; 540 struct prio_tree_iter iter; 541 int referenced = 0; 542 543 /* 544 * The caller's checks on page->mapping and !PageAnon have made 545 * sure that this is a file page: the check for page->mapping 546 * excludes the case just before it gets set on an anon page. 547 */ 548 BUG_ON(PageAnon(page)); 549 550 /* 551 * The page lock not only makes sure that page->mapping cannot 552 * suddenly be NULLified by truncation, it makes sure that the 553 * structure at mapping cannot be freed and reused yet, 554 * so we can safely take mapping->i_mmap_lock. 555 */ 556 BUG_ON(!PageLocked(page)); 557 558 spin_lock(&mapping->i_mmap_lock); 559 560 /* 561 * i_mmap_lock does not stabilize mapcount at all, but mapcount 562 * is more likely to be accurate if we note it after spinning. 563 */ 564 mapcount = page_mapcount(page); 565 566 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 567 unsigned long address = vma_address(page, vma); 568 if (address == -EFAULT) 569 continue; 570 /* 571 * If we are reclaiming on behalf of a cgroup, skip 572 * counting on behalf of references from different 573 * cgroups 574 */ 575 if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) 576 continue; 577 referenced += page_referenced_one(page, vma, address, 578 &mapcount, vm_flags); 579 if (!mapcount) 580 break; 581 } 582 583 spin_unlock(&mapping->i_mmap_lock); 584 return referenced; 585 } 586 587 /** 588 * page_referenced - test if the page was referenced 589 * @page: the page to test 590 * @is_locked: caller holds lock on the page 591 * @mem_cont: target memory controller 592 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page 593 * 594 * Quick test_and_clear_referenced for all mappings to a page, 595 * returns the number of ptes which referenced the page. 596 */ 597 int page_referenced(struct page *page, 598 int is_locked, 599 struct mem_cgroup *mem_cont, 600 unsigned long *vm_flags) 601 { 602 int referenced = 0; 603 int we_locked = 0; 604 605 *vm_flags = 0; 606 if (page_mapped(page) && page_rmapping(page)) { 607 if (!is_locked && (!PageAnon(page) || PageKsm(page))) { 608 we_locked = trylock_page(page); 609 if (!we_locked) { 610 referenced++; 611 goto out; 612 } 613 } 614 if (unlikely(PageKsm(page))) 615 referenced += page_referenced_ksm(page, mem_cont, 616 vm_flags); 617 else if (PageAnon(page)) 618 referenced += page_referenced_anon(page, mem_cont, 619 vm_flags); 620 else if (page->mapping) 621 referenced += page_referenced_file(page, mem_cont, 622 vm_flags); 623 if (we_locked) 624 unlock_page(page); 625 } 626 out: 627 if (page_test_and_clear_young(page)) 628 referenced++; 629 630 return referenced; 631 } 632 633 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, 634 unsigned long address) 635 { 636 struct mm_struct *mm = vma->vm_mm; 637 pte_t *pte; 638 spinlock_t *ptl; 639 int ret = 0; 640 641 pte = page_check_address(page, mm, address, &ptl, 1); 642 if (!pte) 643 goto out; 644 645 if (pte_dirty(*pte) || pte_write(*pte)) { 646 pte_t entry; 647 648 flush_cache_page(vma, address, pte_pfn(*pte)); 649 entry = ptep_clear_flush_notify(vma, address, pte); 650 entry = pte_wrprotect(entry); 651 entry = pte_mkclean(entry); 652 set_pte_at(mm, address, pte, entry); 653 ret = 1; 654 } 655 656 pte_unmap_unlock(pte, ptl); 657 out: 658 return ret; 659 } 660 661 static int page_mkclean_file(struct address_space *mapping, struct page *page) 662 { 663 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 664 struct vm_area_struct *vma; 665 struct prio_tree_iter iter; 666 int ret = 0; 667 668 BUG_ON(PageAnon(page)); 669 670 spin_lock(&mapping->i_mmap_lock); 671 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 672 if (vma->vm_flags & VM_SHARED) { 673 unsigned long address = vma_address(page, vma); 674 if (address == -EFAULT) 675 continue; 676 ret += page_mkclean_one(page, vma, address); 677 } 678 } 679 spin_unlock(&mapping->i_mmap_lock); 680 return ret; 681 } 682 683 int page_mkclean(struct page *page) 684 { 685 int ret = 0; 686 687 BUG_ON(!PageLocked(page)); 688 689 if (page_mapped(page)) { 690 struct address_space *mapping = page_mapping(page); 691 if (mapping) { 692 ret = page_mkclean_file(mapping, page); 693 if (page_test_dirty(page)) { 694 page_clear_dirty(page); 695 ret = 1; 696 } 697 } 698 } 699 700 return ret; 701 } 702 EXPORT_SYMBOL_GPL(page_mkclean); 703 704 /** 705 * page_move_anon_rmap - move a page to our anon_vma 706 * @page: the page to move to our anon_vma 707 * @vma: the vma the page belongs to 708 * @address: the user virtual address mapped 709 * 710 * When a page belongs exclusively to one process after a COW event, 711 * that page can be moved into the anon_vma that belongs to just that 712 * process, so the rmap code will not search the parent or sibling 713 * processes. 714 */ 715 void page_move_anon_rmap(struct page *page, 716 struct vm_area_struct *vma, unsigned long address) 717 { 718 struct anon_vma *anon_vma = vma->anon_vma; 719 720 VM_BUG_ON(!PageLocked(page)); 721 VM_BUG_ON(!anon_vma); 722 VM_BUG_ON(page->index != linear_page_index(vma, address)); 723 724 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 725 page->mapping = (struct address_space *) anon_vma; 726 } 727 728 /** 729 * __page_set_anon_rmap - setup new anonymous rmap 730 * @page: the page to add the mapping to 731 * @vma: the vm area in which the mapping is added 732 * @address: the user virtual address mapped 733 */ 734 static void __page_set_anon_rmap(struct page *page, 735 struct vm_area_struct *vma, unsigned long address) 736 { 737 struct anon_vma *anon_vma = vma->anon_vma; 738 739 BUG_ON(!anon_vma); 740 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 741 page->mapping = (struct address_space *) anon_vma; 742 page->index = linear_page_index(vma, address); 743 } 744 745 /** 746 * __page_check_anon_rmap - sanity check anonymous rmap addition 747 * @page: the page to add the mapping to 748 * @vma: the vm area in which the mapping is added 749 * @address: the user virtual address mapped 750 */ 751 static void __page_check_anon_rmap(struct page *page, 752 struct vm_area_struct *vma, unsigned long address) 753 { 754 #ifdef CONFIG_DEBUG_VM 755 /* 756 * The page's anon-rmap details (mapping and index) are guaranteed to 757 * be set up correctly at this point. 758 * 759 * We have exclusion against page_add_anon_rmap because the caller 760 * always holds the page locked, except if called from page_dup_rmap, 761 * in which case the page is already known to be setup. 762 * 763 * We have exclusion against page_add_new_anon_rmap because those pages 764 * are initially only visible via the pagetables, and the pte is locked 765 * over the call to page_add_new_anon_rmap. 766 */ 767 BUG_ON(page->index != linear_page_index(vma, address)); 768 #endif 769 } 770 771 /** 772 * page_add_anon_rmap - add pte mapping to an anonymous page 773 * @page: the page to add the mapping to 774 * @vma: the vm area in which the mapping is added 775 * @address: the user virtual address mapped 776 * 777 * The caller needs to hold the pte lock, and the page must be locked in 778 * the anon_vma case: to serialize mapping,index checking after setting, 779 * and to ensure that PageAnon is not being upgraded racily to PageKsm 780 * (but PageKsm is never downgraded to PageAnon). 781 */ 782 void page_add_anon_rmap(struct page *page, 783 struct vm_area_struct *vma, unsigned long address) 784 { 785 int first = atomic_inc_and_test(&page->_mapcount); 786 if (first) 787 __inc_zone_page_state(page, NR_ANON_PAGES); 788 if (unlikely(PageKsm(page))) 789 return; 790 791 VM_BUG_ON(!PageLocked(page)); 792 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); 793 if (first) 794 __page_set_anon_rmap(page, vma, address); 795 else 796 __page_check_anon_rmap(page, vma, address); 797 } 798 799 /** 800 * page_add_new_anon_rmap - add pte mapping to a new anonymous page 801 * @page: the page to add the mapping to 802 * @vma: the vm area in which the mapping is added 803 * @address: the user virtual address mapped 804 * 805 * Same as page_add_anon_rmap but must only be called on *new* pages. 806 * This means the inc-and-test can be bypassed. 807 * Page does not have to be locked. 808 */ 809 void page_add_new_anon_rmap(struct page *page, 810 struct vm_area_struct *vma, unsigned long address) 811 { 812 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); 813 SetPageSwapBacked(page); 814 atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ 815 __inc_zone_page_state(page, NR_ANON_PAGES); 816 __page_set_anon_rmap(page, vma, address); 817 if (page_evictable(page, vma)) 818 lru_cache_add_lru(page, LRU_ACTIVE_ANON); 819 else 820 add_page_to_unevictable_list(page); 821 } 822 823 /** 824 * page_add_file_rmap - add pte mapping to a file page 825 * @page: the page to add the mapping to 826 * 827 * The caller needs to hold the pte lock. 828 */ 829 void page_add_file_rmap(struct page *page) 830 { 831 if (atomic_inc_and_test(&page->_mapcount)) { 832 __inc_zone_page_state(page, NR_FILE_MAPPED); 833 mem_cgroup_update_file_mapped(page, 1); 834 } 835 } 836 837 /** 838 * page_remove_rmap - take down pte mapping from a page 839 * @page: page to remove mapping from 840 * 841 * The caller needs to hold the pte lock. 842 */ 843 void page_remove_rmap(struct page *page) 844 { 845 /* page still mapped by someone else? */ 846 if (!atomic_add_negative(-1, &page->_mapcount)) 847 return; 848 849 /* 850 * Now that the last pte has gone, s390 must transfer dirty 851 * flag from storage key to struct page. We can usually skip 852 * this if the page is anon, so about to be freed; but perhaps 853 * not if it's in swapcache - there might be another pte slot 854 * containing the swap entry, but page not yet written to swap. 855 */ 856 if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) { 857 page_clear_dirty(page); 858 set_page_dirty(page); 859 } 860 if (PageAnon(page)) { 861 mem_cgroup_uncharge_page(page); 862 __dec_zone_page_state(page, NR_ANON_PAGES); 863 } else { 864 __dec_zone_page_state(page, NR_FILE_MAPPED); 865 mem_cgroup_update_file_mapped(page, -1); 866 } 867 /* 868 * It would be tidy to reset the PageAnon mapping here, 869 * but that might overwrite a racing page_add_anon_rmap 870 * which increments mapcount after us but sets mapping 871 * before us: so leave the reset to free_hot_cold_page, 872 * and remember that it's only reliable while mapped. 873 * Leaving it set also helps swapoff to reinstate ptes 874 * faster for those pages still in swapcache. 875 */ 876 } 877 878 /* 879 * Subfunctions of try_to_unmap: try_to_unmap_one called 880 * repeatedly from either try_to_unmap_anon or try_to_unmap_file. 881 */ 882 int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, 883 unsigned long address, enum ttu_flags flags) 884 { 885 struct mm_struct *mm = vma->vm_mm; 886 pte_t *pte; 887 pte_t pteval; 888 spinlock_t *ptl; 889 int ret = SWAP_AGAIN; 890 891 pte = page_check_address(page, mm, address, &ptl, 0); 892 if (!pte) 893 goto out; 894 895 /* 896 * If the page is mlock()d, we cannot swap it out. 897 * If it's recently referenced (perhaps page_referenced 898 * skipped over this mm) then we should reactivate it. 899 */ 900 if (!(flags & TTU_IGNORE_MLOCK)) { 901 if (vma->vm_flags & VM_LOCKED) 902 goto out_mlock; 903 904 if (TTU_ACTION(flags) == TTU_MUNLOCK) 905 goto out_unmap; 906 } 907 if (!(flags & TTU_IGNORE_ACCESS)) { 908 if (ptep_clear_flush_young_notify(vma, address, pte)) { 909 ret = SWAP_FAIL; 910 goto out_unmap; 911 } 912 } 913 914 /* Nuke the page table entry. */ 915 flush_cache_page(vma, address, page_to_pfn(page)); 916 pteval = ptep_clear_flush_notify(vma, address, pte); 917 918 /* Move the dirty bit to the physical page now the pte is gone. */ 919 if (pte_dirty(pteval)) 920 set_page_dirty(page); 921 922 /* Update high watermark before we lower rss */ 923 update_hiwater_rss(mm); 924 925 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { 926 if (PageAnon(page)) 927 dec_mm_counter(mm, MM_ANONPAGES); 928 else 929 dec_mm_counter(mm, MM_FILEPAGES); 930 set_pte_at(mm, address, pte, 931 swp_entry_to_pte(make_hwpoison_entry(page))); 932 } else if (PageAnon(page)) { 933 swp_entry_t entry = { .val = page_private(page) }; 934 935 if (PageSwapCache(page)) { 936 /* 937 * Store the swap location in the pte. 938 * See handle_pte_fault() ... 939 */ 940 if (swap_duplicate(entry) < 0) { 941 set_pte_at(mm, address, pte, pteval); 942 ret = SWAP_FAIL; 943 goto out_unmap; 944 } 945 if (list_empty(&mm->mmlist)) { 946 spin_lock(&mmlist_lock); 947 if (list_empty(&mm->mmlist)) 948 list_add(&mm->mmlist, &init_mm.mmlist); 949 spin_unlock(&mmlist_lock); 950 } 951 dec_mm_counter(mm, MM_ANONPAGES); 952 inc_mm_counter(mm, MM_SWAPENTS); 953 } else if (PAGE_MIGRATION) { 954 /* 955 * Store the pfn of the page in a special migration 956 * pte. do_swap_page() will wait until the migration 957 * pte is removed and then restart fault handling. 958 */ 959 BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); 960 entry = make_migration_entry(page, pte_write(pteval)); 961 } 962 set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); 963 BUG_ON(pte_file(*pte)); 964 } else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) { 965 /* Establish migration entry for a file page */ 966 swp_entry_t entry; 967 entry = make_migration_entry(page, pte_write(pteval)); 968 set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); 969 } else 970 dec_mm_counter(mm, MM_FILEPAGES); 971 972 page_remove_rmap(page); 973 page_cache_release(page); 974 975 out_unmap: 976 pte_unmap_unlock(pte, ptl); 977 out: 978 return ret; 979 980 out_mlock: 981 pte_unmap_unlock(pte, ptl); 982 983 984 /* 985 * We need mmap_sem locking, Otherwise VM_LOCKED check makes 986 * unstable result and race. Plus, We can't wait here because 987 * we now hold anon_vma->lock or mapping->i_mmap_lock. 988 * if trylock failed, the page remain in evictable lru and later 989 * vmscan could retry to move the page to unevictable lru if the 990 * page is actually mlocked. 991 */ 992 if (down_read_trylock(&vma->vm_mm->mmap_sem)) { 993 if (vma->vm_flags & VM_LOCKED) { 994 mlock_vma_page(page); 995 ret = SWAP_MLOCK; 996 } 997 up_read(&vma->vm_mm->mmap_sem); 998 } 999 return ret; 1000 } 1001 1002 /* 1003 * objrmap doesn't work for nonlinear VMAs because the assumption that 1004 * offset-into-file correlates with offset-into-virtual-addresses does not hold. 1005 * Consequently, given a particular page and its ->index, we cannot locate the 1006 * ptes which are mapping that page without an exhaustive linear search. 1007 * 1008 * So what this code does is a mini "virtual scan" of each nonlinear VMA which 1009 * maps the file to which the target page belongs. The ->vm_private_data field 1010 * holds the current cursor into that scan. Successive searches will circulate 1011 * around the vma's virtual address space. 1012 * 1013 * So as more replacement pressure is applied to the pages in a nonlinear VMA, 1014 * more scanning pressure is placed against them as well. Eventually pages 1015 * will become fully unmapped and are eligible for eviction. 1016 * 1017 * For very sparsely populated VMAs this is a little inefficient - chances are 1018 * there there won't be many ptes located within the scan cluster. In this case 1019 * maybe we could scan further - to the end of the pte page, perhaps. 1020 * 1021 * Mlocked pages: check VM_LOCKED under mmap_sem held for read, if we can 1022 * acquire it without blocking. If vma locked, mlock the pages in the cluster, 1023 * rather than unmapping them. If we encounter the "check_page" that vmscan is 1024 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN. 1025 */ 1026 #define CLUSTER_SIZE min(32*PAGE_SIZE, PMD_SIZE) 1027 #define CLUSTER_MASK (~(CLUSTER_SIZE - 1)) 1028 1029 static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, 1030 struct vm_area_struct *vma, struct page *check_page) 1031 { 1032 struct mm_struct *mm = vma->vm_mm; 1033 pgd_t *pgd; 1034 pud_t *pud; 1035 pmd_t *pmd; 1036 pte_t *pte; 1037 pte_t pteval; 1038 spinlock_t *ptl; 1039 struct page *page; 1040 unsigned long address; 1041 unsigned long end; 1042 int ret = SWAP_AGAIN; 1043 int locked_vma = 0; 1044 1045 address = (vma->vm_start + cursor) & CLUSTER_MASK; 1046 end = address + CLUSTER_SIZE; 1047 if (address < vma->vm_start) 1048 address = vma->vm_start; 1049 if (end > vma->vm_end) 1050 end = vma->vm_end; 1051 1052 pgd = pgd_offset(mm, address); 1053 if (!pgd_present(*pgd)) 1054 return ret; 1055 1056 pud = pud_offset(pgd, address); 1057 if (!pud_present(*pud)) 1058 return ret; 1059 1060 pmd = pmd_offset(pud, address); 1061 if (!pmd_present(*pmd)) 1062 return ret; 1063 1064 /* 1065 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED, 1066 * keep the sem while scanning the cluster for mlocking pages. 1067 */ 1068 if (down_read_trylock(&vma->vm_mm->mmap_sem)) { 1069 locked_vma = (vma->vm_flags & VM_LOCKED); 1070 if (!locked_vma) 1071 up_read(&vma->vm_mm->mmap_sem); /* don't need it */ 1072 } 1073 1074 pte = pte_offset_map_lock(mm, pmd, address, &ptl); 1075 1076 /* Update high watermark before we lower rss */ 1077 update_hiwater_rss(mm); 1078 1079 for (; address < end; pte++, address += PAGE_SIZE) { 1080 if (!pte_present(*pte)) 1081 continue; 1082 page = vm_normal_page(vma, address, *pte); 1083 BUG_ON(!page || PageAnon(page)); 1084 1085 if (locked_vma) { 1086 mlock_vma_page(page); /* no-op if already mlocked */ 1087 if (page == check_page) 1088 ret = SWAP_MLOCK; 1089 continue; /* don't unmap */ 1090 } 1091 1092 if (ptep_clear_flush_young_notify(vma, address, pte)) 1093 continue; 1094 1095 /* Nuke the page table entry. */ 1096 flush_cache_page(vma, address, pte_pfn(*pte)); 1097 pteval = ptep_clear_flush_notify(vma, address, pte); 1098 1099 /* If nonlinear, store the file page offset in the pte. */ 1100 if (page->index != linear_page_index(vma, address)) 1101 set_pte_at(mm, address, pte, pgoff_to_pte(page->index)); 1102 1103 /* Move the dirty bit to the physical page now the pte is gone. */ 1104 if (pte_dirty(pteval)) 1105 set_page_dirty(page); 1106 1107 page_remove_rmap(page); 1108 page_cache_release(page); 1109 dec_mm_counter(mm, MM_FILEPAGES); 1110 (*mapcount)--; 1111 } 1112 pte_unmap_unlock(pte - 1, ptl); 1113 if (locked_vma) 1114 up_read(&vma->vm_mm->mmap_sem); 1115 return ret; 1116 } 1117 1118 /** 1119 * try_to_unmap_anon - unmap or unlock anonymous page using the object-based 1120 * rmap method 1121 * @page: the page to unmap/unlock 1122 * @flags: action and flags 1123 * 1124 * Find all the mappings of a page using the mapping pointer and the vma chains 1125 * contained in the anon_vma struct it points to. 1126 * 1127 * This function is only called from try_to_unmap/try_to_munlock for 1128 * anonymous pages. 1129 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1130 * where the page was found will be held for write. So, we won't recheck 1131 * vm_flags for that VMA. That should be OK, because that vma shouldn't be 1132 * 'LOCKED. 1133 */ 1134 static int try_to_unmap_anon(struct page *page, enum ttu_flags flags) 1135 { 1136 struct anon_vma *anon_vma; 1137 struct anon_vma_chain *avc; 1138 int ret = SWAP_AGAIN; 1139 1140 anon_vma = page_lock_anon_vma(page); 1141 if (!anon_vma) 1142 return ret; 1143 1144 list_for_each_entry(avc, &anon_vma->head, same_anon_vma) { 1145 struct vm_area_struct *vma = avc->vma; 1146 unsigned long address = vma_address(page, vma); 1147 if (address == -EFAULT) 1148 continue; 1149 ret = try_to_unmap_one(page, vma, address, flags); 1150 if (ret != SWAP_AGAIN || !page_mapped(page)) 1151 break; 1152 } 1153 1154 page_unlock_anon_vma(anon_vma); 1155 return ret; 1156 } 1157 1158 /** 1159 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method 1160 * @page: the page to unmap/unlock 1161 * @flags: action and flags 1162 * 1163 * Find all the mappings of a page using the mapping pointer and the vma chains 1164 * contained in the address_space struct it points to. 1165 * 1166 * This function is only called from try_to_unmap/try_to_munlock for 1167 * object-based pages. 1168 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1169 * where the page was found will be held for write. So, we won't recheck 1170 * vm_flags for that VMA. That should be OK, because that vma shouldn't be 1171 * 'LOCKED. 1172 */ 1173 static int try_to_unmap_file(struct page *page, enum ttu_flags flags) 1174 { 1175 struct address_space *mapping = page->mapping; 1176 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 1177 struct vm_area_struct *vma; 1178 struct prio_tree_iter iter; 1179 int ret = SWAP_AGAIN; 1180 unsigned long cursor; 1181 unsigned long max_nl_cursor = 0; 1182 unsigned long max_nl_size = 0; 1183 unsigned int mapcount; 1184 1185 spin_lock(&mapping->i_mmap_lock); 1186 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 1187 unsigned long address = vma_address(page, vma); 1188 if (address == -EFAULT) 1189 continue; 1190 ret = try_to_unmap_one(page, vma, address, flags); 1191 if (ret != SWAP_AGAIN || !page_mapped(page)) 1192 goto out; 1193 } 1194 1195 if (list_empty(&mapping->i_mmap_nonlinear)) 1196 goto out; 1197 1198 /* 1199 * We don't bother to try to find the munlocked page in nonlinears. 1200 * It's costly. Instead, later, page reclaim logic may call 1201 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily. 1202 */ 1203 if (TTU_ACTION(flags) == TTU_MUNLOCK) 1204 goto out; 1205 1206 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, 1207 shared.vm_set.list) { 1208 cursor = (unsigned long) vma->vm_private_data; 1209 if (cursor > max_nl_cursor) 1210 max_nl_cursor = cursor; 1211 cursor = vma->vm_end - vma->vm_start; 1212 if (cursor > max_nl_size) 1213 max_nl_size = cursor; 1214 } 1215 1216 if (max_nl_size == 0) { /* all nonlinears locked or reserved ? */ 1217 ret = SWAP_FAIL; 1218 goto out; 1219 } 1220 1221 /* 1222 * We don't try to search for this page in the nonlinear vmas, 1223 * and page_referenced wouldn't have found it anyway. Instead 1224 * just walk the nonlinear vmas trying to age and unmap some. 1225 * The mapcount of the page we came in with is irrelevant, 1226 * but even so use it as a guide to how hard we should try? 1227 */ 1228 mapcount = page_mapcount(page); 1229 if (!mapcount) 1230 goto out; 1231 cond_resched_lock(&mapping->i_mmap_lock); 1232 1233 max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK; 1234 if (max_nl_cursor == 0) 1235 max_nl_cursor = CLUSTER_SIZE; 1236 1237 do { 1238 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, 1239 shared.vm_set.list) { 1240 cursor = (unsigned long) vma->vm_private_data; 1241 while ( cursor < max_nl_cursor && 1242 cursor < vma->vm_end - vma->vm_start) { 1243 if (try_to_unmap_cluster(cursor, &mapcount, 1244 vma, page) == SWAP_MLOCK) 1245 ret = SWAP_MLOCK; 1246 cursor += CLUSTER_SIZE; 1247 vma->vm_private_data = (void *) cursor; 1248 if ((int)mapcount <= 0) 1249 goto out; 1250 } 1251 vma->vm_private_data = (void *) max_nl_cursor; 1252 } 1253 cond_resched_lock(&mapping->i_mmap_lock); 1254 max_nl_cursor += CLUSTER_SIZE; 1255 } while (max_nl_cursor <= max_nl_size); 1256 1257 /* 1258 * Don't loop forever (perhaps all the remaining pages are 1259 * in locked vmas). Reset cursor on all unreserved nonlinear 1260 * vmas, now forgetting on which ones it had fallen behind. 1261 */ 1262 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) 1263 vma->vm_private_data = NULL; 1264 out: 1265 spin_unlock(&mapping->i_mmap_lock); 1266 return ret; 1267 } 1268 1269 /** 1270 * try_to_unmap - try to remove all page table mappings to a page 1271 * @page: the page to get unmapped 1272 * @flags: action and flags 1273 * 1274 * Tries to remove all the page table entries which are mapping this 1275 * page, used in the pageout path. Caller must hold the page lock. 1276 * Return values are: 1277 * 1278 * SWAP_SUCCESS - we succeeded in removing all mappings 1279 * SWAP_AGAIN - we missed a mapping, try again later 1280 * SWAP_FAIL - the page is unswappable 1281 * SWAP_MLOCK - page is mlocked. 1282 */ 1283 int try_to_unmap(struct page *page, enum ttu_flags flags) 1284 { 1285 int ret; 1286 1287 BUG_ON(!PageLocked(page)); 1288 1289 if (unlikely(PageKsm(page))) 1290 ret = try_to_unmap_ksm(page, flags); 1291 else if (PageAnon(page)) 1292 ret = try_to_unmap_anon(page, flags); 1293 else 1294 ret = try_to_unmap_file(page, flags); 1295 if (ret != SWAP_MLOCK && !page_mapped(page)) 1296 ret = SWAP_SUCCESS; 1297 return ret; 1298 } 1299 1300 /** 1301 * try_to_munlock - try to munlock a page 1302 * @page: the page to be munlocked 1303 * 1304 * Called from munlock code. Checks all of the VMAs mapping the page 1305 * to make sure nobody else has this page mlocked. The page will be 1306 * returned with PG_mlocked cleared if no other vmas have it mlocked. 1307 * 1308 * Return values are: 1309 * 1310 * SWAP_AGAIN - no vma is holding page mlocked, or, 1311 * SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem 1312 * SWAP_FAIL - page cannot be located at present 1313 * SWAP_MLOCK - page is now mlocked. 1314 */ 1315 int try_to_munlock(struct page *page) 1316 { 1317 VM_BUG_ON(!PageLocked(page) || PageLRU(page)); 1318 1319 if (unlikely(PageKsm(page))) 1320 return try_to_unmap_ksm(page, TTU_MUNLOCK); 1321 else if (PageAnon(page)) 1322 return try_to_unmap_anon(page, TTU_MUNLOCK); 1323 else 1324 return try_to_unmap_file(page, TTU_MUNLOCK); 1325 } 1326 1327 #ifdef CONFIG_MIGRATION 1328 /* 1329 * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file(): 1330 * Called by migrate.c to remove migration ptes, but might be used more later. 1331 */ 1332 static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *, 1333 struct vm_area_struct *, unsigned long, void *), void *arg) 1334 { 1335 struct anon_vma *anon_vma; 1336 struct anon_vma_chain *avc; 1337 int ret = SWAP_AGAIN; 1338 1339 /* 1340 * Note: remove_migration_ptes() cannot use page_lock_anon_vma() 1341 * because that depends on page_mapped(); but not all its usages 1342 * are holding mmap_sem, which also gave the necessary guarantee 1343 * (that this anon_vma's slab has not already been destroyed). 1344 * This needs to be reviewed later: avoiding page_lock_anon_vma() 1345 * is risky, and currently limits the usefulness of rmap_walk(). 1346 */ 1347 anon_vma = page_anon_vma(page); 1348 if (!anon_vma) 1349 return ret; 1350 spin_lock(&anon_vma->lock); 1351 list_for_each_entry(avc, &anon_vma->head, same_anon_vma) { 1352 struct vm_area_struct *vma = avc->vma; 1353 unsigned long address = vma_address(page, vma); 1354 if (address == -EFAULT) 1355 continue; 1356 ret = rmap_one(page, vma, address, arg); 1357 if (ret != SWAP_AGAIN) 1358 break; 1359 } 1360 spin_unlock(&anon_vma->lock); 1361 return ret; 1362 } 1363 1364 static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *, 1365 struct vm_area_struct *, unsigned long, void *), void *arg) 1366 { 1367 struct address_space *mapping = page->mapping; 1368 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 1369 struct vm_area_struct *vma; 1370 struct prio_tree_iter iter; 1371 int ret = SWAP_AGAIN; 1372 1373 if (!mapping) 1374 return ret; 1375 spin_lock(&mapping->i_mmap_lock); 1376 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 1377 unsigned long address = vma_address(page, vma); 1378 if (address == -EFAULT) 1379 continue; 1380 ret = rmap_one(page, vma, address, arg); 1381 if (ret != SWAP_AGAIN) 1382 break; 1383 } 1384 /* 1385 * No nonlinear handling: being always shared, nonlinear vmas 1386 * never contain migration ptes. Decide what to do about this 1387 * limitation to linear when we need rmap_walk() on nonlinear. 1388 */ 1389 spin_unlock(&mapping->i_mmap_lock); 1390 return ret; 1391 } 1392 1393 int rmap_walk(struct page *page, int (*rmap_one)(struct page *, 1394 struct vm_area_struct *, unsigned long, void *), void *arg) 1395 { 1396 VM_BUG_ON(!PageLocked(page)); 1397 1398 if (unlikely(PageKsm(page))) 1399 return rmap_walk_ksm(page, rmap_one, arg); 1400 else if (PageAnon(page)) 1401 return rmap_walk_anon(page, rmap_one, arg); 1402 else 1403 return rmap_walk_file(page, rmap_one, arg); 1404 } 1405 #endif /* CONFIG_MIGRATION */ 1406