1 /* 2 * mm/rmap.c - physical to virtual reverse mappings 3 * 4 * Copyright 2001, Rik van Riel <riel@conectiva.com.br> 5 * Released under the General Public License (GPL). 6 * 7 * Simple, low overhead reverse mapping scheme. 8 * Please try to keep this thing as modular as possible. 9 * 10 * Provides methods for unmapping each kind of mapped page: 11 * the anon methods track anonymous pages, and 12 * the file methods track pages belonging to an inode. 13 * 14 * Original design by Rik van Riel <riel@conectiva.com.br> 2001 15 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004 16 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004 17 * Contributions by Hugh Dickins 2003, 2004 18 */ 19 20 /* 21 * Lock ordering in mm: 22 * 23 * inode->i_mutex (while writing or truncating, not reading or faulting) 24 * inode->i_alloc_sem (vmtruncate_range) 25 * mm->mmap_sem 26 * page->flags PG_locked (lock_page) 27 * mapping->i_mmap_lock 28 * anon_vma->lock 29 * mm->page_table_lock or pte_lock 30 * zone->lru_lock (in mark_page_accessed, isolate_lru_page) 31 * swap_lock (in swap_duplicate, swap_info_get) 32 * mmlist_lock (in mmput, drain_mmlist and others) 33 * mapping->private_lock (in __set_page_dirty_buffers) 34 * inode_lock (in set_page_dirty's __mark_inode_dirty) 35 * sb_lock (within inode_lock in fs/fs-writeback.c) 36 * mapping->tree_lock (widely used, in set_page_dirty, 37 * in arch-dependent flush_dcache_mmap_lock, 38 * within inode_lock in __sync_single_inode) 39 * 40 * (code doesn't rely on that order so it could be switched around) 41 * ->tasklist_lock 42 * anon_vma->lock (memory_failure, collect_procs_anon) 43 * pte map lock 44 */ 45 46 #include <linux/mm.h> 47 #include <linux/pagemap.h> 48 #include <linux/swap.h> 49 #include <linux/swapops.h> 50 #include <linux/slab.h> 51 #include <linux/init.h> 52 #include <linux/ksm.h> 53 #include <linux/rmap.h> 54 #include <linux/rcupdate.h> 55 #include <linux/module.h> 56 #include <linux/memcontrol.h> 57 #include <linux/mmu_notifier.h> 58 #include <linux/migrate.h> 59 60 #include <asm/tlbflush.h> 61 62 #include "internal.h" 63 64 static struct kmem_cache *anon_vma_cachep; 65 static struct kmem_cache *anon_vma_chain_cachep; 66 67 static inline struct anon_vma *anon_vma_alloc(void) 68 { 69 return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); 70 } 71 72 void anon_vma_free(struct anon_vma *anon_vma) 73 { 74 kmem_cache_free(anon_vma_cachep, anon_vma); 75 } 76 77 static inline struct anon_vma_chain *anon_vma_chain_alloc(void) 78 { 79 return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL); 80 } 81 82 void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) 83 { 84 kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); 85 } 86 87 /** 88 * anon_vma_prepare - attach an anon_vma to a memory region 89 * @vma: the memory region in question 90 * 91 * This makes sure the memory mapping described by 'vma' has 92 * an 'anon_vma' attached to it, so that we can associate the 93 * anonymous pages mapped into it with that anon_vma. 94 * 95 * The common case will be that we already have one, but if 96 * if not we either need to find an adjacent mapping that we 97 * can re-use the anon_vma from (very common when the only 98 * reason for splitting a vma has been mprotect()), or we 99 * allocate a new one. 100 * 101 * Anon-vma allocations are very subtle, because we may have 102 * optimistically looked up an anon_vma in page_lock_anon_vma() 103 * and that may actually touch the spinlock even in the newly 104 * allocated vma (it depends on RCU to make sure that the 105 * anon_vma isn't actually destroyed). 106 * 107 * As a result, we need to do proper anon_vma locking even 108 * for the new allocation. At the same time, we do not want 109 * to do any locking for the common case of already having 110 * an anon_vma. 111 * 112 * This must be called with the mmap_sem held for reading. 113 */ 114 int anon_vma_prepare(struct vm_area_struct *vma) 115 { 116 struct anon_vma *anon_vma = vma->anon_vma; 117 struct anon_vma_chain *avc; 118 119 might_sleep(); 120 if (unlikely(!anon_vma)) { 121 struct mm_struct *mm = vma->vm_mm; 122 struct anon_vma *allocated; 123 124 avc = anon_vma_chain_alloc(); 125 if (!avc) 126 goto out_enomem; 127 128 anon_vma = find_mergeable_anon_vma(vma); 129 allocated = NULL; 130 if (!anon_vma) { 131 anon_vma = anon_vma_alloc(); 132 if (unlikely(!anon_vma)) 133 goto out_enomem_free_avc; 134 allocated = anon_vma; 135 } 136 spin_lock(&anon_vma->lock); 137 138 /* page_table_lock to protect against threads */ 139 spin_lock(&mm->page_table_lock); 140 if (likely(!vma->anon_vma)) { 141 vma->anon_vma = anon_vma; 142 avc->anon_vma = anon_vma; 143 avc->vma = vma; 144 list_add(&avc->same_vma, &vma->anon_vma_chain); 145 list_add(&avc->same_anon_vma, &anon_vma->head); 146 allocated = NULL; 147 } 148 spin_unlock(&mm->page_table_lock); 149 150 spin_unlock(&anon_vma->lock); 151 if (unlikely(allocated)) { 152 anon_vma_free(allocated); 153 anon_vma_chain_free(avc); 154 } 155 } 156 return 0; 157 158 out_enomem_free_avc: 159 anon_vma_chain_free(avc); 160 out_enomem: 161 return -ENOMEM; 162 } 163 164 static void anon_vma_chain_link(struct vm_area_struct *vma, 165 struct anon_vma_chain *avc, 166 struct anon_vma *anon_vma) 167 { 168 avc->vma = vma; 169 avc->anon_vma = anon_vma; 170 list_add(&avc->same_vma, &vma->anon_vma_chain); 171 172 spin_lock(&anon_vma->lock); 173 list_add_tail(&avc->same_anon_vma, &anon_vma->head); 174 spin_unlock(&anon_vma->lock); 175 } 176 177 /* 178 * Attach the anon_vmas from src to dst. 179 * Returns 0 on success, -ENOMEM on failure. 180 */ 181 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) 182 { 183 struct anon_vma_chain *avc, *pavc; 184 185 list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) { 186 avc = anon_vma_chain_alloc(); 187 if (!avc) 188 goto enomem_failure; 189 anon_vma_chain_link(dst, avc, pavc->anon_vma); 190 } 191 return 0; 192 193 enomem_failure: 194 unlink_anon_vmas(dst); 195 return -ENOMEM; 196 } 197 198 /* 199 * Attach vma to its own anon_vma, as well as to the anon_vmas that 200 * the corresponding VMA in the parent process is attached to. 201 * Returns 0 on success, non-zero on failure. 202 */ 203 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) 204 { 205 struct anon_vma_chain *avc; 206 struct anon_vma *anon_vma; 207 208 /* Don't bother if the parent process has no anon_vma here. */ 209 if (!pvma->anon_vma) 210 return 0; 211 212 /* 213 * First, attach the new VMA to the parent VMA's anon_vmas, 214 * so rmap can find non-COWed pages in child processes. 215 */ 216 if (anon_vma_clone(vma, pvma)) 217 return -ENOMEM; 218 219 /* Then add our own anon_vma. */ 220 anon_vma = anon_vma_alloc(); 221 if (!anon_vma) 222 goto out_error; 223 avc = anon_vma_chain_alloc(); 224 if (!avc) 225 goto out_error_free_anon_vma; 226 anon_vma_chain_link(vma, avc, anon_vma); 227 /* Mark this anon_vma as the one where our new (COWed) pages go. */ 228 vma->anon_vma = anon_vma; 229 230 return 0; 231 232 out_error_free_anon_vma: 233 anon_vma_free(anon_vma); 234 out_error: 235 return -ENOMEM; 236 } 237 238 static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain) 239 { 240 struct anon_vma *anon_vma = anon_vma_chain->anon_vma; 241 int empty; 242 243 /* If anon_vma_fork fails, we can get an empty anon_vma_chain. */ 244 if (!anon_vma) 245 return; 246 247 spin_lock(&anon_vma->lock); 248 list_del(&anon_vma_chain->same_anon_vma); 249 250 /* We must garbage collect the anon_vma if it's empty */ 251 empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma); 252 spin_unlock(&anon_vma->lock); 253 254 if (empty) 255 anon_vma_free(anon_vma); 256 } 257 258 void unlink_anon_vmas(struct vm_area_struct *vma) 259 { 260 struct anon_vma_chain *avc, *next; 261 262 /* Unlink each anon_vma chained to the VMA. */ 263 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 264 anon_vma_unlink(avc); 265 list_del(&avc->same_vma); 266 anon_vma_chain_free(avc); 267 } 268 } 269 270 static void anon_vma_ctor(void *data) 271 { 272 struct anon_vma *anon_vma = data; 273 274 spin_lock_init(&anon_vma->lock); 275 ksm_refcount_init(anon_vma); 276 INIT_LIST_HEAD(&anon_vma->head); 277 } 278 279 void __init anon_vma_init(void) 280 { 281 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), 282 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor); 283 anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC); 284 } 285 286 /* 287 * Getting a lock on a stable anon_vma from a page off the LRU is 288 * tricky: page_lock_anon_vma rely on RCU to guard against the races. 289 */ 290 struct anon_vma *page_lock_anon_vma(struct page *page) 291 { 292 struct anon_vma *anon_vma; 293 unsigned long anon_mapping; 294 295 rcu_read_lock(); 296 anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping); 297 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) 298 goto out; 299 if (!page_mapped(page)) 300 goto out; 301 302 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); 303 spin_lock(&anon_vma->lock); 304 return anon_vma; 305 out: 306 rcu_read_unlock(); 307 return NULL; 308 } 309 310 void page_unlock_anon_vma(struct anon_vma *anon_vma) 311 { 312 spin_unlock(&anon_vma->lock); 313 rcu_read_unlock(); 314 } 315 316 /* 317 * At what user virtual address is page expected in @vma? 318 * Returns virtual address or -EFAULT if page's index/offset is not 319 * within the range mapped the @vma. 320 */ 321 static inline unsigned long 322 vma_address(struct page *page, struct vm_area_struct *vma) 323 { 324 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 325 unsigned long address; 326 327 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 328 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) { 329 /* page should be within @vma mapping range */ 330 return -EFAULT; 331 } 332 return address; 333 } 334 335 /* 336 * At what user virtual address is page expected in vma? 337 * checking that the page matches the vma. 338 */ 339 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 340 { 341 if (PageAnon(page)) { 342 if (vma->anon_vma != page_anon_vma(page)) 343 return -EFAULT; 344 } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { 345 if (!vma->vm_file || 346 vma->vm_file->f_mapping != page->mapping) 347 return -EFAULT; 348 } else 349 return -EFAULT; 350 return vma_address(page, vma); 351 } 352 353 /* 354 * Check that @page is mapped at @address into @mm. 355 * 356 * If @sync is false, page_check_address may perform a racy check to avoid 357 * the page table lock when the pte is not present (helpful when reclaiming 358 * highly shared pages). 359 * 360 * On success returns with pte mapped and locked. 361 */ 362 pte_t *page_check_address(struct page *page, struct mm_struct *mm, 363 unsigned long address, spinlock_t **ptlp, int sync) 364 { 365 pgd_t *pgd; 366 pud_t *pud; 367 pmd_t *pmd; 368 pte_t *pte; 369 spinlock_t *ptl; 370 371 pgd = pgd_offset(mm, address); 372 if (!pgd_present(*pgd)) 373 return NULL; 374 375 pud = pud_offset(pgd, address); 376 if (!pud_present(*pud)) 377 return NULL; 378 379 pmd = pmd_offset(pud, address); 380 if (!pmd_present(*pmd)) 381 return NULL; 382 383 pte = pte_offset_map(pmd, address); 384 /* Make a quick check before getting the lock */ 385 if (!sync && !pte_present(*pte)) { 386 pte_unmap(pte); 387 return NULL; 388 } 389 390 ptl = pte_lockptr(mm, pmd); 391 spin_lock(ptl); 392 if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) { 393 *ptlp = ptl; 394 return pte; 395 } 396 pte_unmap_unlock(pte, ptl); 397 return NULL; 398 } 399 400 /** 401 * page_mapped_in_vma - check whether a page is really mapped in a VMA 402 * @page: the page to test 403 * @vma: the VMA to test 404 * 405 * Returns 1 if the page is mapped into the page tables of the VMA, 0 406 * if the page is not mapped into the page tables of this VMA. Only 407 * valid for normal file or anonymous VMAs. 408 */ 409 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) 410 { 411 unsigned long address; 412 pte_t *pte; 413 spinlock_t *ptl; 414 415 address = vma_address(page, vma); 416 if (address == -EFAULT) /* out of vma range */ 417 return 0; 418 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); 419 if (!pte) /* the page is not in this mm */ 420 return 0; 421 pte_unmap_unlock(pte, ptl); 422 423 return 1; 424 } 425 426 /* 427 * Subfunctions of page_referenced: page_referenced_one called 428 * repeatedly from either page_referenced_anon or page_referenced_file. 429 */ 430 int page_referenced_one(struct page *page, struct vm_area_struct *vma, 431 unsigned long address, unsigned int *mapcount, 432 unsigned long *vm_flags) 433 { 434 struct mm_struct *mm = vma->vm_mm; 435 pte_t *pte; 436 spinlock_t *ptl; 437 int referenced = 0; 438 439 pte = page_check_address(page, mm, address, &ptl, 0); 440 if (!pte) 441 goto out; 442 443 /* 444 * Don't want to elevate referenced for mlocked page that gets this far, 445 * in order that it progresses to try_to_unmap and is moved to the 446 * unevictable list. 447 */ 448 if (vma->vm_flags & VM_LOCKED) { 449 *mapcount = 1; /* break early from loop */ 450 *vm_flags |= VM_LOCKED; 451 goto out_unmap; 452 } 453 454 if (ptep_clear_flush_young_notify(vma, address, pte)) { 455 /* 456 * Don't treat a reference through a sequentially read 457 * mapping as such. If the page has been used in 458 * another mapping, we will catch it; if this other 459 * mapping is already gone, the unmap path will have 460 * set PG_referenced or activated the page. 461 */ 462 if (likely(!VM_SequentialReadHint(vma))) 463 referenced++; 464 } 465 466 /* Pretend the page is referenced if the task has the 467 swap token and is in the middle of a page fault. */ 468 if (mm != current->mm && has_swap_token(mm) && 469 rwsem_is_locked(&mm->mmap_sem)) 470 referenced++; 471 472 out_unmap: 473 (*mapcount)--; 474 pte_unmap_unlock(pte, ptl); 475 476 if (referenced) 477 *vm_flags |= vma->vm_flags; 478 out: 479 return referenced; 480 } 481 482 static int page_referenced_anon(struct page *page, 483 struct mem_cgroup *mem_cont, 484 unsigned long *vm_flags) 485 { 486 unsigned int mapcount; 487 struct anon_vma *anon_vma; 488 struct anon_vma_chain *avc; 489 int referenced = 0; 490 491 anon_vma = page_lock_anon_vma(page); 492 if (!anon_vma) 493 return referenced; 494 495 mapcount = page_mapcount(page); 496 list_for_each_entry(avc, &anon_vma->head, same_anon_vma) { 497 struct vm_area_struct *vma = avc->vma; 498 unsigned long address = vma_address(page, vma); 499 if (address == -EFAULT) 500 continue; 501 /* 502 * If we are reclaiming on behalf of a cgroup, skip 503 * counting on behalf of references from different 504 * cgroups 505 */ 506 if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) 507 continue; 508 referenced += page_referenced_one(page, vma, address, 509 &mapcount, vm_flags); 510 if (!mapcount) 511 break; 512 } 513 514 page_unlock_anon_vma(anon_vma); 515 return referenced; 516 } 517 518 /** 519 * page_referenced_file - referenced check for object-based rmap 520 * @page: the page we're checking references on. 521 * @mem_cont: target memory controller 522 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page 523 * 524 * For an object-based mapped page, find all the places it is mapped and 525 * check/clear the referenced flag. This is done by following the page->mapping 526 * pointer, then walking the chain of vmas it holds. It returns the number 527 * of references it found. 528 * 529 * This function is only called from page_referenced for object-based pages. 530 */ 531 static int page_referenced_file(struct page *page, 532 struct mem_cgroup *mem_cont, 533 unsigned long *vm_flags) 534 { 535 unsigned int mapcount; 536 struct address_space *mapping = page->mapping; 537 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 538 struct vm_area_struct *vma; 539 struct prio_tree_iter iter; 540 int referenced = 0; 541 542 /* 543 * The caller's checks on page->mapping and !PageAnon have made 544 * sure that this is a file page: the check for page->mapping 545 * excludes the case just before it gets set on an anon page. 546 */ 547 BUG_ON(PageAnon(page)); 548 549 /* 550 * The page lock not only makes sure that page->mapping cannot 551 * suddenly be NULLified by truncation, it makes sure that the 552 * structure at mapping cannot be freed and reused yet, 553 * so we can safely take mapping->i_mmap_lock. 554 */ 555 BUG_ON(!PageLocked(page)); 556 557 spin_lock(&mapping->i_mmap_lock); 558 559 /* 560 * i_mmap_lock does not stabilize mapcount at all, but mapcount 561 * is more likely to be accurate if we note it after spinning. 562 */ 563 mapcount = page_mapcount(page); 564 565 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 566 unsigned long address = vma_address(page, vma); 567 if (address == -EFAULT) 568 continue; 569 /* 570 * If we are reclaiming on behalf of a cgroup, skip 571 * counting on behalf of references from different 572 * cgroups 573 */ 574 if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) 575 continue; 576 referenced += page_referenced_one(page, vma, address, 577 &mapcount, vm_flags); 578 if (!mapcount) 579 break; 580 } 581 582 spin_unlock(&mapping->i_mmap_lock); 583 return referenced; 584 } 585 586 /** 587 * page_referenced - test if the page was referenced 588 * @page: the page to test 589 * @is_locked: caller holds lock on the page 590 * @mem_cont: target memory controller 591 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page 592 * 593 * Quick test_and_clear_referenced for all mappings to a page, 594 * returns the number of ptes which referenced the page. 595 */ 596 int page_referenced(struct page *page, 597 int is_locked, 598 struct mem_cgroup *mem_cont, 599 unsigned long *vm_flags) 600 { 601 int referenced = 0; 602 int we_locked = 0; 603 604 *vm_flags = 0; 605 if (page_mapped(page) && page_rmapping(page)) { 606 if (!is_locked && (!PageAnon(page) || PageKsm(page))) { 607 we_locked = trylock_page(page); 608 if (!we_locked) { 609 referenced++; 610 goto out; 611 } 612 } 613 if (unlikely(PageKsm(page))) 614 referenced += page_referenced_ksm(page, mem_cont, 615 vm_flags); 616 else if (PageAnon(page)) 617 referenced += page_referenced_anon(page, mem_cont, 618 vm_flags); 619 else if (page->mapping) 620 referenced += page_referenced_file(page, mem_cont, 621 vm_flags); 622 if (we_locked) 623 unlock_page(page); 624 } 625 out: 626 if (page_test_and_clear_young(page)) 627 referenced++; 628 629 return referenced; 630 } 631 632 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, 633 unsigned long address) 634 { 635 struct mm_struct *mm = vma->vm_mm; 636 pte_t *pte; 637 spinlock_t *ptl; 638 int ret = 0; 639 640 pte = page_check_address(page, mm, address, &ptl, 1); 641 if (!pte) 642 goto out; 643 644 if (pte_dirty(*pte) || pte_write(*pte)) { 645 pte_t entry; 646 647 flush_cache_page(vma, address, pte_pfn(*pte)); 648 entry = ptep_clear_flush_notify(vma, address, pte); 649 entry = pte_wrprotect(entry); 650 entry = pte_mkclean(entry); 651 set_pte_at(mm, address, pte, entry); 652 ret = 1; 653 } 654 655 pte_unmap_unlock(pte, ptl); 656 out: 657 return ret; 658 } 659 660 static int page_mkclean_file(struct address_space *mapping, struct page *page) 661 { 662 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 663 struct vm_area_struct *vma; 664 struct prio_tree_iter iter; 665 int ret = 0; 666 667 BUG_ON(PageAnon(page)); 668 669 spin_lock(&mapping->i_mmap_lock); 670 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 671 if (vma->vm_flags & VM_SHARED) { 672 unsigned long address = vma_address(page, vma); 673 if (address == -EFAULT) 674 continue; 675 ret += page_mkclean_one(page, vma, address); 676 } 677 } 678 spin_unlock(&mapping->i_mmap_lock); 679 return ret; 680 } 681 682 int page_mkclean(struct page *page) 683 { 684 int ret = 0; 685 686 BUG_ON(!PageLocked(page)); 687 688 if (page_mapped(page)) { 689 struct address_space *mapping = page_mapping(page); 690 if (mapping) { 691 ret = page_mkclean_file(mapping, page); 692 if (page_test_dirty(page)) { 693 page_clear_dirty(page); 694 ret = 1; 695 } 696 } 697 } 698 699 return ret; 700 } 701 EXPORT_SYMBOL_GPL(page_mkclean); 702 703 /** 704 * page_move_anon_rmap - move a page to our anon_vma 705 * @page: the page to move to our anon_vma 706 * @vma: the vma the page belongs to 707 * @address: the user virtual address mapped 708 * 709 * When a page belongs exclusively to one process after a COW event, 710 * that page can be moved into the anon_vma that belongs to just that 711 * process, so the rmap code will not search the parent or sibling 712 * processes. 713 */ 714 void page_move_anon_rmap(struct page *page, 715 struct vm_area_struct *vma, unsigned long address) 716 { 717 struct anon_vma *anon_vma = vma->anon_vma; 718 719 VM_BUG_ON(!PageLocked(page)); 720 VM_BUG_ON(!anon_vma); 721 VM_BUG_ON(page->index != linear_page_index(vma, address)); 722 723 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 724 page->mapping = (struct address_space *) anon_vma; 725 } 726 727 /** 728 * __page_set_anon_rmap - setup new anonymous rmap 729 * @page: the page to add the mapping to 730 * @vma: the vm area in which the mapping is added 731 * @address: the user virtual address mapped 732 */ 733 static void __page_set_anon_rmap(struct page *page, 734 struct vm_area_struct *vma, unsigned long address) 735 { 736 struct anon_vma *anon_vma = vma->anon_vma; 737 738 BUG_ON(!anon_vma); 739 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 740 page->mapping = (struct address_space *) anon_vma; 741 page->index = linear_page_index(vma, address); 742 } 743 744 /** 745 * __page_check_anon_rmap - sanity check anonymous rmap addition 746 * @page: the page to add the mapping to 747 * @vma: the vm area in which the mapping is added 748 * @address: the user virtual address mapped 749 */ 750 static void __page_check_anon_rmap(struct page *page, 751 struct vm_area_struct *vma, unsigned long address) 752 { 753 #ifdef CONFIG_DEBUG_VM 754 /* 755 * The page's anon-rmap details (mapping and index) are guaranteed to 756 * be set up correctly at this point. 757 * 758 * We have exclusion against page_add_anon_rmap because the caller 759 * always holds the page locked, except if called from page_dup_rmap, 760 * in which case the page is already known to be setup. 761 * 762 * We have exclusion against page_add_new_anon_rmap because those pages 763 * are initially only visible via the pagetables, and the pte is locked 764 * over the call to page_add_new_anon_rmap. 765 */ 766 BUG_ON(page->index != linear_page_index(vma, address)); 767 #endif 768 } 769 770 /** 771 * page_add_anon_rmap - add pte mapping to an anonymous page 772 * @page: the page to add the mapping to 773 * @vma: the vm area in which the mapping is added 774 * @address: the user virtual address mapped 775 * 776 * The caller needs to hold the pte lock, and the page must be locked in 777 * the anon_vma case: to serialize mapping,index checking after setting, 778 * and to ensure that PageAnon is not being upgraded racily to PageKsm 779 * (but PageKsm is never downgraded to PageAnon). 780 */ 781 void page_add_anon_rmap(struct page *page, 782 struct vm_area_struct *vma, unsigned long address) 783 { 784 int first = atomic_inc_and_test(&page->_mapcount); 785 if (first) 786 __inc_zone_page_state(page, NR_ANON_PAGES); 787 if (unlikely(PageKsm(page))) 788 return; 789 790 VM_BUG_ON(!PageLocked(page)); 791 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); 792 if (first) 793 __page_set_anon_rmap(page, vma, address); 794 else 795 __page_check_anon_rmap(page, vma, address); 796 } 797 798 /** 799 * page_add_new_anon_rmap - add pte mapping to a new anonymous page 800 * @page: the page to add the mapping to 801 * @vma: the vm area in which the mapping is added 802 * @address: the user virtual address mapped 803 * 804 * Same as page_add_anon_rmap but must only be called on *new* pages. 805 * This means the inc-and-test can be bypassed. 806 * Page does not have to be locked. 807 */ 808 void page_add_new_anon_rmap(struct page *page, 809 struct vm_area_struct *vma, unsigned long address) 810 { 811 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); 812 SetPageSwapBacked(page); 813 atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ 814 __inc_zone_page_state(page, NR_ANON_PAGES); 815 __page_set_anon_rmap(page, vma, address); 816 if (page_evictable(page, vma)) 817 lru_cache_add_lru(page, LRU_ACTIVE_ANON); 818 else 819 add_page_to_unevictable_list(page); 820 } 821 822 /** 823 * page_add_file_rmap - add pte mapping to a file page 824 * @page: the page to add the mapping to 825 * 826 * The caller needs to hold the pte lock. 827 */ 828 void page_add_file_rmap(struct page *page) 829 { 830 if (atomic_inc_and_test(&page->_mapcount)) { 831 __inc_zone_page_state(page, NR_FILE_MAPPED); 832 mem_cgroup_update_file_mapped(page, 1); 833 } 834 } 835 836 /** 837 * page_remove_rmap - take down pte mapping from a page 838 * @page: page to remove mapping from 839 * 840 * The caller needs to hold the pte lock. 841 */ 842 void page_remove_rmap(struct page *page) 843 { 844 /* page still mapped by someone else? */ 845 if (!atomic_add_negative(-1, &page->_mapcount)) 846 return; 847 848 /* 849 * Now that the last pte has gone, s390 must transfer dirty 850 * flag from storage key to struct page. We can usually skip 851 * this if the page is anon, so about to be freed; but perhaps 852 * not if it's in swapcache - there might be another pte slot 853 * containing the swap entry, but page not yet written to swap. 854 */ 855 if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) { 856 page_clear_dirty(page); 857 set_page_dirty(page); 858 } 859 if (PageAnon(page)) { 860 mem_cgroup_uncharge_page(page); 861 __dec_zone_page_state(page, NR_ANON_PAGES); 862 } else { 863 __dec_zone_page_state(page, NR_FILE_MAPPED); 864 mem_cgroup_update_file_mapped(page, -1); 865 } 866 /* 867 * It would be tidy to reset the PageAnon mapping here, 868 * but that might overwrite a racing page_add_anon_rmap 869 * which increments mapcount after us but sets mapping 870 * before us: so leave the reset to free_hot_cold_page, 871 * and remember that it's only reliable while mapped. 872 * Leaving it set also helps swapoff to reinstate ptes 873 * faster for those pages still in swapcache. 874 */ 875 } 876 877 /* 878 * Subfunctions of try_to_unmap: try_to_unmap_one called 879 * repeatedly from either try_to_unmap_anon or try_to_unmap_file. 880 */ 881 int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, 882 unsigned long address, enum ttu_flags flags) 883 { 884 struct mm_struct *mm = vma->vm_mm; 885 pte_t *pte; 886 pte_t pteval; 887 spinlock_t *ptl; 888 int ret = SWAP_AGAIN; 889 890 pte = page_check_address(page, mm, address, &ptl, 0); 891 if (!pte) 892 goto out; 893 894 /* 895 * If the page is mlock()d, we cannot swap it out. 896 * If it's recently referenced (perhaps page_referenced 897 * skipped over this mm) then we should reactivate it. 898 */ 899 if (!(flags & TTU_IGNORE_MLOCK)) { 900 if (vma->vm_flags & VM_LOCKED) 901 goto out_mlock; 902 903 if (TTU_ACTION(flags) == TTU_MUNLOCK) 904 goto out_unmap; 905 } 906 if (!(flags & TTU_IGNORE_ACCESS)) { 907 if (ptep_clear_flush_young_notify(vma, address, pte)) { 908 ret = SWAP_FAIL; 909 goto out_unmap; 910 } 911 } 912 913 /* Nuke the page table entry. */ 914 flush_cache_page(vma, address, page_to_pfn(page)); 915 pteval = ptep_clear_flush_notify(vma, address, pte); 916 917 /* Move the dirty bit to the physical page now the pte is gone. */ 918 if (pte_dirty(pteval)) 919 set_page_dirty(page); 920 921 /* Update high watermark before we lower rss */ 922 update_hiwater_rss(mm); 923 924 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { 925 if (PageAnon(page)) 926 dec_mm_counter(mm, MM_ANONPAGES); 927 else 928 dec_mm_counter(mm, MM_FILEPAGES); 929 set_pte_at(mm, address, pte, 930 swp_entry_to_pte(make_hwpoison_entry(page))); 931 } else if (PageAnon(page)) { 932 swp_entry_t entry = { .val = page_private(page) }; 933 934 if (PageSwapCache(page)) { 935 /* 936 * Store the swap location in the pte. 937 * See handle_pte_fault() ... 938 */ 939 if (swap_duplicate(entry) < 0) { 940 set_pte_at(mm, address, pte, pteval); 941 ret = SWAP_FAIL; 942 goto out_unmap; 943 } 944 if (list_empty(&mm->mmlist)) { 945 spin_lock(&mmlist_lock); 946 if (list_empty(&mm->mmlist)) 947 list_add(&mm->mmlist, &init_mm.mmlist); 948 spin_unlock(&mmlist_lock); 949 } 950 dec_mm_counter(mm, MM_ANONPAGES); 951 inc_mm_counter(mm, MM_SWAPENTS); 952 } else if (PAGE_MIGRATION) { 953 /* 954 * Store the pfn of the page in a special migration 955 * pte. do_swap_page() will wait until the migration 956 * pte is removed and then restart fault handling. 957 */ 958 BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); 959 entry = make_migration_entry(page, pte_write(pteval)); 960 } 961 set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); 962 BUG_ON(pte_file(*pte)); 963 } else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) { 964 /* Establish migration entry for a file page */ 965 swp_entry_t entry; 966 entry = make_migration_entry(page, pte_write(pteval)); 967 set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); 968 } else 969 dec_mm_counter(mm, MM_FILEPAGES); 970 971 page_remove_rmap(page); 972 page_cache_release(page); 973 974 out_unmap: 975 pte_unmap_unlock(pte, ptl); 976 out: 977 return ret; 978 979 out_mlock: 980 pte_unmap_unlock(pte, ptl); 981 982 983 /* 984 * We need mmap_sem locking, Otherwise VM_LOCKED check makes 985 * unstable result and race. Plus, We can't wait here because 986 * we now hold anon_vma->lock or mapping->i_mmap_lock. 987 * if trylock failed, the page remain in evictable lru and later 988 * vmscan could retry to move the page to unevictable lru if the 989 * page is actually mlocked. 990 */ 991 if (down_read_trylock(&vma->vm_mm->mmap_sem)) { 992 if (vma->vm_flags & VM_LOCKED) { 993 mlock_vma_page(page); 994 ret = SWAP_MLOCK; 995 } 996 up_read(&vma->vm_mm->mmap_sem); 997 } 998 return ret; 999 } 1000 1001 /* 1002 * objrmap doesn't work for nonlinear VMAs because the assumption that 1003 * offset-into-file correlates with offset-into-virtual-addresses does not hold. 1004 * Consequently, given a particular page and its ->index, we cannot locate the 1005 * ptes which are mapping that page without an exhaustive linear search. 1006 * 1007 * So what this code does is a mini "virtual scan" of each nonlinear VMA which 1008 * maps the file to which the target page belongs. The ->vm_private_data field 1009 * holds the current cursor into that scan. Successive searches will circulate 1010 * around the vma's virtual address space. 1011 * 1012 * So as more replacement pressure is applied to the pages in a nonlinear VMA, 1013 * more scanning pressure is placed against them as well. Eventually pages 1014 * will become fully unmapped and are eligible for eviction. 1015 * 1016 * For very sparsely populated VMAs this is a little inefficient - chances are 1017 * there there won't be many ptes located within the scan cluster. In this case 1018 * maybe we could scan further - to the end of the pte page, perhaps. 1019 * 1020 * Mlocked pages: check VM_LOCKED under mmap_sem held for read, if we can 1021 * acquire it without blocking. If vma locked, mlock the pages in the cluster, 1022 * rather than unmapping them. If we encounter the "check_page" that vmscan is 1023 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN. 1024 */ 1025 #define CLUSTER_SIZE min(32*PAGE_SIZE, PMD_SIZE) 1026 #define CLUSTER_MASK (~(CLUSTER_SIZE - 1)) 1027 1028 static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, 1029 struct vm_area_struct *vma, struct page *check_page) 1030 { 1031 struct mm_struct *mm = vma->vm_mm; 1032 pgd_t *pgd; 1033 pud_t *pud; 1034 pmd_t *pmd; 1035 pte_t *pte; 1036 pte_t pteval; 1037 spinlock_t *ptl; 1038 struct page *page; 1039 unsigned long address; 1040 unsigned long end; 1041 int ret = SWAP_AGAIN; 1042 int locked_vma = 0; 1043 1044 address = (vma->vm_start + cursor) & CLUSTER_MASK; 1045 end = address + CLUSTER_SIZE; 1046 if (address < vma->vm_start) 1047 address = vma->vm_start; 1048 if (end > vma->vm_end) 1049 end = vma->vm_end; 1050 1051 pgd = pgd_offset(mm, address); 1052 if (!pgd_present(*pgd)) 1053 return ret; 1054 1055 pud = pud_offset(pgd, address); 1056 if (!pud_present(*pud)) 1057 return ret; 1058 1059 pmd = pmd_offset(pud, address); 1060 if (!pmd_present(*pmd)) 1061 return ret; 1062 1063 /* 1064 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED, 1065 * keep the sem while scanning the cluster for mlocking pages. 1066 */ 1067 if (down_read_trylock(&vma->vm_mm->mmap_sem)) { 1068 locked_vma = (vma->vm_flags & VM_LOCKED); 1069 if (!locked_vma) 1070 up_read(&vma->vm_mm->mmap_sem); /* don't need it */ 1071 } 1072 1073 pte = pte_offset_map_lock(mm, pmd, address, &ptl); 1074 1075 /* Update high watermark before we lower rss */ 1076 update_hiwater_rss(mm); 1077 1078 for (; address < end; pte++, address += PAGE_SIZE) { 1079 if (!pte_present(*pte)) 1080 continue; 1081 page = vm_normal_page(vma, address, *pte); 1082 BUG_ON(!page || PageAnon(page)); 1083 1084 if (locked_vma) { 1085 mlock_vma_page(page); /* no-op if already mlocked */ 1086 if (page == check_page) 1087 ret = SWAP_MLOCK; 1088 continue; /* don't unmap */ 1089 } 1090 1091 if (ptep_clear_flush_young_notify(vma, address, pte)) 1092 continue; 1093 1094 /* Nuke the page table entry. */ 1095 flush_cache_page(vma, address, pte_pfn(*pte)); 1096 pteval = ptep_clear_flush_notify(vma, address, pte); 1097 1098 /* If nonlinear, store the file page offset in the pte. */ 1099 if (page->index != linear_page_index(vma, address)) 1100 set_pte_at(mm, address, pte, pgoff_to_pte(page->index)); 1101 1102 /* Move the dirty bit to the physical page now the pte is gone. */ 1103 if (pte_dirty(pteval)) 1104 set_page_dirty(page); 1105 1106 page_remove_rmap(page); 1107 page_cache_release(page); 1108 dec_mm_counter(mm, MM_FILEPAGES); 1109 (*mapcount)--; 1110 } 1111 pte_unmap_unlock(pte - 1, ptl); 1112 if (locked_vma) 1113 up_read(&vma->vm_mm->mmap_sem); 1114 return ret; 1115 } 1116 1117 /** 1118 * try_to_unmap_anon - unmap or unlock anonymous page using the object-based 1119 * rmap method 1120 * @page: the page to unmap/unlock 1121 * @flags: action and flags 1122 * 1123 * Find all the mappings of a page using the mapping pointer and the vma chains 1124 * contained in the anon_vma struct it points to. 1125 * 1126 * This function is only called from try_to_unmap/try_to_munlock for 1127 * anonymous pages. 1128 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1129 * where the page was found will be held for write. So, we won't recheck 1130 * vm_flags for that VMA. That should be OK, because that vma shouldn't be 1131 * 'LOCKED. 1132 */ 1133 static int try_to_unmap_anon(struct page *page, enum ttu_flags flags) 1134 { 1135 struct anon_vma *anon_vma; 1136 struct anon_vma_chain *avc; 1137 int ret = SWAP_AGAIN; 1138 1139 anon_vma = page_lock_anon_vma(page); 1140 if (!anon_vma) 1141 return ret; 1142 1143 list_for_each_entry(avc, &anon_vma->head, same_anon_vma) { 1144 struct vm_area_struct *vma = avc->vma; 1145 unsigned long address = vma_address(page, vma); 1146 if (address == -EFAULT) 1147 continue; 1148 ret = try_to_unmap_one(page, vma, address, flags); 1149 if (ret != SWAP_AGAIN || !page_mapped(page)) 1150 break; 1151 } 1152 1153 page_unlock_anon_vma(anon_vma); 1154 return ret; 1155 } 1156 1157 /** 1158 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method 1159 * @page: the page to unmap/unlock 1160 * @flags: action and flags 1161 * 1162 * Find all the mappings of a page using the mapping pointer and the vma chains 1163 * contained in the address_space struct it points to. 1164 * 1165 * This function is only called from try_to_unmap/try_to_munlock for 1166 * object-based pages. 1167 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1168 * where the page was found will be held for write. So, we won't recheck 1169 * vm_flags for that VMA. That should be OK, because that vma shouldn't be 1170 * 'LOCKED. 1171 */ 1172 static int try_to_unmap_file(struct page *page, enum ttu_flags flags) 1173 { 1174 struct address_space *mapping = page->mapping; 1175 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 1176 struct vm_area_struct *vma; 1177 struct prio_tree_iter iter; 1178 int ret = SWAP_AGAIN; 1179 unsigned long cursor; 1180 unsigned long max_nl_cursor = 0; 1181 unsigned long max_nl_size = 0; 1182 unsigned int mapcount; 1183 1184 spin_lock(&mapping->i_mmap_lock); 1185 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 1186 unsigned long address = vma_address(page, vma); 1187 if (address == -EFAULT) 1188 continue; 1189 ret = try_to_unmap_one(page, vma, address, flags); 1190 if (ret != SWAP_AGAIN || !page_mapped(page)) 1191 goto out; 1192 } 1193 1194 if (list_empty(&mapping->i_mmap_nonlinear)) 1195 goto out; 1196 1197 /* 1198 * We don't bother to try to find the munlocked page in nonlinears. 1199 * It's costly. Instead, later, page reclaim logic may call 1200 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily. 1201 */ 1202 if (TTU_ACTION(flags) == TTU_MUNLOCK) 1203 goto out; 1204 1205 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, 1206 shared.vm_set.list) { 1207 cursor = (unsigned long) vma->vm_private_data; 1208 if (cursor > max_nl_cursor) 1209 max_nl_cursor = cursor; 1210 cursor = vma->vm_end - vma->vm_start; 1211 if (cursor > max_nl_size) 1212 max_nl_size = cursor; 1213 } 1214 1215 if (max_nl_size == 0) { /* all nonlinears locked or reserved ? */ 1216 ret = SWAP_FAIL; 1217 goto out; 1218 } 1219 1220 /* 1221 * We don't try to search for this page in the nonlinear vmas, 1222 * and page_referenced wouldn't have found it anyway. Instead 1223 * just walk the nonlinear vmas trying to age and unmap some. 1224 * The mapcount of the page we came in with is irrelevant, 1225 * but even so use it as a guide to how hard we should try? 1226 */ 1227 mapcount = page_mapcount(page); 1228 if (!mapcount) 1229 goto out; 1230 cond_resched_lock(&mapping->i_mmap_lock); 1231 1232 max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK; 1233 if (max_nl_cursor == 0) 1234 max_nl_cursor = CLUSTER_SIZE; 1235 1236 do { 1237 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, 1238 shared.vm_set.list) { 1239 cursor = (unsigned long) vma->vm_private_data; 1240 while ( cursor < max_nl_cursor && 1241 cursor < vma->vm_end - vma->vm_start) { 1242 if (try_to_unmap_cluster(cursor, &mapcount, 1243 vma, page) == SWAP_MLOCK) 1244 ret = SWAP_MLOCK; 1245 cursor += CLUSTER_SIZE; 1246 vma->vm_private_data = (void *) cursor; 1247 if ((int)mapcount <= 0) 1248 goto out; 1249 } 1250 vma->vm_private_data = (void *) max_nl_cursor; 1251 } 1252 cond_resched_lock(&mapping->i_mmap_lock); 1253 max_nl_cursor += CLUSTER_SIZE; 1254 } while (max_nl_cursor <= max_nl_size); 1255 1256 /* 1257 * Don't loop forever (perhaps all the remaining pages are 1258 * in locked vmas). Reset cursor on all unreserved nonlinear 1259 * vmas, now forgetting on which ones it had fallen behind. 1260 */ 1261 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) 1262 vma->vm_private_data = NULL; 1263 out: 1264 spin_unlock(&mapping->i_mmap_lock); 1265 return ret; 1266 } 1267 1268 /** 1269 * try_to_unmap - try to remove all page table mappings to a page 1270 * @page: the page to get unmapped 1271 * @flags: action and flags 1272 * 1273 * Tries to remove all the page table entries which are mapping this 1274 * page, used in the pageout path. Caller must hold the page lock. 1275 * Return values are: 1276 * 1277 * SWAP_SUCCESS - we succeeded in removing all mappings 1278 * SWAP_AGAIN - we missed a mapping, try again later 1279 * SWAP_FAIL - the page is unswappable 1280 * SWAP_MLOCK - page is mlocked. 1281 */ 1282 int try_to_unmap(struct page *page, enum ttu_flags flags) 1283 { 1284 int ret; 1285 1286 BUG_ON(!PageLocked(page)); 1287 1288 if (unlikely(PageKsm(page))) 1289 ret = try_to_unmap_ksm(page, flags); 1290 else if (PageAnon(page)) 1291 ret = try_to_unmap_anon(page, flags); 1292 else 1293 ret = try_to_unmap_file(page, flags); 1294 if (ret != SWAP_MLOCK && !page_mapped(page)) 1295 ret = SWAP_SUCCESS; 1296 return ret; 1297 } 1298 1299 /** 1300 * try_to_munlock - try to munlock a page 1301 * @page: the page to be munlocked 1302 * 1303 * Called from munlock code. Checks all of the VMAs mapping the page 1304 * to make sure nobody else has this page mlocked. The page will be 1305 * returned with PG_mlocked cleared if no other vmas have it mlocked. 1306 * 1307 * Return values are: 1308 * 1309 * SWAP_AGAIN - no vma is holding page mlocked, or, 1310 * SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem 1311 * SWAP_FAIL - page cannot be located at present 1312 * SWAP_MLOCK - page is now mlocked. 1313 */ 1314 int try_to_munlock(struct page *page) 1315 { 1316 VM_BUG_ON(!PageLocked(page) || PageLRU(page)); 1317 1318 if (unlikely(PageKsm(page))) 1319 return try_to_unmap_ksm(page, TTU_MUNLOCK); 1320 else if (PageAnon(page)) 1321 return try_to_unmap_anon(page, TTU_MUNLOCK); 1322 else 1323 return try_to_unmap_file(page, TTU_MUNLOCK); 1324 } 1325 1326 #ifdef CONFIG_MIGRATION 1327 /* 1328 * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file(): 1329 * Called by migrate.c to remove migration ptes, but might be used more later. 1330 */ 1331 static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *, 1332 struct vm_area_struct *, unsigned long, void *), void *arg) 1333 { 1334 struct anon_vma *anon_vma; 1335 struct anon_vma_chain *avc; 1336 int ret = SWAP_AGAIN; 1337 1338 /* 1339 * Note: remove_migration_ptes() cannot use page_lock_anon_vma() 1340 * because that depends on page_mapped(); but not all its usages 1341 * are holding mmap_sem, which also gave the necessary guarantee 1342 * (that this anon_vma's slab has not already been destroyed). 1343 * This needs to be reviewed later: avoiding page_lock_anon_vma() 1344 * is risky, and currently limits the usefulness of rmap_walk(). 1345 */ 1346 anon_vma = page_anon_vma(page); 1347 if (!anon_vma) 1348 return ret; 1349 spin_lock(&anon_vma->lock); 1350 list_for_each_entry(avc, &anon_vma->head, same_anon_vma) { 1351 struct vm_area_struct *vma = avc->vma; 1352 unsigned long address = vma_address(page, vma); 1353 if (address == -EFAULT) 1354 continue; 1355 ret = rmap_one(page, vma, address, arg); 1356 if (ret != SWAP_AGAIN) 1357 break; 1358 } 1359 spin_unlock(&anon_vma->lock); 1360 return ret; 1361 } 1362 1363 static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *, 1364 struct vm_area_struct *, unsigned long, void *), void *arg) 1365 { 1366 struct address_space *mapping = page->mapping; 1367 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 1368 struct vm_area_struct *vma; 1369 struct prio_tree_iter iter; 1370 int ret = SWAP_AGAIN; 1371 1372 if (!mapping) 1373 return ret; 1374 spin_lock(&mapping->i_mmap_lock); 1375 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 1376 unsigned long address = vma_address(page, vma); 1377 if (address == -EFAULT) 1378 continue; 1379 ret = rmap_one(page, vma, address, arg); 1380 if (ret != SWAP_AGAIN) 1381 break; 1382 } 1383 /* 1384 * No nonlinear handling: being always shared, nonlinear vmas 1385 * never contain migration ptes. Decide what to do about this 1386 * limitation to linear when we need rmap_walk() on nonlinear. 1387 */ 1388 spin_unlock(&mapping->i_mmap_lock); 1389 return ret; 1390 } 1391 1392 int rmap_walk(struct page *page, int (*rmap_one)(struct page *, 1393 struct vm_area_struct *, unsigned long, void *), void *arg) 1394 { 1395 VM_BUG_ON(!PageLocked(page)); 1396 1397 if (unlikely(PageKsm(page))) 1398 return rmap_walk_ksm(page, rmap_one, arg); 1399 else if (PageAnon(page)) 1400 return rmap_walk_anon(page, rmap_one, arg); 1401 else 1402 return rmap_walk_file(page, rmap_one, arg); 1403 } 1404 #endif /* CONFIG_MIGRATION */ 1405