/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex       (while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
        return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
        kmem_cache_free(anon_vma_cachep, anon_vma);
}
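/*
 * Illustrative only (not part of this file): the typical caller of
 * anon_vma_prepare() below is the anonymous fault path, which roughly
 * does
 *
 *      if (unlikely(anon_vma_prepare(vma)))
 *              return VM_FAULT_OOM;
 *      page = alloc_zeroed_user_highpage_movable(vma, address);
 *      ...
 *      page_add_new_anon_rmap(page, vma, address);
 *
 * as in do_anonymous_page(): the anon_vma must be in place before the
 * first anonymous page is mapped into the vma.
 */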
/**
 * anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, but if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        might_sleep();
        if (unlikely(!anon_vma)) {
                struct mm_struct *mm = vma->vm_mm;
                struct anon_vma *allocated;

                anon_vma = find_mergeable_anon_vma(vma);
                allocated = NULL;
                if (!anon_vma) {
                        anon_vma = anon_vma_alloc();
                        if (unlikely(!anon_vma))
                                return -ENOMEM;
                        allocated = anon_vma;
                }
                spin_lock(&anon_vma->lock);

                /* page_table_lock to protect against threads */
                spin_lock(&mm->page_table_lock);
                if (likely(!vma->anon_vma)) {
                        vma->anon_vma = anon_vma;
                        list_add_tail(&vma->anon_vma_node, &anon_vma->head);
                        allocated = NULL;
                }
                spin_unlock(&mm->page_table_lock);

                spin_unlock(&anon_vma->lock);
                if (unlikely(allocated))
                        anon_vma_free(allocated);
        }
        return 0;
}

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
        BUG_ON(vma->anon_vma != next->anon_vma);
        list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        if (anon_vma)
                list_add_tail(&vma->anon_vma_node, &anon_vma->head);
}

void anon_vma_link(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        if (anon_vma) {
                spin_lock(&anon_vma->lock);
                list_add_tail(&vma->anon_vma_node, &anon_vma->head);
                spin_unlock(&anon_vma->lock);
        }
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        int empty;

        if (!anon_vma)
                return;

        spin_lock(&anon_vma->lock);
        list_del(&vma->anon_vma_node);

        /* We must garbage collect the anon_vma if it's empty */
        empty = list_empty(&anon_vma->head);
        spin_unlock(&anon_vma->lock);

        if (empty)
                anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data)
{
        struct anon_vma *anon_vma = data;

        spin_lock_init(&anon_vma->lock);
        INIT_LIST_HEAD(&anon_vma->head);
}

void __init anon_vma_init(void)
{
        anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
                        0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
struct anon_vma *page_lock_anon_vma(struct page *page)
{
        struct anon_vma *anon_vma;
        unsigned long anon_mapping;

        rcu_read_lock();
        anon_mapping = (unsigned long) page->mapping;
        if (!(anon_mapping & PAGE_MAPPING_ANON))
                goto out;
        if (!page_mapped(page))
                goto out;

        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
        spin_lock(&anon_vma->lock);
        return anon_vma;
out:
        rcu_read_unlock();
        return NULL;
}

void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
        spin_unlock(&anon_vma->lock);
        rcu_read_unlock();
}
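/*
 * Illustrative only: an anonymous page's ->mapping is the anon_vma
 * pointer with the low PAGE_MAPPING_ANON bit set, so with
 * PAGE_MAPPING_ANON == 1 an anon_vma at, say, 0xc12345f0 is stored as
 * 0xc12345f1.  A typical walk over the mappings, as the functions in
 * this file do, looks roughly like:
 *
 *      anon_vma = page_lock_anon_vma(page);
 *      if (anon_vma) {
 *              list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
 *                      ...visit each vma mapping the page...
 *              page_unlock_anon_vma(anon_vma);
 *      }
 */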
/*
 * At what user virtual address is page expected in @vma?
 * Returns virtual address or -EFAULT if page's index/offset is not
 * within the range mapped by @vma.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        unsigned long address;

        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
                /* page should be within @vma mapping range */
                return -EFAULT;
        }
        return address;
}

/*
 * At what user virtual address is page expected in vma? Checks that the
 * page matches the vma: currently only used on anon pages, by unuse_vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
        if (PageAnon(page)) {
                if ((void *)vma->anon_vma !=
                    (void *)page->mapping - PAGE_MAPPING_ANON)
                        return -EFAULT;
        } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
                if (!vma->vm_file ||
                    vma->vm_file->f_mapping != page->mapping)
                        return -EFAULT;
        } else
                return -EFAULT;
        return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, page_check_address may perform a racy check to avoid
 * the page table lock when the pte is not present (helpful when reclaiming
 * highly shared pages).
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
                          unsigned long address, spinlock_t **ptlp, int sync)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;

        pte = pte_offset_map(pmd, address);
        /* Make a quick check before getting the lock */
        if (!sync && !pte_present(*pte)) {
                pte_unmap(pte);
                return NULL;
        }

        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
                *ptlp = ptl;
                return pte;
        }
        pte_unmap_unlock(pte, ptl);
        return NULL;
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
static int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
        unsigned long address;
        pte_t *pte;
        spinlock_t *ptl;

        address = vma_address(page, vma);
        if (address == -EFAULT)         /* out of vma range */
                return 0;
        pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
        if (!pte)                       /* the page is not in this mm */
                return 0;
        pte_unmap_unlock(pte, ptl);

        return 1;
}
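/*
 * Worked example for vma_address() above, with made-up numbers: if
 * vma->vm_start == 0x08048000, vma->vm_pgoff == 0x10 and the page has
 * page->index == 0x13, then (with 4k pages and PAGE_CACHE_SHIFT ==
 * PAGE_SHIFT) address = 0x08048000 + ((0x13 - 0x10) << 12) ==
 * 0x0804b000, which is a valid answer provided vm_end lies above it.
 */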
/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
                struct vm_area_struct *vma, unsigned int *mapcount)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        spinlock_t *ptl;
        int referenced = 0;

        address = vma_address(page, vma);
        if (address == -EFAULT)
                goto out;

        pte = page_check_address(page, mm, address, &ptl, 0);
        if (!pte)
                goto out;

        /*
         * Don't want to elevate referenced for mlocked page that gets this far,
         * in order that it progresses to try_to_unmap and is moved to the
         * unevictable list.
         */
        if (vma->vm_flags & VM_LOCKED) {
                *mapcount = 1;  /* break early from loop */
                goto out_unmap;
        }

        if (ptep_clear_flush_young_notify(vma, address, pte))
                referenced++;

        /*
         * Pretend the page is referenced if the task has the
         * swap token and is in the middle of a page fault.
         */
        if (mm != current->mm && has_swap_token(mm) &&
                        rwsem_is_locked(&mm->mmap_sem))
                referenced++;

out_unmap:
        (*mapcount)--;
        pte_unmap_unlock(pte, ptl);
out:
        return referenced;
}

static int page_referenced_anon(struct page *page,
                                struct mem_cgroup *mem_cont)
{
        unsigned int mapcount;
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
        int referenced = 0;

        anon_vma = page_lock_anon_vma(page);
        if (!anon_vma)
                return referenced;

        mapcount = page_mapcount(page);
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                /*
                 * If we are reclaiming on behalf of a cgroup, skip
                 * counting on behalf of references from different
                 * cgroups.
                 */
                if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
                        continue;
                referenced += page_referenced_one(page, vma, &mapcount);
                if (!mapcount)
                        break;
        }

        page_unlock_anon_vma(anon_vma);
        return referenced;
}
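/*
 * Example of the mapcount cursor above, with made-up numbers: if the
 * page has page_mapcount() == 3, then each page_referenced_one() call
 * that actually finds the page's pte decrements the local mapcount, so
 * after the third vma that maps the page the count reaches 0 and the
 * walk breaks out early instead of visiting every remaining vma on the
 * list.
 */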
/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 * @mem_cont: target memory controller
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page,
                                struct mem_cgroup *mem_cont)
{
        unsigned int mapcount;
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int referenced = 0;

        /*
         * The caller's checks on page->mapping and !PageAnon have made
         * sure that this is a file page: the check for page->mapping
         * excludes the case just before it gets set on an anon page.
         */
        BUG_ON(PageAnon(page));

        /*
         * The page lock not only makes sure that page->mapping cannot
         * suddenly be NULLified by truncation, it makes sure that the
         * structure at mapping cannot be freed and reused yet,
         * so we can safely take mapping->i_mmap_lock.
         */
        BUG_ON(!PageLocked(page));

        spin_lock(&mapping->i_mmap_lock);

        /*
         * i_mmap_lock does not stabilize mapcount at all, but mapcount
         * is more likely to be accurate if we note it after spinning.
         */
        mapcount = page_mapcount(page);

        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                /*
                 * If we are reclaiming on behalf of a cgroup, skip
                 * counting on behalf of references from different
                 * cgroups.
                 */
                if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
                        continue;
                referenced += page_referenced_one(page, vma, &mapcount);
                if (!mapcount)
                        break;
        }

        spin_unlock(&mapping->i_mmap_lock);
        return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @mem_cont: target memory controller
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked,
                    struct mem_cgroup *mem_cont)
{
        int referenced = 0;

        if (TestClearPageReferenced(page))
                referenced++;

        if (page_mapped(page) && page->mapping) {
                if (PageAnon(page))
                        referenced += page_referenced_anon(page, mem_cont);
                else if (is_locked)
                        referenced += page_referenced_file(page, mem_cont);
                else if (!trylock_page(page))
                        referenced++;
                else {
                        if (page->mapping)
                                referenced +=
                                        page_referenced_file(page, mem_cont);
                        unlock_page(page);
                }
        }

        if (page_test_and_clear_young(page))
                referenced++;

        return referenced;
}

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        spinlock_t *ptl;
        int ret = 0;

        address = vma_address(page, vma);
        if (address == -EFAULT)
                goto out;

        pte = page_check_address(page, mm, address, &ptl, 1);
        if (!pte)
                goto out;

        if (pte_dirty(*pte) || pte_write(*pte)) {
                pte_t entry;

                flush_cache_page(vma, address, pte_pfn(*pte));
                entry = ptep_clear_flush_notify(vma, address, pte);
                entry = pte_wrprotect(entry);
                entry = pte_mkclean(entry);
                set_pte_at(mm, address, pte, entry);
                ret = 1;
        }

        pte_unmap_unlock(pte, ptl);
out:
        return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int ret = 0;

        BUG_ON(PageAnon(page));

        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                if (vma->vm_flags & VM_SHARED)
                        ret += page_mkclean_one(page, vma);
        }
        spin_unlock(&mapping->i_mmap_lock);
        return ret;
}

int page_mkclean(struct page *page)
{
        int ret = 0;

        BUG_ON(!PageLocked(page));

        if (page_mapped(page)) {
                struct address_space *mapping = page_mapping(page);
                if (mapping) {
                        ret = page_mkclean_file(mapping, page);
                        if (page_test_dirty(page)) {
                                page_clear_dirty(page);
                                ret = 1;
                        }
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);
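/*
 * Illustrative only: the main user of page_mkclean() is the writeback
 * path, where clear_page_dirty_for_io() roughly does
 *
 *      if (page_mkclean(page))
 *              set_page_dirty(page);
 *
 * so a page with writable ptes is write-protected and re-dirtied before
 * being written out; any later store then refaults and redirties the pte.
 */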
/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        BUG_ON(!anon_vma);
        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        page->mapping = (struct address_space *) anon_vma;

        page->index = linear_page_index(vma, address);

        /*
         * nr_mapped state can be updated without turning off
         * interrupts because it is not modified via interrupt.
         */
        __inc_zone_page_state(page, NR_ANON_PAGES);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
        /*
         * The page's anon-rmap details (mapping and index) are guaranteed to
         * be set up correctly at this point.
         *
         * We have exclusion against page_add_anon_rmap because the caller
         * always holds the page locked, except if called from page_dup_rmap,
         * in which case the page is already known to be setup.
         *
         * We have exclusion against page_add_new_anon_rmap because those pages
         * are initially only visible via the pagetables, and the pte is locked
         * over the call to page_add_new_anon_rmap.
         */
        struct anon_vma *anon_vma = vma->anon_vma;
        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        BUG_ON(page->mapping != (struct address_space *)anon_vma);
        BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock and the page must be locked.
 */
void page_add_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
        if (atomic_inc_and_test(&page->_mapcount))
                __page_set_anon_rmap(page, vma, address);
        else
                __page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
        BUG_ON(address < vma->vm_start || address >= vma->vm_end);
        atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
        __page_set_anon_rmap(page, vma, address);
}
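/*
 * Note on the _mapcount encoding used above: _mapcount starts at -1 for
 * an unmapped page, so a page mapped by N ptes has _mapcount == N - 1.
 * atomic_inc_and_test() therefore returns true exactly on the first
 * mapping (-1 -> 0), and atomic_set(&page->_mapcount, 0) in
 * page_add_new_anon_rmap() produces the same state a first atomic_inc
 * would have.
 */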
/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
        if (atomic_inc_and_test(&page->_mapcount))
                __inc_zone_page_state(page, NR_FILE_MAPPED);
}

#ifdef CONFIG_DEBUG_VM
/**
 * page_dup_rmap - duplicate pte mapping to a page
 * @page: the page to add the mapping to
 * @vma: the vm area being duplicated
 * @address: the user virtual address mapped
 *
 * For copy_page_range only: minimal extract from page_add_file_rmap /
 * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
 * quicker.
 *
 * The caller needs to hold the pte lock.
 */
void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
{
        BUG_ON(page_mapcount(page) == 0);
        if (PageAnon(page))
                __page_check_anon_rmap(page, vma, address);
        atomic_inc(&page->_mapcount);
}
#endif

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @vma: the vm area in which the mapping is removed
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
        if (atomic_add_negative(-1, &page->_mapcount)) {
                if (unlikely(page_mapcount(page) < 0)) {
                        printk(KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
                        printk(KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
                        printk(KERN_EMERG "  page->flags = %lx\n", page->flags);
                        printk(KERN_EMERG "  page->count = %x\n", page_count(page));
                        printk(KERN_EMERG "  page->mapping = %p\n", page->mapping);
                        print_symbol(KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
                        if (vma->vm_ops) {
                                print_symbol(KERN_EMERG "  vma->vm_ops->fault = %s\n", (unsigned long)vma->vm_ops->fault);
                        }
                        if (vma->vm_file && vma->vm_file->f_op)
                                print_symbol(KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
                        BUG();
                }

                /*
                 * Now that the last pte has gone, s390 must transfer dirty
                 * flag from storage key to struct page.  We can usually skip
                 * this if the page is anon, so about to be freed; but perhaps
                 * not if it's in swapcache - there might be another pte slot
                 * containing the swap entry, but page not yet written to swap.
                 */
                if ((!PageAnon(page) || PageSwapCache(page)) &&
                    page_test_dirty(page)) {
                        page_clear_dirty(page);
                        set_page_dirty(page);
                }
                if (PageAnon(page))
                        mem_cgroup_uncharge_page(page);
                __dec_zone_page_state(page,
                        PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
                /*
                 * It would be tidy to reset the PageAnon mapping here,
                 * but that might overwrite a racing page_add_anon_rmap
                 * which increments mapcount after us but sets mapping
                 * before us: so leave the reset to free_hot_cold_page,
                 * and remember that it's only reliable while mapped.
                 * Leaving it set also helps swapoff to reinstate ptes
                 * faster for those pages still in swapcache.
                 */
        }
}
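/*
 * Illustrative only: the usual caller of page_remove_rmap() is the
 * pte-teardown path, e.g. zap_pte_range() roughly does, for each
 * present pte it clears,
 *
 *      page = vm_normal_page(vma, addr, ptent);
 *      ...
 *      page_remove_rmap(page, vma);
 *
 * under the pte lock, matching the earlier page_add_*_rmap() call.
 */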
/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                            int migration)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        pte_t pteval;
        spinlock_t *ptl;
        int ret = SWAP_AGAIN;

        address = vma_address(page, vma);
        if (address == -EFAULT)
                goto out;

        pte = page_check_address(page, mm, address, &ptl, 0);
        if (!pte)
                goto out;

        /*
         * If the page is mlock()d, we cannot swap it out.
         * If it's recently referenced (perhaps page_referenced
         * skipped over this mm) then we should reactivate it.
         */
        if (!migration) {
                if (vma->vm_flags & VM_LOCKED) {
                        ret = SWAP_MLOCK;
                        goto out_unmap;
                }
                if (ptep_clear_flush_young_notify(vma, address, pte)) {
                        ret = SWAP_FAIL;
                        goto out_unmap;
                }
        }

        /* Nuke the page table entry. */
        flush_cache_page(vma, address, page_to_pfn(page));
        pteval = ptep_clear_flush_notify(vma, address, pte);

        /* Move the dirty bit to the physical page now the pte is gone. */
        if (pte_dirty(pteval))
                set_page_dirty(page);

        /* Update high watermark before we lower rss */
        update_hiwater_rss(mm);

        if (PageAnon(page)) {
                swp_entry_t entry = { .val = page_private(page) };

                if (PageSwapCache(page)) {
                        /*
                         * Store the swap location in the pte.
                         * See handle_pte_fault() ...
                         */
                        swap_duplicate(entry);
                        if (list_empty(&mm->mmlist)) {
                                spin_lock(&mmlist_lock);
                                if (list_empty(&mm->mmlist))
                                        list_add(&mm->mmlist, &init_mm.mmlist);
                                spin_unlock(&mmlist_lock);
                        }
                        dec_mm_counter(mm, anon_rss);
#ifdef CONFIG_MIGRATION
                } else {
                        /*
                         * Store the pfn of the page in a special migration
                         * pte. do_swap_page() will wait until the migration
                         * pte is removed and then restart fault handling.
                         */
                        BUG_ON(!migration);
                        entry = make_migration_entry(page, pte_write(pteval));
#endif
                }
                set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
                BUG_ON(pte_file(*pte));
        } else
#ifdef CONFIG_MIGRATION
        if (migration) {
                /* Establish migration entry for a file page */
                swp_entry_t entry;
                entry = make_migration_entry(page, pte_write(pteval));
                set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
        } else
#endif
                dec_mm_counter(mm, file_rss);

        page_remove_rmap(page, vma);
        page_cache_release(page);

out_unmap:
        pte_unmap_unlock(pte, ptl);
out:
        return ret;
}
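/*
 * Summary of what try_to_unmap_one() leaves in the pte once it decides
 * to unmap:
 *
 *      anon page in swapcache  -> swap entry (swp_entry_to_pte)
 *      anon page, migration    -> migration entry encoding the pfn
 *      file page, migration    -> migration entry encoding the pfn
 *      plain file page         -> pte cleared (a refault rereads the file)
 *
 * In every case the page's mapcount and the mm's rss are dropped to match.
 */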
/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 *
 * Mlocked pages: check VM_LOCKED under mmap_sem held for read, if we can
 * acquire it without blocking.  If vma locked, mlock the pages in the cluster,
 * rather than unmapping them.  If we encounter the "check_page" that vmscan is
 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
 */
#define CLUSTER_SIZE    min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK    (~(CLUSTER_SIZE - 1))

static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
                struct vm_area_struct *vma, struct page *check_page)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t pteval;
        spinlock_t *ptl;
        struct page *page;
        unsigned long address;
        unsigned long end;
        int ret = SWAP_AGAIN;
        int locked_vma = 0;

        address = (vma->vm_start + cursor) & CLUSTER_MASK;
        end = address + CLUSTER_SIZE;
        if (address < vma->vm_start)
                address = vma->vm_start;
        if (end > vma->vm_end)
                end = vma->vm_end;

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                return ret;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return ret;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return ret;

        /*
         * MLOCK_PAGES => feature is configured.
         * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
         * keep the sem while scanning the cluster for mlocking pages.
         */
        if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) {
                locked_vma = (vma->vm_flags & VM_LOCKED);
                if (!locked_vma)
                        up_read(&vma->vm_mm->mmap_sem); /* don't need it */
        }

        pte = pte_offset_map_lock(mm, pmd, address, &ptl);

        /* Update high watermark before we lower rss */
        update_hiwater_rss(mm);

        for (; address < end; pte++, address += PAGE_SIZE) {
                if (!pte_present(*pte))
                        continue;
                page = vm_normal_page(vma, address, *pte);
                BUG_ON(!page || PageAnon(page));

                if (locked_vma) {
                        mlock_vma_page(page);   /* no-op if already mlocked */
                        if (page == check_page)
                                ret = SWAP_MLOCK;
                        continue;       /* don't unmap */
                }

                if (ptep_clear_flush_young_notify(vma, address, pte))
                        continue;

                /* Nuke the page table entry. */
                flush_cache_page(vma, address, pte_pfn(*pte));
                pteval = ptep_clear_flush_notify(vma, address, pte);

                /* If nonlinear, store the file page offset in the pte. */
                if (page->index != linear_page_index(vma, address))
                        set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

                /* Move the dirty bit to the physical page now the pte is gone. */
                if (pte_dirty(pteval))
                        set_page_dirty(page);

                page_remove_rmap(page, vma);
                page_cache_release(page);
                dec_mm_counter(mm, file_rss);
                (*mapcount)--;
        }
        pte_unmap_unlock(pte - 1, ptl);
        if (locked_vma)
                up_read(&vma->vm_mm->mmap_sem);
        return ret;
}
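/*
 * Worked example of the cluster arithmetic above, assuming 4k pages so
 * that CLUSTER_SIZE == 32 * 4k == 128k (0x20000): a cursor of 0x21000
 * into a vma at vm_start 0x40000000 gives
 * address = (0x40000000 + 0x21000) & ~0x1ffff == 0x40020000, and the
 * scan covers the 32 ptes up to 0x40040000 (clamped to the vma bounds,
 * and cut short if the caller's mapcount budget runs out).
 */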
/*
 * common handling for pages mapped in VM_LOCKED vmas
 */
static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
{
        int mlocked = 0;

        if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
                if (vma->vm_flags & VM_LOCKED) {
                        mlock_vma_page(page);
                        mlocked++;      /* really mlocked the page */
                }
                up_read(&vma->vm_mm->mmap_sem);
        }
        return mlocked;
}

/**
 * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
 * rmap method
 * @page: the page to unmap/unlock
 * @unlock: request for unlock rather than unmap [unlikely]
 * @migration: unmapping for migration - ignored if @unlock
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * anonymous pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_anon(struct page *page, int unlock, int migration)
{
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
        unsigned int mlocked = 0;
        int ret = SWAP_AGAIN;

        if (MLOCK_PAGES && unlikely(unlock))
                ret = SWAP_SUCCESS;     /* default for try_to_munlock() */

        anon_vma = page_lock_anon_vma(page);
        if (!anon_vma)
                return ret;

        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                if (MLOCK_PAGES && unlikely(unlock)) {
                        if (!((vma->vm_flags & VM_LOCKED) &&
                              page_mapped_in_vma(page, vma)))
                                continue;  /* must visit all unlocked vmas */
                        ret = SWAP_MLOCK;  /* saw at least one mlocked vma */
                } else {
                        ret = try_to_unmap_one(page, vma, migration);
                        if (ret == SWAP_FAIL || !page_mapped(page))
                                break;
                }
                if (ret == SWAP_MLOCK) {
                        mlocked = try_to_mlock_page(page, vma);
                        if (mlocked)
                                break;  /* stop if actually mlocked page */
                }
        }

        page_unlock_anon_vma(anon_vma);

        if (mlocked)
                ret = SWAP_MLOCK;       /* actually mlocked the page */
        else if (ret == SWAP_MLOCK)
                ret = SWAP_AGAIN;       /* saw VM_LOCKED vma */

        return ret;
}
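/*
 * Illustrative only: in unlock mode the walk above degenerates into a
 * scan for any other VM_LOCKED vma still mapping the page, roughly
 *
 *      for each vma on anon_vma->head:
 *              if vma is VM_LOCKED and its pte maps the page:
 *                      re-mlock the page, return SWAP_MLOCK
 *      return SWAP_SUCCESS
 *
 * which is exactly what try_to_munlock() needs in order to decide
 * whether PG_mlocked may stay cleared.
 */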
/**
 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
 * @page: the page to unmap/unlock
 * @unlock: request for unlock rather than unmap [unlikely]
 * @migration: unmapping for migration - ignored if @unlock
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * object-based pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_file(struct page *page, int unlock, int migration)
{
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int ret = SWAP_AGAIN;
        unsigned long cursor;
        unsigned long max_nl_cursor = 0;
        unsigned long max_nl_size = 0;
        unsigned int mapcount;
        unsigned int mlocked = 0;

        if (MLOCK_PAGES && unlikely(unlock))
                ret = SWAP_SUCCESS;     /* default for try_to_munlock() */

        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                if (MLOCK_PAGES && unlikely(unlock)) {
                        if (!(vma->vm_flags & VM_LOCKED))
                                continue;       /* must visit all vmas */
                        ret = SWAP_MLOCK;
                } else {
                        ret = try_to_unmap_one(page, vma, migration);
                        if (ret == SWAP_FAIL || !page_mapped(page))
                                goto out;
                }
                if (ret == SWAP_MLOCK) {
                        mlocked = try_to_mlock_page(page, vma);
                        if (mlocked)
                                break;  /* stop if actually mlocked page */
                }
        }

        if (mlocked)
                goto out;

        if (list_empty(&mapping->i_mmap_nonlinear))
                goto out;

        list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                            shared.vm_set.list) {
                if (MLOCK_PAGES && unlikely(unlock)) {
                        if (!(vma->vm_flags & VM_LOCKED))
                                continue;       /* must visit all vmas */
                        ret = SWAP_MLOCK;       /* leave mlocked == 0 */
                        goto out;               /* no need to look further */
                }
                if (!MLOCK_PAGES && !migration && (vma->vm_flags & VM_LOCKED))
                        continue;
                cursor = (unsigned long) vma->vm_private_data;
                if (cursor > max_nl_cursor)
                        max_nl_cursor = cursor;
                cursor = vma->vm_end - vma->vm_start;
                if (cursor > max_nl_size)
                        max_nl_size = cursor;
        }

        if (max_nl_size == 0) { /* all nonlinears locked or reserved? */
                ret = SWAP_FAIL;
                goto out;
        }

        /*
         * We don't try to search for this page in the nonlinear vmas,
         * and page_referenced wouldn't have found it anyway.  Instead
         * just walk the nonlinear vmas trying to age and unmap some.
         * The mapcount of the page we came in with is irrelevant,
         * but even so use it as a guide to how hard we should try?
         */
        mapcount = page_mapcount(page);
        if (!mapcount)
                goto out;
        cond_resched_lock(&mapping->i_mmap_lock);

        max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
        if (max_nl_cursor == 0)
                max_nl_cursor = CLUSTER_SIZE;

        do {
                list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                    shared.vm_set.list) {
                        if (!MLOCK_PAGES && !migration &&
                            (vma->vm_flags & VM_LOCKED))
                                continue;
                        cursor = (unsigned long) vma->vm_private_data;
                        while (cursor < max_nl_cursor &&
                               cursor < vma->vm_end - vma->vm_start) {
                                ret = try_to_unmap_cluster(cursor, &mapcount,
                                                           vma, page);
                                if (ret == SWAP_MLOCK)
                                        mlocked = 2;    /* to return below */
                                cursor += CLUSTER_SIZE;
                                vma->vm_private_data = (void *) cursor;
                                if ((int)mapcount <= 0)
                                        goto out;
                        }
                        vma->vm_private_data = (void *) max_nl_cursor;
                }
                cond_resched_lock(&mapping->i_mmap_lock);
                max_nl_cursor += CLUSTER_SIZE;
        } while (max_nl_cursor <= max_nl_size);

        /*
         * Don't loop forever (perhaps all the remaining pages are
         * in locked vmas).  Reset cursor on all unreserved nonlinear
         * vmas, now forgetting on which ones it had fallen behind.
         */
        list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
                vma->vm_private_data = NULL;
out:
        spin_unlock(&mapping->i_mmap_lock);
        if (mlocked)
                ret = SWAP_MLOCK;       /* actually mlocked the page */
        else if (ret == SWAP_MLOCK)
                ret = SWAP_AGAIN;       /* saw VM_LOCKED vma */
        return ret;
}
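/*
 * Illustrative only: shrink_page_list() in vmscan is the main caller of
 * try_to_unmap() below, roughly
 *
 *      switch (try_to_unmap(page, 0)) {
 *      case SWAP_FAIL:
 *              goto activate_locked;
 *      case SWAP_AGAIN:
 *              goto keep_locked;
 *      case SWAP_MLOCK:
 *              goto cull_mlocked;
 *      case SWAP_SUCCESS:
 *              ;       // fall through and try to free the page
 *      }
 */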
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @migration: migration flag
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS - we succeeded in removing all mappings
 * SWAP_AGAIN   - we missed a mapping, try again later
 * SWAP_FAIL    - the page is unswappable
 * SWAP_MLOCK   - page is mlocked.
 */
int try_to_unmap(struct page *page, int migration)
{
        int ret;

        BUG_ON(!PageLocked(page));

        if (PageAnon(page))
                ret = try_to_unmap_anon(page, 0, migration);
        else
                ret = try_to_unmap_file(page, 0, migration);
        if (ret != SWAP_MLOCK && !page_mapped(page))
                ret = SWAP_SUCCESS;
        return ret;
}

#ifdef CONFIG_UNEVICTABLE_LRU
/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked.  The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_SUCCESS - no vma's holding page mlocked.
 * SWAP_AGAIN   - page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_MLOCK   - page is now mlocked.
 */
int try_to_munlock(struct page *page)
{
        VM_BUG_ON(!PageLocked(page) || PageLRU(page));

        if (PageAnon(page))
                return try_to_unmap_anon(page, 1, 0);
        else
                return try_to_unmap_file(page, 1, 0);
}
#endif
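/*
 * Illustrative only: the munlock path (munlock_vma_page() in mm/mlock.c)
 * roughly does
 *
 *      if (TestClearPageMlocked(page) && !isolate_lru_page(page)) {
 *              if (try_to_munlock(page) == SWAP_MLOCK)
 *                      ;       // another vma re-mlocked the page
 *              putback_lru_page(page);
 *      }
 *
 * so PG_mlocked stays cleared, and the page becomes evictable again,
 * only when no remaining vma has it mlocked.
 */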