/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

struct scan_control {
	/* Ask refill_inactive_zone, or shrink_cache to scan this many pages */
	unsigned long nr_to_scan;

	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Incremented by the number of pages reclaimed */
	unsigned long nr_reclaimed;

	unsigned long nr_mapped;	/* From page_state */

	/* Ask shrink_caches, or shrink_zone to scan at this priority */
	unsigned int priority;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
	 * In this context, it doesn't matter that we scan the
	 * whole list at once. */
	int swap_cluster_max;
};

/*
 * The list of shrinker callbacks used to apply pressure to
 * ageable caches.
 */
struct shrinker {
	shrinker_t		shrinker;
	struct list_head	list;
	int			seeks;	/* seeks to recreate an obj */
	long			nr;	/* objs pending delete */
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif
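
/*
 * The reclaim scanners below walk the LRU lists from the tail
 * (lru_to_page() picks up (_head)->prev), so while one page is being
 * processed these helpers prefetch the named field of the page that
 * will be looked at next.  When ARCH_HAS_PREFETCH/ARCH_HAS_PREFETCHW
 * are not defined they compile to nothing.
 */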

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
static long total_memory;

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

/*
 * Add a shrinker callback to be called from the vm
 */
struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
{
	struct shrinker *shrinker;

	shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
	if (shrinker) {
		shrinker->shrinker = theshrinker;
		shrinker->seeks = seeks;
		shrinker->nr = 0;
		down_write(&shrinker_rwsem);
		list_add_tail(&shrinker->list, &shrinker_list);
		up_write(&shrinker_rwsem);
	}
	return shrinker;
}
EXPORT_SYMBOL(set_shrinker);

/*
 * Remove one
 */
void remove_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker);
}
EXPORT_SYMBOL(remove_shrinker);

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encounters mapped pages on the LRU, it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
int shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages)
{
	struct shrinker *shrinker;
	int ret = 0;

	if (scanned == 0)
		scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem))
		return 1;	/* Assume we'll be able to shrink next time */

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;
		unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);

		delta = (4 * scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr < 0) {
			printk(KERN_ERR "%s: nr=%ld\n",
					__FUNCTION__, shrinker->nr);
			shrinker->nr = max_pass;
		}
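
		/*
		 * Example with made-up numbers: if we scanned 1000 LRU pages
		 * out of lru_pages = 100000 and this cache reported
		 * max_pass = 10000 objects with seeks = 2, the delta computed
		 * above is (4 * 1000 / 2) * 10000 / 100001, roughly 200
		 * objects - about 2% of the cache in response to a 1% LRU
		 * scan.  It accumulates in shrinker->nr and is worked off
		 * below in SHRINK_BATCH sized chunks.
		 */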

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (shrinker->nr > max_pass * 2)
			shrinker->nr = max_pass * 2;

		total_scan = shrinker->nr;
		shrinker->nr = 0;

		while (total_scan >= SHRINK_BATCH) {
			long this_scan = SHRINK_BATCH;
			int shrink_ret;
			int nr_before;

			nr_before = (*shrinker->shrinker)(0, gfp_mask);
			shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			mod_page_state(slabs_scanned, this_scan);
			total_scan -= this_scan;

			cond_resched();
		}

		shrinker->nr += total_scan;
	}
	up_read(&shrinker_rwsem);
	return ret;
}

/* Called without lock on whether page is mapped, so answer is unstable */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	/* File is mmap'd by somebody? */
	return mapping_mapped(mapping);
}

static inline int is_page_cache_freeable(struct page *page)
{
	return page_count(page) - !!PagePrivate(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller
 * has __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
	unlock_page(page);
}

/*
 * pageout is called by shrink_list() for each dirty page. Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in generic_file_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 * See swapfile.c:page_queue_congested().
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (PagePrivate(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __FUNCTION__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.nonblocking = 1,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}
		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}

		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

static int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return 0;		/* truncate got there first */

	write_lock_irq(&mapping->tree_lock);

	/*
	 * The non-racy check for busy page.  It is critical to check
	 * PageDirty _after_ making sure that the page is freeable and
	 * not in use by anybody.	(pagecache + us == 2)
	 */
	if (unlikely(page_count(page) != 2))
		goto cannot_free;
	smp_rmb();
	if (unlikely(PageDirty(page)))
		goto cannot_free;

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		write_unlock_irq(&mapping->tree_lock);
		swap_free(swap);
		__put_page(page);	/* The pagecache ref */
		return 1;
	}

	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	__put_page(page);
	return 1;

cannot_free:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed
 */
static int shrink_list(struct list_head *page_list, struct scan_control *sc)
{
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	int reclaimed = 0;

	cond_resched();

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		int referenced;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (TestSetPageLocked(page))
			goto keep;

		BUG_ON(PageActive(page));

		sc->nr_scanned++;

		if (!sc->may_swap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		if (PageWriteback(page))
			goto keep_locked;

		referenced = page_referenced(page, 1);
		/* In active use or really unfreeable?  Activate it. */
		if (referenced && page_mapping_inuse(page))
			goto activate_locked;

#ifdef CONFIG_SWAP
		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!sc->may_swap)
				goto keep_locked;
			if (!add_to_swap(page, GFP_ATOMIC))
				goto activate_locked;
		}
#endif /* CONFIG_SWAP */

		mapping = page_mapping(page);
		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			/*
			 * No unmapping if we do not swap
			 */
			if (!sc->may_swap)
				goto keep_locked;

			switch (try_to_unmap(page, 0)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			if (referenced)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch(pageout(page, mapping)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page) || PageDirty(page))
					goto keep;
				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (TestSetPageLocked(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1)
				goto free_it;
		}

		if (!remove_mapping(mapping, page))
			goto keep_locked;

free_it:
		unlock_page(page);
		reclaimed++;
		if (!pagevec_add(&freed_pvec, page))
			__pagevec_release_nonlru(&freed_pvec);
		continue;

activate_locked:
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		BUG_ON(PageLRU(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_release_nonlru(&freed_pvec);
	mod_page_state(pgactivate, pgactivate);
	sc->nr_reclaimed += reclaimed;
	return reclaimed;
}

#ifdef CONFIG_MIGRATION
static inline void move_to_lru(struct page *page)
{
	list_del(&page->lru);
	if (PageActive(page)) {
		/*
		 * lru_cache_add_active checks that
		 * the PG_active bit is off.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	} else {
		lru_cache_add(page);
	}
	put_page(page);
}

/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		move_to_lru(page);
		count++;
	}
	return count;
}

/*
 * Non migratable page
 */
int fail_migrate_page(struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * swapout a single page
 * page is locked upon entry, unlocked on exit
 */
static int swap_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (page_mapped(page) && mapping)
		if (try_to_unmap(page, 1) != SWAP_SUCCESS)
			goto unlock_retry;

	if (PageDirty(page)) {
		/* Page is dirty, try to write it out here */
		switch(pageout(page, mapping)) {
		case PAGE_KEEP:
		case PAGE_ACTIVATE:
			goto unlock_retry;

		case PAGE_SUCCESS:
			goto retry;

		case PAGE_CLEAN:
			; /* try to free the page below */
		}
	}

	if (PagePrivate(page)) {
		if (!try_to_release_page(page, GFP_KERNEL) ||
		    (!mapping && page_count(page) == 1))
			goto unlock_retry;
	}

	if (remove_mapping(mapping, page)) {
		/* Success */
		unlock_page(page);
		return 0;
	}

unlock_retry:
	unlock_page(page);

retry:
	return -EAGAIN;
}
EXPORT_SYMBOL(swap_page);

/*
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter <clameter@sgi.com>
 */
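
/*
 * Migrating a page is a two-step operation: migrate_page_remove_references()
 * unmaps the old page and replaces it with the new page in the mapping's
 * radix tree slot, then migrate_page_copy() transfers the data and the
 * relevant page flags.  migrate_page() further below combines both steps for
 * pages that carry no filesystem-private state.
 */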

/*
 * Remove references for a page and establish the new page with the correct
 * basic settings to be able to stop accesses to the page.
 */
int migrate_page_remove_references(struct page *newpage,
				struct page *page, int nr_refs)
{
	struct address_space *mapping = page_mapping(page);
	struct page **radix_pointer;

	/*
	 * Avoid doing any of the following work if the page count
	 * indicates that the page is in use or truncate has removed
	 * the page.
	 */
	if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
		return -EAGAIN;

	/*
	 * Establish swap ptes for anonymous pages or destroy pte
	 * maps for files.
	 *
	 * In order to reestablish file backed mappings the fault handlers
	 * will take the radix tree_lock which may then be used to stop
	 * processes from accessing this page until the new page is ready.
	 *
	 * A process accessing via a swap pte (an anonymous page) will take a
	 * page_lock on the old page which will block the process until the
	 * migration attempt is complete. At that time the PageSwapCache bit
	 * will be examined. If the page was migrated then the PageSwapCache
	 * bit will be clear and the operation to retrieve the page will be
	 * retried which will find the new page in the radix tree. Then a new
	 * direct mapping may be generated based on the radix tree contents.
	 *
	 * If the page was not migrated then the PageSwapCache bit
	 * is still set and the operation may continue.
	 */
	if (try_to_unmap(page, 1) == SWAP_FAIL)
		/* A vma has VM_LOCKED set -> Permanent failure */
		return -EPERM;

	/*
	 * Give up if we were unable to remove all mappings.
	 */
	if (page_mapcount(page))
		return -EAGAIN;

	write_lock_irq(&mapping->tree_lock);

	radix_pointer = (struct page **)radix_tree_lookup_slot(
						&mapping->page_tree,
						page_index(page));

	if (!page_mapping(page) || page_count(page) != nr_refs ||
			*radix_pointer != page) {
		write_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 *
	 * Certain minimal information about a page must be available
	 * in order for other subsystems to properly handle the page if they
	 * find it through the radix tree update before we are finished
	 * copying the page.
	 */
	get_page(newpage);
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	*radix_pointer = newpage;
	__put_page(page);
	write_unlock_irq(&mapping->tree_lock);

	return 0;
}
EXPORT_SYMBOL(migrate_page_remove_references);

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		set_page_dirty(newpage);
	}

	ClearPageSwapCache(page);
	ClearPageActive(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}
EXPORT_SYMBOL(migrate_page_copy);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_remove_references(newpage, page, 2);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);

	/*
	 * Remove auxiliary swap entries and replace
	 * them with real ptes.
	 *
	 * Note that a real pte entry will allow processes that are not
	 * waiting on the page lock to use the new page via the page tables
	 * before the new page is unlocked.
	 */
	remove_from_swap(newpage);
	return 0;
}
EXPORT_SYMBOL(migrate_page);
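
/*
 * A filesystem whose pages need no special treatment can make them migratable
 * simply by pointing its address_space_operations at the generic helper
 * above, roughly like this (foo_aops is just an illustration):
 *
 *	static struct address_space_operations foo_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 *
 * Filesystems that cannot support migration can use fail_migrate_page()
 * instead; migrate_pages() below also has a fallback path (writeout, buffer
 * dropping, eventually swap_page()) for mappings that provide no
 * ->migratepage method at all.
 */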

/*
 * migrate_pages
 *
 * Two lists are passed to this function. The first list
 * contains the pages isolated from the LRU to be migrated.
 * The second list contains new pages that the pages isolated
 * can be moved to. If the second list is NULL then all
 * pages are swapped out.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the "to" list has become empty
 * or no retryable pages exist anymore.
 *
 * Return: Number of pages not migrated when "to" ran empty.
 */
int migrate_pages(struct list_head *from, struct list_head *to,
		  struct list_head *moved, struct list_head *failed)
{
	int retry;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

redo:
	retry = 0;

	list_for_each_entry_safe(page, page2, from, lru) {
		struct page *newpage = NULL;
		struct address_space *mapping;

		cond_resched();

		rc = 0;
		if (page_count(page) == 1)
			/* page was freed from under us. So we are done. */
			goto next;

		if (to && list_empty(to))
			break;

		/*
		 * Skip locked pages during the first two passes to give the
		 * functions holding the lock time to release the page. Later we
		 * use lock_page() to have a higher chance of acquiring the
		 * lock.
		 */
		rc = -EAGAIN;
		if (pass > 2)
			lock_page(page);
		else
			if (TestSetPageLocked(page))
				goto next;

		/*
		 * Only wait on writeback if we have already done a pass where
		 * we may have triggered writeouts for lots of pages.
		 */
		if (pass > 0) {
			wait_on_page_writeback(page);
		} else {
			if (PageWriteback(page))
				goto unlock_page;
		}

		/*
		 * Anonymous pages must have swap cache references otherwise
		 * the information contained in the page maps cannot be
		 * preserved.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!add_to_swap(page, GFP_KERNEL)) {
				rc = -ENOMEM;
				goto unlock_page;
			}
		}

		if (!to) {
			rc = swap_page(page);
			goto next;
		}

		newpage = lru_to_page(to);
		lock_page(newpage);

		/*
		 * Pages are properly locked and writeback is complete.
		 * Try to migrate the page.
		 */
		mapping = page_mapping(page);
		if (!mapping)
			goto unlock_both;

		if (mapping->a_ops->migratepage) {
			/*
			 * Most pages have a mapping and most filesystems
			 * should provide a migration function. Anonymous
			 * pages are part of swap space which also has its
			 * own migration function. This is the most common
			 * path for page migration.
			 */
			rc = mapping->a_ops->migratepage(newpage, page);
			goto unlock_both;
		}

		/*
		 * Default handling if a filesystem does not provide
		 * a migration function. We can only migrate clean
		 * pages so try to write out any dirty pages first.
		 */
		if (PageDirty(page)) {
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
			case PAGE_ACTIVATE:
				goto unlock_both;

			case PAGE_SUCCESS:
				unlock_page(newpage);
				goto next;

			case PAGE_CLEAN:
				; /* try to migrate the page below */
			}
		}

		/*
		 * Buffers are managed in a filesystem specific way.
		 * We must have no buffers or drop them.
		 */
		if (!page_has_buffers(page) ||
		    try_to_release_page(page, GFP_KERNEL)) {
			rc = migrate_page(newpage, page);
			goto unlock_both;
		}

		/*
		 * On early passes with mapped pages simply
		 * retry. There may be a lock held for some
		 * buffers that may go away. Later
		 * swap them out.
		 */
		if (pass > 4) {
			/*
			 * Persistently unable to drop buffers..... As a
			 * measure of last resort we fall back to
			 * swap_page().
			 */
			unlock_page(newpage);
			newpage = NULL;
			rc = swap_page(page);
			goto next;
		}

unlock_both:
		unlock_page(newpage);

unlock_page:
		unlock_page(page);

next:
		if (rc == -EAGAIN) {
			retry++;
		} else if (rc) {
			/* Permanent failure */
			list_move(&page->lru, failed);
			nr_failed++;
		} else {
			if (newpage) {
				/* Successful migration. Return page to LRU */
				move_to_lru(newpage);
			}
			list_move(&page->lru, moved);
		}
	}
	if (retry && pass++ < 10)
		goto redo;

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return nr_failed + retry;
}

/*
 * Isolate one page from the LRU lists and put it on the
 * indicated list with elevated refcount.
 *
 * Result:
 *  0 = page not on LRU list
 *  1 = page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page)
{
	int ret = 0;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		spin_lock_irq(&zone->lru_lock);
		if (TestClearPageLRU(page)) {
			ret = 1;
			get_page(page);
			if (PageActive(page))
				del_page_from_active_list(zone, page);
			else
				del_page_from_inactive_list(zone, page);
		}
		spin_unlock_irq(&zone->lru_lock);
	}

	return ret;
}
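
/*
 * Callers of the migration code typically pull pages off the LRU with
 * isolate_lru_page() onto a private list, hand that list to migrate_pages(),
 * and finally return whatever they still hold to the LRU with
 * putback_lru_pages().  A rough sketch of the isolation step:
 *
 *	LIST_HEAD(pagelist);
 *
 *	if (isolate_lru_page(page))
 *		list_add_tail(&page->lru, &pagelist);
 */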
#endif

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 *
 * returns how many pages were moved onto *@dst.
 */
static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
			     struct list_head *dst, int *scanned)
{
	int nr_taken = 0;
	struct page *page;
	int scan = 0;

	while (scan++ < nr_to_scan && !list_empty(src)) {
		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		if (!TestClearPageLRU(page))
			BUG();
		list_del(&page->lru);
		if (get_page_testone(page)) {
			/*
			 * It is being freed elsewhere
			 */
			__put_page(page);
			SetPageLRU(page);
			list_add(&page->lru, src);
			continue;
		} else {
			list_add(&page->lru, dst);
			nr_taken++;
		}
	}

	*scanned = scan;
	return nr_taken;
}

/*
 * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
 */
static void shrink_cache(struct zone *zone, struct scan_control *sc)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	int max_scan = sc->nr_to_scan;

	pagevec_init(&pvec, 1);

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	while (max_scan > 0) {
		struct page *page;
		int nr_taken;
		int nr_scan;
		int nr_freed;

		nr_taken = isolate_lru_pages(sc->swap_cluster_max,
					     &zone->inactive_list,
					     &page_list, &nr_scan);
		zone->nr_inactive -= nr_taken;
		zone->pages_scanned += nr_scan;
		spin_unlock_irq(&zone->lru_lock);

		if (nr_taken == 0)
			goto done;

		max_scan -= nr_scan;
		nr_freed = shrink_list(&page_list, sc);

		local_irq_disable();
		if (current_is_kswapd()) {
			__mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
			__mod_page_state(kswapd_steal, nr_freed);
		} else
			__mod_page_state_zone(zone, pgscan_direct, nr_scan);
		__mod_page_state_zone(zone, pgsteal, nr_freed);

		spin_lock(&zone->lru_lock);
		/*
		 * Put back any unfreeable pages.
		 */
		while (!list_empty(&page_list)) {
			page = lru_to_page(&page_list);
			if (TestSetPageLRU(page))
				BUG();
			list_del(&page->lru);
			if (PageActive(page))
				add_page_to_active_list(zone, page);
			else
				add_page_to_inactive_list(zone, page);
			if (!pagevec_add(&pvec, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_release(&pvec);
				spin_lock_irq(&zone->lru_lock);
			}
		}
	}
	spin_unlock_irq(&zone->lru_lock);
done:
	pagevec_release(&pvec);
}

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
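
/*
 * refill_inactive_zone() below decides whether mapped pages are fair game
 * from three ingredients computed at the top of the function:
 *
 *	distress      = 100 >> zone->prev_priority	(0 relaxed .. 100 desperate)
 *	mapped_ratio  = sc->nr_mapped * 100 / total_memory
 *	swap_tendency = mapped_ratio / 2 + distress + vm_swappiness
 *
 * Mapped pages become candidates once swap_tendency reaches 100.  For example
 * (made-up numbers): with half of memory mapped (mapped_ratio = 50), the
 * default vm_swappiness of 60 and an untroubled zone (prev_priority = 12, so
 * distress = 0), swap_tendency is 25 + 0 + 60 = 85 and mapped pages are left
 * alone; once reclaim gets into difficulty (prev_priority = 0, distress = 100)
 * the sum exceeds 100 and mapped pages are deactivated as well.
 */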
static void
refill_inactive_zone(struct zone *zone, struct scan_control *sc)
{
	int pgmoved;
	int pgdeactivate = 0;
	int pgscanned;
	int nr_pages = sc->nr_to_scan;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
	struct page *page;
	struct pagevec pvec;
	int reclaim_mapped = 0;

	if (unlikely(sc->may_swap)) {
		long mapped_ratio;
		long distress;
		long swap_tendency;

		/*
		 * `distress' is a measure of how much trouble we're having
		 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
		 */
		distress = 100 >> zone->prev_priority;

		/*
		 * The point of this algorithm is to decide when to start
		 * reclaiming mapped memory instead of just pagecache.  Work
		 * out how much memory is mapped.
		 */
		mapped_ratio = (sc->nr_mapped * 100) / total_memory;

		/*
		 * Now decide how much we really want to unmap some pages.  The
		 * mapped ratio is downgraded - just because there's a lot of
		 * mapped memory doesn't necessarily mean that page reclaim
		 * isn't succeeding.
		 *
		 * The distress ratio is important - we don't want to start
		 * going oom.
		 *
		 * A 100% value of vm_swappiness overrides this algorithm
		 * altogether.
		 */
		swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;

		/*
		 * Now use this metric to decide whether to start moving mapped
		 * memory onto the inactive list.
		 */
		if (swap_tendency >= 100)
			reclaim_mapped = 1;
	}

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
				    &l_hold, &pgscanned);
	zone->pages_scanned += pgscanned;
	zone->nr_active -= pgmoved;
	spin_unlock_irq(&zone->lru_lock);

	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);
		if (page_mapped(page)) {
			if (!reclaim_mapped ||
			    (total_swap_pages == 0 && PageAnon(page)) ||
			    page_referenced(page, 0)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}
		list_add(&page->lru, &l_inactive);
	}

	pagevec_init(&pvec, 1);
	pgmoved = 0;
	spin_lock_irq(&zone->lru_lock);
	while (!list_empty(&l_inactive)) {
		page = lru_to_page(&l_inactive);
		prefetchw_prev_lru_page(page, &l_inactive, flags);
		if (TestSetPageLRU(page))
			BUG();
		if (!TestClearPageActive(page))
			BUG();
		list_move(&page->lru, &zone->inactive_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_inactive += pgmoved;
			spin_unlock_irq(&zone->lru_lock);
			pgdeactivate += pgmoved;
			pgmoved = 0;
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_inactive += pgmoved;
	pgdeactivate += pgmoved;
	if (buffer_heads_over_limit) {
		spin_unlock_irq(&zone->lru_lock);
		pagevec_strip(&pvec);
		spin_lock_irq(&zone->lru_lock);
	}

	pgmoved = 0;
	while (!list_empty(&l_active)) {
		page = lru_to_page(&l_active);
		prefetchw_prev_lru_page(page, &l_active, flags);
		if (TestSetPageLRU(page))
			BUG();
		BUG_ON(!PageActive(page));
		list_move(&page->lru, &zone->active_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_active += pgmoved;
			pgmoved = 0;
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_active += pgmoved;
	spin_unlock(&zone->lru_lock);

	__mod_page_state_zone(zone, pgrefill, pgscanned);
	__mod_page_state(pgdeactivate, pgdeactivate);
	local_irq_enable();

	pagevec_release(&pvec);
}

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static void
shrink_zone(struct zone *zone, struct scan_control *sc)
{
	unsigned long nr_active;
	unsigned long nr_inactive;

	atomic_inc(&zone->reclaim_in_progress);

	/*
	 * Add one to `nr_to_scan' just to make sure that the kernel will
	 * slowly sift through the active list.
	 */
	zone->nr_scan_active += (zone->nr_active >> sc->priority) + 1;
	nr_active = zone->nr_scan_active;
	if (nr_active >= sc->swap_cluster_max)
		zone->nr_scan_active = 0;
	else
		nr_active = 0;

	zone->nr_scan_inactive += (zone->nr_inactive >> sc->priority) + 1;
	nr_inactive = zone->nr_scan_inactive;
	if (nr_inactive >= sc->swap_cluster_max)
		zone->nr_scan_inactive = 0;
	else
		nr_inactive = 0;

	while (nr_active || nr_inactive) {
		if (nr_active) {
			sc->nr_to_scan = min(nr_active,
					(unsigned long)sc->swap_cluster_max);
			nr_active -= sc->nr_to_scan;
			refill_inactive_zone(zone, sc);
		}

		if (nr_inactive) {
			sc->nr_to_scan = min(nr_inactive,
					(unsigned long)sc->swap_cluster_max);
			nr_inactive -= sc->nr_to_scan;
			shrink_cache(zone, sc);
		}
	}

	throttle_vm_writeout();

	atomic_dec(&zone->reclaim_in_progress);
}

/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over pages_high.  Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The zones may be over pages_high but they must go *over* pages_high to
 *    satisfy the `incremental min' zone defense algorithm.
 *
 * The number of reclaimed pages is accumulated in sc->nr_reclaimed.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static void
shrink_caches(struct zone **zones, struct scan_control *sc)
{
	int i;

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (!populated_zone(zone))
			continue;

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->temp_priority = sc->priority;
		if (zone->prev_priority > sc->priority)
			zone->prev_priority = sc->priority;

		if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY)
			continue;	/* Let kswapd poll it */

		shrink_zone(zone, sc);
	}
}
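
/*
 * Both the direct reclaim path below and kswapd work downwards from
 * DEF_PRIORITY towards priority 0.  At priority p, shrink_zone() feeds
 * roughly nr_active >> p and nr_inactive >> p pages into the scanners, so
 * each successive pass covers twice as much of the LRU until priority 0
 * makes the whole list eligible.  For example, a zone with 1048576 inactive
 * pages contributes about 256 pages per pass at DEF_PRIORITY (12) and the
 * full million at priority 0.
 */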

/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick pdflush and take explicit naps in the
 * hope that some of these pages can be written.  But if the allocating task
 * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
 */
int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
{
	int priority;
	int ret = 0;
	int total_scanned = 0, total_reclaimed = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc;
	unsigned long lru_pages = 0;
	int i;

	sc.gfp_mask = gfp_mask;
	sc.may_writepage = !laptop_mode;
	sc.may_swap = 1;

	inc_page_state(allocstall);

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->temp_priority = DEF_PRIORITY;
		lru_pages += zone->nr_active + zone->nr_inactive;
	}

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		sc.nr_mapped = read_page_state(nr_mapped);
		sc.nr_scanned = 0;
		sc.nr_reclaimed = 0;
		sc.priority = priority;
		sc.swap_cluster_max = SWAP_CLUSTER_MAX;
		if (!priority)
			disable_swap_token();
		shrink_caches(zones, &sc);
		shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
		if (reclaim_state) {
			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
			reclaim_state->reclaimed_slab = 0;
		}
		total_scanned += sc.nr_scanned;
		total_reclaimed += sc.nr_reclaimed;
		if (total_reclaimed >= sc.swap_cluster_max) {
			ret = 1;
			goto out;
		}

		/*
		 * Try to write back as many pages as we just scanned.  This
		 * tends to cause slow streaming writers to write data to the
		 * disk smoothly, at the dirtying rate, which is nice.  But
		 * that's undesirable in laptop mode, where we *want* lumpy
		 * writeout.  So in laptop mode, write out the whole world.
		 */
		if (total_scanned > sc.swap_cluster_max + sc.swap_cluster_max/2) {
			wakeup_pdflush(laptop_mode ? 0 : total_scanned);
			sc.may_writepage = 1;
		}

		/* Take a nap, wait for some writeback to complete */
		if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
			blk_congestion_wait(WRITE, HZ/10);
	}
out:
	for (i = 0; zones[i] != 0; i++) {
		struct zone *zone = zones[i];

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->prev_priority = zone->temp_priority;
	}
	return ret;
}

/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at pages_high.
 *
 * If `nr_pages' is non-zero then it is the number of pages which are to be
 * reclaimed, regardless of the zone occupancies.  This is a software suspend
 * special.
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.
 * Mark the zone as dead and from now on, only perform a short scan.  Basically
 * we're polling the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > pages_high, but once a zone is found to have
 * free_pages <= pages_high, we scan that zone and the lower zones regardless
 * of the number of free pages in the lower zones.  This interoperates with
 * the page allocator fallback scheme to ensure that aging of pages is balanced
 * across the zones.
 */
static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int order)
{
	int to_free = nr_pages;
	int all_zones_ok;
	int priority;
	int i;
	int total_scanned, total_reclaimed;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc;

loop_again:
	total_scanned = 0;
	total_reclaimed = 0;
	sc.gfp_mask = GFP_KERNEL;
	sc.may_writepage = !laptop_mode;
	sc.may_swap = 1;
	sc.nr_mapped = read_page_state(nr_mapped);

	inc_page_state(pageoutrun);

	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->temp_priority = DEF_PRIORITY;
	}

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
		unsigned long lru_pages = 0;

		/* The swap token gets in the way of swapout... */
		if (!priority)
			disable_swap_token();

		all_zones_ok = 1;

		if (nr_pages == 0) {
			/*
			 * Scan in the highmem->dma direction for the highest
			 * zone which needs scanning
			 */
			for (i = pgdat->nr_zones - 1; i >= 0; i--) {
				struct zone *zone = pgdat->node_zones + i;

				if (!populated_zone(zone))
					continue;

				if (zone->all_unreclaimable &&
						priority != DEF_PRIORITY)
					continue;

				if (!zone_watermark_ok(zone, order,
						zone->pages_high, 0, 0)) {
					end_zone = i;
					goto scan;
				}
			}
			goto out;
		} else {
			end_zone = pgdat->nr_zones - 1;
		}
scan:
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			lru_pages += zone->nr_active + zone->nr_inactive;
		}

		/*
		 * Now scan the zone in the dma->highmem direction, stopping
		 * at the last zone which needs scanning.
		 *
		 * We do this because the page allocator works in the opposite
		 * direction.  This prevents the page allocator from allocating
		 * pages behind kswapd's direction of progress, which would
		 * cause too much scanning of the lower zones.
		 */
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;
			int nr_slab;

			if (!populated_zone(zone))
				continue;

			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
				continue;

			if (nr_pages == 0) {	/* Not software suspend */
				if (!zone_watermark_ok(zone, order,
						zone->pages_high, end_zone, 0))
					all_zones_ok = 0;
			}
			zone->temp_priority = priority;
			if (zone->prev_priority > priority)
				zone->prev_priority = priority;
			sc.nr_scanned = 0;
			sc.nr_reclaimed = 0;
			sc.priority = priority;
			sc.swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX;
			shrink_zone(zone, &sc);
			reclaim_state->reclaimed_slab = 0;
			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
						lru_pages);
			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
			total_reclaimed += sc.nr_reclaimed;
			total_scanned += sc.nr_scanned;
			if (zone->all_unreclaimable)
				continue;
			if (nr_slab == 0 && zone->pages_scanned >=
				    (zone->nr_active + zone->nr_inactive) * 4)
				zone->all_unreclaimable = 1;
			/*
			 * If we've done a decent amount of scanning and
			 * the reclaim ratio is low, start doing writepage
			 * even in laptop mode
			 */
			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
			    total_scanned > total_reclaimed + total_reclaimed / 2)
				sc.may_writepage = 1;
		}
		if (nr_pages && to_free > total_reclaimed)
			continue;	/* swsusp: need to do more work */
		if (all_zones_ok)
			break;		/* kswapd: all done */
		/*
		 * OK, kswapd is getting into trouble.  Take a nap, then take
		 * another pass across the zones.
		 */
		if (total_scanned && priority < DEF_PRIORITY - 2)
			blk_congestion_wait(WRITE, HZ/10);

		/*
		 * We do this so kswapd doesn't build up large priorities for
		 * example when it is freeing in parallel with allocators. It
		 * matches the direct reclaim path behaviour in terms of impact
		 * on zone->*_priority.
		 */
		if ((total_reclaimed >= SWAP_CLUSTER_MAX) && (!nr_pages))
			break;
	}
out:
	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->prev_priority = zone->temp_priority;
	}
	if (!all_zones_ok) {
		cond_resched();
		goto loop_again;
	}

	return total_reclaimed;
}

/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
	unsigned long order;
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	cpumask_t cpumask;

	daemonize("kswapd%d", pgdat->node_id);
	cpumask = node_to_cpumask(pgdat->node_id);
	if (!cpus_empty(cpumask))
		set_cpus_allowed(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
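
	/*
	 * Each iteration of the loop below works as follows: wakeup_kswapd()
	 * records the largest allocation order currently short of free pages
	 * in pgdat->kswapd_max_order and wakes us; we pick that order up,
	 * rebalance the node with balance_pgdat(), and only go back to sleep
	 * if no caller asked for a larger order in the meantime.
	 */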

	order = 0;
	for ( ; ; ) {
		unsigned long new_order;

		try_to_freeze();

		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
		new_order = pgdat->kswapd_max_order;
		pgdat->kswapd_max_order = 0;
		if (order < new_order) {
			/*
			 * Don't sleep if someone wants a larger 'order'
			 * allocation
			 */
			order = new_order;
		} else {
			schedule();
			order = pgdat->kswapd_max_order;
		}
		finish_wait(&pgdat->kswapd_wait, &wait);

		balance_pgdat(pgdat, 0, order);
	}
	return 0;
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order)
{
	pg_data_t *pgdat;

	if (!populated_zone(zone))
		return;

	pgdat = zone->zone_pgdat;
	if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
		return;
	if (pgdat->kswapd_max_order < order)
		pgdat->kswapd_max_order = order;
	if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
		return;
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	wake_up_interruptible(&pgdat->kswapd_wait);
}

#ifdef CONFIG_PM
/*
 * Try to free `nr_pages' of memory, system-wide.  Returns the number of freed
 * pages.
 */
int shrink_all_memory(int nr_pages)
{
	pg_data_t *pgdat;
	int nr_to_free = nr_pages;
	int ret = 0;
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};

	current->reclaim_state = &reclaim_state;
	for_each_pgdat(pgdat) {
		int freed;
		freed = balance_pgdat(pgdat, nr_to_free, 0);
		ret += freed;
		nr_to_free -= freed;
		if (nr_to_free <= 0)
			break;
	}
	current->reclaim_state = NULL;
	return ret;
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness.  So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	pg_data_t *pgdat;
	cpumask_t mask;

	if (action == CPU_ONLINE) {
		for_each_pgdat(pgdat) {
			mask = node_to_cpumask(pgdat->node_id);
			if (any_online_cpu(mask) != NR_CPUS)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __init kswapd_init(void)
{
	pg_data_t *pgdat;
	swap_setup();
	for_each_pgdat(pgdat)
		pgdat->kswapd
		= find_task_by_pid(kernel_thread(kswapd, pgdat, CLONE_KERNEL));
	total_memory = nr_free_pagecache_pages();
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)

#ifdef CONFIG_NUMA
/*
 * Zone reclaim mode
 *
 * If non-zero call zone_reclaim when the number of free pages falls below
 * the watermarks.
 *
 * In the future we may add flags to the mode. However, the page allocator
 * should only have to check that zone_reclaim_mode != 0 before calling
 * zone_reclaim().
 */
int zone_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)	/* Run shrink_cache on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */
#define RECLAIM_SLAB (1<<3)	/* Do a global slab shrink if the zone is out of memory */
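
/*
 * The mode is a bitmask, normally tuned via /proc/sys/vm/zone_reclaim_mode.
 * For example, writing 1 enables plain zone reclaim (RECLAIM_ZONE only),
 * while writing 7 (RECLAIM_ZONE | RECLAIM_WRITE | RECLAIM_SWAP) additionally
 * allows dirty pages to be written out and anonymous pages to be swapped
 * during the local reclaim pass.
 */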

/*
 * Minimum time between zone reclaim scans
 */
int zone_reclaim_interval __read_mostly = 30*HZ;

/*
 * Priority for ZONE_RECLAIM. This determines the fraction of pages
 * of a node considered for each zone_reclaim. 4 scans 1/16th of
 * a zone.
 */
#define ZONE_RECLAIM_PRIORITY 4

/*
 * Try to free up some pages from this zone through reclaim.
 */
int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	int nr_pages;
	struct task_struct *p = current;
	struct reclaim_state reclaim_state;
	struct scan_control sc;
	cpumask_t mask;
	int node_id;

	if (time_before(jiffies,
		zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval))
		return 0;

	if (!(gfp_mask & __GFP_WAIT) ||
		zone->all_unreclaimable ||
		atomic_read(&zone->reclaim_in_progress) > 0 ||
		(p->flags & PF_MEMALLOC))
		return 0;

	node_id = zone->zone_pgdat->node_id;
	mask = node_to_cpumask(node_id);
	if (!cpus_empty(mask) && node_id != numa_node_id())
		return 0;

	sc.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE);
	sc.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP);
	sc.nr_scanned = 0;
	sc.nr_reclaimed = 0;
	sc.priority = ZONE_RECLAIM_PRIORITY + 1;
	sc.nr_mapped = read_page_state(nr_mapped);
	sc.gfp_mask = gfp_mask;

	disable_swap_token();

	nr_pages = 1 << order;
	if (nr_pages > SWAP_CLUSTER_MAX)
		sc.swap_cluster_max = nr_pages;
	else
		sc.swap_cluster_max = SWAP_CLUSTER_MAX;

	cond_resched();
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
	 * and we also need to be able to write out pages for RECLAIM_WRITE
	 * and RECLAIM_SWAP.
	 */
	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	/*
	 * Free memory by calling shrink zone with increasing priorities
	 * until we have enough memory freed.
	 */
	do {
		sc.priority--;
		shrink_zone(zone, &sc);

	} while (sc.nr_reclaimed < nr_pages && sc.priority > 0);

	if (sc.nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) {
		/*
		 * shrink_slab does not currently allow us to determine
		 * how many pages were freed in the zone. So we just
		 * shake the slab and then go offnode for a single allocation.
		 *
		 * shrink_slab will free memory on all zones and may take
		 * a long time.
		 */
		shrink_slab(sc.nr_scanned, gfp_mask, order);
	}

	p->reclaim_state = NULL;
	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);

	if (sc.nr_reclaimed == 0)
		zone->last_unsuccessful_zone_reclaim = jiffies;

	return sc.nr_reclaimed >= nr_pages;
}
#endif