1 /* 2 * linux/mm/vmscan.c 3 * 4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 5 * 6 * Swap reorganised 29.12.95, Stephen Tweedie. 7 * kswapd added: 7.1.96 sct 8 * Removed kswapd_ctl limits, and swap out as many pages as needed 9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel. 10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com). 11 * Multiqueue VM started 5.8.00, Rik van Riel. 12 */ 13 14 #include <linux/mm.h> 15 #include <linux/module.h> 16 #include <linux/gfp.h> 17 #include <linux/kernel_stat.h> 18 #include <linux/swap.h> 19 #include <linux/pagemap.h> 20 #include <linux/init.h> 21 #include <linux/highmem.h> 22 #include <linux/vmstat.h> 23 #include <linux/file.h> 24 #include <linux/writeback.h> 25 #include <linux/blkdev.h> 26 #include <linux/buffer_head.h> /* for try_to_release_page(), 27 buffer_heads_over_limit */ 28 #include <linux/mm_inline.h> 29 #include <linux/backing-dev.h> 30 #include <linux/rmap.h> 31 #include <linux/topology.h> 32 #include <linux/cpu.h> 33 #include <linux/cpuset.h> 34 #include <linux/compaction.h> 35 #include <linux/notifier.h> 36 #include <linux/rwsem.h> 37 #include <linux/delay.h> 38 #include <linux/kthread.h> 39 #include <linux/freezer.h> 40 #include <linux/memcontrol.h> 41 #include <linux/delayacct.h> 42 #include <linux/sysctl.h> 43 #include <linux/oom.h> 44 #include <linux/prefetch.h> 45 46 #include <asm/tlbflush.h> 47 #include <asm/div64.h> 48 49 #include <linux/swapops.h> 50 51 #include "internal.h" 52 53 #define CREATE_TRACE_POINTS 54 #include <trace/events/vmscan.h> 55 56 /* 57 * reclaim_mode determines how the inactive list is shrunk 58 * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages 59 * RECLAIM_MODE_ASYNC: Do not block 60 * RECLAIM_MODE_SYNC: Allow blocking e.g. call wait_on_page_writeback 61 * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference 62 * page from the LRU and reclaim all pages within a 63 * naturally aligned range 64 * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of 65 * order-0 pages and then compact the zone 66 */ 67 typedef unsigned __bitwise__ reclaim_mode_t; 68 #define RECLAIM_MODE_SINGLE ((__force reclaim_mode_t)0x01u) 69 #define RECLAIM_MODE_ASYNC ((__force reclaim_mode_t)0x02u) 70 #define RECLAIM_MODE_SYNC ((__force reclaim_mode_t)0x04u) 71 #define RECLAIM_MODE_LUMPYRECLAIM ((__force reclaim_mode_t)0x08u) 72 #define RECLAIM_MODE_COMPACTION ((__force reclaim_mode_t)0x10u) 73 74 struct scan_control { 75 /* Incremented by the number of inactive pages that were scanned */ 76 unsigned long nr_scanned; 77 78 /* Number of pages freed so far during a call to shrink_zones() */ 79 unsigned long nr_reclaimed; 80 81 /* How many pages shrink_list() should reclaim */ 82 unsigned long nr_to_reclaim; 83 84 unsigned long hibernation_mode; 85 86 /* This context's GFP mask */ 87 gfp_t gfp_mask; 88 89 int may_writepage; 90 91 /* Can mapped pages be reclaimed? */ 92 int may_unmap; 93 94 /* Can pages be swapped as part of reclaim? */ 95 int may_swap; 96 97 int order; 98 99 /* 100 * Intend to reclaim enough continuous memory rather than reclaim 101 * enough amount of memory. i.e, mode for high order allocation. 102 */ 103 reclaim_mode_t reclaim_mode; 104 105 /* 106 * The memory cgroup that hit its limit and as a result is the 107 * primary target of this reclaim invocation. 108 */ 109 struct mem_cgroup *target_mem_cgroup; 110 111 /* 112 * Nodemask of nodes allowed by the caller. If NULL, all nodes 113 * are scanned. 
114 */ 115 nodemask_t *nodemask; 116 }; 117 118 struct mem_cgroup_zone { 119 struct mem_cgroup *mem_cgroup; 120 struct zone *zone; 121 }; 122 123 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) 124 125 #ifdef ARCH_HAS_PREFETCH 126 #define prefetch_prev_lru_page(_page, _base, _field) \ 127 do { \ 128 if ((_page)->lru.prev != _base) { \ 129 struct page *prev; \ 130 \ 131 prev = lru_to_page(&(_page->lru)); \ 132 prefetch(&prev->_field); \ 133 } \ 134 } while (0) 135 #else 136 #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0) 137 #endif 138 139 #ifdef ARCH_HAS_PREFETCHW 140 #define prefetchw_prev_lru_page(_page, _base, _field) \ 141 do { \ 142 if ((_page)->lru.prev != _base) { \ 143 struct page *prev; \ 144 \ 145 prev = lru_to_page(&(_page->lru)); \ 146 prefetchw(&prev->_field); \ 147 } \ 148 } while (0) 149 #else 150 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0) 151 #endif 152 153 /* 154 * From 0 .. 100. Higher means more swappy. 155 */ 156 int vm_swappiness = 60; 157 long vm_total_pages; /* The total number of pages which the VM controls */ 158 159 static LIST_HEAD(shrinker_list); 160 static DECLARE_RWSEM(shrinker_rwsem); 161 162 #ifdef CONFIG_CGROUP_MEM_RES_CTLR 163 static bool global_reclaim(struct scan_control *sc) 164 { 165 return !sc->target_mem_cgroup; 166 } 167 168 static bool scanning_global_lru(struct mem_cgroup_zone *mz) 169 { 170 return !mz->mem_cgroup; 171 } 172 #else 173 static bool global_reclaim(struct scan_control *sc) 174 { 175 return true; 176 } 177 178 static bool scanning_global_lru(struct mem_cgroup_zone *mz) 179 { 180 return true; 181 } 182 #endif 183 184 static struct zone_reclaim_stat *get_reclaim_stat(struct mem_cgroup_zone *mz) 185 { 186 if (!scanning_global_lru(mz)) 187 return mem_cgroup_get_reclaim_stat(mz->mem_cgroup, mz->zone); 188 189 return &mz->zone->reclaim_stat; 190 } 191 192 static unsigned long zone_nr_lru_pages(struct mem_cgroup_zone *mz, 193 enum lru_list lru) 194 { 195 if (!scanning_global_lru(mz)) 196 return mem_cgroup_zone_nr_lru_pages(mz->mem_cgroup, 197 zone_to_nid(mz->zone), 198 zone_idx(mz->zone), 199 BIT(lru)); 200 201 return zone_page_state(mz->zone, NR_LRU_BASE + lru); 202 } 203 204 205 /* 206 * Add a shrinker callback to be called from the vm 207 */ 208 void register_shrinker(struct shrinker *shrinker) 209 { 210 atomic_long_set(&shrinker->nr_in_batch, 0); 211 down_write(&shrinker_rwsem); 212 list_add_tail(&shrinker->list, &shrinker_list); 213 up_write(&shrinker_rwsem); 214 } 215 EXPORT_SYMBOL(register_shrinker); 216 217 /* 218 * Remove one 219 */ 220 void unregister_shrinker(struct shrinker *shrinker) 221 { 222 down_write(&shrinker_rwsem); 223 list_del(&shrinker->list); 224 up_write(&shrinker_rwsem); 225 } 226 EXPORT_SYMBOL(unregister_shrinker); 227 228 static inline int do_shrinker_shrink(struct shrinker *shrinker, 229 struct shrink_control *sc, 230 unsigned long nr_to_scan) 231 { 232 sc->nr_to_scan = nr_to_scan; 233 return (*shrinker->shrink)(shrinker, sc); 234 } 235 236 #define SHRINK_BATCH 128 237 /* 238 * Call the shrink functions to age shrinkable caches 239 * 240 * Here we assume it costs one seek to replace a lru page and that it also 241 * takes a seek to recreate a cache object. With this in mind we age equal 242 * percentages of the lru and ageable caches. This should balance the seeks 243 * generated by these structures. 244 * 245 * If the vm encountered mapped pages on the LRU it increase the pressure on 246 * slab to avoid swapping. 
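 *
 * As a rough illustration of the balancing below (example numbers only):
 * with a shrinker whose ->seeks is 2, nr_pages_scanned of 1000, a max_pass
 * of 500 freeable objects and roughly 100000 lru_pages, the scan target
 * works out to about (4 * 1000 / 2) * 500 / 100001, i.e. around 10 objects
 * on this pass; any unscanned remainder is carried over in nr_in_batch.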
247 * 248 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits. 249 * 250 * `lru_pages' represents the number of on-LRU pages in all the zones which 251 * are eligible for the caller's allocation attempt. It is used for balancing 252 * slab reclaim versus page reclaim. 253 * 254 * Returns the number of slab objects which we shrunk. 255 */ 256 unsigned long shrink_slab(struct shrink_control *shrink, 257 unsigned long nr_pages_scanned, 258 unsigned long lru_pages) 259 { 260 struct shrinker *shrinker; 261 unsigned long ret = 0; 262 263 if (nr_pages_scanned == 0) 264 nr_pages_scanned = SWAP_CLUSTER_MAX; 265 266 if (!down_read_trylock(&shrinker_rwsem)) { 267 /* Assume we'll be able to shrink next time */ 268 ret = 1; 269 goto out; 270 } 271 272 list_for_each_entry(shrinker, &shrinker_list, list) { 273 unsigned long long delta; 274 long total_scan; 275 long max_pass; 276 int shrink_ret = 0; 277 long nr; 278 long new_nr; 279 long batch_size = shrinker->batch ? shrinker->batch 280 : SHRINK_BATCH; 281 282 max_pass = do_shrinker_shrink(shrinker, shrink, 0); 283 if (max_pass <= 0) 284 continue; 285 286 /* 287 * copy the current shrinker scan count into a local variable 288 * and zero it so that other concurrent shrinker invocations 289 * don't also do this scanning work. 290 */ 291 nr = atomic_long_xchg(&shrinker->nr_in_batch, 0); 292 293 total_scan = nr; 294 delta = (4 * nr_pages_scanned) / shrinker->seeks; 295 delta *= max_pass; 296 do_div(delta, lru_pages + 1); 297 total_scan += delta; 298 if (total_scan < 0) { 299 printk(KERN_ERR "shrink_slab: %pF negative objects to " 300 "delete nr=%ld\n", 301 shrinker->shrink, total_scan); 302 total_scan = max_pass; 303 } 304 305 /* 306 * We need to avoid excessive windup on filesystem shrinkers 307 * due to large numbers of GFP_NOFS allocations causing the 308 * shrinkers to return -1 all the time. This results in a large 309 * nr being built up so when a shrink that can do some work 310 * comes along it empties the entire cache due to nr >>> 311 * max_pass. This is bad for sustaining a working set in 312 * memory. 313 * 314 * Hence only allow the shrinker to scan the entire cache when 315 * a large delta change is calculated directly. 316 */ 317 if (delta < max_pass / 4) 318 total_scan = min(total_scan, max_pass / 2); 319 320 /* 321 * Avoid risking looping forever due to too large nr value: 322 * never try to free more than twice the estimate number of 323 * freeable entries. 324 */ 325 if (total_scan > max_pass * 2) 326 total_scan = max_pass * 2; 327 328 trace_mm_shrink_slab_start(shrinker, shrink, nr, 329 nr_pages_scanned, lru_pages, 330 max_pass, delta, total_scan); 331 332 while (total_scan >= batch_size) { 333 int nr_before; 334 335 nr_before = do_shrinker_shrink(shrinker, shrink, 0); 336 shrink_ret = do_shrinker_shrink(shrinker, shrink, 337 batch_size); 338 if (shrink_ret == -1) 339 break; 340 if (shrink_ret < nr_before) 341 ret += nr_before - shrink_ret; 342 count_vm_events(SLABS_SCANNED, batch_size); 343 total_scan -= batch_size; 344 345 cond_resched(); 346 } 347 348 /* 349 * move the unused scan count back into the shrinker in a 350 * manner that handles concurrent updates. If we exhausted the 351 * scan, there is no need to do an update. 
352 */ 353 if (total_scan > 0) 354 new_nr = atomic_long_add_return(total_scan, 355 &shrinker->nr_in_batch); 356 else 357 new_nr = atomic_long_read(&shrinker->nr_in_batch); 358 359 trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr); 360 } 361 up_read(&shrinker_rwsem); 362 out: 363 cond_resched(); 364 return ret; 365 } 366 367 static void set_reclaim_mode(int priority, struct scan_control *sc, 368 bool sync) 369 { 370 reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC; 371 372 /* 373 * Initially assume we are entering either lumpy reclaim or 374 * reclaim/compaction.Depending on the order, we will either set the 375 * sync mode or just reclaim order-0 pages later. 376 */ 377 if (COMPACTION_BUILD) 378 sc->reclaim_mode = RECLAIM_MODE_COMPACTION; 379 else 380 sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM; 381 382 /* 383 * Avoid using lumpy reclaim or reclaim/compaction if possible by 384 * restricting when its set to either costly allocations or when 385 * under memory pressure 386 */ 387 if (sc->order > PAGE_ALLOC_COSTLY_ORDER) 388 sc->reclaim_mode |= syncmode; 389 else if (sc->order && priority < DEF_PRIORITY - 2) 390 sc->reclaim_mode |= syncmode; 391 else 392 sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC; 393 } 394 395 static void reset_reclaim_mode(struct scan_control *sc) 396 { 397 sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC; 398 } 399 400 static inline int is_page_cache_freeable(struct page *page) 401 { 402 /* 403 * A freeable page cache page is referenced only by the caller 404 * that isolated the page, the page cache radix tree and 405 * optional buffer heads at page->private. 406 */ 407 return page_count(page) - page_has_private(page) == 2; 408 } 409 410 static int may_write_to_queue(struct backing_dev_info *bdi, 411 struct scan_control *sc) 412 { 413 if (current->flags & PF_SWAPWRITE) 414 return 1; 415 if (!bdi_write_congested(bdi)) 416 return 1; 417 if (bdi == current->backing_dev_info) 418 return 1; 419 420 /* lumpy reclaim for hugepage often need a lot of write */ 421 if (sc->order > PAGE_ALLOC_COSTLY_ORDER) 422 return 1; 423 return 0; 424 } 425 426 /* 427 * We detected a synchronous write error writing a page out. Probably 428 * -ENOSPC. We need to propagate that into the address_space for a subsequent 429 * fsync(), msync() or close(). 430 * 431 * The tricky part is that after writepage we cannot touch the mapping: nothing 432 * prevents it from being freed up. But we have a ref on the page and once 433 * that page is locked, the mapping is pinned. 434 * 435 * We're allowed to run sleeping lock_page() here because we know the caller has 436 * __GFP_FS. 437 */ 438 static void handle_write_error(struct address_space *mapping, 439 struct page *page, int error) 440 { 441 lock_page(page); 442 if (page_mapping(page) == mapping) 443 mapping_set_error(mapping, error); 444 unlock_page(page); 445 } 446 447 /* possible outcome of pageout() */ 448 typedef enum { 449 /* failed to write page out, page is locked */ 450 PAGE_KEEP, 451 /* move page to the active list, page is locked */ 452 PAGE_ACTIVATE, 453 /* page has been sent to the disk successfully, page is unlocked */ 454 PAGE_SUCCESS, 455 /* page is clean and locked */ 456 PAGE_CLEAN, 457 } pageout_t; 458 459 /* 460 * pageout is called by shrink_page_list() for each dirty page. 461 * Calls ->writepage(). 
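 *
 * The writeback below is issued with WB_SYNC_NONE and ->for_reclaim set,
 * so a filesystem may refuse it by returning AOP_WRITEPAGE_ACTIVATE, in
 * which case pageout() returns PAGE_ACTIVATE and the caller moves the
 * page back to the active list.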
462 */ 463 static pageout_t pageout(struct page *page, struct address_space *mapping, 464 struct scan_control *sc) 465 { 466 /* 467 * If the page is dirty, only perform writeback if that write 468 * will be non-blocking. To prevent this allocation from being 469 * stalled by pagecache activity. But note that there may be 470 * stalls if we need to run get_block(). We could test 471 * PagePrivate for that. 472 * 473 * If this process is currently in __generic_file_aio_write() against 474 * this page's queue, we can perform writeback even if that 475 * will block. 476 * 477 * If the page is swapcache, write it back even if that would 478 * block, for some throttling. This happens by accident, because 479 * swap_backing_dev_info is bust: it doesn't reflect the 480 * congestion state of the swapdevs. Easy to fix, if needed. 481 */ 482 if (!is_page_cache_freeable(page)) 483 return PAGE_KEEP; 484 if (!mapping) { 485 /* 486 * Some data journaling orphaned pages can have 487 * page->mapping == NULL while being dirty with clean buffers. 488 */ 489 if (page_has_private(page)) { 490 if (try_to_free_buffers(page)) { 491 ClearPageDirty(page); 492 printk("%s: orphaned page\n", __func__); 493 return PAGE_CLEAN; 494 } 495 } 496 return PAGE_KEEP; 497 } 498 if (mapping->a_ops->writepage == NULL) 499 return PAGE_ACTIVATE; 500 if (!may_write_to_queue(mapping->backing_dev_info, sc)) 501 return PAGE_KEEP; 502 503 if (clear_page_dirty_for_io(page)) { 504 int res; 505 struct writeback_control wbc = { 506 .sync_mode = WB_SYNC_NONE, 507 .nr_to_write = SWAP_CLUSTER_MAX, 508 .range_start = 0, 509 .range_end = LLONG_MAX, 510 .for_reclaim = 1, 511 }; 512 513 SetPageReclaim(page); 514 res = mapping->a_ops->writepage(page, &wbc); 515 if (res < 0) 516 handle_write_error(mapping, page, res); 517 if (res == AOP_WRITEPAGE_ACTIVATE) { 518 ClearPageReclaim(page); 519 return PAGE_ACTIVATE; 520 } 521 522 if (!PageWriteback(page)) { 523 /* synchronous write or broken a_ops? */ 524 ClearPageReclaim(page); 525 } 526 trace_mm_vmscan_writepage(page, 527 trace_reclaim_flags(page, sc->reclaim_mode)); 528 inc_zone_page_state(page, NR_VMSCAN_WRITE); 529 return PAGE_SUCCESS; 530 } 531 532 return PAGE_CLEAN; 533 } 534 535 /* 536 * Same as remove_mapping, but if the page is removed from the mapping, it 537 * gets returned with a refcount of 0. 538 */ 539 static int __remove_mapping(struct address_space *mapping, struct page *page) 540 { 541 BUG_ON(!PageLocked(page)); 542 BUG_ON(mapping != page_mapping(page)); 543 544 spin_lock_irq(&mapping->tree_lock); 545 /* 546 * The non racy check for a busy page. 547 * 548 * Must be careful with the order of the tests. When someone has 549 * a ref to the page, it may be possible that they dirty it then 550 * drop the reference. So if PageDirty is tested before page_count 551 * here, then the following race may occur: 552 * 553 * get_user_pages(&page); 554 * [user mapping goes away] 555 * write_to(page); 556 * !PageDirty(page) [good] 557 * SetPageDirty(page); 558 * put_page(page); 559 * !page_count(page) [good, discard it] 560 * 561 * [oops, our write_to data is lost] 562 * 563 * Reversing the order of the tests ensures such a situation cannot 564 * escape unnoticed. The smp_rmb is needed to ensure the page->flags 565 * load is not satisfied before that of page->_count. 566 * 567 * Note that if SetPageDirty is always performed via set_page_dirty, 568 * and thus under tree_lock, then this ordering is not required. 
569 */ 570 if (!page_freeze_refs(page, 2)) 571 goto cannot_free; 572 /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */ 573 if (unlikely(PageDirty(page))) { 574 page_unfreeze_refs(page, 2); 575 goto cannot_free; 576 } 577 578 if (PageSwapCache(page)) { 579 swp_entry_t swap = { .val = page_private(page) }; 580 __delete_from_swap_cache(page); 581 spin_unlock_irq(&mapping->tree_lock); 582 swapcache_free(swap, page); 583 } else { 584 void (*freepage)(struct page *); 585 586 freepage = mapping->a_ops->freepage; 587 588 __delete_from_page_cache(page); 589 spin_unlock_irq(&mapping->tree_lock); 590 mem_cgroup_uncharge_cache_page(page); 591 592 if (freepage != NULL) 593 freepage(page); 594 } 595 596 return 1; 597 598 cannot_free: 599 spin_unlock_irq(&mapping->tree_lock); 600 return 0; 601 } 602 603 /* 604 * Attempt to detach a locked page from its ->mapping. If it is dirty or if 605 * someone else has a ref on the page, abort and return 0. If it was 606 * successfully detached, return 1. Assumes the caller has a single ref on 607 * this page. 608 */ 609 int remove_mapping(struct address_space *mapping, struct page *page) 610 { 611 if (__remove_mapping(mapping, page)) { 612 /* 613 * Unfreezing the refcount with 1 rather than 2 effectively 614 * drops the pagecache ref for us without requiring another 615 * atomic operation. 616 */ 617 page_unfreeze_refs(page, 1); 618 return 1; 619 } 620 return 0; 621 } 622 623 /** 624 * putback_lru_page - put previously isolated page onto appropriate LRU list 625 * @page: page to be put back to appropriate lru list 626 * 627 * Add previously isolated @page to appropriate LRU list. 628 * Page may still be unevictable for other reasons. 629 * 630 * lru_lock must not be held, interrupts must be enabled. 631 */ 632 void putback_lru_page(struct page *page) 633 { 634 int lru; 635 int active = !!TestClearPageActive(page); 636 int was_unevictable = PageUnevictable(page); 637 638 VM_BUG_ON(PageLRU(page)); 639 640 redo: 641 ClearPageUnevictable(page); 642 643 if (page_evictable(page, NULL)) { 644 /* 645 * For evictable pages, we can use the cache. 646 * In event of a race, worst case is we end up with an 647 * unevictable page on [in]active list. 648 * We know how to handle that. 649 */ 650 lru = active + page_lru_base_type(page); 651 lru_cache_add_lru(page, lru); 652 } else { 653 /* 654 * Put unevictable pages directly on zone's unevictable 655 * list. 656 */ 657 lru = LRU_UNEVICTABLE; 658 add_page_to_unevictable_list(page); 659 /* 660 * When racing with an mlock or AS_UNEVICTABLE clearing 661 * (page is unlocked) make sure that if the other thread 662 * does not observe our setting of PG_lru and fails 663 * isolation/check_move_unevictable_pages, 664 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move 665 * the page back to the evictable list. 666 * 667 * The other side is TestClearPageMlocked() or shmem_lock(). 668 */ 669 smp_mb(); 670 } 671 672 /* 673 * page's status can change while we move it among lru. If an evictable 674 * page is on unevictable list, it never be freed. To avoid that, 675 * check after we added it to the list, again. 676 */ 677 if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) { 678 if (!isolate_lru_page(page)) { 679 put_page(page); 680 goto redo; 681 } 682 /* This means someone else dropped this page from LRU 683 * So, it will be freed or putback to LRU again. There is 684 * nothing to do here. 
685 */ 686 } 687 688 if (was_unevictable && lru != LRU_UNEVICTABLE) 689 count_vm_event(UNEVICTABLE_PGRESCUED); 690 else if (!was_unevictable && lru == LRU_UNEVICTABLE) 691 count_vm_event(UNEVICTABLE_PGCULLED); 692 693 put_page(page); /* drop ref from isolate */ 694 } 695 696 enum page_references { 697 PAGEREF_RECLAIM, 698 PAGEREF_RECLAIM_CLEAN, 699 PAGEREF_KEEP, 700 PAGEREF_ACTIVATE, 701 }; 702 703 static enum page_references page_check_references(struct page *page, 704 struct mem_cgroup_zone *mz, 705 struct scan_control *sc) 706 { 707 int referenced_ptes, referenced_page; 708 unsigned long vm_flags; 709 710 referenced_ptes = page_referenced(page, 1, mz->mem_cgroup, &vm_flags); 711 referenced_page = TestClearPageReferenced(page); 712 713 /* Lumpy reclaim - ignore references */ 714 if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM) 715 return PAGEREF_RECLAIM; 716 717 /* 718 * Mlock lost the isolation race with us. Let try_to_unmap() 719 * move the page to the unevictable list. 720 */ 721 if (vm_flags & VM_LOCKED) 722 return PAGEREF_RECLAIM; 723 724 if (referenced_ptes) { 725 if (PageAnon(page)) 726 return PAGEREF_ACTIVATE; 727 /* 728 * All mapped pages start out with page table 729 * references from the instantiating fault, so we need 730 * to look twice if a mapped file page is used more 731 * than once. 732 * 733 * Mark it and spare it for another trip around the 734 * inactive list. Another page table reference will 735 * lead to its activation. 736 * 737 * Note: the mark is set for activated pages as well 738 * so that recently deactivated but used pages are 739 * quickly recovered. 740 */ 741 SetPageReferenced(page); 742 743 if (referenced_page || referenced_ptes > 1) 744 return PAGEREF_ACTIVATE; 745 746 /* 747 * Activate file-backed executable pages after first usage. 
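 * (e.g. a mapped page of program text or a shared library is promoted on
 * its first reference here, rather than needing the second reference that
 * other file-backed pages require.)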
748 */ 749 if (vm_flags & VM_EXEC) 750 return PAGEREF_ACTIVATE; 751 752 return PAGEREF_KEEP; 753 } 754 755 /* Reclaim if clean, defer dirty pages to writeback */ 756 if (referenced_page && !PageSwapBacked(page)) 757 return PAGEREF_RECLAIM_CLEAN; 758 759 return PAGEREF_RECLAIM; 760 } 761 762 /* 763 * shrink_page_list() returns the number of reclaimed pages 764 */ 765 static unsigned long shrink_page_list(struct list_head *page_list, 766 struct mem_cgroup_zone *mz, 767 struct scan_control *sc, 768 int priority, 769 unsigned long *ret_nr_dirty, 770 unsigned long *ret_nr_writeback) 771 { 772 LIST_HEAD(ret_pages); 773 LIST_HEAD(free_pages); 774 int pgactivate = 0; 775 unsigned long nr_dirty = 0; 776 unsigned long nr_congested = 0; 777 unsigned long nr_reclaimed = 0; 778 unsigned long nr_writeback = 0; 779 780 cond_resched(); 781 782 while (!list_empty(page_list)) { 783 enum page_references references; 784 struct address_space *mapping; 785 struct page *page; 786 int may_enter_fs; 787 788 cond_resched(); 789 790 page = lru_to_page(page_list); 791 list_del(&page->lru); 792 793 if (!trylock_page(page)) 794 goto keep; 795 796 VM_BUG_ON(PageActive(page)); 797 VM_BUG_ON(page_zone(page) != mz->zone); 798 799 sc->nr_scanned++; 800 801 if (unlikely(!page_evictable(page, NULL))) 802 goto cull_mlocked; 803 804 if (!sc->may_unmap && page_mapped(page)) 805 goto keep_locked; 806 807 /* Double the slab pressure for mapped and swapcache pages */ 808 if (page_mapped(page) || PageSwapCache(page)) 809 sc->nr_scanned++; 810 811 may_enter_fs = (sc->gfp_mask & __GFP_FS) || 812 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); 813 814 if (PageWriteback(page)) { 815 nr_writeback++; 816 /* 817 * Synchronous reclaim cannot queue pages for 818 * writeback due to the possibility of stack overflow 819 * but if it encounters a page under writeback, wait 820 * for the IO to complete. 821 */ 822 if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) && 823 may_enter_fs) 824 wait_on_page_writeback(page); 825 else { 826 unlock_page(page); 827 goto keep_lumpy; 828 } 829 } 830 831 references = page_check_references(page, mz, sc); 832 switch (references) { 833 case PAGEREF_ACTIVATE: 834 goto activate_locked; 835 case PAGEREF_KEEP: 836 goto keep_locked; 837 case PAGEREF_RECLAIM: 838 case PAGEREF_RECLAIM_CLEAN: 839 ; /* try to reclaim the page below */ 840 } 841 842 /* 843 * Anonymous process memory has backing store? 844 * Try to allocate it some swap space here. 845 */ 846 if (PageAnon(page) && !PageSwapCache(page)) { 847 if (!(sc->gfp_mask & __GFP_IO)) 848 goto keep_locked; 849 if (!add_to_swap(page)) 850 goto activate_locked; 851 may_enter_fs = 1; 852 } 853 854 mapping = page_mapping(page); 855 856 /* 857 * The page is mapped into the page tables of one or more 858 * processes. Try to unmap it here. 859 */ 860 if (page_mapped(page) && mapping) { 861 switch (try_to_unmap(page, TTU_UNMAP)) { 862 case SWAP_FAIL: 863 goto activate_locked; 864 case SWAP_AGAIN: 865 goto keep_locked; 866 case SWAP_MLOCK: 867 goto cull_mlocked; 868 case SWAP_SUCCESS: 869 ; /* try to free the page below */ 870 } 871 } 872 873 if (PageDirty(page)) { 874 nr_dirty++; 875 876 /* 877 * Only kswapd can writeback filesystem pages to 878 * avoid risk of stack overflow but do not writeback 879 * unless under significant pressure. 880 */ 881 if (page_is_file_cache(page) && 882 (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) { 883 /* 884 * Immediately reclaim when written back. 
885 * Similar in principal to deactivate_page() 886 * except we already have the page isolated 887 * and know it's dirty 888 */ 889 inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE); 890 SetPageReclaim(page); 891 892 goto keep_locked; 893 } 894 895 if (references == PAGEREF_RECLAIM_CLEAN) 896 goto keep_locked; 897 if (!may_enter_fs) 898 goto keep_locked; 899 if (!sc->may_writepage) 900 goto keep_locked; 901 902 /* Page is dirty, try to write it out here */ 903 switch (pageout(page, mapping, sc)) { 904 case PAGE_KEEP: 905 nr_congested++; 906 goto keep_locked; 907 case PAGE_ACTIVATE: 908 goto activate_locked; 909 case PAGE_SUCCESS: 910 if (PageWriteback(page)) 911 goto keep_lumpy; 912 if (PageDirty(page)) 913 goto keep; 914 915 /* 916 * A synchronous write - probably a ramdisk. Go 917 * ahead and try to reclaim the page. 918 */ 919 if (!trylock_page(page)) 920 goto keep; 921 if (PageDirty(page) || PageWriteback(page)) 922 goto keep_locked; 923 mapping = page_mapping(page); 924 case PAGE_CLEAN: 925 ; /* try to free the page below */ 926 } 927 } 928 929 /* 930 * If the page has buffers, try to free the buffer mappings 931 * associated with this page. If we succeed we try to free 932 * the page as well. 933 * 934 * We do this even if the page is PageDirty(). 935 * try_to_release_page() does not perform I/O, but it is 936 * possible for a page to have PageDirty set, but it is actually 937 * clean (all its buffers are clean). This happens if the 938 * buffers were written out directly, with submit_bh(). ext3 939 * will do this, as well as the blockdev mapping. 940 * try_to_release_page() will discover that cleanness and will 941 * drop the buffers and mark the page clean - it can be freed. 942 * 943 * Rarely, pages can have buffers and no ->mapping. These are 944 * the pages which were not successfully invalidated in 945 * truncate_complete_page(). We try to drop those buffers here 946 * and if that worked, and the page is no longer mapped into 947 * process address space (page_count == 1) it can be freed. 948 * Otherwise, leave the page on the LRU so it is swappable. 949 */ 950 if (page_has_private(page)) { 951 if (!try_to_release_page(page, sc->gfp_mask)) 952 goto activate_locked; 953 if (!mapping && page_count(page) == 1) { 954 unlock_page(page); 955 if (put_page_testzero(page)) 956 goto free_it; 957 else { 958 /* 959 * rare race with speculative reference. 960 * the speculative reference will free 961 * this page shortly, so we may 962 * increment nr_reclaimed here (and 963 * leave it off the LRU). 964 */ 965 nr_reclaimed++; 966 continue; 967 } 968 } 969 } 970 971 if (!mapping || !__remove_mapping(mapping, page)) 972 goto keep_locked; 973 974 /* 975 * At this point, we have no other references and there is 976 * no way to pick any more up (removed from LRU, removed 977 * from pagecache). Can use non-atomic bitops now (and 978 * we obviously don't have to worry about waking up a process 979 * waiting on the page lock, because there are no references. 980 */ 981 __clear_page_locked(page); 982 free_it: 983 nr_reclaimed++; 984 985 /* 986 * Is there need to periodically free_page_list? It would 987 * appear not as the counts should be low 988 */ 989 list_add(&page->lru, &free_pages); 990 continue; 991 992 cull_mlocked: 993 if (PageSwapCache(page)) 994 try_to_free_swap(page); 995 unlock_page(page); 996 putback_lru_page(page); 997 reset_reclaim_mode(sc); 998 continue; 999 1000 activate_locked: 1001 /* Not a candidate for swapping, so reclaim swap space. 
*/ 1002 if (PageSwapCache(page) && vm_swap_full()) 1003 try_to_free_swap(page); 1004 VM_BUG_ON(PageActive(page)); 1005 SetPageActive(page); 1006 pgactivate++; 1007 keep_locked: 1008 unlock_page(page); 1009 keep: 1010 reset_reclaim_mode(sc); 1011 keep_lumpy: 1012 list_add(&page->lru, &ret_pages); 1013 VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); 1014 } 1015 1016 /* 1017 * Tag a zone as congested if all the dirty pages encountered were 1018 * backed by a congested BDI. In this case, reclaimers should just 1019 * back off and wait for congestion to clear because further reclaim 1020 * will encounter the same problem. 1021 */ 1022 if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc)) 1023 zone_set_flag(mz->zone, ZONE_CONGESTED); 1024 1025 free_hot_cold_page_list(&free_pages, 1); 1026 1027 list_splice(&ret_pages, page_list); 1028 count_vm_events(PGACTIVATE, pgactivate); 1029 *ret_nr_dirty += nr_dirty; 1030 *ret_nr_writeback += nr_writeback; 1031 return nr_reclaimed; 1032 } 1033 1034 /* 1035 * Attempt to remove the specified page from its LRU. Only take this page 1036 * if it is of the appropriate PageActive status. Pages which are being 1037 * freed elsewhere are also ignored. 1038 * 1039 * page: page to consider 1040 * mode: one of the LRU isolation modes defined above 1041 * 1042 * returns 0 on success, -ve errno on failure. 1043 */ 1044 int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file) 1045 { 1046 bool all_lru_mode; 1047 int ret = -EINVAL; 1048 1049 /* Only take pages on the LRU. */ 1050 if (!PageLRU(page)) 1051 return ret; 1052 1053 all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) == 1054 (ISOLATE_ACTIVE|ISOLATE_INACTIVE); 1055 1056 /* 1057 * When checking the active state, we need to be sure we are 1058 * dealing with comparable boolean values. Take the logical not 1059 * of each. 1060 */ 1061 if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE)) 1062 return ret; 1063 1064 if (!all_lru_mode && !!page_is_file_cache(page) != file) 1065 return ret; 1066 1067 /* 1068 * When this function is being called for lumpy reclaim, we 1069 * initially look into all LRU pages, active, inactive and 1070 * unevictable; only give shrink_page_list evictable pages. 1071 */ 1072 if (PageUnevictable(page)) 1073 return ret; 1074 1075 ret = -EBUSY; 1076 1077 /* 1078 * To minimise LRU disruption, the caller can indicate that it only 1079 * wants to isolate pages it will be able to operate on without 1080 * blocking - clean pages for the most part. 1081 * 1082 * ISOLATE_CLEAN means that only clean pages should be isolated.
This 1083 * is used by reclaim when it cannot write to backing storage 1084 * 1085 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants pages 1086 * that can be migrated without blocking 1087 */ 1088 if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) { 1089 /* All the caller can do on PageWriteback is block */ 1090 if (PageWriteback(page)) 1091 return ret; 1092 1093 if (PageDirty(page)) { 1094 struct address_space *mapping; 1095 1096 /* ISOLATE_CLEAN means only clean pages */ 1097 if (mode & ISOLATE_CLEAN) 1098 return ret; 1099 1100 /* 1101 * Only pages without mappings or that have a 1102 * ->migratepage callback can be migrated 1103 * without blocking 1104 */ 1105 mapping = page_mapping(page); 1106 if (mapping && !mapping->a_ops->migratepage) 1107 return ret; 1108 } 1109 } 1110 1111 if ((mode & ISOLATE_UNMAPPED) && page_mapped(page)) 1112 return ret; 1113 1114 if (likely(get_page_unless_zero(page))) { 1115 /* 1116 * Be careful not to clear PageLRU until after we're 1117 * sure the page is not being freed elsewhere -- the 1118 * page release code relies on it. 1119 */ 1120 ClearPageLRU(page); 1121 ret = 0; 1122 } 1123 1124 return ret; 1125 } 1126 1127 /* 1128 * zone->lru_lock is heavily contended. Some of the functions that 1129 * shrink the lists perform better by taking out a batch of pages 1130 * and working on them outside the LRU lock. 1131 * 1132 * For pagecache intensive workloads, this function is the hottest 1133 * spot in the kernel (apart from copy_*_user functions). 1134 * 1135 * Appropriate locks must be held before calling this function. 1136 * 1137 * @nr_to_scan: The number of pages to look through on the list. 1138 * @mz: The mem_cgroup_zone to pull pages from. 1139 * @dst: The temp list to put pages on to. 1140 * @nr_scanned: The number of pages that were scanned. 1141 * @sc: The scan_control struct for this reclaim session 1142 * @mode: One of the LRU isolation modes 1143 * @active: True [1] if isolating active pages 1144 * @file: True [1] if isolating file [!anon] pages 1145 * 1146 * returns how many pages were moved onto *@dst.
1147 */ 1148 static unsigned long isolate_lru_pages(unsigned long nr_to_scan, 1149 struct mem_cgroup_zone *mz, struct list_head *dst, 1150 unsigned long *nr_scanned, struct scan_control *sc, 1151 isolate_mode_t mode, int active, int file) 1152 { 1153 struct lruvec *lruvec; 1154 struct list_head *src; 1155 unsigned long nr_taken = 0; 1156 unsigned long nr_lumpy_taken = 0; 1157 unsigned long nr_lumpy_dirty = 0; 1158 unsigned long nr_lumpy_failed = 0; 1159 unsigned long scan; 1160 int lru = LRU_BASE; 1161 1162 lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup); 1163 if (active) 1164 lru += LRU_ACTIVE; 1165 if (file) 1166 lru += LRU_FILE; 1167 src = &lruvec->lists[lru]; 1168 1169 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { 1170 struct page *page; 1171 unsigned long pfn; 1172 unsigned long end_pfn; 1173 unsigned long page_pfn; 1174 int zone_id; 1175 1176 page = lru_to_page(src); 1177 prefetchw_prev_lru_page(page, src, flags); 1178 1179 VM_BUG_ON(!PageLRU(page)); 1180 1181 switch (__isolate_lru_page(page, mode, file)) { 1182 case 0: 1183 mem_cgroup_lru_del(page); 1184 list_move(&page->lru, dst); 1185 nr_taken += hpage_nr_pages(page); 1186 break; 1187 1188 case -EBUSY: 1189 /* else it is being freed elsewhere */ 1190 list_move(&page->lru, src); 1191 continue; 1192 1193 default: 1194 BUG(); 1195 } 1196 1197 if (!sc->order || !(sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)) 1198 continue; 1199 1200 /* 1201 * Attempt to take all pages in the order aligned region 1202 * surrounding the tag page. Only take those pages of 1203 * the same active state as that tag page. We may safely 1204 * round the target page pfn down to the requested order 1205 * as the mem_map is guaranteed valid out to MAX_ORDER, 1206 * where that page is in a different zone we will detect 1207 * it from its zone id and abort this block scan. 1208 */ 1209 zone_id = page_zone_id(page); 1210 page_pfn = page_to_pfn(page); 1211 pfn = page_pfn & ~((1 << sc->order) - 1); 1212 end_pfn = pfn + (1 << sc->order); 1213 for (; pfn < end_pfn; pfn++) { 1214 struct page *cursor_page; 1215 1216 /* The target page is in the block, ignore it. */ 1217 if (unlikely(pfn == page_pfn)) 1218 continue; 1219 1220 /* Avoid holes within the zone. */ 1221 if (unlikely(!pfn_valid_within(pfn))) 1222 break; 1223 1224 cursor_page = pfn_to_page(pfn); 1225 1226 /* Check that we have not crossed a zone boundary. */ 1227 if (unlikely(page_zone_id(cursor_page) != zone_id)) 1228 break; 1229 1230 /* 1231 * If we don't have enough swap space, reclaiming of 1232 * anon page which don't already have a swap slot is 1233 * pointless. 1234 */ 1235 if (nr_swap_pages <= 0 && PageSwapBacked(cursor_page) && 1236 !PageSwapCache(cursor_page)) 1237 break; 1238 1239 if (__isolate_lru_page(cursor_page, mode, file) == 0) { 1240 unsigned int isolated_pages; 1241 1242 mem_cgroup_lru_del(cursor_page); 1243 list_move(&cursor_page->lru, dst); 1244 isolated_pages = hpage_nr_pages(cursor_page); 1245 nr_taken += isolated_pages; 1246 nr_lumpy_taken += isolated_pages; 1247 if (PageDirty(cursor_page)) 1248 nr_lumpy_dirty += isolated_pages; 1249 scan++; 1250 pfn += isolated_pages - 1; 1251 } else { 1252 /* 1253 * Check if the page is freed already. 1254 * 1255 * We can't use page_count() as that 1256 * requires compound_head and we don't 1257 * have a pin on the page here. If a 1258 * page is tail, we may or may not 1259 * have isolated the head, so assume 1260 * it's not free, it'd be tricky to 1261 * track the head status without a 1262 * page pin. 
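 * A non-tail cursor page with a zero refcount is already on its way back
 * to the page allocator, so it is simply skipped; any other failure here
 * aborts the block scan and is counted as a lumpy failure below.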
1263 */ 1264 if (!PageTail(cursor_page) && 1265 !atomic_read(&cursor_page->_count)) 1266 continue; 1267 break; 1268 } 1269 } 1270 1271 /* If we break out of the loop above, lumpy reclaim failed */ 1272 if (pfn < end_pfn) 1273 nr_lumpy_failed++; 1274 } 1275 1276 *nr_scanned = scan; 1277 1278 trace_mm_vmscan_lru_isolate(sc->order, 1279 nr_to_scan, scan, 1280 nr_taken, 1281 nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, 1282 mode, file); 1283 return nr_taken; 1284 } 1285 1286 /** 1287 * isolate_lru_page - tries to isolate a page from its LRU list 1288 * @page: page to isolate from its LRU list 1289 * 1290 * Isolates a @page from an LRU list, clears PageLRU and adjusts the 1291 * vmstat statistic corresponding to whatever LRU list the page was on. 1292 * 1293 * Returns 0 if the page was removed from an LRU list. 1294 * Returns -EBUSY if the page was not on an LRU list. 1295 * 1296 * The returned page will have PageLRU() cleared. If it was found on 1297 * the active list, it will have PageActive set. If it was found on 1298 * the unevictable list, it will have the PageUnevictable bit set. That flag 1299 * may need to be cleared by the caller before letting the page go. 1300 * 1301 * The vmstat statistic corresponding to the list on which the page was 1302 * found will be decremented. 1303 * 1304 * Restrictions: 1305 * (1) Must be called with an elevated refcount on the page. This is a 1306 * fundamentnal difference from isolate_lru_pages (which is called 1307 * without a stable reference). 1308 * (2) the lru_lock must not be held. 1309 * (3) interrupts must be enabled. 1310 */ 1311 int isolate_lru_page(struct page *page) 1312 { 1313 int ret = -EBUSY; 1314 1315 VM_BUG_ON(!page_count(page)); 1316 1317 if (PageLRU(page)) { 1318 struct zone *zone = page_zone(page); 1319 1320 spin_lock_irq(&zone->lru_lock); 1321 if (PageLRU(page)) { 1322 int lru = page_lru(page); 1323 ret = 0; 1324 get_page(page); 1325 ClearPageLRU(page); 1326 1327 del_page_from_lru_list(zone, page, lru); 1328 } 1329 spin_unlock_irq(&zone->lru_lock); 1330 } 1331 return ret; 1332 } 1333 1334 /* 1335 * Are there way too many processes in the direct reclaim path already? 1336 */ 1337 static int too_many_isolated(struct zone *zone, int file, 1338 struct scan_control *sc) 1339 { 1340 unsigned long inactive, isolated; 1341 1342 if (current_is_kswapd()) 1343 return 0; 1344 1345 if (!global_reclaim(sc)) 1346 return 0; 1347 1348 if (file) { 1349 inactive = zone_page_state(zone, NR_INACTIVE_FILE); 1350 isolated = zone_page_state(zone, NR_ISOLATED_FILE); 1351 } else { 1352 inactive = zone_page_state(zone, NR_INACTIVE_ANON); 1353 isolated = zone_page_state(zone, NR_ISOLATED_ANON); 1354 } 1355 1356 return isolated > inactive; 1357 } 1358 1359 static noinline_for_stack void 1360 putback_inactive_pages(struct mem_cgroup_zone *mz, 1361 struct list_head *page_list) 1362 { 1363 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz); 1364 struct zone *zone = mz->zone; 1365 LIST_HEAD(pages_to_free); 1366 1367 /* 1368 * Put back any unfreeable pages. 
1369 */ 1370 while (!list_empty(page_list)) { 1371 struct page *page = lru_to_page(page_list); 1372 int lru; 1373 1374 VM_BUG_ON(PageLRU(page)); 1375 list_del(&page->lru); 1376 if (unlikely(!page_evictable(page, NULL))) { 1377 spin_unlock_irq(&zone->lru_lock); 1378 putback_lru_page(page); 1379 spin_lock_irq(&zone->lru_lock); 1380 continue; 1381 } 1382 SetPageLRU(page); 1383 lru = page_lru(page); 1384 add_page_to_lru_list(zone, page, lru); 1385 if (is_active_lru(lru)) { 1386 int file = is_file_lru(lru); 1387 int numpages = hpage_nr_pages(page); 1388 reclaim_stat->recent_rotated[file] += numpages; 1389 } 1390 if (put_page_testzero(page)) { 1391 __ClearPageLRU(page); 1392 __ClearPageActive(page); 1393 del_page_from_lru_list(zone, page, lru); 1394 1395 if (unlikely(PageCompound(page))) { 1396 spin_unlock_irq(&zone->lru_lock); 1397 (*get_compound_page_dtor(page))(page); 1398 spin_lock_irq(&zone->lru_lock); 1399 } else 1400 list_add(&page->lru, &pages_to_free); 1401 } 1402 } 1403 1404 /* 1405 * To save our caller's stack, now use input list for pages to free. 1406 */ 1407 list_splice(&pages_to_free, page_list); 1408 } 1409 1410 static noinline_for_stack void 1411 update_isolated_counts(struct mem_cgroup_zone *mz, 1412 struct list_head *page_list, 1413 unsigned long *nr_anon, 1414 unsigned long *nr_file) 1415 { 1416 struct zone *zone = mz->zone; 1417 unsigned int count[NR_LRU_LISTS] = { 0, }; 1418 unsigned long nr_active = 0; 1419 struct page *page; 1420 int lru; 1421 1422 /* 1423 * Count pages and clear active flags 1424 */ 1425 list_for_each_entry(page, page_list, lru) { 1426 int numpages = hpage_nr_pages(page); 1427 lru = page_lru_base_type(page); 1428 if (PageActive(page)) { 1429 lru += LRU_ACTIVE; 1430 ClearPageActive(page); 1431 nr_active += numpages; 1432 } 1433 count[lru] += numpages; 1434 } 1435 1436 preempt_disable(); 1437 __count_vm_events(PGDEACTIVATE, nr_active); 1438 1439 __mod_zone_page_state(zone, NR_ACTIVE_FILE, 1440 -count[LRU_ACTIVE_FILE]); 1441 __mod_zone_page_state(zone, NR_INACTIVE_FILE, 1442 -count[LRU_INACTIVE_FILE]); 1443 __mod_zone_page_state(zone, NR_ACTIVE_ANON, 1444 -count[LRU_ACTIVE_ANON]); 1445 __mod_zone_page_state(zone, NR_INACTIVE_ANON, 1446 -count[LRU_INACTIVE_ANON]); 1447 1448 *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON]; 1449 *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE]; 1450 1451 __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon); 1452 __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file); 1453 preempt_enable(); 1454 } 1455 1456 /* 1457 * Returns true if a direct reclaim should wait on pages under writeback. 1458 * 1459 * If we are direct reclaiming for contiguous pages and we do not reclaim 1460 * everything in the list, try again and wait for writeback IO to complete. 1461 * This will stall high-order allocations noticeably. Only do that when really 1462 * need to free the pages under high memory pressure. 1463 */ 1464 static inline bool should_reclaim_stall(unsigned long nr_taken, 1465 unsigned long nr_freed, 1466 int priority, 1467 struct scan_control *sc) 1468 { 1469 int lumpy_stall_priority; 1470 1471 /* kswapd should not stall on sync IO */ 1472 if (current_is_kswapd()) 1473 return false; 1474 1475 /* Only stall on lumpy reclaim */ 1476 if (sc->reclaim_mode & RECLAIM_MODE_SINGLE) 1477 return false; 1478 1479 /* If we have reclaimed everything on the isolated list, no stall */ 1480 if (nr_freed == nr_taken) 1481 return false; 1482 1483 /* 1484 * For high-order allocations, there are two stall thresholds. 
1485 * High-cost allocations stall immediately where as lower 1486 * order allocations such as stacks require the scanning 1487 * priority to be much higher before stalling. 1488 */ 1489 if (sc->order > PAGE_ALLOC_COSTLY_ORDER) 1490 lumpy_stall_priority = DEF_PRIORITY; 1491 else 1492 lumpy_stall_priority = DEF_PRIORITY / 3; 1493 1494 return priority <= lumpy_stall_priority; 1495 } 1496 1497 /* 1498 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number 1499 * of reclaimed pages 1500 */ 1501 static noinline_for_stack unsigned long 1502 shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz, 1503 struct scan_control *sc, int priority, int file) 1504 { 1505 LIST_HEAD(page_list); 1506 unsigned long nr_scanned; 1507 unsigned long nr_reclaimed = 0; 1508 unsigned long nr_taken; 1509 unsigned long nr_anon; 1510 unsigned long nr_file; 1511 unsigned long nr_dirty = 0; 1512 unsigned long nr_writeback = 0; 1513 isolate_mode_t isolate_mode = ISOLATE_INACTIVE; 1514 struct zone *zone = mz->zone; 1515 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz); 1516 1517 while (unlikely(too_many_isolated(zone, file, sc))) { 1518 congestion_wait(BLK_RW_ASYNC, HZ/10); 1519 1520 /* We are about to die and free our memory. Return now. */ 1521 if (fatal_signal_pending(current)) 1522 return SWAP_CLUSTER_MAX; 1523 } 1524 1525 set_reclaim_mode(priority, sc, false); 1526 if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM) 1527 isolate_mode |= ISOLATE_ACTIVE; 1528 1529 lru_add_drain(); 1530 1531 if (!sc->may_unmap) 1532 isolate_mode |= ISOLATE_UNMAPPED; 1533 if (!sc->may_writepage) 1534 isolate_mode |= ISOLATE_CLEAN; 1535 1536 spin_lock_irq(&zone->lru_lock); 1537 1538 nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list, &nr_scanned, 1539 sc, isolate_mode, 0, file); 1540 if (global_reclaim(sc)) { 1541 zone->pages_scanned += nr_scanned; 1542 if (current_is_kswapd()) 1543 __count_zone_vm_events(PGSCAN_KSWAPD, zone, 1544 nr_scanned); 1545 else 1546 __count_zone_vm_events(PGSCAN_DIRECT, zone, 1547 nr_scanned); 1548 } 1549 spin_unlock_irq(&zone->lru_lock); 1550 1551 if (nr_taken == 0) 1552 return 0; 1553 1554 update_isolated_counts(mz, &page_list, &nr_anon, &nr_file); 1555 1556 nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority, 1557 &nr_dirty, &nr_writeback); 1558 1559 /* Check if we should syncronously wait for writeback */ 1560 if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) { 1561 set_reclaim_mode(priority, sc, true); 1562 nr_reclaimed += shrink_page_list(&page_list, mz, sc, 1563 priority, &nr_dirty, &nr_writeback); 1564 } 1565 1566 spin_lock_irq(&zone->lru_lock); 1567 1568 reclaim_stat->recent_scanned[0] += nr_anon; 1569 reclaim_stat->recent_scanned[1] += nr_file; 1570 1571 if (current_is_kswapd()) 1572 __count_vm_events(KSWAPD_STEAL, nr_reclaimed); 1573 __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed); 1574 1575 putback_inactive_pages(mz, &page_list); 1576 1577 __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon); 1578 __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file); 1579 1580 spin_unlock_irq(&zone->lru_lock); 1581 1582 free_hot_cold_page_list(&page_list, 1); 1583 1584 /* 1585 * If reclaim is isolating dirty pages under writeback, it implies 1586 * that the long-lived page allocation rate is exceeding the page 1587 * laundering rate. Either the global limits are not being effective 1588 * at throttling processes due to the page distribution throughout 1589 * zones or there is heavy usage of a slow backing device. 
The 1590 * only option is to throttle from reclaim context which is not ideal 1591 * as there is no guarantee the dirtying process is throttled in the 1592 * same way that balance_dirty_pages() does. 1593 * 1594 * This scales the number of dirty pages that must be under writeback 1595 * before throttling depending on priority. It is a simple backoff 1596 * function that has the most effect in the range DEF_PRIORITY to 1597 * DEF_PRIORITY-2, which is the range of priorities at which reclaim is 1598 * considered to be in trouble. 1599 * 1600 * DEF_PRIORITY 100% isolated pages must be PageWriteback to throttle 1601 * DEF_PRIORITY-1 50% must be PageWriteback 1602 * DEF_PRIORITY-2 25% must be PageWriteback, kswapd in trouble 1603 * ... 1604 * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any 1605 * isolated page is PageWriteback 1606 */ 1607 if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority))) 1608 wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10); 1609 1610 trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id, 1611 zone_idx(zone), 1612 nr_scanned, nr_reclaimed, 1613 priority, 1614 trace_shrink_flags(file, sc->reclaim_mode)); 1615 return nr_reclaimed; 1616 } 1617 1618 /* 1619 * This moves pages from the active list to the inactive list. 1620 * 1621 * We move them the other way if the page is referenced by one or more 1622 * processes, from rmap. 1623 * 1624 * If the pages are mostly unmapped, the processing is fast and it is 1625 * appropriate to hold zone->lru_lock across the whole operation. But if 1626 * the pages are mapped, the processing is slow (page_referenced()) so we 1627 * should drop zone->lru_lock around each page. It's impossible to balance 1628 * this, so instead we remove the pages from the LRU while processing them. 1629 * It is safe to rely on PG_active against the non-LRU pages in here because 1630 * nobody will play with that bit on a non-LRU page. 1631 * 1632 * The downside is that we have to touch page->_count against each page. 1633 * But we had to alter page->flags anyway.
1634 */ 1635 1636 static void move_active_pages_to_lru(struct zone *zone, 1637 struct list_head *list, 1638 struct list_head *pages_to_free, 1639 enum lru_list lru) 1640 { 1641 unsigned long pgmoved = 0; 1642 struct page *page; 1643 1644 while (!list_empty(list)) { 1645 struct lruvec *lruvec; 1646 1647 page = lru_to_page(list); 1648 1649 VM_BUG_ON(PageLRU(page)); 1650 SetPageLRU(page); 1651 1652 lruvec = mem_cgroup_lru_add_list(zone, page, lru); 1653 list_move(&page->lru, &lruvec->lists[lru]); 1654 pgmoved += hpage_nr_pages(page); 1655 1656 if (put_page_testzero(page)) { 1657 __ClearPageLRU(page); 1658 __ClearPageActive(page); 1659 del_page_from_lru_list(zone, page, lru); 1660 1661 if (unlikely(PageCompound(page))) { 1662 spin_unlock_irq(&zone->lru_lock); 1663 (*get_compound_page_dtor(page))(page); 1664 spin_lock_irq(&zone->lru_lock); 1665 } else 1666 list_add(&page->lru, pages_to_free); 1667 } 1668 } 1669 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved); 1670 if (!is_active_lru(lru)) 1671 __count_vm_events(PGDEACTIVATE, pgmoved); 1672 } 1673 1674 static void shrink_active_list(unsigned long nr_to_scan, 1675 struct mem_cgroup_zone *mz, 1676 struct scan_control *sc, 1677 int priority, int file) 1678 { 1679 unsigned long nr_taken; 1680 unsigned long nr_scanned; 1681 unsigned long vm_flags; 1682 LIST_HEAD(l_hold); /* The pages which were snipped off */ 1683 LIST_HEAD(l_active); 1684 LIST_HEAD(l_inactive); 1685 struct page *page; 1686 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz); 1687 unsigned long nr_rotated = 0; 1688 isolate_mode_t isolate_mode = ISOLATE_ACTIVE; 1689 struct zone *zone = mz->zone; 1690 1691 lru_add_drain(); 1692 1693 reset_reclaim_mode(sc); 1694 1695 if (!sc->may_unmap) 1696 isolate_mode |= ISOLATE_UNMAPPED; 1697 if (!sc->may_writepage) 1698 isolate_mode |= ISOLATE_CLEAN; 1699 1700 spin_lock_irq(&zone->lru_lock); 1701 1702 nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold, &nr_scanned, sc, 1703 isolate_mode, 1, file); 1704 if (global_reclaim(sc)) 1705 zone->pages_scanned += nr_scanned; 1706 1707 reclaim_stat->recent_scanned[file] += nr_taken; 1708 1709 __count_zone_vm_events(PGREFILL, zone, nr_scanned); 1710 if (file) 1711 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken); 1712 else 1713 __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken); 1714 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); 1715 spin_unlock_irq(&zone->lru_lock); 1716 1717 while (!list_empty(&l_hold)) { 1718 cond_resched(); 1719 page = lru_to_page(&l_hold); 1720 list_del(&page->lru); 1721 1722 if (unlikely(!page_evictable(page, NULL))) { 1723 putback_lru_page(page); 1724 continue; 1725 } 1726 1727 if (unlikely(buffer_heads_over_limit)) { 1728 if (page_has_private(page) && trylock_page(page)) { 1729 if (page_has_private(page)) 1730 try_to_release_page(page, 0); 1731 unlock_page(page); 1732 } 1733 } 1734 1735 if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) { 1736 nr_rotated += hpage_nr_pages(page); 1737 /* 1738 * Identify referenced, file-backed active pages and 1739 * give them one more trip around the active list. So 1740 * that executable code get better chances to stay in 1741 * memory under moderate memory pressure. Anon pages 1742 * are not likely to be evicted by use-once streaming 1743 * IO, plus JVM can create lots of anon VM_EXEC pages, 1744 * so we ignore them here. 
1745 */ 1746 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) { 1747 list_add(&page->lru, &l_active); 1748 continue; 1749 } 1750 } 1751 1752 ClearPageActive(page); /* we are de-activating */ 1753 list_add(&page->lru, &l_inactive); 1754 } 1755 1756 /* 1757 * Move pages back to the lru list. 1758 */ 1759 spin_lock_irq(&zone->lru_lock); 1760 /* 1761 * Count referenced pages from currently used mappings as rotated, 1762 * even though only some of them are actually re-activated. This 1763 * helps balance scan pressure between file and anonymous pages in 1764 * get_scan_ratio. 1765 */ 1766 reclaim_stat->recent_rotated[file] += nr_rotated; 1767 1768 move_active_pages_to_lru(zone, &l_active, &l_hold, 1769 LRU_ACTIVE + file * LRU_FILE); 1770 move_active_pages_to_lru(zone, &l_inactive, &l_hold, 1771 LRU_BASE + file * LRU_FILE); 1772 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); 1773 spin_unlock_irq(&zone->lru_lock); 1774 1775 free_hot_cold_page_list(&l_hold, 1); 1776 } 1777 1778 #ifdef CONFIG_SWAP 1779 static int inactive_anon_is_low_global(struct zone *zone) 1780 { 1781 unsigned long active, inactive; 1782 1783 active = zone_page_state(zone, NR_ACTIVE_ANON); 1784 inactive = zone_page_state(zone, NR_INACTIVE_ANON); 1785 1786 if (inactive * zone->inactive_ratio < active) 1787 return 1; 1788 1789 return 0; 1790 } 1791 1792 /** 1793 * inactive_anon_is_low - check if anonymous pages need to be deactivated 1794 * @zone: zone to check 1795 * @sc: scan control of this context 1796 * 1797 * Returns true if the zone does not have enough inactive anon pages, 1798 * meaning some active anon pages need to be deactivated. 1799 */ 1800 static int inactive_anon_is_low(struct mem_cgroup_zone *mz) 1801 { 1802 /* 1803 * If we don't have swap space, anonymous page deactivation 1804 * is pointless. 1805 */ 1806 if (!total_swap_pages) 1807 return 0; 1808 1809 if (!scanning_global_lru(mz)) 1810 return mem_cgroup_inactive_anon_is_low(mz->mem_cgroup, 1811 mz->zone); 1812 1813 return inactive_anon_is_low_global(mz->zone); 1814 } 1815 #else 1816 static inline int inactive_anon_is_low(struct mem_cgroup_zone *mz) 1817 { 1818 return 0; 1819 } 1820 #endif 1821 1822 static int inactive_file_is_low_global(struct zone *zone) 1823 { 1824 unsigned long active, inactive; 1825 1826 active = zone_page_state(zone, NR_ACTIVE_FILE); 1827 inactive = zone_page_state(zone, NR_INACTIVE_FILE); 1828 1829 return (active > inactive); 1830 } 1831 1832 /** 1833 * inactive_file_is_low - check if file pages need to be deactivated 1834 * @mz: memory cgroup and zone to check 1835 * 1836 * When the system is doing streaming IO, memory pressure here 1837 * ensures that active file pages get deactivated, until more 1838 * than half of the file pages are on the inactive list. 1839 * 1840 * Once we get to that situation, protect the system's working 1841 * set from being evicted by disabling active file page aging. 1842 * 1843 * This uses a different ratio than the anonymous pages, because 1844 * the page cache uses a use-once replacement algorithm. 
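 *
 * In effect the file lists aim for a simple 1:1 active/inactive split
 * (see inactive_file_is_low_global() above), whereas the anonymous lists
 * are balanced against zone->inactive_ratio in inactive_anon_is_low_global().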
1845 */ 1846 static int inactive_file_is_low(struct mem_cgroup_zone *mz) 1847 { 1848 if (!scanning_global_lru(mz)) 1849 return mem_cgroup_inactive_file_is_low(mz->mem_cgroup, 1850 mz->zone); 1851 1852 return inactive_file_is_low_global(mz->zone); 1853 } 1854 1855 static int inactive_list_is_low(struct mem_cgroup_zone *mz, int file) 1856 { 1857 if (file) 1858 return inactive_file_is_low(mz); 1859 else 1860 return inactive_anon_is_low(mz); 1861 } 1862 1863 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 1864 struct mem_cgroup_zone *mz, 1865 struct scan_control *sc, int priority) 1866 { 1867 int file = is_file_lru(lru); 1868 1869 if (is_active_lru(lru)) { 1870 if (inactive_list_is_low(mz, file)) 1871 shrink_active_list(nr_to_scan, mz, sc, priority, file); 1872 return 0; 1873 } 1874 1875 return shrink_inactive_list(nr_to_scan, mz, sc, priority, file); 1876 } 1877 1878 static int vmscan_swappiness(struct mem_cgroup_zone *mz, 1879 struct scan_control *sc) 1880 { 1881 if (global_reclaim(sc)) 1882 return vm_swappiness; 1883 return mem_cgroup_swappiness(mz->mem_cgroup); 1884 } 1885 1886 /* 1887 * Determine how aggressively the anon and file LRU lists should be 1888 * scanned. The relative value of each set of LRU lists is determined 1889 * by looking at the fraction of the pages scanned we did rotate back 1890 * onto the active list instead of evict. 1891 * 1892 * nr[0] = anon pages to scan; nr[1] = file pages to scan 1893 */ 1894 static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc, 1895 unsigned long *nr, int priority) 1896 { 1897 unsigned long anon, file, free; 1898 unsigned long anon_prio, file_prio; 1899 unsigned long ap, fp; 1900 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz); 1901 u64 fraction[2], denominator; 1902 enum lru_list lru; 1903 int noswap = 0; 1904 bool force_scan = false; 1905 1906 /* 1907 * If the zone or memcg is small, nr[l] can be 0. This 1908 * results in no scanning on this priority and a potential 1909 * priority drop. Global direct reclaim can go to the next 1910 * zone and tends to have no problems. Global kswapd is for 1911 * zone balancing and it needs to scan a minimum amount. When 1912 * reclaiming for a memcg, a priority drop can cause high 1913 * latencies, so it's better to scan a minimum amount there as 1914 * well. 1915 */ 1916 if (current_is_kswapd() && mz->zone->all_unreclaimable) 1917 force_scan = true; 1918 if (!global_reclaim(sc)) 1919 force_scan = true; 1920 1921 /* If we have no swap space, do not bother scanning anon pages. */ 1922 if (!sc->may_swap || (nr_swap_pages <= 0)) { 1923 noswap = 1; 1924 fraction[0] = 0; 1925 fraction[1] = 1; 1926 denominator = 1; 1927 goto out; 1928 } 1929 1930 anon = zone_nr_lru_pages(mz, LRU_ACTIVE_ANON) + 1931 zone_nr_lru_pages(mz, LRU_INACTIVE_ANON); 1932 file = zone_nr_lru_pages(mz, LRU_ACTIVE_FILE) + 1933 zone_nr_lru_pages(mz, LRU_INACTIVE_FILE); 1934 1935 if (global_reclaim(sc)) { 1936 free = zone_page_state(mz->zone, NR_FREE_PAGES); 1937 /* If we have very few page cache pages, 1938 force-scan anon pages. */ 1939 if (unlikely(file + free <= high_wmark_pages(mz->zone))) { 1940 fraction[0] = 1; 1941 fraction[1] = 0; 1942 denominator = 1; 1943 goto out; 1944 } 1945 } 1946 1947 /* 1948 * With swappiness at 100, anonymous and file have the same priority. 1949 * This scanning priority is essentially the inverse of IO cost. 
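 *
 * For example, at the default vm_swappiness of 60 under global reclaim,
 * anon_prio below is 60 and file_prio is 140, so with equal rotation
 * ratios the file LRUs receive roughly 140/60 (about 2.3 times) the scan
 * pressure of the anonymous LRUs.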
1950 */ 1951 anon_prio = vmscan_swappiness(mz, sc); 1952 file_prio = 200 - vmscan_swappiness(mz, sc); 1953 1954 /* 1955 * OK, so we have swap space and a fair amount of page cache 1956 * pages. We use the recently rotated / recently scanned 1957 * ratios to determine how valuable each cache is. 1958 * 1959 * Because workloads change over time (and to avoid overflow) 1960 * we keep these statistics as a floating average, which ends 1961 * up weighing recent references more than old ones. 1962 * 1963 * anon in [0], file in [1] 1964 */ 1965 spin_lock_irq(&mz->zone->lru_lock); 1966 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { 1967 reclaim_stat->recent_scanned[0] /= 2; 1968 reclaim_stat->recent_rotated[0] /= 2; 1969 } 1970 1971 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) { 1972 reclaim_stat->recent_scanned[1] /= 2; 1973 reclaim_stat->recent_rotated[1] /= 2; 1974 } 1975 1976 /* 1977 * The amount of pressure on anon vs file pages is inversely 1978 * proportional to the fraction of recently scanned pages on 1979 * each list that were recently referenced and in active use. 1980 */ 1981 ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1); 1982 ap /= reclaim_stat->recent_rotated[0] + 1; 1983 1984 fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1); 1985 fp /= reclaim_stat->recent_rotated[1] + 1; 1986 spin_unlock_irq(&mz->zone->lru_lock); 1987 1988 fraction[0] = ap; 1989 fraction[1] = fp; 1990 denominator = ap + fp + 1; 1991 out: 1992 for_each_evictable_lru(lru) { 1993 int file = is_file_lru(lru); 1994 unsigned long scan; 1995 1996 scan = zone_nr_lru_pages(mz, lru); 1997 if (priority || noswap) { 1998 scan >>= priority; 1999 if (!scan && force_scan) 2000 scan = SWAP_CLUSTER_MAX; 2001 scan = div64_u64(scan * fraction[file], denominator); 2002 } 2003 nr[lru] = scan; 2004 } 2005 } 2006 2007 /* 2008 * Reclaim/compaction depends on a number of pages being freed. To avoid 2009 * disruption to the system, a small number of order-0 pages continue to be 2010 * rotated and reclaimed in the normal fashion. However, by the time we get 2011 * back to the allocator and call try_to_compact_zone(), we ensure that 2012 * there are enough free pages for it to be likely successful 2013 */ 2014 static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz, 2015 unsigned long nr_reclaimed, 2016 unsigned long nr_scanned, 2017 struct scan_control *sc) 2018 { 2019 unsigned long pages_for_compaction; 2020 unsigned long inactive_lru_pages; 2021 2022 /* If not in reclaim/compaction mode, stop */ 2023 if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION)) 2024 return false; 2025 2026 /* Consider stopping depending on scan and reclaim activity */ 2027 if (sc->gfp_mask & __GFP_REPEAT) { 2028 /* 2029 * For __GFP_REPEAT allocations, stop reclaiming if the 2030 * full LRU list has been scanned and we are still failing 2031 * to reclaim pages. This full LRU scan is potentially 2032 * expensive but a __GFP_REPEAT caller really wants to succeed 2033 */ 2034 if (!nr_reclaimed && !nr_scanned) 2035 return false; 2036 } else { 2037 /* 2038 * For non-__GFP_REPEAT allocations which can presumably 2039 * fail without consequence, stop if we failed to reclaim 2040 * any pages from the last SWAP_CLUSTER_MAX number of 2041 * pages that were scanned. 
This will return to the 2042 * caller faster at the risk reclaim/compaction and 2043 * the resulting allocation attempt fails 2044 */ 2045 if (!nr_reclaimed) 2046 return false; 2047 } 2048 2049 /* 2050 * If we have not reclaimed enough pages for compaction and the 2051 * inactive lists are large enough, continue reclaiming 2052 */ 2053 pages_for_compaction = (2UL << sc->order); 2054 inactive_lru_pages = zone_nr_lru_pages(mz, LRU_INACTIVE_FILE); 2055 if (nr_swap_pages > 0) 2056 inactive_lru_pages += zone_nr_lru_pages(mz, LRU_INACTIVE_ANON); 2057 if (sc->nr_reclaimed < pages_for_compaction && 2058 inactive_lru_pages > pages_for_compaction) 2059 return true; 2060 2061 /* If compaction would go ahead or the allocation would succeed, stop */ 2062 switch (compaction_suitable(mz->zone, sc->order)) { 2063 case COMPACT_PARTIAL: 2064 case COMPACT_CONTINUE: 2065 return false; 2066 default: 2067 return true; 2068 } 2069 } 2070 2071 /* 2072 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 2073 */ 2074 static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz, 2075 struct scan_control *sc) 2076 { 2077 unsigned long nr[NR_LRU_LISTS]; 2078 unsigned long nr_to_scan; 2079 enum lru_list lru; 2080 unsigned long nr_reclaimed, nr_scanned; 2081 unsigned long nr_to_reclaim = sc->nr_to_reclaim; 2082 struct blk_plug plug; 2083 2084 restart: 2085 nr_reclaimed = 0; 2086 nr_scanned = sc->nr_scanned; 2087 get_scan_count(mz, sc, nr, priority); 2088 2089 blk_start_plug(&plug); 2090 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 2091 nr[LRU_INACTIVE_FILE]) { 2092 for_each_evictable_lru(lru) { 2093 if (nr[lru]) { 2094 nr_to_scan = min_t(unsigned long, 2095 nr[lru], SWAP_CLUSTER_MAX); 2096 nr[lru] -= nr_to_scan; 2097 2098 nr_reclaimed += shrink_list(lru, nr_to_scan, 2099 mz, sc, priority); 2100 } 2101 } 2102 /* 2103 * On large memory systems, scan >> priority can become 2104 * really large. This is fine for the starting priority; 2105 * we want to put equal scanning pressure on each zone. 2106 * However, if the VM has a harder time of freeing pages, 2107 * with multiple processes reclaiming pages, the total 2108 * freeing target can get unreasonably large. 2109 */ 2110 if (nr_reclaimed >= nr_to_reclaim) 2111 nr_to_reclaim = 0; 2112 else 2113 nr_to_reclaim -= nr_reclaimed; 2114 2115 if (!nr_to_reclaim && priority < DEF_PRIORITY) 2116 break; 2117 } 2118 blk_finish_plug(&plug); 2119 sc->nr_reclaimed += nr_reclaimed; 2120 2121 /* 2122 * Even if we did not try to evict anon pages at all, we want to 2123 * rebalance the anon lru active/inactive ratio. 
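 *
 * Illustrative sketch (not kernel code): the should_continue_reclaim()
 * test invoked just below can be restated as the following hypothetical,
 * self-contained helper; the sketch_* name and the flag parameters are
 * invented stand-ins for the scan_control state:
 *
 *	static int sketch_should_continue_reclaim(int compaction_mode,
 *						  int gfp_repeat,
 *						  unsigned long nr_reclaimed,
 *						  unsigned long nr_scanned,
 *						  unsigned long total_reclaimed,
 *						  int order,
 *						  unsigned long inactive_lru_pages,
 *						  int compaction_ready)
 *	{
 *		unsigned long pages_for_compaction = 2UL << order;
 *
 *		if (!compaction_mode)
 *			return 0;
 *		// __GFP_REPEAT callers only give up after a full LRU pass
 *		// reclaims nothing; others stop as soon as a batch fails.
 *		if (gfp_repeat ? (!nr_reclaimed && !nr_scanned)
 *			       : !nr_reclaimed)
 *			return 0;
 *		// Keep going while compaction still lacks raw material and
 *		// the inactive lists can still supply it.
 *		if (total_reclaimed < pages_for_compaction &&
 *		    inactive_lru_pages > pages_for_compaction)
 *			return 1;
 *		// Otherwise stop once compaction could go ahead anyway.
 *		return !compaction_ready;
 *	}
 *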
2124 */ 2125 if (inactive_anon_is_low(mz)) 2126 shrink_active_list(SWAP_CLUSTER_MAX, mz, sc, priority, 0); 2127 2128 /* reclaim/compaction might need reclaim to continue */ 2129 if (should_continue_reclaim(mz, nr_reclaimed, 2130 sc->nr_scanned - nr_scanned, sc)) 2131 goto restart; 2132 2133 throttle_vm_writeout(sc->gfp_mask); 2134 } 2135 2136 static void shrink_zone(int priority, struct zone *zone, 2137 struct scan_control *sc) 2138 { 2139 struct mem_cgroup *root = sc->target_mem_cgroup; 2140 struct mem_cgroup_reclaim_cookie reclaim = { 2141 .zone = zone, 2142 .priority = priority, 2143 }; 2144 struct mem_cgroup *memcg; 2145 2146 memcg = mem_cgroup_iter(root, NULL, &reclaim); 2147 do { 2148 struct mem_cgroup_zone mz = { 2149 .mem_cgroup = memcg, 2150 .zone = zone, 2151 }; 2152 2153 shrink_mem_cgroup_zone(priority, &mz, sc); 2154 /* 2155 * Limit reclaim has historically picked one memcg and 2156 * scanned it with decreasing priority levels until 2157 * nr_to_reclaim had been reclaimed. This priority 2158 * cycle is thus over after a single memcg. 2159 * 2160 * Direct reclaim and kswapd, on the other hand, have 2161 * to scan all memory cgroups to fulfill the overall 2162 * scan target for the zone. 2163 */ 2164 if (!global_reclaim(sc)) { 2165 mem_cgroup_iter_break(root, memcg); 2166 break; 2167 } 2168 memcg = mem_cgroup_iter(root, memcg, &reclaim); 2169 } while (memcg); 2170 } 2171 2172 /* Returns true if compaction should go ahead for a high-order request */ 2173 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) 2174 { 2175 unsigned long balance_gap, watermark; 2176 bool watermark_ok; 2177 2178 /* Do not consider compaction for orders reclaim is meant to satisfy */ 2179 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER) 2180 return false; 2181 2182 /* 2183 * Compaction takes time to run and there are potentially other 2184 * callers using the pages just freed. Continue reclaiming until 2185 * there is a buffer of free pages available to give compaction 2186 * a reasonable chance of completing and allocating the page 2187 */ 2188 balance_gap = min(low_wmark_pages(zone), 2189 (zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / 2190 KSWAPD_ZONE_BALANCE_GAP_RATIO); 2191 watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order); 2192 watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0); 2193 2194 /* 2195 * If compaction is deferred, reclaim up to a point where 2196 * compaction will have a chance of success when re-enabled 2197 */ 2198 if (compaction_deferred(zone, sc->order)) 2199 return watermark_ok; 2200 2201 /* If compaction is not ready to start, keep reclaiming */ 2202 if (!compaction_suitable(zone, sc->order)) 2203 return false; 2204 2205 return watermark_ok; 2206 } 2207 2208 /* 2209 * This is the direct reclaim path, for page-allocating processes. We only 2210 * try to reclaim pages from zones which will satisfy the caller's allocation 2211 * request. 2212 * 2213 * We reclaim from a zone even if that zone is over high_wmark_pages(zone). 2214 * Because: 2215 * a) The caller may be trying to free *extra* pages to satisfy a higher-order 2216 * allocation or 2217 * b) The target zone may be at high_wmark_pages(zone) but the lower zones 2218 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min' 2219 * zone defense algorithm. 2220 * 2221 * If a zone is deemed to be full of pinned pages then just give it a light 2222 * scan then give up on it. 
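 *
 * Illustrative sketch (not kernel code): the free-page target that
 * compaction_ready() above compares against can be written as this
 * hypothetical, self-contained helper, where gap_ratio stands in for
 * KSWAPD_ZONE_BALANCE_GAP_RATIO (a gap of roughly 1% of the zone, capped
 * by the low watermark):
 *
 *	static unsigned long sketch_compaction_watermark(unsigned long high_wmark,
 *							 unsigned long low_wmark,
 *							 unsigned long present_pages,
 *							 unsigned long gap_ratio,
 *							 int order)
 *	{
 *		unsigned long balance_gap =
 *			(present_pages + gap_ratio - 1) / gap_ratio;
 *
 *		if (balance_gap > low_wmark)
 *			balance_gap = low_wmark;
 *		// Leave room for the allocation itself plus compaction's
 *		// temporary needs on top of the usual high watermark.
 *		return high_wmark + balance_gap + (2UL << order);
 *	}
 *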
2223 * 2224 * This function returns true if a zone is being reclaimed for a costly 2225 * high-order allocation and compaction is ready to begin. This indicates to 2226 * the caller that it should consider retrying the allocation instead of 2227 * further reclaim. 2228 */ 2229 static bool shrink_zones(int priority, struct zonelist *zonelist, 2230 struct scan_control *sc) 2231 { 2232 struct zoneref *z; 2233 struct zone *zone; 2234 unsigned long nr_soft_reclaimed; 2235 unsigned long nr_soft_scanned; 2236 bool aborted_reclaim = false; 2237 2238 /* 2239 * If the number of buffer_heads in the machine exceeds the maximum 2240 * allowed level, force direct reclaim to scan the highmem zone as 2241 * highmem pages could be pinning lowmem pages storing buffer_heads 2242 */ 2243 if (buffer_heads_over_limit) 2244 sc->gfp_mask |= __GFP_HIGHMEM; 2245 2246 for_each_zone_zonelist_nodemask(zone, z, zonelist, 2247 gfp_zone(sc->gfp_mask), sc->nodemask) { 2248 if (!populated_zone(zone)) 2249 continue; 2250 /* 2251 * Take care memory controller reclaiming has small influence 2252 * to global LRU. 2253 */ 2254 if (global_reclaim(sc)) { 2255 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2256 continue; 2257 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2258 continue; /* Let kswapd poll it */ 2259 if (COMPACTION_BUILD) { 2260 /* 2261 * If we already have plenty of memory free for 2262 * compaction in this zone, don't free any more. 2263 * Even though compaction is invoked for any 2264 * non-zero order, only frequent costly order 2265 * reclamation is disruptive enough to become a 2266 * noticeable problem, like transparent huge 2267 * page allocations. 2268 */ 2269 if (compaction_ready(zone, sc)) { 2270 aborted_reclaim = true; 2271 continue; 2272 } 2273 } 2274 /* 2275 * This steals pages from memory cgroups over softlimit 2276 * and returns the number of reclaimed pages and 2277 * scanned pages. This works for global memory pressure 2278 * and balancing, not for a memcg's limit. 2279 */ 2280 nr_soft_scanned = 0; 2281 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, 2282 sc->order, sc->gfp_mask, 2283 &nr_soft_scanned); 2284 sc->nr_reclaimed += nr_soft_reclaimed; 2285 sc->nr_scanned += nr_soft_scanned; 2286 /* need some check for avoid more shrink_zone() */ 2287 } 2288 2289 shrink_zone(priority, zone, sc); 2290 } 2291 2292 return aborted_reclaim; 2293 } 2294 2295 static bool zone_reclaimable(struct zone *zone) 2296 { 2297 return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; 2298 } 2299 2300 /* All zones in zonelist are unreclaimable? */ 2301 static bool all_unreclaimable(struct zonelist *zonelist, 2302 struct scan_control *sc) 2303 { 2304 struct zoneref *z; 2305 struct zone *zone; 2306 2307 for_each_zone_zonelist_nodemask(zone, z, zonelist, 2308 gfp_zone(sc->gfp_mask), sc->nodemask) { 2309 if (!populated_zone(zone)) 2310 continue; 2311 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2312 continue; 2313 if (!zone->all_unreclaimable) 2314 return false; 2315 } 2316 2317 return true; 2318 } 2319 2320 /* 2321 * This is the main entry point to direct page reclaim. 2322 * 2323 * If a full scan of the inactive list fails to free enough memory then we 2324 * are "out of memory" and something needs to be killed. 2325 * 2326 * If the caller is !__GFP_FS then the probability of a failure is reasonably 2327 * high - the zone may be full of dirty or under-writeback pages, which this 2328 * caller can't do much about. 
We kick the writeback threads and take explicit 2329 * naps in the hope that some of these pages can be written. But if the 2330 * allocating task holds filesystem locks which prevent writeout this might not 2331 * work, and the allocation attempt will fail. 2332 * 2333 * returns: 0, if no pages reclaimed 2334 * else, the number of pages reclaimed 2335 */ 2336 static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 2337 struct scan_control *sc, 2338 struct shrink_control *shrink) 2339 { 2340 int priority; 2341 unsigned long total_scanned = 0; 2342 struct reclaim_state *reclaim_state = current->reclaim_state; 2343 struct zoneref *z; 2344 struct zone *zone; 2345 unsigned long writeback_threshold; 2346 bool aborted_reclaim; 2347 2348 delayacct_freepages_start(); 2349 2350 if (global_reclaim(sc)) 2351 count_vm_event(ALLOCSTALL); 2352 2353 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 2354 sc->nr_scanned = 0; 2355 if (!priority) 2356 disable_swap_token(sc->target_mem_cgroup); 2357 aborted_reclaim = shrink_zones(priority, zonelist, sc); 2358 2359 /* 2360 * Don't shrink slabs when reclaiming memory from 2361 * over limit cgroups 2362 */ 2363 if (global_reclaim(sc)) { 2364 unsigned long lru_pages = 0; 2365 for_each_zone_zonelist(zone, z, zonelist, 2366 gfp_zone(sc->gfp_mask)) { 2367 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2368 continue; 2369 2370 lru_pages += zone_reclaimable_pages(zone); 2371 } 2372 2373 shrink_slab(shrink, sc->nr_scanned, lru_pages); 2374 if (reclaim_state) { 2375 sc->nr_reclaimed += reclaim_state->reclaimed_slab; 2376 reclaim_state->reclaimed_slab = 0; 2377 } 2378 } 2379 total_scanned += sc->nr_scanned; 2380 if (sc->nr_reclaimed >= sc->nr_to_reclaim) 2381 goto out; 2382 2383 /* 2384 * Try to write back as many pages as we just scanned. This 2385 * tends to cause slow streaming writers to write data to the 2386 * disk smoothly, at the dirtying rate, which is nice. But 2387 * that's undesirable in laptop mode, where we *want* lumpy 2388 * writeout. So in laptop mode, write out the whole world. 2389 */ 2390 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2; 2391 if (total_scanned > writeback_threshold) { 2392 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned, 2393 WB_REASON_TRY_TO_FREE_PAGES); 2394 sc->may_writepage = 1; 2395 } 2396 2397 /* Take a nap, wait for some writeback to complete */ 2398 if (!sc->hibernation_mode && sc->nr_scanned && 2399 priority < DEF_PRIORITY - 2) { 2400 struct zone *preferred_zone; 2401 2402 first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask), 2403 &cpuset_current_mems_allowed, 2404 &preferred_zone); 2405 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10); 2406 } 2407 } 2408 2409 out: 2410 delayacct_freepages_end(); 2411 2412 if (sc->nr_reclaimed) 2413 return sc->nr_reclaimed; 2414 2415 /* 2416 * As hibernation is going on, kswapd is freezed so that it can't mark 2417 * the zone into all_unreclaimable. Thus bypassing all_unreclaimable 2418 * check. 2419 */ 2420 if (oom_killer_disabled) 2421 return 0; 2422 2423 /* Aborted reclaim to try compaction? don't OOM, then */ 2424 if (aborted_reclaim) 2425 return 1; 2426 2427 /* top priority shrink_zones still had more to do? 
don't OOM, then */ 2428 if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc)) 2429 return 1; 2430 2431 return 0; 2432 } 2433 2434 unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 2435 gfp_t gfp_mask, nodemask_t *nodemask) 2436 { 2437 unsigned long nr_reclaimed; 2438 struct scan_control sc = { 2439 .gfp_mask = gfp_mask, 2440 .may_writepage = !laptop_mode, 2441 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2442 .may_unmap = 1, 2443 .may_swap = 1, 2444 .order = order, 2445 .target_mem_cgroup = NULL, 2446 .nodemask = nodemask, 2447 }; 2448 struct shrink_control shrink = { 2449 .gfp_mask = sc.gfp_mask, 2450 }; 2451 2452 trace_mm_vmscan_direct_reclaim_begin(order, 2453 sc.may_writepage, 2454 gfp_mask); 2455 2456 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 2457 2458 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); 2459 2460 return nr_reclaimed; 2461 } 2462 2463 #ifdef CONFIG_CGROUP_MEM_RES_CTLR 2464 2465 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg, 2466 gfp_t gfp_mask, bool noswap, 2467 struct zone *zone, 2468 unsigned long *nr_scanned) 2469 { 2470 struct scan_control sc = { 2471 .nr_scanned = 0, 2472 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2473 .may_writepage = !laptop_mode, 2474 .may_unmap = 1, 2475 .may_swap = !noswap, 2476 .order = 0, 2477 .target_mem_cgroup = memcg, 2478 }; 2479 struct mem_cgroup_zone mz = { 2480 .mem_cgroup = memcg, 2481 .zone = zone, 2482 }; 2483 2484 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2485 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 2486 2487 trace_mm_vmscan_memcg_softlimit_reclaim_begin(0, 2488 sc.may_writepage, 2489 sc.gfp_mask); 2490 2491 /* 2492 * NOTE: Although we can get the priority field, using it 2493 * here is not a good idea, since it limits the pages we can scan. 2494 * if we don't reclaim here, the shrink_zone from balance_pgdat 2495 * will pick up pages from other mem cgroup's as well. We hack 2496 * the priority and make it zero. 2497 */ 2498 shrink_mem_cgroup_zone(0, &mz, &sc); 2499 2500 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 2501 2502 *nr_scanned = sc.nr_scanned; 2503 return sc.nr_reclaimed; 2504 } 2505 2506 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, 2507 gfp_t gfp_mask, 2508 bool noswap) 2509 { 2510 struct zonelist *zonelist; 2511 unsigned long nr_reclaimed; 2512 int nid; 2513 struct scan_control sc = { 2514 .may_writepage = !laptop_mode, 2515 .may_unmap = 1, 2516 .may_swap = !noswap, 2517 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2518 .order = 0, 2519 .target_mem_cgroup = memcg, 2520 .nodemask = NULL, /* we don't care the placement */ 2521 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2522 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), 2523 }; 2524 struct shrink_control shrink = { 2525 .gfp_mask = sc.gfp_mask, 2526 }; 2527 2528 /* 2529 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't 2530 * take care of from where we get pages. So the node where we start the 2531 * scan does not need to be the current node. 
2532 */ 2533 nid = mem_cgroup_select_victim_node(memcg); 2534 2535 zonelist = NODE_DATA(nid)->node_zonelists; 2536 2537 trace_mm_vmscan_memcg_reclaim_begin(0, 2538 sc.may_writepage, 2539 sc.gfp_mask); 2540 2541 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 2542 2543 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 2544 2545 return nr_reclaimed; 2546 } 2547 #endif 2548 2549 static void age_active_anon(struct zone *zone, struct scan_control *sc, 2550 int priority) 2551 { 2552 struct mem_cgroup *memcg; 2553 2554 if (!total_swap_pages) 2555 return; 2556 2557 memcg = mem_cgroup_iter(NULL, NULL, NULL); 2558 do { 2559 struct mem_cgroup_zone mz = { 2560 .mem_cgroup = memcg, 2561 .zone = zone, 2562 }; 2563 2564 if (inactive_anon_is_low(&mz)) 2565 shrink_active_list(SWAP_CLUSTER_MAX, &mz, 2566 sc, priority, 0); 2567 2568 memcg = mem_cgroup_iter(NULL, memcg, NULL); 2569 } while (memcg); 2570 } 2571 2572 /* 2573 * pgdat_balanced is used when checking if a node is balanced for high-order 2574 * allocations. Only zones that meet watermarks and are in a zone allowed 2575 * by the caller's classzone_idx are added to balanced_pages. The total of 2576 * balanced pages must be at least 25% of the zones allowed by classzone_idx 2577 * for the node to be considered balanced. Forcing all zones to be balanced 2578 * for high orders can cause excessive reclaim when there are imbalanced zones. 2579 * The choice of 25% is due to 2580 * o a 16M DMA zone that is balanced will not balance a zone on any 2581 * reasonable sized machine 2582 * o On all other machines, the top zone must be at least a reasonable 2583 * percentage of the middle zones. For example, on 32-bit x86, highmem 2584 * would need to be at least 256M for it to balance a whole node. 2585 * Similarly, on x86-64 the Normal zone would need to be at least 1G 2586 * to balance a node on its own. These seemed like reasonable ratios. 2587 */ 2588 static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages, 2589 int classzone_idx) 2590 { 2591 unsigned long present_pages = 0; 2592 int i; 2593 2594 for (i = 0; i <= classzone_idx; i++) 2595 present_pages += pgdat->node_zones[i].present_pages; 2596 2597 /* A special case here: if zone has no page, we think it's balanced */ 2598 return balanced_pages >= (present_pages >> 2); 2599 } 2600 2601 /* is kswapd sleeping prematurely? */ 2602 static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining, 2603 int classzone_idx) 2604 { 2605 int i; 2606 unsigned long balanced = 0; 2607 bool all_zones_ok = true; 2608 2609 /* If a direct reclaimer woke kswapd within HZ/10, it's premature */ 2610 if (remaining) 2611 return true; 2612 2613 /* Check the watermark levels */ 2614 for (i = 0; i <= classzone_idx; i++) { 2615 struct zone *zone = pgdat->node_zones + i; 2616 2617 if (!populated_zone(zone)) 2618 continue; 2619 2620 /* 2621 * balance_pgdat() skips over all_unreclaimable after 2622 * DEF_PRIORITY. Effectively, it considers them balanced so 2623 * they must be considered balanced here as well if kswapd 2624 * is to sleep 2625 */ 2626 if (zone->all_unreclaimable) { 2627 balanced += zone->present_pages; 2628 continue; 2629 } 2630 2631 if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone), 2632 i, 0)) 2633 all_zones_ok = false; 2634 else 2635 balanced += zone->present_pages; 2636 } 2637 2638 /* 2639 * For high-order requests, the balanced zones must contain at least 2640 * 25% of the node's pages for kswapd to sleep.
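 *
 * Illustrative sketch (not kernel code): the 25% rule just described is a
 * one-line comparison; the hypothetical, self-contained helper below
 * restates pgdat_balanced() with the per-zone sizes passed in directly:
 *
 *	static int sketch_pgdat_balanced(const unsigned long *zone_present,
 *					 int classzone_idx,
 *					 unsigned long balanced_pages)
 *	{
 *		unsigned long present = 0;
 *		int i;
 *
 *		// Only zones usable by the caller's classzone_idx count.
 *		for (i = 0; i <= classzone_idx; i++)
 *			present += zone_present[i];
 *		return balanced_pages >= (present >> 2);
 *	}
 *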
For order-0, all zones 2641 * must be balanced 2642 */ 2643 if (order) 2644 return !pgdat_balanced(pgdat, balanced, classzone_idx); 2645 else 2646 return !all_zones_ok; 2647 } 2648 2649 /* 2650 * For kswapd, balance_pgdat() will work across all this node's zones until 2651 * they are all at high_wmark_pages(zone). 2652 * 2653 * Returns the final order kswapd was reclaiming at 2654 * 2655 * There is special handling here for zones which are full of pinned pages. 2656 * This can happen if the pages are all mlocked, or if they are all used by 2657 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb. 2658 * What we do is to detect the case where all pages in the zone have been 2659 * scanned twice and there has been zero successful reclaim. Mark the zone as 2660 * dead and from now on, only perform a short scan. Basically we're polling 2661 * the zone for when the problem goes away. 2662 * 2663 * kswapd scans the zones in the highmem->normal->dma direction. It skips 2664 * zones which have free_pages > high_wmark_pages(zone), but once a zone is 2665 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the 2666 * lower zones regardless of the number of free pages in the lower zones. This 2667 * interoperates with the page allocator fallback scheme to ensure that aging 2668 * of pages is balanced across the zones. 2669 */ 2670 static unsigned long balance_pgdat(pg_data_t *pgdat, int order, 2671 int *classzone_idx) 2672 { 2673 int all_zones_ok; 2674 unsigned long balanced; 2675 int priority; 2676 int i; 2677 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 2678 unsigned long total_scanned; 2679 struct reclaim_state *reclaim_state = current->reclaim_state; 2680 unsigned long nr_soft_reclaimed; 2681 unsigned long nr_soft_scanned; 2682 struct scan_control sc = { 2683 .gfp_mask = GFP_KERNEL, 2684 .may_unmap = 1, 2685 .may_swap = 1, 2686 /* 2687 * kswapd doesn't want to be bailed out while reclaim. because 2688 * we want to put equal scanning pressure on each zone. 2689 */ 2690 .nr_to_reclaim = ULONG_MAX, 2691 .order = order, 2692 .target_mem_cgroup = NULL, 2693 }; 2694 struct shrink_control shrink = { 2695 .gfp_mask = sc.gfp_mask, 2696 }; 2697 loop_again: 2698 total_scanned = 0; 2699 sc.nr_reclaimed = 0; 2700 sc.may_writepage = !laptop_mode; 2701 count_vm_event(PAGEOUTRUN); 2702 2703 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 2704 unsigned long lru_pages = 0; 2705 int has_under_min_watermark_zone = 0; 2706 2707 /* The swap token gets in the way of swapout... */ 2708 if (!priority) 2709 disable_swap_token(NULL); 2710 2711 all_zones_ok = 1; 2712 balanced = 0; 2713 2714 /* 2715 * Scan in the highmem->dma direction for the highest 2716 * zone which needs scanning 2717 */ 2718 for (i = pgdat->nr_zones - 1; i >= 0; i--) { 2719 struct zone *zone = pgdat->node_zones + i; 2720 2721 if (!populated_zone(zone)) 2722 continue; 2723 2724 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2725 continue; 2726 2727 /* 2728 * Do some background aging of the anon list, to give 2729 * pages a chance to be referenced before reclaiming. 2730 */ 2731 age_active_anon(zone, &sc, priority); 2732 2733 /* 2734 * If the number of buffer_heads in the machine 2735 * exceeds the maximum allowed level and this node 2736 * has a highmem zone, force kswapd to reclaim from 2737 * it to relieve lowmem pressure. 
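 *
 * Illustrative sketch (not kernel code): the surrounding highmem->dma scan
 * picks the highest zone that still needs work. A hypothetical,
 * self-contained restatement, with the per-zone state passed as plain
 * arrays and the all_unreclaimable and congestion-flag handling omitted:
 *
 *	// Returns the highest zone index needing reclaim, or -1 if every
 *	// populated zone already meets its high watermark.
 *	static int sketch_find_end_zone(int nr_zones, const int *populated,
 *					const int *wmark_ok,
 *					const int *is_highmem,
 *					int buffer_heads_over_limit)
 *	{
 *		int i;
 *
 *		for (i = nr_zones - 1; i >= 0; i--) {
 *			if (!populated[i])
 *				continue;
 *			// Highmem pages can pin lowmem buffer_heads, so an
 *			// over-limit system reclaims highmem regardless of
 *			// its watermark.
 *			if (buffer_heads_over_limit && is_highmem[i])
 *				return i;
 *			if (!wmark_ok[i])
 *				return i;
 *		}
 *		return -1;
 *	}
 *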
2738 */ 2739 if (buffer_heads_over_limit && is_highmem_idx(i)) { 2740 end_zone = i; 2741 break; 2742 } 2743 2744 if (!zone_watermark_ok_safe(zone, order, 2745 high_wmark_pages(zone), 0, 0)) { 2746 end_zone = i; 2747 break; 2748 } else { 2749 /* If balanced, clear the congested flag */ 2750 zone_clear_flag(zone, ZONE_CONGESTED); 2751 } 2752 } 2753 if (i < 0) 2754 goto out; 2755 2756 for (i = 0; i <= end_zone; i++) { 2757 struct zone *zone = pgdat->node_zones + i; 2758 2759 lru_pages += zone_reclaimable_pages(zone); 2760 } 2761 2762 /* 2763 * Now scan the zone in the dma->highmem direction, stopping 2764 * at the last zone which needs scanning. 2765 * 2766 * We do this because the page allocator works in the opposite 2767 * direction. This prevents the page allocator from allocating 2768 * pages behind kswapd's direction of progress, which would 2769 * cause too much scanning of the lower zones. 2770 */ 2771 for (i = 0; i <= end_zone; i++) { 2772 struct zone *zone = pgdat->node_zones + i; 2773 int nr_slab, testorder; 2774 unsigned long balance_gap; 2775 2776 if (!populated_zone(zone)) 2777 continue; 2778 2779 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2780 continue; 2781 2782 sc.nr_scanned = 0; 2783 2784 nr_soft_scanned = 0; 2785 /* 2786 * Call soft limit reclaim before calling shrink_zone. 2787 */ 2788 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, 2789 order, sc.gfp_mask, 2790 &nr_soft_scanned); 2791 sc.nr_reclaimed += nr_soft_reclaimed; 2792 total_scanned += nr_soft_scanned; 2793 2794 /* 2795 * We put equal pressure on every zone, unless 2796 * one zone has way too many pages free 2797 * already. The "too many pages" is defined 2798 * as the high wmark plus a "gap" where the 2799 * gap is either the low watermark or 1% 2800 * of the zone, whichever is smaller. 2801 */ 2802 balance_gap = min(low_wmark_pages(zone), 2803 (zone->present_pages + 2804 KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / 2805 KSWAPD_ZONE_BALANCE_GAP_RATIO); 2806 /* 2807 * Kswapd reclaims only single pages with compaction 2808 * enabled. Trying too hard to reclaim until contiguous 2809 * free pages have become available can hurt performance 2810 * by evicting too much useful data from memory. 2811 * Do not reclaim more than needed for compaction. 2812 */ 2813 testorder = order; 2814 if (COMPACTION_BUILD && order && 2815 compaction_suitable(zone, order) != 2816 COMPACT_SKIPPED) 2817 testorder = 0; 2818 2819 if ((buffer_heads_over_limit && is_highmem_idx(i)) || 2820 !zone_watermark_ok_safe(zone, order, 2821 high_wmark_pages(zone) + balance_gap, 2822 end_zone, 0)) { 2823 shrink_zone(priority, zone, &sc); 2824 2825 reclaim_state->reclaimed_slab = 0; 2826 nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages); 2827 sc.nr_reclaimed += reclaim_state->reclaimed_slab; 2828 total_scanned += sc.nr_scanned; 2829 2830 if (nr_slab == 0 && !zone_reclaimable(zone)) 2831 zone->all_unreclaimable = 1; 2832 } 2833 2834 /* 2835 * If we've done a decent amount of scanning and 2836 * the reclaim ratio is low, start doing writepage 2837 * even in laptop mode 2838 */ 2839 if (total_scanned > SWAP_CLUSTER_MAX * 2 && 2840 total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2) 2841 sc.may_writepage = 1; 2842 2843 if (zone->all_unreclaimable) { 2844 if (end_zone && end_zone == i) 2845 end_zone--; 2846 continue; 2847 } 2848 2849 if (!zone_watermark_ok_safe(zone, testorder, 2850 high_wmark_pages(zone), end_zone, 0)) { 2851 all_zones_ok = 0; 2852 /* 2853 * We are still under min water mark. 
This 2854 * means that we have a GFP_ATOMIC allocation 2855 * failure risk. Hurry up! 2856 */ 2857 if (!zone_watermark_ok_safe(zone, order, 2858 min_wmark_pages(zone), end_zone, 0)) 2859 has_under_min_watermark_zone = 1; 2860 } else { 2861 /* 2862 * If a zone reaches its high watermark, 2863 * consider it to be no longer congested. It's 2864 * possible there are dirty pages backed by 2865 * congested BDIs but as pressure is relieved, 2866 * speculatively avoid congestion waits 2867 */ 2868 zone_clear_flag(zone, ZONE_CONGESTED); 2869 if (i <= *classzone_idx) 2870 balanced += zone->present_pages; 2871 } 2872 2873 } 2874 if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx))) 2875 break; /* kswapd: all done */ 2876 /* 2877 * OK, kswapd is getting into trouble. Take a nap, then take 2878 * another pass across the zones. 2879 */ 2880 if (total_scanned && (priority < DEF_PRIORITY - 2)) { 2881 if (has_under_min_watermark_zone) 2882 count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT); 2883 else 2884 congestion_wait(BLK_RW_ASYNC, HZ/10); 2885 } 2886 2887 /* 2888 * We do this so kswapd doesn't build up large priorities for 2889 * example when it is freeing in parallel with allocators. It 2890 * matches the direct reclaim path behaviour in terms of impact 2891 * on zone->*_priority. 2892 */ 2893 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX) 2894 break; 2895 } 2896 out: 2897 2898 /* 2899 * order-0: All zones must meet high watermark for a balanced node 2900 * high-order: Balanced zones must make up at least 25% of the node 2901 * for the node to be balanced 2902 */ 2903 if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) { 2904 cond_resched(); 2905 2906 try_to_freeze(); 2907 2908 /* 2909 * Fragmentation may mean that the system cannot be 2910 * rebalanced for high-order allocations in all zones. 2911 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX, 2912 * it means the zones have been fully scanned and are still 2913 * not balanced. For high-order allocations, there is 2914 * little point trying all over again as kswapd may 2915 * loop forever. 2916 * 2917 * Instead, recheck all watermarks at order-0 as they 2918 * are the most important. If watermarks are ok, kswapd will go 2919 * back to sleep. High-order users can still perform direct 2920 * reclaim if they wish. 2921 */ 2922 if (sc.nr_reclaimed < SWAP_CLUSTER_MAX) 2923 order = sc.order = 0; 2924 2925 goto loop_again; 2926 } 2927 2928 /* 2929 * If kswapd was reclaiming at a higher order, it has the option of 2930 * sleeping without all zones being balanced. Before it does, it must 2931 * ensure that the watermarks for order-0 on *all* zones are met and 2932 * that the congestion flags are cleared. The congestion flag must 2933 * be cleared as kswapd is the only mechanism that clears the flag 2934 * and it is potentially going to sleep here. 2935 */ 2936 if (order) { 2937 int zones_need_compaction = 1; 2938 2939 for (i = 0; i <= end_zone; i++) { 2940 struct zone *zone = pgdat->node_zones + i; 2941 2942 if (!populated_zone(zone)) 2943 continue; 2944 2945 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2946 continue; 2947 2948 /* Would compaction fail due to lack of free memory?
*/ 2949 if (compaction_suitable(zone, order) == COMPACT_SKIPPED) 2950 goto loop_again; 2951 2952 /* Confirm the zone is balanced for order-0 */ 2953 if (!zone_watermark_ok(zone, 0, 2954 high_wmark_pages(zone), 0, 0)) { 2955 order = sc.order = 0; 2956 goto loop_again; 2957 } 2958 2959 /* Check if the memory needs to be defragmented. */ 2960 if (zone_watermark_ok(zone, order, 2961 low_wmark_pages(zone), *classzone_idx, 0)) 2962 zones_need_compaction = 0; 2963 2964 /* If balanced, clear the congested flag */ 2965 zone_clear_flag(zone, ZONE_CONGESTED); 2966 } 2967 2968 if (zones_need_compaction) 2969 compact_pgdat(pgdat, order); 2970 } 2971 2972 /* 2973 * Return the order we were reclaiming at so sleeping_prematurely() 2974 * makes a decision on the order we were last reclaiming at. However, 2975 * if another caller entered the allocator slow path while kswapd 2976 * was awake, order will remain at the higher level 2977 */ 2978 *classzone_idx = end_zone; 2979 return order; 2980 } 2981 2982 static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx) 2983 { 2984 long remaining = 0; 2985 DEFINE_WAIT(wait); 2986 2987 if (freezing(current) || kthread_should_stop()) 2988 return; 2989 2990 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 2991 2992 /* Try to sleep for a short interval */ 2993 if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) { 2994 remaining = schedule_timeout(HZ/10); 2995 finish_wait(&pgdat->kswapd_wait, &wait); 2996 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 2997 } 2998 2999 /* 3000 * After a short sleep, check if it was a premature sleep. If not, then 3001 * go fully to sleep until explicitly woken up. 3002 */ 3003 if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) { 3004 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); 3005 3006 /* 3007 * vmstat counters are not perfectly accurate and the estimated 3008 * value for counters such as NR_FREE_PAGES can deviate from the 3009 * true value by nr_online_cpus * threshold. To avoid the zone 3010 * watermarks being breached while under pressure, we reduce the 3011 * per-cpu vmstat threshold while kswapd is awake and restore 3012 * them before going back to sleep. 3013 */ 3014 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 3015 schedule(); 3016 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); 3017 } else { 3018 if (remaining) 3019 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); 3020 else 3021 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); 3022 } 3023 finish_wait(&pgdat->kswapd_wait, &wait); 3024 } 3025 3026 /* 3027 * The background pageout daemon, started as a kernel thread 3028 * from the init process. 3029 * 3030 * This basically trickles out pages so that we have _some_ 3031 * free memory available even if there is no other activity 3032 * that frees anything up. This is needed for things like routing 3033 * etc, where we otherwise might have all activity going on in 3034 * asynchronous contexts that cannot page things out. 3035 * 3036 * If there are applications that are active memory-allocators 3037 * (most normal use), this basically shouldn't matter. 
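 *
 * Illustrative sketch (not kernel code): the two-stage nap in
 * kswapd_try_to_sleep() above follows the pattern sketched by this
 * hypothetical helper, where premature(), short_nap() and full_sleep()
 * stand in for sleeping_prematurely(), schedule_timeout(HZ/10) and the
 * schedule() call; the wait-queue and vmstat-threshold details are omitted:
 *
 *	static void sketch_try_to_sleep(int (*premature)(long remaining),
 *					long (*short_nap)(void),
 *					void (*full_sleep)(void))
 *	{
 *		long remaining = 0;
 *
 *		// Only nap at all if the node currently looks balanced.
 *		if (!premature(remaining))
 *			remaining = short_nap();
 *		// A waker arriving during the nap leaves remaining != 0 and
 *		// counts as premature; otherwise commit to a full sleep.
 *		if (!premature(remaining))
 *			full_sleep();
 *	}
 *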
3038 */ 3039 static int kswapd(void *p) 3040 { 3041 unsigned long order, new_order; 3042 unsigned balanced_order; 3043 int classzone_idx, new_classzone_idx; 3044 int balanced_classzone_idx; 3045 pg_data_t *pgdat = (pg_data_t*)p; 3046 struct task_struct *tsk = current; 3047 3048 struct reclaim_state reclaim_state = { 3049 .reclaimed_slab = 0, 3050 }; 3051 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 3052 3053 lockdep_set_current_reclaim_state(GFP_KERNEL); 3054 3055 if (!cpumask_empty(cpumask)) 3056 set_cpus_allowed_ptr(tsk, cpumask); 3057 current->reclaim_state = &reclaim_state; 3058 3059 /* 3060 * Tell the memory management that we're a "memory allocator", 3061 * and that if we need more memory we should get access to it 3062 * regardless (see "__alloc_pages()"). "kswapd" should 3063 * never get caught in the normal page freeing logic. 3064 * 3065 * (Kswapd normally doesn't need memory anyway, but sometimes 3066 * you need a small amount of memory in order to be able to 3067 * page out something else, and this flag essentially protects 3068 * us from recursively trying to free more memory as we're 3069 * trying to free the first piece of memory in the first place). 3070 */ 3071 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; 3072 set_freezable(); 3073 3074 order = new_order = 0; 3075 balanced_order = 0; 3076 classzone_idx = new_classzone_idx = pgdat->nr_zones - 1; 3077 balanced_classzone_idx = classzone_idx; 3078 for ( ; ; ) { 3079 int ret; 3080 3081 /* 3082 * If the last balance_pgdat was unsuccessful it's unlikely a 3083 * new request of a similar or harder type will succeed soon 3084 * so consider going to sleep on the basis of the order we reclaimed at 3085 */ 3086 if (balanced_classzone_idx >= new_classzone_idx && 3087 balanced_order == new_order) { 3088 new_order = pgdat->kswapd_max_order; 3089 new_classzone_idx = pgdat->classzone_idx; 3090 pgdat->kswapd_max_order = 0; 3091 pgdat->classzone_idx = pgdat->nr_zones - 1; 3092 } 3093 3094 if (order < new_order || classzone_idx > new_classzone_idx) { 3095 /* 3096 * Don't sleep if someone wants a larger 'order' 3097 * allocation or has tighter zone constraints 3098 */ 3099 order = new_order; 3100 classzone_idx = new_classzone_idx; 3101 } else { 3102 kswapd_try_to_sleep(pgdat, balanced_order, 3103 balanced_classzone_idx); 3104 order = pgdat->kswapd_max_order; 3105 classzone_idx = pgdat->classzone_idx; 3106 new_order = order; 3107 new_classzone_idx = classzone_idx; 3108 pgdat->kswapd_max_order = 0; 3109 pgdat->classzone_idx = pgdat->nr_zones - 1; 3110 } 3111 3112 ret = try_to_freeze(); 3113 if (kthread_should_stop()) 3114 break; 3115 3116 /* 3117 * We can speed up thawing tasks if we don't call balance_pgdat 3118 * after returning from the refrigerator 3119 */ 3120 if (!ret) { 3121 trace_mm_vmscan_kswapd_wake(pgdat->node_id, order); 3122 balanced_classzone_idx = classzone_idx; 3123 balanced_order = balance_pgdat(pgdat, order, 3124 &balanced_classzone_idx); 3125 } 3126 } 3127 return 0; 3128 } 3129 3130 /* 3131 * A zone is low on free memory, so wake its kswapd task to service it.
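 *
 * Illustrative sketch (not kernel code): the wakeup decision in
 * wakeup_kswapd() below reduces to the following hypothetical,
 * self-contained helper, with the flags passed as plain ints and the
 * trace point omitted:
 *
 *	static int sketch_should_wake_kswapd(int populated, int cpuset_allowed,
 *					     int kswapd_asleep,
 *					     int low_wmark_ok,
 *					     int order, int classzone_idx,
 *					     int *kswapd_max_order,
 *					     int *kswapd_classzone_idx)
 *	{
 *		if (!populated || !cpuset_allowed)
 *			return 0;
 *		// Record the hardest outstanding request so kswapd balances
 *		// for it even if no wakeup is issued right now.
 *		if (*kswapd_max_order < order) {
 *			*kswapd_max_order = order;
 *			if (classzone_idx < *kswapd_classzone_idx)
 *				*kswapd_classzone_idx = classzone_idx;
 *		}
 *		// Wake only a sleeping kswapd, and only below the low mark.
 *		return kswapd_asleep && !low_wmark_ok;
 *	}
 *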
3132 */ 3133 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) 3134 { 3135 pg_data_t *pgdat; 3136 3137 if (!populated_zone(zone)) 3138 return; 3139 3140 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 3141 return; 3142 pgdat = zone->zone_pgdat; 3143 if (pgdat->kswapd_max_order < order) { 3144 pgdat->kswapd_max_order = order; 3145 pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx); 3146 } 3147 if (!waitqueue_active(&pgdat->kswapd_wait)) 3148 return; 3149 if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0)) 3150 return; 3151 3152 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order); 3153 wake_up_interruptible(&pgdat->kswapd_wait); 3154 } 3155 3156 /* 3157 * The reclaimable count would be mostly accurate. 3158 * The less reclaimable pages may be 3159 * - mlocked pages, which will be moved to unevictable list when encountered 3160 * - mapped pages, which may require several travels to be reclaimed 3161 * - dirty pages, which is not "instantly" reclaimable 3162 */ 3163 unsigned long global_reclaimable_pages(void) 3164 { 3165 int nr; 3166 3167 nr = global_page_state(NR_ACTIVE_FILE) + 3168 global_page_state(NR_INACTIVE_FILE); 3169 3170 if (nr_swap_pages > 0) 3171 nr += global_page_state(NR_ACTIVE_ANON) + 3172 global_page_state(NR_INACTIVE_ANON); 3173 3174 return nr; 3175 } 3176 3177 unsigned long zone_reclaimable_pages(struct zone *zone) 3178 { 3179 int nr; 3180 3181 nr = zone_page_state(zone, NR_ACTIVE_FILE) + 3182 zone_page_state(zone, NR_INACTIVE_FILE); 3183 3184 if (nr_swap_pages > 0) 3185 nr += zone_page_state(zone, NR_ACTIVE_ANON) + 3186 zone_page_state(zone, NR_INACTIVE_ANON); 3187 3188 return nr; 3189 } 3190 3191 #ifdef CONFIG_HIBERNATION 3192 /* 3193 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of 3194 * freed pages. 3195 * 3196 * Rather than trying to age LRUs the aim is to preserve the overall 3197 * LRU order by reclaiming preferentially 3198 * inactive > active > active referenced > active mapped 3199 */ 3200 unsigned long shrink_all_memory(unsigned long nr_to_reclaim) 3201 { 3202 struct reclaim_state reclaim_state; 3203 struct scan_control sc = { 3204 .gfp_mask = GFP_HIGHUSER_MOVABLE, 3205 .may_swap = 1, 3206 .may_unmap = 1, 3207 .may_writepage = 1, 3208 .nr_to_reclaim = nr_to_reclaim, 3209 .hibernation_mode = 1, 3210 .order = 0, 3211 }; 3212 struct shrink_control shrink = { 3213 .gfp_mask = sc.gfp_mask, 3214 }; 3215 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 3216 struct task_struct *p = current; 3217 unsigned long nr_reclaimed; 3218 3219 p->flags |= PF_MEMALLOC; 3220 lockdep_set_current_reclaim_state(sc.gfp_mask); 3221 reclaim_state.reclaimed_slab = 0; 3222 p->reclaim_state = &reclaim_state; 3223 3224 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 3225 3226 p->reclaim_state = NULL; 3227 lockdep_clear_current_reclaim_state(); 3228 p->flags &= ~PF_MEMALLOC; 3229 3230 return nr_reclaimed; 3231 } 3232 #endif /* CONFIG_HIBERNATION */ 3233 3234 /* It's optimal to keep kswapds on the same CPUs as their memory, but 3235 not required for correctness. So if the last cpu in a node goes 3236 away, we get changed to run anywhere: as the first one comes back, 3237 restore their cpu bindings. 
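 *
 * Illustrative sketch (not kernel code): zone_reclaimable_pages() above
 * also feeds the zone_reclaimable() test used earlier in this file, where a
 * zone stays reclaimable until it has been scanned six times over without
 * progress. A hypothetical, self-contained combination of the two:
 *
 *	static int sketch_zone_reclaimable(unsigned long active_file,
 *					   unsigned long inactive_file,
 *					   unsigned long active_anon,
 *					   unsigned long inactive_anon,
 *					   unsigned long nr_swap_pages,
 *					   unsigned long pages_scanned)
 *	{
 *		unsigned long reclaimable = active_file + inactive_file;
 *
 *		// Anon pages only count when there is swap to put them in.
 *		if (nr_swap_pages > 0)
 *			reclaimable += active_anon + inactive_anon;
 *		return pages_scanned < reclaimable * 6;
 *	}
 *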
*/ 3238 static int __devinit cpu_callback(struct notifier_block *nfb, 3239 unsigned long action, void *hcpu) 3240 { 3241 int nid; 3242 3243 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) { 3244 for_each_node_state(nid, N_HIGH_MEMORY) { 3245 pg_data_t *pgdat = NODE_DATA(nid); 3246 const struct cpumask *mask; 3247 3248 mask = cpumask_of_node(pgdat->node_id); 3249 3250 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 3251 /* One of our CPUs online: restore mask */ 3252 set_cpus_allowed_ptr(pgdat->kswapd, mask); 3253 } 3254 } 3255 return NOTIFY_OK; 3256 } 3257 3258 /* 3259 * This kswapd start function will be called by init and node-hot-add. 3260 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added. 3261 */ 3262 int kswapd_run(int nid) 3263 { 3264 pg_data_t *pgdat = NODE_DATA(nid); 3265 int ret = 0; 3266 3267 if (pgdat->kswapd) 3268 return 0; 3269 3270 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); 3271 if (IS_ERR(pgdat->kswapd)) { 3272 /* failure at boot is fatal */ 3273 BUG_ON(system_state == SYSTEM_BOOTING); 3274 printk("Failed to start kswapd on node %d\n",nid); 3275 ret = -1; 3276 } 3277 return ret; 3278 } 3279 3280 /* 3281 * Called by memory hotplug when all memory in a node is offlined. 3282 */ 3283 void kswapd_stop(int nid) 3284 { 3285 struct task_struct *kswapd = NODE_DATA(nid)->kswapd; 3286 3287 if (kswapd) 3288 kthread_stop(kswapd); 3289 } 3290 3291 static int __init kswapd_init(void) 3292 { 3293 int nid; 3294 3295 swap_setup(); 3296 for_each_node_state(nid, N_HIGH_MEMORY) 3297 kswapd_run(nid); 3298 hotcpu_notifier(cpu_callback, 0); 3299 return 0; 3300 } 3301 3302 module_init(kswapd_init) 3303 3304 #ifdef CONFIG_NUMA 3305 /* 3306 * Zone reclaim mode 3307 * 3308 * If non-zero call zone_reclaim when the number of free pages falls below 3309 * the watermarks. 3310 */ 3311 int zone_reclaim_mode __read_mostly; 3312 3313 #define RECLAIM_OFF 0 3314 #define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */ 3315 #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ 3316 #define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */ 3317 3318 /* 3319 * Priority for ZONE_RECLAIM. This determines the fraction of pages 3320 * of a node considered for each zone_reclaim. 4 scans 1/16th of 3321 * a zone. 3322 */ 3323 #define ZONE_RECLAIM_PRIORITY 4 3324 3325 /* 3326 * Percentage of pages in a zone that must be unmapped for zone_reclaim to 3327 * occur. 3328 */ 3329 int sysctl_min_unmapped_ratio = 1; 3330 3331 /* 3332 * If the number of slab pages in a zone grows beyond this percentage then 3333 * slab reclaim needs to occur. 3334 */ 3335 int sysctl_min_slab_ratio = 5; 3336 3337 static inline unsigned long zone_unmapped_file_pages(struct zone *zone) 3338 { 3339 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED); 3340 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) + 3341 zone_page_state(zone, NR_ACTIVE_FILE); 3342 3343 /* 3344 * It's possible for there to be more file mapped pages than 3345 * accounted for by the pages on the file LRU lists because 3346 * tmpfs pages accounted for as ANON can also be FILE_MAPPED 3347 */ 3348 return (file_lru > file_mapped) ?
(file_lru - file_mapped) : 0; 3349 } 3350 3351 /* Work out how many page cache pages we can reclaim in this reclaim_mode */ 3352 static long zone_pagecache_reclaimable(struct zone *zone) 3353 { 3354 long nr_pagecache_reclaimable; 3355 long delta = 0; 3356 3357 /* 3358 * If RECLAIM_SWAP is set, then all file pages are considered 3359 * potentially reclaimable. Otherwise, we have to worry about 3360 * pages like swapcache and zone_unmapped_file_pages() provides 3361 * a better estimate 3362 */ 3363 if (zone_reclaim_mode & RECLAIM_SWAP) 3364 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES); 3365 else 3366 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone); 3367 3368 /* If we can't clean pages, remove dirty pages from consideration */ 3369 if (!(zone_reclaim_mode & RECLAIM_WRITE)) 3370 delta += zone_page_state(zone, NR_FILE_DIRTY); 3371 3372 /* Watch for any possible underflows due to delta */ 3373 if (unlikely(delta > nr_pagecache_reclaimable)) 3374 delta = nr_pagecache_reclaimable; 3375 3376 return nr_pagecache_reclaimable - delta; 3377 } 3378 3379 /* 3380 * Try to free up some pages from this zone through reclaim. 3381 */ 3382 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 3383 { 3384 /* Minimum pages needed in order to stay on node */ 3385 const unsigned long nr_pages = 1 << order; 3386 struct task_struct *p = current; 3387 struct reclaim_state reclaim_state; 3388 int priority; 3389 struct scan_control sc = { 3390 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), 3391 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP), 3392 .may_swap = 1, 3393 .nr_to_reclaim = max_t(unsigned long, nr_pages, 3394 SWAP_CLUSTER_MAX), 3395 .gfp_mask = gfp_mask, 3396 .order = order, 3397 }; 3398 struct shrink_control shrink = { 3399 .gfp_mask = sc.gfp_mask, 3400 }; 3401 unsigned long nr_slab_pages0, nr_slab_pages1; 3402 3403 cond_resched(); 3404 /* 3405 * We need to be able to allocate from the reserves for RECLAIM_SWAP 3406 * and we also need to be able to write out pages for RECLAIM_WRITE 3407 * and RECLAIM_SWAP. 3408 */ 3409 p->flags |= PF_MEMALLOC | PF_SWAPWRITE; 3410 lockdep_set_current_reclaim_state(gfp_mask); 3411 reclaim_state.reclaimed_slab = 0; 3412 p->reclaim_state = &reclaim_state; 3413 3414 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) { 3415 /* 3416 * Free memory by calling shrink zone with increasing 3417 * priorities until we have enough memory freed. 3418 */ 3419 priority = ZONE_RECLAIM_PRIORITY; 3420 do { 3421 shrink_zone(priority, zone, &sc); 3422 priority--; 3423 } while (priority >= 0 && sc.nr_reclaimed < nr_pages); 3424 } 3425 3426 nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); 3427 if (nr_slab_pages0 > zone->min_slab_pages) { 3428 /* 3429 * shrink_slab() does not currently allow us to determine how 3430 * many pages were freed in this zone. So we take the current 3431 * number of slab pages and shake the slab until it is reduced 3432 * by the same nr_pages that we used for reclaiming unmapped 3433 * pages. 3434 * 3435 * Note that shrink_slab will free memory on all zones and may 3436 * take a long time. 
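 *
 * Illustrative sketch (not kernel code): zone_pagecache_reclaimable()
 * above is plain bookkeeping; the hypothetical, self-contained helper
 * below restates it with the counters and mode bits passed in directly:
 *
 *	static long sketch_pagecache_reclaimable(unsigned long nr_file_pages,
 *						 unsigned long file_lru,
 *						 unsigned long file_mapped,
 *						 unsigned long file_dirty,
 *						 int reclaim_swap,
 *						 int reclaim_write)
 *	{
 *		unsigned long reclaimable, delta = 0;
 *
 *		// Without RECLAIM_SWAP only unmapped file pages count; note
 *		// that tmpfs can make file_mapped exceed the file LRU size.
 *		if (reclaim_swap)
 *			reclaimable = nr_file_pages;
 *		else
 *			reclaimable = file_lru > file_mapped ?
 *				      file_lru - file_mapped : 0;
 *		// Without RECLAIM_WRITE dirty pages cannot be cleaned here.
 *		if (!reclaim_write)
 *			delta = file_dirty;
 *		if (delta > reclaimable)
 *			delta = reclaimable;
 *		return reclaimable - delta;
 *	}
 *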
3437 */ 3438 for (;;) { 3439 unsigned long lru_pages = zone_reclaimable_pages(zone); 3440 3441 /* No reclaimable slab or very low memory pressure */ 3442 if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages)) 3443 break; 3444 3445 /* Freed enough memory */ 3446 nr_slab_pages1 = zone_page_state(zone, 3447 NR_SLAB_RECLAIMABLE); 3448 if (nr_slab_pages1 + nr_pages <= nr_slab_pages0) 3449 break; 3450 } 3451 3452 /* 3453 * Update nr_reclaimed by the number of slab pages we 3454 * reclaimed from this zone. 3455 */ 3456 nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); 3457 if (nr_slab_pages1 < nr_slab_pages0) 3458 sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1; 3459 } 3460 3461 p->reclaim_state = NULL; 3462 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); 3463 lockdep_clear_current_reclaim_state(); 3464 return sc.nr_reclaimed >= nr_pages; 3465 } 3466 3467 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 3468 { 3469 int node_id; 3470 int ret; 3471 3472 /* 3473 * Zone reclaim reclaims unmapped file backed pages and 3474 * slab pages if we are over the defined limits. 3475 * 3476 * A small portion of unmapped file backed pages is needed for 3477 * file I/O otherwise pages read by file I/O will be immediately 3478 * thrown out if the zone is overallocated. So we do not reclaim 3479 * if less than a specified percentage of the zone is used by 3480 * unmapped file backed pages. 3481 */ 3482 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages && 3483 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages) 3484 return ZONE_RECLAIM_FULL; 3485 3486 if (zone->all_unreclaimable) 3487 return ZONE_RECLAIM_FULL; 3488 3489 /* 3490 * Do not scan if the allocation should not be delayed. 3491 */ 3492 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC)) 3493 return ZONE_RECLAIM_NOSCAN; 3494 3495 /* 3496 * Only run zone reclaim on the local zone or on zones that do not 3497 * have associated processors. This will favor the local processor 3498 * over remote processors and spread off node memory allocations 3499 * as wide as possible. 3500 */ 3501 node_id = zone_to_nid(zone); 3502 if (node_state(node_id, N_CPU) && node_id != numa_node_id()) 3503 return ZONE_RECLAIM_NOSCAN; 3504 3505 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED)) 3506 return ZONE_RECLAIM_NOSCAN; 3507 3508 ret = __zone_reclaim(zone, gfp_mask, order); 3509 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED); 3510 3511 if (!ret) 3512 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); 3513 3514 return ret; 3515 } 3516 #endif 3517 3518 /* 3519 * page_evictable - test whether a page is evictable 3520 * @page: the page to test 3521 * @vma: the VMA in which the page is or will be mapped, may be NULL 3522 * 3523 * Test whether page is evictable--i.e., should be placed on active/inactive 3524 * lists vs unevictable list. The vma argument is !NULL when called from the 3525 * fault path to determine how to instantiate a new page.
3526 * 3527 * Reasons page might not be evictable: 3528 * (1) page's mapping marked unevictable 3529 * (2) page is part of an mlocked VMA 3530 * 3531 */ 3532 int page_evictable(struct page *page, struct vm_area_struct *vma) 3533 { 3534 3535 if (mapping_unevictable(page_mapping(page))) 3536 return 0; 3537 3538 if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page))) 3539 return 0; 3540 3541 return 1; 3542 } 3543 3544 #ifdef CONFIG_SHMEM 3545 /** 3546 * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list 3547 * @pages: array of pages to check 3548 * @nr_pages: number of pages to check 3549 * 3550 * Checks pages for evictability and moves them to the appropriate lru list. 3551 * 3552 * This function is only used for SysV IPC SHM_UNLOCK. 3553 */ 3554 void check_move_unevictable_pages(struct page **pages, int nr_pages) 3555 { 3556 struct lruvec *lruvec; 3557 struct zone *zone = NULL; 3558 int pgscanned = 0; 3559 int pgrescued = 0; 3560 int i; 3561 3562 for (i = 0; i < nr_pages; i++) { 3563 struct page *page = pages[i]; 3564 struct zone *pagezone; 3565 3566 pgscanned++; 3567 pagezone = page_zone(page); 3568 if (pagezone != zone) { 3569 if (zone) 3570 spin_unlock_irq(&zone->lru_lock); 3571 zone = pagezone; 3572 spin_lock_irq(&zone->lru_lock); 3573 } 3574 3575 if (!PageLRU(page) || !PageUnevictable(page)) 3576 continue; 3577 3578 if (page_evictable(page, NULL)) { 3579 enum lru_list lru = page_lru_base_type(page); 3580 3581 VM_BUG_ON(PageActive(page)); 3582 ClearPageUnevictable(page); 3583 __dec_zone_state(zone, NR_UNEVICTABLE); 3584 lruvec = mem_cgroup_lru_move_lists(zone, page, 3585 LRU_UNEVICTABLE, lru); 3586 list_move(&page->lru, &lruvec->lists[lru]); 3587 __inc_zone_state(zone, NR_INACTIVE_ANON + lru); 3588 pgrescued++; 3589 } 3590 } 3591 3592 if (zone) { 3593 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued); 3594 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned); 3595 spin_unlock_irq(&zone->lru_lock); 3596 } 3597 } 3598 #endif /* CONFIG_SHMEM */ 3599 3600 static void warn_scan_unevictable_pages(void) 3601 { 3602 printk_once(KERN_WARNING 3603 "%s: The scan_unevictable_pages sysctl/node-interface has been " 3604 "disabled for lack of a legitimate use case. If you have " 3605 "one, please send an email to linux-mm@kvack.org.\n", 3606 current->comm); 3607 } 3608 3609 /* 3610 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of 3611 * all nodes' unevictable lists for evictable pages 3612 */ 3613 unsigned long scan_unevictable_pages; 3614 3615 int scan_unevictable_handler(struct ctl_table *table, int write, 3616 void __user *buffer, 3617 size_t *length, loff_t *ppos) 3618 { 3619 warn_scan_unevictable_pages(); 3620 proc_doulongvec_minmax(table, write, buffer, length, ppos); 3621 scan_unevictable_pages = 0; 3622 return 0; 3623 } 3624 3625 #ifdef CONFIG_NUMA 3626 /* 3627 * per node 'scan_unevictable_pages' attribute. On demand re-scan of 3628 * a specified node's per zone unevictable lists for evictable pages. 3629 */ 3630 3631 static ssize_t read_scan_unevictable_node(struct device *dev, 3632 struct device_attribute *attr, 3633 char *buf) 3634 { 3635 warn_scan_unevictable_pages(); 3636 return sprintf(buf, "0\n"); /* always zero; should fit... 
*/ 3637 } 3638 3639 static ssize_t write_scan_unevictable_node(struct device *dev, 3640 struct device_attribute *attr, 3641 const char *buf, size_t count) 3642 { 3643 warn_scan_unevictable_pages(); 3644 return 1; 3645 } 3646 3647 3648 static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR, 3649 read_scan_unevictable_node, 3650 write_scan_unevictable_node); 3651 3652 int scan_unevictable_register_node(struct node *node) 3653 { 3654 return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages); 3655 } 3656 3657 void scan_unevictable_unregister_node(struct node *node) 3658 { 3659 device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages); 3660 } 3661 #endif 3662
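/*
 * Illustrative sketch (not kernel code): the slab pass in __zone_reclaim()
 * earlier in this file keeps calling the shrinkers until the
 * reclaimable-slab counter has dropped by the requested number of pages or
 * the shrinkers stop making progress. The stand-alone sketch below restates
 * that loop; slab_pages() and shrink_step() are hypothetical callbacks
 * standing in for zone_page_state(zone, NR_SLAB_RECLAIMABLE) and
 * shrink_slab().
 */
static unsigned long sketch_shrink_slab_until(unsigned long nr_pages,
					      unsigned long (*slab_pages)(void),
					      unsigned long (*shrink_step)(void))
{
	unsigned long start = slab_pages();
	unsigned long now;

	for (;;) {
		/* No reclaimable slab or very low memory pressure. */
		if (!shrink_step())
			break;
		/* Freed enough memory. */
		now = slab_pages();
		if (now + nr_pages <= start)
			break;
	}

	/* Report how many slab pages this pass actually freed. */
	now = slab_pages();
	return now < start ? start - now : 0;
}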