/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/prefetch.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

/*
 * reclaim_mode determines how the inactive list is shrunk
 * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
 * RECLAIM_MODE_ASYNC:  Do not block
 * RECLAIM_MODE_SYNC:   Allow blocking e.g. call wait_on_page_writeback
 * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
 *			page from the LRU and reclaim all pages within a
 *			naturally aligned range
 * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
 *			order-0 pages and then compact the zone
 */
typedef unsigned __bitwise__ reclaim_mode_t;
#define RECLAIM_MODE_SINGLE		((__force reclaim_mode_t)0x01u)
#define RECLAIM_MODE_ASYNC		((__force reclaim_mode_t)0x02u)
#define RECLAIM_MODE_SYNC		((__force reclaim_mode_t)0x04u)
#define RECLAIM_MODE_LUMPYRECLAIM	((__force reclaim_mode_t)0x08u)
#define RECLAIM_MODE_COMPACTION		((__force reclaim_mode_t)0x10u)

struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	unsigned long hibernation_mode;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can mapped pages be reclaimed? */
	int may_unmap;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	int order;

	/*
	 * Intend to reclaim enough contiguous memory rather than just
	 * enough memory, i.e. the mode used for high-order allocations.
	 */
	reclaim_mode_t reclaim_mode;

	/* Which cgroup do we reclaim from */
	struct mem_cgroup *mem_cgroup;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
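	 * (Typically direct reclaim passes the allocation's nodemask down
	 * from try_to_free_pages(), while kswapd leaves this NULL.)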
112 */ 113 nodemask_t *nodemask; 114 }; 115 116 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) 117 118 #ifdef ARCH_HAS_PREFETCH 119 #define prefetch_prev_lru_page(_page, _base, _field) \ 120 do { \ 121 if ((_page)->lru.prev != _base) { \ 122 struct page *prev; \ 123 \ 124 prev = lru_to_page(&(_page->lru)); \ 125 prefetch(&prev->_field); \ 126 } \ 127 } while (0) 128 #else 129 #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0) 130 #endif 131 132 #ifdef ARCH_HAS_PREFETCHW 133 #define prefetchw_prev_lru_page(_page, _base, _field) \ 134 do { \ 135 if ((_page)->lru.prev != _base) { \ 136 struct page *prev; \ 137 \ 138 prev = lru_to_page(&(_page->lru)); \ 139 prefetchw(&prev->_field); \ 140 } \ 141 } while (0) 142 #else 143 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0) 144 #endif 145 146 /* 147 * From 0 .. 100. Higher means more swappy. 148 */ 149 int vm_swappiness = 60; 150 long vm_total_pages; /* The total number of pages which the VM controls */ 151 152 static LIST_HEAD(shrinker_list); 153 static DECLARE_RWSEM(shrinker_rwsem); 154 155 #ifdef CONFIG_CGROUP_MEM_RES_CTLR 156 #define scanning_global_lru(sc) (!(sc)->mem_cgroup) 157 #else 158 #define scanning_global_lru(sc) (1) 159 #endif 160 161 static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone, 162 struct scan_control *sc) 163 { 164 if (!scanning_global_lru(sc)) 165 return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone); 166 167 return &zone->reclaim_stat; 168 } 169 170 static unsigned long zone_nr_lru_pages(struct zone *zone, 171 struct scan_control *sc, enum lru_list lru) 172 { 173 if (!scanning_global_lru(sc)) 174 return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup, 175 zone_to_nid(zone), zone_idx(zone), BIT(lru)); 176 177 return zone_page_state(zone, NR_LRU_BASE + lru); 178 } 179 180 181 /* 182 * Add a shrinker callback to be called from the vm 183 */ 184 void register_shrinker(struct shrinker *shrinker) 185 { 186 atomic_long_set(&shrinker->nr_in_batch, 0); 187 down_write(&shrinker_rwsem); 188 list_add_tail(&shrinker->list, &shrinker_list); 189 up_write(&shrinker_rwsem); 190 } 191 EXPORT_SYMBOL(register_shrinker); 192 193 /* 194 * Remove one 195 */ 196 void unregister_shrinker(struct shrinker *shrinker) 197 { 198 down_write(&shrinker_rwsem); 199 list_del(&shrinker->list); 200 up_write(&shrinker_rwsem); 201 } 202 EXPORT_SYMBOL(unregister_shrinker); 203 204 static inline int do_shrinker_shrink(struct shrinker *shrinker, 205 struct shrink_control *sc, 206 unsigned long nr_to_scan) 207 { 208 sc->nr_to_scan = nr_to_scan; 209 return (*shrinker->shrink)(shrinker, sc); 210 } 211 212 #define SHRINK_BATCH 128 213 /* 214 * Call the shrink functions to age shrinkable caches 215 * 216 * Here we assume it costs one seek to replace a lru page and that it also 217 * takes a seek to recreate a cache object. With this in mind we age equal 218 * percentages of the lru and ageable caches. This should balance the seeks 219 * generated by these structures. 220 * 221 * If the vm encountered mapped pages on the LRU it increase the pressure on 222 * slab to avoid swapping. 223 * 224 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits. 225 * 226 * `lru_pages' represents the number of on-LRU pages in all the zones which 227 * are eligible for the caller's allocation attempt. It is used for balancing 228 * slab reclaim versus page reclaim. 229 * 230 * Returns the number of slab objects which we shrunk. 
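 *
 * As a rough, illustrative example of the proportional scan below (all
 * numbers made up): if 1000 LRU pages were scanned, lru_pages is 8000,
 * a shrinker reports 200 freeable objects and uses DEFAULT_SEEKS (2),
 * then
 *
 *	delta = (4 * 1000) / 2 = 2000
 *	delta = 2000 * 200 / (8000 + 1) ~= 49
 *
 * so about a quarter of that cache is asked to be scanned for the eighth
 * of the LRU that was scanned; the factor of two comes from 4 / seeks.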
231 */ 232 unsigned long shrink_slab(struct shrink_control *shrink, 233 unsigned long nr_pages_scanned, 234 unsigned long lru_pages) 235 { 236 struct shrinker *shrinker; 237 unsigned long ret = 0; 238 239 if (nr_pages_scanned == 0) 240 nr_pages_scanned = SWAP_CLUSTER_MAX; 241 242 if (!down_read_trylock(&shrinker_rwsem)) { 243 /* Assume we'll be able to shrink next time */ 244 ret = 1; 245 goto out; 246 } 247 248 list_for_each_entry(shrinker, &shrinker_list, list) { 249 unsigned long long delta; 250 long total_scan; 251 long max_pass; 252 int shrink_ret = 0; 253 long nr; 254 long new_nr; 255 long batch_size = shrinker->batch ? shrinker->batch 256 : SHRINK_BATCH; 257 258 max_pass = do_shrinker_shrink(shrinker, shrink, 0); 259 if (max_pass <= 0) 260 continue; 261 262 /* 263 * copy the current shrinker scan count into a local variable 264 * and zero it so that other concurrent shrinker invocations 265 * don't also do this scanning work. 266 */ 267 nr = atomic_long_xchg(&shrinker->nr_in_batch, 0); 268 269 total_scan = nr; 270 delta = (4 * nr_pages_scanned) / shrinker->seeks; 271 delta *= max_pass; 272 do_div(delta, lru_pages + 1); 273 total_scan += delta; 274 if (total_scan < 0) { 275 printk(KERN_ERR "shrink_slab: %pF negative objects to " 276 "delete nr=%ld\n", 277 shrinker->shrink, total_scan); 278 total_scan = max_pass; 279 } 280 281 /* 282 * We need to avoid excessive windup on filesystem shrinkers 283 * due to large numbers of GFP_NOFS allocations causing the 284 * shrinkers to return -1 all the time. This results in a large 285 * nr being built up so when a shrink that can do some work 286 * comes along it empties the entire cache due to nr >>> 287 * max_pass. This is bad for sustaining a working set in 288 * memory. 289 * 290 * Hence only allow the shrinker to scan the entire cache when 291 * a large delta change is calculated directly. 292 */ 293 if (delta < max_pass / 4) 294 total_scan = min(total_scan, max_pass / 2); 295 296 /* 297 * Avoid risking looping forever due to too large nr value: 298 * never try to free more than twice the estimate number of 299 * freeable entries. 300 */ 301 if (total_scan > max_pass * 2) 302 total_scan = max_pass * 2; 303 304 trace_mm_shrink_slab_start(shrinker, shrink, nr, 305 nr_pages_scanned, lru_pages, 306 max_pass, delta, total_scan); 307 308 while (total_scan >= batch_size) { 309 int nr_before; 310 311 nr_before = do_shrinker_shrink(shrinker, shrink, 0); 312 shrink_ret = do_shrinker_shrink(shrinker, shrink, 313 batch_size); 314 if (shrink_ret == -1) 315 break; 316 if (shrink_ret < nr_before) 317 ret += nr_before - shrink_ret; 318 count_vm_events(SLABS_SCANNED, batch_size); 319 total_scan -= batch_size; 320 321 cond_resched(); 322 } 323 324 /* 325 * move the unused scan count back into the shrinker in a 326 * manner that handles concurrent updates. If we exhausted the 327 * scan, there is no need to do an update. 328 */ 329 if (total_scan > 0) 330 new_nr = atomic_long_add_return(total_scan, 331 &shrinker->nr_in_batch); 332 else 333 new_nr = atomic_long_read(&shrinker->nr_in_batch); 334 335 trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr); 336 } 337 up_read(&shrinker_rwsem); 338 out: 339 cond_resched(); 340 return ret; 341 } 342 343 static void set_reclaim_mode(int priority, struct scan_control *sc, 344 bool sync) 345 { 346 reclaim_mode_t syncmode = sync ? 
				RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;

	/*
	 * Initially assume we are entering either lumpy reclaim or
	 * reclaim/compaction. Depending on the order, we will either set the
	 * sync mode or just reclaim order-0 pages later.
	 */
	if (COMPACTION_BUILD)
		sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
	else
		sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;

	/*
	 * Avoid using lumpy reclaim or reclaim/compaction if possible by
	 * restricting it to costly allocations or to reclaim under
	 * memory pressure.
	 */
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		sc->reclaim_mode |= syncmode;
	else if (sc->order && priority < DEF_PRIORITY - 2)
		sc->reclaim_mode |= syncmode;
	else
		sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
}

static void reset_reclaim_mode(struct scan_control *sc)
{
	sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
}

static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache radix tree and
	 * optional buffer heads at page->private.
	 */
	return page_count(page) - page_has_private(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi,
			      struct scan_control *sc)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;

	/* lumpy reclaim for hugepage often needs a lot of writes */
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
			 struct scan_control *sc)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_aio_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
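	 *
	 * (Whether a write would block is approximated by
	 * may_write_to_queue() above: a PF_SWAPWRITE caller such as kswapd
	 * always writes, everyone else backs off from a write-congested BDI
	 * unless it is their own backing device or the allocation is above
	 * PAGE_ALLOC_COSTLY_ORDER.)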
452 * 453 * If the page is swapcache, write it back even if that would 454 * block, for some throttling. This happens by accident, because 455 * swap_backing_dev_info is bust: it doesn't reflect the 456 * congestion state of the swapdevs. Easy to fix, if needed. 457 */ 458 if (!is_page_cache_freeable(page)) 459 return PAGE_KEEP; 460 if (!mapping) { 461 /* 462 * Some data journaling orphaned pages can have 463 * page->mapping == NULL while being dirty with clean buffers. 464 */ 465 if (page_has_private(page)) { 466 if (try_to_free_buffers(page)) { 467 ClearPageDirty(page); 468 printk("%s: orphaned page\n", __func__); 469 return PAGE_CLEAN; 470 } 471 } 472 return PAGE_KEEP; 473 } 474 if (mapping->a_ops->writepage == NULL) 475 return PAGE_ACTIVATE; 476 if (!may_write_to_queue(mapping->backing_dev_info, sc)) 477 return PAGE_KEEP; 478 479 if (clear_page_dirty_for_io(page)) { 480 int res; 481 struct writeback_control wbc = { 482 .sync_mode = WB_SYNC_NONE, 483 .nr_to_write = SWAP_CLUSTER_MAX, 484 .range_start = 0, 485 .range_end = LLONG_MAX, 486 .for_reclaim = 1, 487 }; 488 489 SetPageReclaim(page); 490 res = mapping->a_ops->writepage(page, &wbc); 491 if (res < 0) 492 handle_write_error(mapping, page, res); 493 if (res == AOP_WRITEPAGE_ACTIVATE) { 494 ClearPageReclaim(page); 495 return PAGE_ACTIVATE; 496 } 497 498 if (!PageWriteback(page)) { 499 /* synchronous write or broken a_ops? */ 500 ClearPageReclaim(page); 501 } 502 trace_mm_vmscan_writepage(page, 503 trace_reclaim_flags(page, sc->reclaim_mode)); 504 inc_zone_page_state(page, NR_VMSCAN_WRITE); 505 return PAGE_SUCCESS; 506 } 507 508 return PAGE_CLEAN; 509 } 510 511 /* 512 * Same as remove_mapping, but if the page is removed from the mapping, it 513 * gets returned with a refcount of 0. 514 */ 515 static int __remove_mapping(struct address_space *mapping, struct page *page) 516 { 517 BUG_ON(!PageLocked(page)); 518 BUG_ON(mapping != page_mapping(page)); 519 520 spin_lock_irq(&mapping->tree_lock); 521 /* 522 * The non racy check for a busy page. 523 * 524 * Must be careful with the order of the tests. When someone has 525 * a ref to the page, it may be possible that they dirty it then 526 * drop the reference. So if PageDirty is tested before page_count 527 * here, then the following race may occur: 528 * 529 * get_user_pages(&page); 530 * [user mapping goes away] 531 * write_to(page); 532 * !PageDirty(page) [good] 533 * SetPageDirty(page); 534 * put_page(page); 535 * !page_count(page) [good, discard it] 536 * 537 * [oops, our write_to data is lost] 538 * 539 * Reversing the order of the tests ensures such a situation cannot 540 * escape unnoticed. The smp_rmb is needed to ensure the page->flags 541 * load is not satisfied before that of page->_count. 542 * 543 * Note that if SetPageDirty is always performed via set_page_dirty, 544 * and thus under tree_lock, then this ordering is not required. 
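	 *
	 * (page_freeze_refs() below is an atomic_cmpxchg() of page->_count
	 * from the expected value, 2, to zero; it fails, and we keep the
	 * page, if any other reference appeared in the meantime.)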
	 */
	if (!page_freeze_refs(page, 2))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_unfreeze_refs(page, 2);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		swapcache_free(swap, page);
	} else {
		void (*freepage)(struct page *);

		freepage = mapping->a_ops->freepage;

		__delete_from_page_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		mem_cgroup_uncharge_cache_page(page);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_unfreeze_refs(page, 1);
		return 1;
	}
	return 0;
}

/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	int lru;
	int active = !!TestClearPageActive(page);
	int was_unevictable = PageUnevictable(page);

	VM_BUG_ON(PageLRU(page));

redo:
	ClearPageUnevictable(page);

	if (page_evictable(page, NULL)) {
		/*
		 * For evictable pages, we can use the cache.
		 * In event of a race, worst case is we end up with an
		 * unevictable page on [in]active list.
		 * We know how to handle that.
		 */
		lru = active + page_lru_base_type(page);
		lru_cache_add_lru(page, lru);
	} else {
		/*
		 * Put unevictable pages directly on zone's unevictable
		 * list.
		 */
		lru = LRU_UNEVICTABLE;
		add_page_to_unevictable_list(page);
		/*
		 * When racing with an mlock or AS_UNEVICTABLE clearing
		 * (page is unlocked) make sure that if the other thread
		 * does not observe our setting of PG_lru and fails
		 * isolation/check_move_unevictable_page,
		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
		 * the page back to the evictable list.
		 *
		 * The other side is TestClearPageMlocked() or shmem_lock().
		 */
		smp_mb();
	}

	/*
	 * The page's status can change while we move it among the LRU lists.
	 * If an evictable page ends up on the unevictable list, it will never
	 * be freed.  To avoid that, check again after adding it to the list.
	 */
	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
		if (!isolate_lru_page(page)) {
			put_page(page);
			goto redo;
		}
		/* This means someone else dropped this page from the LRU,
		 * so it will be freed or put back to the LRU again. There is
		 * nothing to do here.
661 */ 662 } 663 664 if (was_unevictable && lru != LRU_UNEVICTABLE) 665 count_vm_event(UNEVICTABLE_PGRESCUED); 666 else if (!was_unevictable && lru == LRU_UNEVICTABLE) 667 count_vm_event(UNEVICTABLE_PGCULLED); 668 669 put_page(page); /* drop ref from isolate */ 670 } 671 672 enum page_references { 673 PAGEREF_RECLAIM, 674 PAGEREF_RECLAIM_CLEAN, 675 PAGEREF_KEEP, 676 PAGEREF_ACTIVATE, 677 }; 678 679 static enum page_references page_check_references(struct page *page, 680 struct scan_control *sc) 681 { 682 int referenced_ptes, referenced_page; 683 unsigned long vm_flags; 684 685 referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags); 686 referenced_page = TestClearPageReferenced(page); 687 688 /* Lumpy reclaim - ignore references */ 689 if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM) 690 return PAGEREF_RECLAIM; 691 692 /* 693 * Mlock lost the isolation race with us. Let try_to_unmap() 694 * move the page to the unevictable list. 695 */ 696 if (vm_flags & VM_LOCKED) 697 return PAGEREF_RECLAIM; 698 699 if (referenced_ptes) { 700 if (PageAnon(page)) 701 return PAGEREF_ACTIVATE; 702 /* 703 * All mapped pages start out with page table 704 * references from the instantiating fault, so we need 705 * to look twice if a mapped file page is used more 706 * than once. 707 * 708 * Mark it and spare it for another trip around the 709 * inactive list. Another page table reference will 710 * lead to its activation. 711 * 712 * Note: the mark is set for activated pages as well 713 * so that recently deactivated but used pages are 714 * quickly recovered. 715 */ 716 SetPageReferenced(page); 717 718 if (referenced_page || referenced_ptes > 1) 719 return PAGEREF_ACTIVATE; 720 721 /* 722 * Activate file-backed executable pages after first usage. 723 */ 724 if (vm_flags & VM_EXEC) 725 return PAGEREF_ACTIVATE; 726 727 return PAGEREF_KEEP; 728 } 729 730 /* Reclaim if clean, defer dirty pages to writeback */ 731 if (referenced_page && !PageSwapBacked(page)) 732 return PAGEREF_RECLAIM_CLEAN; 733 734 return PAGEREF_RECLAIM; 735 } 736 737 /* 738 * shrink_page_list() returns the number of reclaimed pages 739 */ 740 static unsigned long shrink_page_list(struct list_head *page_list, 741 struct zone *zone, 742 struct scan_control *sc, 743 int priority, 744 unsigned long *ret_nr_dirty, 745 unsigned long *ret_nr_writeback) 746 { 747 LIST_HEAD(ret_pages); 748 LIST_HEAD(free_pages); 749 int pgactivate = 0; 750 unsigned long nr_dirty = 0; 751 unsigned long nr_congested = 0; 752 unsigned long nr_reclaimed = 0; 753 unsigned long nr_writeback = 0; 754 755 cond_resched(); 756 757 while (!list_empty(page_list)) { 758 enum page_references references; 759 struct address_space *mapping; 760 struct page *page; 761 int may_enter_fs; 762 763 cond_resched(); 764 765 page = lru_to_page(page_list); 766 list_del(&page->lru); 767 768 if (!trylock_page(page)) 769 goto keep; 770 771 VM_BUG_ON(PageActive(page)); 772 VM_BUG_ON(page_zone(page) != zone); 773 774 sc->nr_scanned++; 775 776 if (unlikely(!page_evictable(page, NULL))) 777 goto cull_mlocked; 778 779 if (!sc->may_unmap && page_mapped(page)) 780 goto keep_locked; 781 782 /* Double the slab pressure for mapped and swapcache pages */ 783 if (page_mapped(page) || PageSwapCache(page)) 784 sc->nr_scanned++; 785 786 may_enter_fs = (sc->gfp_mask & __GFP_FS) || 787 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); 788 789 if (PageWriteback(page)) { 790 nr_writeback++; 791 /* 792 * Synchronous reclaim cannot queue pages for 793 * writeback due to the possibility of stack overflow 
794 * but if it encounters a page under writeback, wait 795 * for the IO to complete. 796 */ 797 if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) && 798 may_enter_fs) 799 wait_on_page_writeback(page); 800 else { 801 unlock_page(page); 802 goto keep_lumpy; 803 } 804 } 805 806 references = page_check_references(page, sc); 807 switch (references) { 808 case PAGEREF_ACTIVATE: 809 goto activate_locked; 810 case PAGEREF_KEEP: 811 goto keep_locked; 812 case PAGEREF_RECLAIM: 813 case PAGEREF_RECLAIM_CLEAN: 814 ; /* try to reclaim the page below */ 815 } 816 817 /* 818 * Anonymous process memory has backing store? 819 * Try to allocate it some swap space here. 820 */ 821 if (PageAnon(page) && !PageSwapCache(page)) { 822 if (!(sc->gfp_mask & __GFP_IO)) 823 goto keep_locked; 824 if (!add_to_swap(page)) 825 goto activate_locked; 826 may_enter_fs = 1; 827 } 828 829 mapping = page_mapping(page); 830 831 /* 832 * The page is mapped into the page tables of one or more 833 * processes. Try to unmap it here. 834 */ 835 if (page_mapped(page) && mapping) { 836 switch (try_to_unmap(page, TTU_UNMAP)) { 837 case SWAP_FAIL: 838 goto activate_locked; 839 case SWAP_AGAIN: 840 goto keep_locked; 841 case SWAP_MLOCK: 842 goto cull_mlocked; 843 case SWAP_SUCCESS: 844 ; /* try to free the page below */ 845 } 846 } 847 848 if (PageDirty(page)) { 849 nr_dirty++; 850 851 /* 852 * Only kswapd can writeback filesystem pages to 853 * avoid risk of stack overflow but do not writeback 854 * unless under significant pressure. 855 */ 856 if (page_is_file_cache(page) && 857 (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) { 858 /* 859 * Immediately reclaim when written back. 860 * Similar in principal to deactivate_page() 861 * except we already have the page isolated 862 * and know it's dirty 863 */ 864 inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE); 865 SetPageReclaim(page); 866 867 goto keep_locked; 868 } 869 870 if (references == PAGEREF_RECLAIM_CLEAN) 871 goto keep_locked; 872 if (!may_enter_fs) 873 goto keep_locked; 874 if (!sc->may_writepage) 875 goto keep_locked; 876 877 /* Page is dirty, try to write it out here */ 878 switch (pageout(page, mapping, sc)) { 879 case PAGE_KEEP: 880 nr_congested++; 881 goto keep_locked; 882 case PAGE_ACTIVATE: 883 goto activate_locked; 884 case PAGE_SUCCESS: 885 if (PageWriteback(page)) 886 goto keep_lumpy; 887 if (PageDirty(page)) 888 goto keep; 889 890 /* 891 * A synchronous write - probably a ramdisk. Go 892 * ahead and try to reclaim the page. 893 */ 894 if (!trylock_page(page)) 895 goto keep; 896 if (PageDirty(page) || PageWriteback(page)) 897 goto keep_locked; 898 mapping = page_mapping(page); 899 case PAGE_CLEAN: 900 ; /* try to free the page below */ 901 } 902 } 903 904 /* 905 * If the page has buffers, try to free the buffer mappings 906 * associated with this page. If we succeed we try to free 907 * the page as well. 908 * 909 * We do this even if the page is PageDirty(). 910 * try_to_release_page() does not perform I/O, but it is 911 * possible for a page to have PageDirty set, but it is actually 912 * clean (all its buffers are clean). This happens if the 913 * buffers were written out directly, with submit_bh(). ext3 914 * will do this, as well as the blockdev mapping. 915 * try_to_release_page() will discover that cleanness and will 916 * drop the buffers and mark the page clean - it can be freed. 917 * 918 * Rarely, pages can have buffers and no ->mapping. These are 919 * the pages which were not successfully invalidated in 920 * truncate_complete_page(). 
		 * We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (page_has_private(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (!mapping || !__remove_mapping(mapping, page))
			goto keep_locked;

		/*
		 * At this point, we have no other references and there is
		 * no way to pick any more up (removed from LRU, removed
		 * from pagecache). Can use non-atomic bitops now (and
		 * we obviously don't have to worry about waking up a process
		 * waiting on the page lock, because there are no references.
		 */
		__clear_page_locked(page);
free_it:
		nr_reclaimed++;

		/*
		 * Is there a need to periodically free_page_list? It would
		 * appear not, as the counts should be low.
		 */
		list_add(&page->lru, &free_pages);
		continue;

cull_mlocked:
		if (PageSwapCache(page))
			try_to_free_swap(page);
		unlock_page(page);
		putback_lru_page(page);
		reset_reclaim_mode(sc);
		continue;

activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && vm_swap_full())
			try_to_free_swap(page);
		VM_BUG_ON(PageActive(page));
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		reset_reclaim_mode(sc);
keep_lumpy:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
	}

	/*
	 * Tag a zone as congested if all the dirty pages encountered were
	 * backed by a congested BDI. In this case, reclaimers should just
	 * back off and wait for congestion to clear because further reclaim
	 * will encounter the same problem
	 */
	if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc))
		zone_set_flag(zone, ZONE_CONGESTED);

	free_hot_cold_page_list(&free_pages, 1);

	list_splice(&ret_pages, page_list);
	count_vm_events(PGACTIVATE, pgactivate);
	*ret_nr_dirty += nr_dirty;
	*ret_nr_writeback += nr_writeback;
	return nr_reclaimed;
}

/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
{
	bool all_lru_mode;
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) ==
		(ISOLATE_ACTIVE|ISOLATE_INACTIVE);

	/*
	 * When checking the active state, we need to be sure we are
	 * dealing with comparable boolean values.  Take the logical not
	 * of each.
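	 *
	 * (PageActive() and (mode & ISOLATE_ACTIVE) are not guaranteed to be
	 * 0/1 values; taking '!' of each side normalises them to booleans so
	 * the comparison below is meaningful.)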
1035 */ 1036 if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE)) 1037 return ret; 1038 1039 if (!all_lru_mode && !!page_is_file_cache(page) != file) 1040 return ret; 1041 1042 /* 1043 * When this function is being called for lumpy reclaim, we 1044 * initially look into all LRU pages, active, inactive and 1045 * unevictable; only give shrink_page_list evictable pages. 1046 */ 1047 if (PageUnevictable(page)) 1048 return ret; 1049 1050 ret = -EBUSY; 1051 1052 if ((mode & ISOLATE_CLEAN) && (PageDirty(page) || PageWriteback(page))) 1053 return ret; 1054 1055 if ((mode & ISOLATE_UNMAPPED) && page_mapped(page)) 1056 return ret; 1057 1058 if (likely(get_page_unless_zero(page))) { 1059 /* 1060 * Be careful not to clear PageLRU until after we're 1061 * sure the page is not being freed elsewhere -- the 1062 * page release code relies on it. 1063 */ 1064 ClearPageLRU(page); 1065 ret = 0; 1066 } 1067 1068 return ret; 1069 } 1070 1071 /* 1072 * zone->lru_lock is heavily contended. Some of the functions that 1073 * shrink the lists perform better by taking out a batch of pages 1074 * and working on them outside the LRU lock. 1075 * 1076 * For pagecache intensive workloads, this function is the hottest 1077 * spot in the kernel (apart from copy_*_user functions). 1078 * 1079 * Appropriate locks must be held before calling this function. 1080 * 1081 * @nr_to_scan: The number of pages to look through on the list. 1082 * @src: The LRU list to pull pages off. 1083 * @dst: The temp list to put pages on to. 1084 * @scanned: The number of pages that were scanned. 1085 * @order: The caller's attempted allocation order 1086 * @mode: One of the LRU isolation modes 1087 * @file: True [1] if isolating file [!anon] pages 1088 * 1089 * returns how many pages were moved onto *@dst. 1090 */ 1091 static unsigned long isolate_lru_pages(unsigned long nr_to_scan, 1092 struct list_head *src, struct list_head *dst, 1093 unsigned long *scanned, int order, isolate_mode_t mode, 1094 int file) 1095 { 1096 unsigned long nr_taken = 0; 1097 unsigned long nr_lumpy_taken = 0; 1098 unsigned long nr_lumpy_dirty = 0; 1099 unsigned long nr_lumpy_failed = 0; 1100 unsigned long scan; 1101 1102 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { 1103 struct page *page; 1104 unsigned long pfn; 1105 unsigned long end_pfn; 1106 unsigned long page_pfn; 1107 int zone_id; 1108 1109 page = lru_to_page(src); 1110 prefetchw_prev_lru_page(page, src, flags); 1111 1112 VM_BUG_ON(!PageLRU(page)); 1113 1114 switch (__isolate_lru_page(page, mode, file)) { 1115 case 0: 1116 list_move(&page->lru, dst); 1117 mem_cgroup_del_lru(page); 1118 nr_taken += hpage_nr_pages(page); 1119 break; 1120 1121 case -EBUSY: 1122 /* else it is being freed elsewhere */ 1123 list_move(&page->lru, src); 1124 mem_cgroup_rotate_lru_list(page, page_lru(page)); 1125 continue; 1126 1127 default: 1128 BUG(); 1129 } 1130 1131 if (!order) 1132 continue; 1133 1134 /* 1135 * Attempt to take all pages in the order aligned region 1136 * surrounding the tag page. Only take those pages of 1137 * the same active state as that tag page. We may safely 1138 * round the target page pfn down to the requested order 1139 * as the mem_map is guaranteed valid out to MAX_ORDER, 1140 * where that page is in a different zone we will detect 1141 * it from its zone id and abort this block scan. 
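		 *
		 * For example (illustrative numbers): with order == 4 and a
		 * tag page at pfn 1013, pfn is rounded down to 1008
		 * (1013 & ~15) and end_pfn is 1024, so the loop below scans
		 * the naturally aligned block of 16 pfns containing the
		 * tag page.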
1142 */ 1143 zone_id = page_zone_id(page); 1144 page_pfn = page_to_pfn(page); 1145 pfn = page_pfn & ~((1 << order) - 1); 1146 end_pfn = pfn + (1 << order); 1147 for (; pfn < end_pfn; pfn++) { 1148 struct page *cursor_page; 1149 1150 /* The target page is in the block, ignore it. */ 1151 if (unlikely(pfn == page_pfn)) 1152 continue; 1153 1154 /* Avoid holes within the zone. */ 1155 if (unlikely(!pfn_valid_within(pfn))) 1156 break; 1157 1158 cursor_page = pfn_to_page(pfn); 1159 1160 /* Check that we have not crossed a zone boundary. */ 1161 if (unlikely(page_zone_id(cursor_page) != zone_id)) 1162 break; 1163 1164 /* 1165 * If we don't have enough swap space, reclaiming of 1166 * anon page which don't already have a swap slot is 1167 * pointless. 1168 */ 1169 if (nr_swap_pages <= 0 && PageSwapBacked(cursor_page) && 1170 !PageSwapCache(cursor_page)) 1171 break; 1172 1173 if (__isolate_lru_page(cursor_page, mode, file) == 0) { 1174 list_move(&cursor_page->lru, dst); 1175 mem_cgroup_del_lru(cursor_page); 1176 nr_taken += hpage_nr_pages(cursor_page); 1177 nr_lumpy_taken++; 1178 if (PageDirty(cursor_page)) 1179 nr_lumpy_dirty++; 1180 scan++; 1181 } else { 1182 /* 1183 * Check if the page is freed already. 1184 * 1185 * We can't use page_count() as that 1186 * requires compound_head and we don't 1187 * have a pin on the page here. If a 1188 * page is tail, we may or may not 1189 * have isolated the head, so assume 1190 * it's not free, it'd be tricky to 1191 * track the head status without a 1192 * page pin. 1193 */ 1194 if (!PageTail(cursor_page) && 1195 !atomic_read(&cursor_page->_count)) 1196 continue; 1197 break; 1198 } 1199 } 1200 1201 /* If we break out of the loop above, lumpy reclaim failed */ 1202 if (pfn < end_pfn) 1203 nr_lumpy_failed++; 1204 } 1205 1206 *scanned = scan; 1207 1208 trace_mm_vmscan_lru_isolate(order, 1209 nr_to_scan, scan, 1210 nr_taken, 1211 nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, 1212 mode); 1213 return nr_taken; 1214 } 1215 1216 static unsigned long isolate_pages_global(unsigned long nr, 1217 struct list_head *dst, 1218 unsigned long *scanned, int order, 1219 isolate_mode_t mode, 1220 struct zone *z, int active, int file) 1221 { 1222 int lru = LRU_BASE; 1223 if (active) 1224 lru += LRU_ACTIVE; 1225 if (file) 1226 lru += LRU_FILE; 1227 return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order, 1228 mode, file); 1229 } 1230 1231 /* 1232 * clear_active_flags() is a helper for shrink_active_list(), clearing 1233 * any active bits from the pages in the list. 1234 */ 1235 static unsigned long clear_active_flags(struct list_head *page_list, 1236 unsigned int *count) 1237 { 1238 int nr_active = 0; 1239 int lru; 1240 struct page *page; 1241 1242 list_for_each_entry(page, page_list, lru) { 1243 int numpages = hpage_nr_pages(page); 1244 lru = page_lru_base_type(page); 1245 if (PageActive(page)) { 1246 lru += LRU_ACTIVE; 1247 ClearPageActive(page); 1248 nr_active += numpages; 1249 } 1250 if (count) 1251 count[lru] += numpages; 1252 } 1253 1254 return nr_active; 1255 } 1256 1257 /** 1258 * isolate_lru_page - tries to isolate a page from its LRU list 1259 * @page: page to isolate from its LRU list 1260 * 1261 * Isolates a @page from an LRU list, clears PageLRU and adjusts the 1262 * vmstat statistic corresponding to whatever LRU list the page was on. 1263 * 1264 * Returns 0 if the page was removed from an LRU list. 1265 * Returns -EBUSY if the page was not on an LRU list. 1266 * 1267 * The returned page will have PageLRU() cleared. 
 * If it was found on the active list, it will have PageActive set.
 * If it was found on the unevictable list, it will have the PageUnevictable
 * bit set.  That flag may need to be cleared by the caller before letting
 * the page go.
 *
 * The vmstat statistic corresponding to the list on which the page was
 * found will be decremented.
 *
 * Restrictions:
 * (1) Must be called with an elevated refcount on the page. This is a
 *     fundamental difference from isolate_lru_pages (which is called
 *     without a stable reference).
 * (2) the lru_lock must not be held.
 * (3) interrupts must be enabled.
 */
int isolate_lru_page(struct page *page)
{
	int ret = -EBUSY;

	VM_BUG_ON(!page_count(page));

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
			int lru = page_lru(page);
			ret = 0;
			get_page(page);
			ClearPageLRU(page);

			del_page_from_lru_list(zone, page, lru);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}

/*
 * Are there way too many processes in the direct reclaim path already?
 */
static int too_many_isolated(struct zone *zone, int file,
		struct scan_control *sc)
{
	unsigned long inactive, isolated;

	if (current_is_kswapd())
		return 0;

	if (!scanning_global_lru(sc))
		return 0;

	if (file) {
		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
	} else {
		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
	}

	return isolated > inactive;
}

/*
 * TODO: Try merging with migrations version of putback_lru_pages
 */
static noinline_for_stack void
putback_lru_pages(struct zone *zone, struct scan_control *sc,
				unsigned long nr_anon, unsigned long nr_file,
				struct list_head *page_list)
{
	struct page *page;
	struct pagevec pvec;
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);

	pagevec_init(&pvec, 1);

	/*
	 * Put back any unfreeable pages.
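	 *
	 * The caller (shrink_inactive_list) has already disabled interrupts
	 * with local_irq_disable(), which is why a plain spin_lock() is
	 * sufficient here; the final unlock below is spin_unlock_irq().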
	 */
	spin_lock(&zone->lru_lock);
	while (!list_empty(page_list)) {
		int lru;
		page = lru_to_page(page_list);
		VM_BUG_ON(PageLRU(page));
		list_del(&page->lru);
		if (unlikely(!page_evictable(page, NULL))) {
			spin_unlock_irq(&zone->lru_lock);
			putback_lru_page(page);
			spin_lock_irq(&zone->lru_lock);
			continue;
		}
		SetPageLRU(page);
		lru = page_lru(page);
		add_page_to_lru_list(zone, page, lru);
		if (is_active_lru(lru)) {
			int file = is_file_lru(lru);
			int numpages = hpage_nr_pages(page);
			reclaim_stat->recent_rotated[file] += numpages;
		}
		if (!pagevec_add(&pvec, page)) {
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);

	spin_unlock_irq(&zone->lru_lock);
	pagevec_release(&pvec);
}

static noinline_for_stack void update_isolated_counts(struct zone *zone,
					struct scan_control *sc,
					unsigned long *nr_anon,
					unsigned long *nr_file,
					struct list_head *isolated_list)
{
	unsigned long nr_active;
	unsigned int count[NR_LRU_LISTS] = { 0, };
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);

	nr_active = clear_active_flags(isolated_list, count);
	__count_vm_events(PGDEACTIVATE, nr_active);

	__mod_zone_page_state(zone, NR_ACTIVE_FILE,
			      -count[LRU_ACTIVE_FILE]);
	__mod_zone_page_state(zone, NR_INACTIVE_FILE,
			      -count[LRU_INACTIVE_FILE]);
	__mod_zone_page_state(zone, NR_ACTIVE_ANON,
			      -count[LRU_ACTIVE_ANON]);
	__mod_zone_page_state(zone, NR_INACTIVE_ANON,
			      -count[LRU_INACTIVE_ANON]);

	*nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
	*nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);

	reclaim_stat->recent_scanned[0] += *nr_anon;
	reclaim_stat->recent_scanned[1] += *nr_file;
}

/*
 * Returns true if a direct reclaim should wait on pages under writeback.
 *
 * If we are direct reclaiming for contiguous pages and we do not reclaim
 * everything in the list, try again and wait for writeback IO to complete.
 * This will stall high-order allocations noticeably. Only do that when we
 * really need to free the pages under high memory pressure.
 */
static inline bool should_reclaim_stall(unsigned long nr_taken,
					unsigned long nr_freed,
					int priority,
					struct scan_control *sc)
{
	int lumpy_stall_priority;

	/* kswapd should not stall on sync IO */
	if (current_is_kswapd())
		return false;

	/* Only stall on lumpy reclaim */
	if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
		return false;

	/* If we have reclaimed everything on the isolated list, no stall */
	if (nr_freed == nr_taken)
		return false;

	/*
	 * For high-order allocations, there are two stall thresholds.
	 * High-cost allocations stall immediately, whereas lower
	 * order allocations such as stacks require the scanning
	 * priority to be much higher before stalling.
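	 *
	 * For example, with DEF_PRIORITY of 12 a huge page allocation
	 * (order 9 on x86, well above PAGE_ALLOC_COSTLY_ORDER) may stall
	 * from the first pass, while a low-order allocation such as a
	 * kernel stack only stalls once the priority has dropped to
	 * DEF_PRIORITY / 3, i.e. 4, or below.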
	 */
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		lumpy_stall_priority = DEF_PRIORITY;
	else
		lumpy_stall_priority = DEF_PRIORITY / 3;

	return priority <= lumpy_stall_priority;
}

/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages
 */
static noinline_for_stack unsigned long
shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
		     struct scan_control *sc, int priority, int file)
{
	LIST_HEAD(page_list);
	unsigned long nr_scanned;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_taken;
	unsigned long nr_anon;
	unsigned long nr_file;
	unsigned long nr_dirty = 0;
	unsigned long nr_writeback = 0;
	isolate_mode_t reclaim_mode = ISOLATE_INACTIVE;

	while (unlikely(too_many_isolated(zone, file, sc))) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/* We are about to die and free our memory. Return now. */
		if (fatal_signal_pending(current))
			return SWAP_CLUSTER_MAX;
	}

	set_reclaim_mode(priority, sc, false);
	if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
		reclaim_mode |= ISOLATE_ACTIVE;

	lru_add_drain();

	if (!sc->may_unmap)
		reclaim_mode |= ISOLATE_UNMAPPED;
	if (!sc->may_writepage)
		reclaim_mode |= ISOLATE_CLEAN;

	spin_lock_irq(&zone->lru_lock);

	if (scanning_global_lru(sc)) {
		nr_taken = isolate_pages_global(nr_to_scan, &page_list,
			&nr_scanned, sc->order, reclaim_mode, zone, 0, file);
		zone->pages_scanned += nr_scanned;
		if (current_is_kswapd())
			__count_zone_vm_events(PGSCAN_KSWAPD, zone,
					       nr_scanned);
		else
			__count_zone_vm_events(PGSCAN_DIRECT, zone,
					       nr_scanned);
	} else {
		nr_taken = mem_cgroup_isolate_pages(nr_to_scan, &page_list,
			&nr_scanned, sc->order, reclaim_mode, zone,
			sc->mem_cgroup, 0, file);
		/*
		 * mem_cgroup_isolate_pages() keeps track of
		 * scanned pages on its own.
		 */
	}

	if (nr_taken == 0) {
		spin_unlock_irq(&zone->lru_lock);
		return 0;
	}

	update_isolated_counts(zone, sc, &nr_anon, &nr_file, &page_list);

	spin_unlock_irq(&zone->lru_lock);

	nr_reclaimed = shrink_page_list(&page_list, zone, sc, priority,
						&nr_dirty, &nr_writeback);

	/* Check if we should synchronously wait for writeback */
	if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
		set_reclaim_mode(priority, sc, true);
		nr_reclaimed += shrink_page_list(&page_list, zone, sc,
					priority, &nr_dirty, &nr_writeback);
	}

	local_irq_disable();
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);

	putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);

	/*
	 * If reclaim is isolating dirty pages under writeback, it implies
	 * that the long-lived page allocation rate is exceeding the page
	 * laundering rate. Either the global limits are not being effective
	 * at throttling processes due to the page distribution throughout
	 * zones or there is heavy usage of a slow backing device. The
	 * only option is to throttle from reclaim context which is not ideal
	 * as there is no guarantee the dirtying process is throttled in the
	 * same way balance_dirty_pages() manages.
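	 *
	 * For example, if SWAP_CLUSTER_MAX (32) pages were isolated at
	 * priority DEF_PRIORITY-2, the check below throttles once at least
	 * 8 of them (32 >> 2) are found under writeback, matching the 25%
	 * row in the table below.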
	 *
	 * This scales the number of dirty pages that must be under writeback
	 * before throttling depending on priority. It is a simple backoff
	 * function that has the most effect in the range DEF_PRIORITY to
	 * DEF_PRIORITY-2, the priority range in which reclaim is considered
	 * to be in trouble.
	 *
	 * DEF_PRIORITY   100% isolated pages must be PageWriteback to throttle
	 * DEF_PRIORITY-1  50% must be PageWriteback
	 * DEF_PRIORITY-2  25% must be PageWriteback, kswapd in trouble
	 * ...
	 * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
	 *                isolated page is PageWriteback
	 */
	if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority)))
		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);

	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
		zone_idx(zone),
		nr_scanned, nr_reclaimed,
		priority,
		trace_shrink_flags(file, sc->reclaim_mode));
	return nr_reclaimed;
}

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
1587 */ 1588 1589 static void move_active_pages_to_lru(struct zone *zone, 1590 struct list_head *list, 1591 enum lru_list lru) 1592 { 1593 unsigned long pgmoved = 0; 1594 struct pagevec pvec; 1595 struct page *page; 1596 1597 pagevec_init(&pvec, 1); 1598 1599 while (!list_empty(list)) { 1600 page = lru_to_page(list); 1601 1602 VM_BUG_ON(PageLRU(page)); 1603 SetPageLRU(page); 1604 1605 list_move(&page->lru, &zone->lru[lru].list); 1606 mem_cgroup_add_lru_list(page, lru); 1607 pgmoved += hpage_nr_pages(page); 1608 1609 if (!pagevec_add(&pvec, page) || list_empty(list)) { 1610 spin_unlock_irq(&zone->lru_lock); 1611 if (buffer_heads_over_limit) 1612 pagevec_strip(&pvec); 1613 __pagevec_release(&pvec); 1614 spin_lock_irq(&zone->lru_lock); 1615 } 1616 } 1617 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved); 1618 if (!is_active_lru(lru)) 1619 __count_vm_events(PGDEACTIVATE, pgmoved); 1620 } 1621 1622 static void shrink_active_list(unsigned long nr_pages, struct zone *zone, 1623 struct scan_control *sc, int priority, int file) 1624 { 1625 unsigned long nr_taken; 1626 unsigned long pgscanned; 1627 unsigned long vm_flags; 1628 LIST_HEAD(l_hold); /* The pages which were snipped off */ 1629 LIST_HEAD(l_active); 1630 LIST_HEAD(l_inactive); 1631 struct page *page; 1632 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1633 unsigned long nr_rotated = 0; 1634 isolate_mode_t reclaim_mode = ISOLATE_ACTIVE; 1635 1636 lru_add_drain(); 1637 1638 if (!sc->may_unmap) 1639 reclaim_mode |= ISOLATE_UNMAPPED; 1640 if (!sc->may_writepage) 1641 reclaim_mode |= ISOLATE_CLEAN; 1642 1643 spin_lock_irq(&zone->lru_lock); 1644 if (scanning_global_lru(sc)) { 1645 nr_taken = isolate_pages_global(nr_pages, &l_hold, 1646 &pgscanned, sc->order, 1647 reclaim_mode, zone, 1648 1, file); 1649 zone->pages_scanned += pgscanned; 1650 } else { 1651 nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold, 1652 &pgscanned, sc->order, 1653 reclaim_mode, zone, 1654 sc->mem_cgroup, 1, file); 1655 /* 1656 * mem_cgroup_isolate_pages() keeps track of 1657 * scanned pages on its own. 1658 */ 1659 } 1660 1661 reclaim_stat->recent_scanned[file] += nr_taken; 1662 1663 __count_zone_vm_events(PGREFILL, zone, pgscanned); 1664 if (file) 1665 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken); 1666 else 1667 __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken); 1668 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); 1669 spin_unlock_irq(&zone->lru_lock); 1670 1671 while (!list_empty(&l_hold)) { 1672 cond_resched(); 1673 page = lru_to_page(&l_hold); 1674 list_del(&page->lru); 1675 1676 if (unlikely(!page_evictable(page, NULL))) { 1677 putback_lru_page(page); 1678 continue; 1679 } 1680 1681 if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) { 1682 nr_rotated += hpage_nr_pages(page); 1683 /* 1684 * Identify referenced, file-backed active pages and 1685 * give them one more trip around the active list. So 1686 * that executable code get better chances to stay in 1687 * memory under moderate memory pressure. Anon pages 1688 * are not likely to be evicted by use-once streaming 1689 * IO, plus JVM can create lots of anon VM_EXEC pages, 1690 * so we ignore them here. 1691 */ 1692 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) { 1693 list_add(&page->lru, &l_active); 1694 continue; 1695 } 1696 } 1697 1698 ClearPageActive(page); /* we are de-activating */ 1699 list_add(&page->lru, &l_inactive); 1700 } 1701 1702 /* 1703 * Move pages back to the lru list. 
1704 */ 1705 spin_lock_irq(&zone->lru_lock); 1706 /* 1707 * Count referenced pages from currently used mappings as rotated, 1708 * even though only some of them are actually re-activated. This 1709 * helps balance scan pressure between file and anonymous pages in 1710 * get_scan_ratio. 1711 */ 1712 reclaim_stat->recent_rotated[file] += nr_rotated; 1713 1714 move_active_pages_to_lru(zone, &l_active, 1715 LRU_ACTIVE + file * LRU_FILE); 1716 move_active_pages_to_lru(zone, &l_inactive, 1717 LRU_BASE + file * LRU_FILE); 1718 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); 1719 spin_unlock_irq(&zone->lru_lock); 1720 } 1721 1722 #ifdef CONFIG_SWAP 1723 static int inactive_anon_is_low_global(struct zone *zone) 1724 { 1725 unsigned long active, inactive; 1726 1727 active = zone_page_state(zone, NR_ACTIVE_ANON); 1728 inactive = zone_page_state(zone, NR_INACTIVE_ANON); 1729 1730 if (inactive * zone->inactive_ratio < active) 1731 return 1; 1732 1733 return 0; 1734 } 1735 1736 /** 1737 * inactive_anon_is_low - check if anonymous pages need to be deactivated 1738 * @zone: zone to check 1739 * @sc: scan control of this context 1740 * 1741 * Returns true if the zone does not have enough inactive anon pages, 1742 * meaning some active anon pages need to be deactivated. 1743 */ 1744 static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc) 1745 { 1746 int low; 1747 1748 /* 1749 * If we don't have swap space, anonymous page deactivation 1750 * is pointless. 1751 */ 1752 if (!total_swap_pages) 1753 return 0; 1754 1755 if (scanning_global_lru(sc)) 1756 low = inactive_anon_is_low_global(zone); 1757 else 1758 low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup, zone); 1759 return low; 1760 } 1761 #else 1762 static inline int inactive_anon_is_low(struct zone *zone, 1763 struct scan_control *sc) 1764 { 1765 return 0; 1766 } 1767 #endif 1768 1769 static int inactive_file_is_low_global(struct zone *zone) 1770 { 1771 unsigned long active, inactive; 1772 1773 active = zone_page_state(zone, NR_ACTIVE_FILE); 1774 inactive = zone_page_state(zone, NR_INACTIVE_FILE); 1775 1776 return (active > inactive); 1777 } 1778 1779 /** 1780 * inactive_file_is_low - check if file pages need to be deactivated 1781 * @zone: zone to check 1782 * @sc: scan control of this context 1783 * 1784 * When the system is doing streaming IO, memory pressure here 1785 * ensures that active file pages get deactivated, until more 1786 * than half of the file pages are on the inactive list. 1787 * 1788 * Once we get to that situation, protect the system's working 1789 * set from being evicted by disabling active file page aging. 1790 * 1791 * This uses a different ratio than the anonymous pages, because 1792 * the page cache uses a use-once replacement algorithm. 
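 *
 * As an illustration: a zone with 300MB of active file pages and 100MB of
 * inactive file pages reports "low" here, so shrink_list() keeps
 * deactivating; once the inactive list is at least as large as the active
 * one, the active file list is left alone.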
1793 */ 1794 static int inactive_file_is_low(struct zone *zone, struct scan_control *sc) 1795 { 1796 int low; 1797 1798 if (scanning_global_lru(sc)) 1799 low = inactive_file_is_low_global(zone); 1800 else 1801 low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup, zone); 1802 return low; 1803 } 1804 1805 static int inactive_list_is_low(struct zone *zone, struct scan_control *sc, 1806 int file) 1807 { 1808 if (file) 1809 return inactive_file_is_low(zone, sc); 1810 else 1811 return inactive_anon_is_low(zone, sc); 1812 } 1813 1814 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 1815 struct zone *zone, struct scan_control *sc, int priority) 1816 { 1817 int file = is_file_lru(lru); 1818 1819 if (is_active_lru(lru)) { 1820 if (inactive_list_is_low(zone, sc, file)) 1821 shrink_active_list(nr_to_scan, zone, sc, priority, file); 1822 return 0; 1823 } 1824 1825 return shrink_inactive_list(nr_to_scan, zone, sc, priority, file); 1826 } 1827 1828 static int vmscan_swappiness(struct scan_control *sc) 1829 { 1830 if (scanning_global_lru(sc)) 1831 return vm_swappiness; 1832 return mem_cgroup_swappiness(sc->mem_cgroup); 1833 } 1834 1835 /* 1836 * Determine how aggressively the anon and file LRU lists should be 1837 * scanned. The relative value of each set of LRU lists is determined 1838 * by looking at the fraction of the pages scanned we did rotate back 1839 * onto the active list instead of evict. 1840 * 1841 * nr[0] = anon pages to scan; nr[1] = file pages to scan 1842 */ 1843 static void get_scan_count(struct zone *zone, struct scan_control *sc, 1844 unsigned long *nr, int priority) 1845 { 1846 unsigned long anon, file, free; 1847 unsigned long anon_prio, file_prio; 1848 unsigned long ap, fp; 1849 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1850 u64 fraction[2], denominator; 1851 enum lru_list l; 1852 int noswap = 0; 1853 bool force_scan = false; 1854 1855 /* 1856 * If the zone or memcg is small, nr[l] can be 0. This 1857 * results in no scanning on this priority and a potential 1858 * priority drop. Global direct reclaim can go to the next 1859 * zone and tends to have no problems. Global kswapd is for 1860 * zone balancing and it needs to scan a minimum amount. When 1861 * reclaiming for a memcg, a priority drop can cause high 1862 * latencies, so it's better to scan a minimum amount there as 1863 * well. 1864 */ 1865 if (scanning_global_lru(sc) && current_is_kswapd()) 1866 force_scan = true; 1867 if (!scanning_global_lru(sc)) 1868 force_scan = true; 1869 1870 /* If we have no swap space, do not bother scanning anon pages. */ 1871 if (!sc->may_swap || (nr_swap_pages <= 0)) { 1872 noswap = 1; 1873 fraction[0] = 0; 1874 fraction[1] = 1; 1875 denominator = 1; 1876 goto out; 1877 } 1878 1879 anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + 1880 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); 1881 file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + 1882 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); 1883 1884 if (scanning_global_lru(sc)) { 1885 free = zone_page_state(zone, NR_FREE_PAGES); 1886 /* If we have very few page cache pages, 1887 force-scan anon pages. */ 1888 if (unlikely(file + free <= high_wmark_pages(zone))) { 1889 fraction[0] = 1; 1890 fraction[1] = 0; 1891 denominator = 1; 1892 goto out; 1893 } 1894 } 1895 1896 /* 1897 * With swappiness at 100, anonymous and file have the same priority. 1898 * This scanning priority is essentially the inverse of IO cost. 
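	 *
	 * For instance, with the default vm_swappiness of 60, anon_prio
	 * below becomes 60 and file_prio becomes 140, so, all else being
	 * equal, file pages are scanned with roughly 70% of the weight and
	 * anon pages with roughly 30%, before the recent_rotated feedback
	 * is applied.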
1899 */ 1900 anon_prio = vmscan_swappiness(sc); 1901 file_prio = 200 - vmscan_swappiness(sc); 1902 1903 /* 1904 * OK, so we have swap space and a fair amount of page cache 1905 * pages. We use the recently rotated / recently scanned 1906 * ratios to determine how valuable each cache is. 1907 * 1908 * Because workloads change over time (and to avoid overflow) 1909 * we keep these statistics as a floating average, which ends 1910 * up weighing recent references more than old ones. 1911 * 1912 * anon in [0], file in [1] 1913 */ 1914 spin_lock_irq(&zone->lru_lock); 1915 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { 1916 reclaim_stat->recent_scanned[0] /= 2; 1917 reclaim_stat->recent_rotated[0] /= 2; 1918 } 1919 1920 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) { 1921 reclaim_stat->recent_scanned[1] /= 2; 1922 reclaim_stat->recent_rotated[1] /= 2; 1923 } 1924 1925 /* 1926 * The amount of pressure on anon vs file pages is inversely 1927 * proportional to the fraction of recently scanned pages on 1928 * each list that were recently referenced and in active use. 1929 */ 1930 ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1); 1931 ap /= reclaim_stat->recent_rotated[0] + 1; 1932 1933 fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1); 1934 fp /= reclaim_stat->recent_rotated[1] + 1; 1935 spin_unlock_irq(&zone->lru_lock); 1936 1937 fraction[0] = ap; 1938 fraction[1] = fp; 1939 denominator = ap + fp + 1; 1940 out: 1941 for_each_evictable_lru(l) { 1942 int file = is_file_lru(l); 1943 unsigned long scan; 1944 1945 scan = zone_nr_lru_pages(zone, sc, l); 1946 if (priority || noswap) { 1947 scan >>= priority; 1948 if (!scan && force_scan) 1949 scan = SWAP_CLUSTER_MAX; 1950 scan = div64_u64(scan * fraction[file], denominator); 1951 } 1952 nr[l] = scan; 1953 } 1954 } 1955 1956 /* 1957 * Reclaim/compaction depends on a number of pages being freed. To avoid 1958 * disruption to the system, a small number of order-0 pages continue to be 1959 * rotated and reclaimed in the normal fashion. However, by the time we get 1960 * back to the allocator and call try_to_compact_zone(), we ensure that 1961 * there are enough free pages for it to be likely successful 1962 */ 1963 static inline bool should_continue_reclaim(struct zone *zone, 1964 unsigned long nr_reclaimed, 1965 unsigned long nr_scanned, 1966 struct scan_control *sc) 1967 { 1968 unsigned long pages_for_compaction; 1969 unsigned long inactive_lru_pages; 1970 1971 /* If not in reclaim/compaction mode, stop */ 1972 if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION)) 1973 return false; 1974 1975 /* Consider stopping depending on scan and reclaim activity */ 1976 if (sc->gfp_mask & __GFP_REPEAT) { 1977 /* 1978 * For __GFP_REPEAT allocations, stop reclaiming if the 1979 * full LRU list has been scanned and we are still failing 1980 * to reclaim pages. This full LRU scan is potentially 1981 * expensive but a __GFP_REPEAT caller really wants to succeed 1982 */ 1983 if (!nr_reclaimed && !nr_scanned) 1984 return false; 1985 } else { 1986 /* 1987 * For non-__GFP_REPEAT allocations which can presumably 1988 * fail without consequence, stop if we failed to reclaim 1989 * any pages from the last SWAP_CLUSTER_MAX number of 1990 * pages that were scanned. 
This will return to the 1991 * caller faster at the risk reclaim/compaction and 1992 * the resulting allocation attempt fails 1993 */ 1994 if (!nr_reclaimed) 1995 return false; 1996 } 1997 1998 /* 1999 * If we have not reclaimed enough pages for compaction and the 2000 * inactive lists are large enough, continue reclaiming 2001 */ 2002 pages_for_compaction = (2UL << sc->order); 2003 inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); 2004 if (nr_swap_pages > 0) 2005 inactive_lru_pages += zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); 2006 if (sc->nr_reclaimed < pages_for_compaction && 2007 inactive_lru_pages > pages_for_compaction) 2008 return true; 2009 2010 /* If compaction would go ahead or the allocation would succeed, stop */ 2011 switch (compaction_suitable(zone, sc->order)) { 2012 case COMPACT_PARTIAL: 2013 case COMPACT_CONTINUE: 2014 return false; 2015 default: 2016 return true; 2017 } 2018 } 2019 2020 /* 2021 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 2022 */ 2023 static void shrink_zone(int priority, struct zone *zone, 2024 struct scan_control *sc) 2025 { 2026 unsigned long nr[NR_LRU_LISTS]; 2027 unsigned long nr_to_scan; 2028 enum lru_list l; 2029 unsigned long nr_reclaimed, nr_scanned; 2030 unsigned long nr_to_reclaim = sc->nr_to_reclaim; 2031 struct blk_plug plug; 2032 2033 restart: 2034 nr_reclaimed = 0; 2035 nr_scanned = sc->nr_scanned; 2036 get_scan_count(zone, sc, nr, priority); 2037 2038 blk_start_plug(&plug); 2039 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 2040 nr[LRU_INACTIVE_FILE]) { 2041 for_each_evictable_lru(l) { 2042 if (nr[l]) { 2043 nr_to_scan = min_t(unsigned long, 2044 nr[l], SWAP_CLUSTER_MAX); 2045 nr[l] -= nr_to_scan; 2046 2047 nr_reclaimed += shrink_list(l, nr_to_scan, 2048 zone, sc, priority); 2049 } 2050 } 2051 /* 2052 * On large memory systems, scan >> priority can become 2053 * really large. This is fine for the starting priority; 2054 * we want to put equal scanning pressure on each zone. 2055 * However, if the VM has a harder time of freeing pages, 2056 * with multiple processes reclaiming pages, the total 2057 * freeing target can get unreasonably large. 2058 */ 2059 if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY) 2060 break; 2061 } 2062 blk_finish_plug(&plug); 2063 sc->nr_reclaimed += nr_reclaimed; 2064 2065 /* 2066 * Even if we did not try to evict anon pages at all, we want to 2067 * rebalance the anon lru active/inactive ratio. 2068 */ 2069 if (inactive_anon_is_low(zone, sc)) 2070 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0); 2071 2072 /* reclaim/compaction might need reclaim to continue */ 2073 if (should_continue_reclaim(zone, nr_reclaimed, 2074 sc->nr_scanned - nr_scanned, sc)) 2075 goto restart; 2076 2077 throttle_vm_writeout(sc->gfp_mask); 2078 } 2079 2080 /* 2081 * This is the direct reclaim path, for page-allocating processes. We only 2082 * try to reclaim pages from zones which will satisfy the caller's allocation 2083 * request. 2084 * 2085 * We reclaim from a zone even if that zone is over high_wmark_pages(zone). 2086 * Because: 2087 * a) The caller may be trying to free *extra* pages to satisfy a higher-order 2088 * allocation or 2089 * b) The target zone may be at high_wmark_pages(zone) but the lower zones 2090 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min' 2091 * zone defense algorithm. 2092 * 2093 * If a zone is deemed to be full of pinned pages then just give it a light 2094 * scan then give up on it. 
2095 * 2096 * This function returns true if a zone is being reclaimed for a costly 2097 * high-order allocation and compaction is either ready to begin or deferred. 2098 * This indicates to the caller that it should retry the allocation or fail. 2099 */ 2100 static bool shrink_zones(int priority, struct zonelist *zonelist, 2101 struct scan_control *sc) 2102 { 2103 struct zoneref *z; 2104 struct zone *zone; 2105 unsigned long nr_soft_reclaimed; 2106 unsigned long nr_soft_scanned; 2107 bool should_abort_reclaim = false; 2108 2109 for_each_zone_zonelist_nodemask(zone, z, zonelist, 2110 gfp_zone(sc->gfp_mask), sc->nodemask) { 2111 if (!populated_zone(zone)) 2112 continue; 2113 /* 2114 * Take care that memory controller reclaim has only a small 2115 * influence on the global LRU. 2116 */ 2117 if (scanning_global_lru(sc)) { 2118 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2119 continue; 2120 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2121 continue; /* Let kswapd poll it */ 2122 if (COMPACTION_BUILD) { 2123 /* 2124 * If we already have plenty of memory free for 2125 * compaction in this zone, don't free any more. 2126 * Even though compaction is invoked for any 2127 * non-zero order, only frequent costly order 2128 * reclamation is disruptive enough to become a 2129 * noticeable problem, like transparent huge page 2130 * allocations. 2131 */ 2132 if (sc->order > PAGE_ALLOC_COSTLY_ORDER && 2133 (compaction_suitable(zone, sc->order) || 2134 compaction_deferred(zone))) { 2135 should_abort_reclaim = true; 2136 continue; 2137 } 2138 } 2139 /* 2140 * This steals pages from memory cgroups over softlimit 2141 * and returns the number of reclaimed pages and 2142 * scanned pages. This works for global memory pressure 2143 * and balancing, not for a memcg's limit. 2144 */ 2145 nr_soft_scanned = 0; 2146 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, 2147 sc->order, sc->gfp_mask, 2148 &nr_soft_scanned); 2149 sc->nr_reclaimed += nr_soft_reclaimed; 2150 sc->nr_scanned += nr_soft_scanned; 2151 /* need some check here to avoid a redundant shrink_zone() */ 2152 } 2153 2154 shrink_zone(priority, zone, sc); 2155 } 2156 2157 return should_abort_reclaim; 2158 } 2159 2160 static bool zone_reclaimable(struct zone *zone) 2161 { 2162 return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; 2163 } 2164 2165 /* All zones in zonelist are unreclaimable? */ 2166 static bool all_unreclaimable(struct zonelist *zonelist, 2167 struct scan_control *sc) 2168 { 2169 struct zoneref *z; 2170 struct zone *zone; 2171 2172 for_each_zone_zonelist_nodemask(zone, z, zonelist, 2173 gfp_zone(sc->gfp_mask), sc->nodemask) { 2174 if (!populated_zone(zone)) 2175 continue; 2176 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2177 continue; 2178 if (!zone->all_unreclaimable) 2179 return false; 2180 } 2181 2182 return true; 2183 } 2184 2185 /* 2186 * This is the main entry point to direct page reclaim. 2187 * 2188 * If a full scan of the inactive list fails to free enough memory then we 2189 * are "out of memory" and something needs to be killed. 2190 * 2191 * If the caller is !__GFP_FS then the probability of a failure is reasonably 2192 * high - the zone may be full of dirty or under-writeback pages, which this 2193 * caller can't do much about. We kick the writeback threads and take explicit 2194 * naps in the hope that some of these pages can be written. But if the 2195 * allocating task holds filesystem locks which prevent writeout this might not 2196 * work, and the allocation attempt will fail.
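 *
 * Note: the loop below walks priority from DEF_PRIORITY down to 0 and
 * get_scan_count() bases each pass on roughly lru_size >> priority
 * pages, so the amount of scanning roughly doubles each time a pass
 * fails to reclaim sc->nr_to_reclaim pages.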
2197 * 2198 * returns: 0, if no pages reclaimed 2199 * else, the number of pages reclaimed 2200 */ 2201 static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 2202 struct scan_control *sc, 2203 struct shrink_control *shrink) 2204 { 2205 int priority; 2206 unsigned long total_scanned = 0; 2207 struct reclaim_state *reclaim_state = current->reclaim_state; 2208 struct zoneref *z; 2209 struct zone *zone; 2210 unsigned long writeback_threshold; 2211 2212 get_mems_allowed(); 2213 delayacct_freepages_start(); 2214 2215 if (scanning_global_lru(sc)) 2216 count_vm_event(ALLOCSTALL); 2217 2218 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 2219 sc->nr_scanned = 0; 2220 if (!priority) 2221 disable_swap_token(sc->mem_cgroup); 2222 if (shrink_zones(priority, zonelist, sc)) 2223 break; 2224 2225 /* 2226 * Don't shrink slabs when reclaiming memory from 2227 * over limit cgroups 2228 */ 2229 if (scanning_global_lru(sc)) { 2230 unsigned long lru_pages = 0; 2231 for_each_zone_zonelist(zone, z, zonelist, 2232 gfp_zone(sc->gfp_mask)) { 2233 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2234 continue; 2235 2236 lru_pages += zone_reclaimable_pages(zone); 2237 } 2238 2239 shrink_slab(shrink, sc->nr_scanned, lru_pages); 2240 if (reclaim_state) { 2241 sc->nr_reclaimed += reclaim_state->reclaimed_slab; 2242 reclaim_state->reclaimed_slab = 0; 2243 } 2244 } 2245 total_scanned += sc->nr_scanned; 2246 if (sc->nr_reclaimed >= sc->nr_to_reclaim) 2247 goto out; 2248 2249 /* 2250 * Try to write back as many pages as we just scanned. This 2251 * tends to cause slow streaming writers to write data to the 2252 * disk smoothly, at the dirtying rate, which is nice. But 2253 * that's undesirable in laptop mode, where we *want* lumpy 2254 * writeout. So in laptop mode, write out the whole world. 2255 */ 2256 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2; 2257 if (total_scanned > writeback_threshold) { 2258 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned, 2259 WB_REASON_TRY_TO_FREE_PAGES); 2260 sc->may_writepage = 1; 2261 } 2262 2263 /* Take a nap, wait for some writeback to complete */ 2264 if (!sc->hibernation_mode && sc->nr_scanned && 2265 priority < DEF_PRIORITY - 2) { 2266 struct zone *preferred_zone; 2267 2268 first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask), 2269 &cpuset_current_mems_allowed, 2270 &preferred_zone); 2271 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10); 2272 } 2273 } 2274 2275 out: 2276 delayacct_freepages_end(); 2277 put_mems_allowed(); 2278 2279 if (sc->nr_reclaimed) 2280 return sc->nr_reclaimed; 2281 2282 /* 2283 * As hibernation is going on, kswapd is freezed so that it can't mark 2284 * the zone into all_unreclaimable. Thus bypassing all_unreclaimable 2285 * check. 2286 */ 2287 if (oom_killer_disabled) 2288 return 0; 2289 2290 /* top priority shrink_zones still had more to do? 
don't OOM, then */ 2291 if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc)) 2292 return 1; 2293 2294 return 0; 2295 } 2296 2297 unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 2298 gfp_t gfp_mask, nodemask_t *nodemask) 2299 { 2300 unsigned long nr_reclaimed; 2301 struct scan_control sc = { 2302 .gfp_mask = gfp_mask, 2303 .may_writepage = !laptop_mode, 2304 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2305 .may_unmap = 1, 2306 .may_swap = 1, 2307 .order = order, 2308 .mem_cgroup = NULL, 2309 .nodemask = nodemask, 2310 }; 2311 struct shrink_control shrink = { 2312 .gfp_mask = sc.gfp_mask, 2313 }; 2314 2315 trace_mm_vmscan_direct_reclaim_begin(order, 2316 sc.may_writepage, 2317 gfp_mask); 2318 2319 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 2320 2321 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); 2322 2323 return nr_reclaimed; 2324 } 2325 2326 #ifdef CONFIG_CGROUP_MEM_RES_CTLR 2327 2328 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 2329 gfp_t gfp_mask, bool noswap, 2330 struct zone *zone, 2331 unsigned long *nr_scanned) 2332 { 2333 struct scan_control sc = { 2334 .nr_scanned = 0, 2335 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2336 .may_writepage = !laptop_mode, 2337 .may_unmap = 1, 2338 .may_swap = !noswap, 2339 .order = 0, 2340 .mem_cgroup = mem, 2341 }; 2342 2343 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2344 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 2345 2346 trace_mm_vmscan_memcg_softlimit_reclaim_begin(0, 2347 sc.may_writepage, 2348 sc.gfp_mask); 2349 2350 /* 2351 * NOTE: Although we can get the priority field, using it 2352 * here is not a good idea, since it limits the pages we can scan. 2353 * if we don't reclaim here, the shrink_zone from balance_pgdat 2354 * will pick up pages from other mem cgroup's as well. We hack 2355 * the priority and make it zero. 2356 */ 2357 shrink_zone(0, zone, &sc); 2358 2359 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 2360 2361 *nr_scanned = sc.nr_scanned; 2362 return sc.nr_reclaimed; 2363 } 2364 2365 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, 2366 gfp_t gfp_mask, 2367 bool noswap) 2368 { 2369 struct zonelist *zonelist; 2370 unsigned long nr_reclaimed; 2371 int nid; 2372 struct scan_control sc = { 2373 .may_writepage = !laptop_mode, 2374 .may_unmap = 1, 2375 .may_swap = !noswap, 2376 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2377 .order = 0, 2378 .mem_cgroup = mem_cont, 2379 .nodemask = NULL, /* we don't care the placement */ 2380 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2381 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), 2382 }; 2383 struct shrink_control shrink = { 2384 .gfp_mask = sc.gfp_mask, 2385 }; 2386 2387 /* 2388 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't 2389 * take care of from where we get pages. So the node where we start the 2390 * scan does not need to be the current node. 2391 */ 2392 nid = mem_cgroup_select_victim_node(mem_cont); 2393 2394 zonelist = NODE_DATA(nid)->node_zonelists; 2395 2396 trace_mm_vmscan_memcg_reclaim_begin(0, 2397 sc.may_writepage, 2398 sc.gfp_mask); 2399 2400 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 2401 2402 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 2403 2404 return nr_reclaimed; 2405 } 2406 #endif 2407 2408 /* 2409 * pgdat_balanced is used when checking if a node is balanced for high-order 2410 * allocations. Only zones that meet watermarks and are in a zone allowed 2411 * by the callers classzone_idx are added to balanced_pages. 
The total of 2412 * balanced pages must be at least 25% of the zones allowed by classzone_idx 2413 * for the node to be considered balanced. Forcing all zones to be balanced 2414 * for high orders can cause excessive reclaim when there are imbalanced zones. 2415 * The choice of 25% is due to 2416 * o a 16M DMA zone that is balanced will not balance a zone on any 2417 * reasonable sized machine 2418 * o On all other machines, the top zone must be at least a reasonable 2419 * percentage of the middle zones. For example, on 32-bit x86, highmem 2420 * would need to be at least 256M for it to be balance a whole node. 2421 * Similarly, on x86-64 the Normal zone would need to be at least 1G 2422 * to balance a node on its own. These seemed like reasonable ratios. 2423 */ 2424 static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages, 2425 int classzone_idx) 2426 { 2427 unsigned long present_pages = 0; 2428 int i; 2429 2430 for (i = 0; i <= classzone_idx; i++) 2431 present_pages += pgdat->node_zones[i].present_pages; 2432 2433 /* A special case here: if zone has no page, we think it's balanced */ 2434 return balanced_pages >= (present_pages >> 2); 2435 } 2436 2437 /* is kswapd sleeping prematurely? */ 2438 static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining, 2439 int classzone_idx) 2440 { 2441 int i; 2442 unsigned long balanced = 0; 2443 bool all_zones_ok = true; 2444 2445 /* If a direct reclaimer woke kswapd within HZ/10, it's premature */ 2446 if (remaining) 2447 return true; 2448 2449 /* Check the watermark levels */ 2450 for (i = 0; i <= classzone_idx; i++) { 2451 struct zone *zone = pgdat->node_zones + i; 2452 2453 if (!populated_zone(zone)) 2454 continue; 2455 2456 /* 2457 * balance_pgdat() skips over all_unreclaimable after 2458 * DEF_PRIORITY. Effectively, it considers them balanced so 2459 * they must be considered balanced here as well if kswapd 2460 * is to sleep 2461 */ 2462 if (zone->all_unreclaimable) { 2463 balanced += zone->present_pages; 2464 continue; 2465 } 2466 2467 if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone), 2468 i, 0)) 2469 all_zones_ok = false; 2470 else 2471 balanced += zone->present_pages; 2472 } 2473 2474 /* 2475 * For high-order requests, the balanced zones must contain at least 2476 * 25% of the nodes pages for kswapd to sleep. For order-0, all zones 2477 * must be balanced 2478 */ 2479 if (order) 2480 return !pgdat_balanced(pgdat, balanced, classzone_idx); 2481 else 2482 return !all_zones_ok; 2483 } 2484 2485 /* 2486 * For kswapd, balance_pgdat() will work across all this node's zones until 2487 * they are all at high_wmark_pages(zone). 2488 * 2489 * Returns the final order kswapd was reclaiming at 2490 * 2491 * There is special handling here for zones which are full of pinned pages. 2492 * This can happen if the pages are all mlocked, or if they are all used by 2493 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb. 2494 * What we do is to detect the case where all pages in the zone have been 2495 * scanned twice and there has been zero successful reclaim. Mark the zone as 2496 * dead and from now on, only perform a short scan. Basically we're polling 2497 * the zone for when the problem goes away. 2498 * 2499 * kswapd scans the zones in the highmem->normal->dma direction. 
It skips 2500 * zones which have free_pages > high_wmark_pages(zone), but once a zone is 2501 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the 2502 * lower zones regardless of the number of free pages in the lower zones. This 2503 * interoperates with the page allocator fallback scheme to ensure that aging 2504 * of pages is balanced across the zones. 2505 */ 2506 static unsigned long balance_pgdat(pg_data_t *pgdat, int order, 2507 int *classzone_idx) 2508 { 2509 int all_zones_ok; 2510 unsigned long balanced; 2511 int priority; 2512 int i; 2513 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 2514 unsigned long total_scanned; 2515 struct reclaim_state *reclaim_state = current->reclaim_state; 2516 unsigned long nr_soft_reclaimed; 2517 unsigned long nr_soft_scanned; 2518 struct scan_control sc = { 2519 .gfp_mask = GFP_KERNEL, 2520 .may_unmap = 1, 2521 .may_swap = 1, 2522 /* 2523 * kswapd doesn't want to be bailed out while reclaim. because 2524 * we want to put equal scanning pressure on each zone. 2525 */ 2526 .nr_to_reclaim = ULONG_MAX, 2527 .order = order, 2528 .mem_cgroup = NULL, 2529 }; 2530 struct shrink_control shrink = { 2531 .gfp_mask = sc.gfp_mask, 2532 }; 2533 loop_again: 2534 total_scanned = 0; 2535 sc.nr_reclaimed = 0; 2536 sc.may_writepage = !laptop_mode; 2537 count_vm_event(PAGEOUTRUN); 2538 2539 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 2540 unsigned long lru_pages = 0; 2541 int has_under_min_watermark_zone = 0; 2542 2543 /* The swap token gets in the way of swapout... */ 2544 if (!priority) 2545 disable_swap_token(NULL); 2546 2547 all_zones_ok = 1; 2548 balanced = 0; 2549 2550 /* 2551 * Scan in the highmem->dma direction for the highest 2552 * zone which needs scanning 2553 */ 2554 for (i = pgdat->nr_zones - 1; i >= 0; i--) { 2555 struct zone *zone = pgdat->node_zones + i; 2556 2557 if (!populated_zone(zone)) 2558 continue; 2559 2560 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2561 continue; 2562 2563 /* 2564 * Do some background aging of the anon list, to give 2565 * pages a chance to be referenced before reclaiming. 2566 */ 2567 if (inactive_anon_is_low(zone, &sc)) 2568 shrink_active_list(SWAP_CLUSTER_MAX, zone, 2569 &sc, priority, 0); 2570 2571 if (!zone_watermark_ok_safe(zone, order, 2572 high_wmark_pages(zone), 0, 0)) { 2573 end_zone = i; 2574 break; 2575 } else { 2576 /* If balanced, clear the congested flag */ 2577 zone_clear_flag(zone, ZONE_CONGESTED); 2578 } 2579 } 2580 if (i < 0) 2581 goto out; 2582 2583 for (i = 0; i <= end_zone; i++) { 2584 struct zone *zone = pgdat->node_zones + i; 2585 2586 lru_pages += zone_reclaimable_pages(zone); 2587 } 2588 2589 /* 2590 * Now scan the zone in the dma->highmem direction, stopping 2591 * at the last zone which needs scanning. 2592 * 2593 * We do this because the page allocator works in the opposite 2594 * direction. This prevents the page allocator from allocating 2595 * pages behind kswapd's direction of progress, which would 2596 * cause too much scanning of the lower zones. 2597 */ 2598 for (i = 0; i <= end_zone; i++) { 2599 struct zone *zone = pgdat->node_zones + i; 2600 int nr_slab; 2601 unsigned long balance_gap; 2602 2603 if (!populated_zone(zone)) 2604 continue; 2605 2606 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2607 continue; 2608 2609 sc.nr_scanned = 0; 2610 2611 nr_soft_scanned = 0; 2612 /* 2613 * Call soft limit reclaim before calling shrink_zone. 
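 * Soft limit reclaim steals pages from memory cgroups that are over
 * their soft limit, so global memory pressure is directed at those
 * groups before the plain per-zone LRU scan below.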
2614 */ 2615 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, 2616 order, sc.gfp_mask, 2617 &nr_soft_scanned); 2618 sc.nr_reclaimed += nr_soft_reclaimed; 2619 total_scanned += nr_soft_scanned; 2620 2621 /* 2622 * We put equal pressure on every zone, unless 2623 * one zone has way too many pages free 2624 * already. The "too many pages" is defined 2625 * as the high wmark plus a "gap" where the 2626 * gap is either the low watermark or 1% 2627 * of the zone, whichever is smaller. 2628 */ 2629 balance_gap = min(low_wmark_pages(zone), 2630 (zone->present_pages + 2631 KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / 2632 KSWAPD_ZONE_BALANCE_GAP_RATIO); 2633 if (!zone_watermark_ok_safe(zone, order, 2634 high_wmark_pages(zone) + balance_gap, 2635 end_zone, 0)) { 2636 shrink_zone(priority, zone, &sc); 2637 2638 reclaim_state->reclaimed_slab = 0; 2639 nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages); 2640 sc.nr_reclaimed += reclaim_state->reclaimed_slab; 2641 total_scanned += sc.nr_scanned; 2642 2643 if (nr_slab == 0 && !zone_reclaimable(zone)) 2644 zone->all_unreclaimable = 1; 2645 } 2646 2647 /* 2648 * If we've done a decent amount of scanning and 2649 * the reclaim ratio is low, start doing writepage 2650 * even in laptop mode 2651 */ 2652 if (total_scanned > SWAP_CLUSTER_MAX * 2 && 2653 total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2) 2654 sc.may_writepage = 1; 2655 2656 if (zone->all_unreclaimable) { 2657 if (end_zone && end_zone == i) 2658 end_zone--; 2659 continue; 2660 } 2661 2662 if (!zone_watermark_ok_safe(zone, order, 2663 high_wmark_pages(zone), end_zone, 0)) { 2664 all_zones_ok = 0; 2665 /* 2666 * We are still under min water mark. This 2667 * means that we have a GFP_ATOMIC allocation 2668 * failure risk. Hurry up! 2669 */ 2670 if (!zone_watermark_ok_safe(zone, order, 2671 min_wmark_pages(zone), end_zone, 0)) 2672 has_under_min_watermark_zone = 1; 2673 } else { 2674 /* 2675 * If a zone reaches its high watermark, 2676 * consider it to be no longer congested. It's 2677 * possible there are dirty pages backed by 2678 * congested BDIs but as pressure is relieved, 2679 * spectulatively avoid congestion waits 2680 */ 2681 zone_clear_flag(zone, ZONE_CONGESTED); 2682 if (i <= *classzone_idx) 2683 balanced += zone->present_pages; 2684 } 2685 2686 } 2687 if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx))) 2688 break; /* kswapd: all done */ 2689 /* 2690 * OK, kswapd is getting into trouble. Take a nap, then take 2691 * another pass across the zones. 2692 */ 2693 if (total_scanned && (priority < DEF_PRIORITY - 2)) { 2694 if (has_under_min_watermark_zone) 2695 count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT); 2696 else 2697 congestion_wait(BLK_RW_ASYNC, HZ/10); 2698 } 2699 2700 /* 2701 * We do this so kswapd doesn't build up large priorities for 2702 * example when it is freeing in parallel with allocators. It 2703 * matches the direct reclaim path behaviour in terms of impact 2704 * on zone->*_priority. 2705 */ 2706 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX) 2707 break; 2708 } 2709 out: 2710 2711 /* 2712 * order-0: All zones must meet high watermark for a balanced node 2713 * high-order: Balanced zones must make up at least 25% of the node 2714 * for the node to be balanced 2715 */ 2716 if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) { 2717 cond_resched(); 2718 2719 try_to_freeze(); 2720 2721 /* 2722 * Fragmentation may mean that the system cannot be 2723 * rebalanced for high-order allocations in all zones. 
2724 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX, 2725 * it means the zones have been fully scanned and are still 2726 * not balanced. For high-order allocations, there is 2727 * little point trying all over again as kswapd may 2728 * infinite loop. 2729 * 2730 * Instead, recheck all watermarks at order-0 as they 2731 * are the most important. If watermarks are ok, kswapd will go 2732 * back to sleep. High-order users can still perform direct 2733 * reclaim if they wish. 2734 */ 2735 if (sc.nr_reclaimed < SWAP_CLUSTER_MAX) 2736 order = sc.order = 0; 2737 2738 goto loop_again; 2739 } 2740 2741 /* 2742 * If kswapd was reclaiming at a higher order, it has the option of 2743 * sleeping without all zones being balanced. Before it does, it must 2744 * ensure that the watermarks for order-0 on *all* zones are met and 2745 * that the congestion flags are cleared. The congestion flag must 2746 * be cleared as kswapd is the only mechanism that clears the flag 2747 * and it is potentially going to sleep here. 2748 */ 2749 if (order) { 2750 for (i = 0; i <= end_zone; i++) { 2751 struct zone *zone = pgdat->node_zones + i; 2752 2753 if (!populated_zone(zone)) 2754 continue; 2755 2756 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2757 continue; 2758 2759 /* Confirm the zone is balanced for order-0 */ 2760 if (!zone_watermark_ok(zone, 0, 2761 high_wmark_pages(zone), 0, 0)) { 2762 order = sc.order = 0; 2763 goto loop_again; 2764 } 2765 2766 /* If balanced, clear the congested flag */ 2767 zone_clear_flag(zone, ZONE_CONGESTED); 2768 if (i <= *classzone_idx) 2769 balanced += zone->present_pages; 2770 } 2771 } 2772 2773 /* 2774 * Return the order we were reclaiming at so sleeping_prematurely() 2775 * makes a decision on the order we were last reclaiming at. However, 2776 * if another caller entered the allocator slow path while kswapd 2777 * was awake, order will remain at the higher level 2778 */ 2779 *classzone_idx = end_zone; 2780 return order; 2781 } 2782 2783 static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx) 2784 { 2785 long remaining = 0; 2786 DEFINE_WAIT(wait); 2787 2788 if (freezing(current) || kthread_should_stop()) 2789 return; 2790 2791 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 2792 2793 /* Try to sleep for a short interval */ 2794 if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) { 2795 remaining = schedule_timeout(HZ/10); 2796 finish_wait(&pgdat->kswapd_wait, &wait); 2797 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 2798 } 2799 2800 /* 2801 * After a short sleep, check if it was a premature sleep. If not, then 2802 * go fully to sleep until explicitly woken up. 2803 */ 2804 if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) { 2805 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); 2806 2807 /* 2808 * vmstat counters are not perfectly accurate and the estimated 2809 * value for counters such as NR_FREE_PAGES can deviate from the 2810 * true value by nr_online_cpus * threshold. To avoid the zone 2811 * watermarks being breached while under pressure, we reduce the 2812 * per-cpu vmstat threshold while kswapd is awake and restore 2813 * them before going back to sleep. 
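 *
 * Illustrative example: with 16 online CPUs and a per-cpu drift
 * threshold of 32 pages, the NR_FREE_PAGES estimate can be off by up
 * to 16 * 32 = 512 pages (2MB with 4KB pages), hence the tighter
 * threshold while kswapd is awake.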
2814 */ 2815 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 2816 schedule(); 2817 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); 2818 } else { 2819 if (remaining) 2820 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); 2821 else 2822 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); 2823 } 2824 finish_wait(&pgdat->kswapd_wait, &wait); 2825 } 2826 2827 /* 2828 * The background pageout daemon, started as a kernel thread 2829 * from the init process. 2830 * 2831 * This basically trickles out pages so that we have _some_ 2832 * free memory available even if there is no other activity 2833 * that frees anything up. This is needed for things like routing 2834 * etc, where we otherwise might have all activity going on in 2835 * asynchronous contexts that cannot page things out. 2836 * 2837 * If there are applications that are active memory-allocators 2838 * (most normal use), this basically shouldn't matter. 2839 */ 2840 static int kswapd(void *p) 2841 { 2842 unsigned long order, new_order; 2843 unsigned balanced_order; 2844 int classzone_idx, new_classzone_idx; 2845 int balanced_classzone_idx; 2846 pg_data_t *pgdat = (pg_data_t*)p; 2847 struct task_struct *tsk = current; 2848 2849 struct reclaim_state reclaim_state = { 2850 .reclaimed_slab = 0, 2851 }; 2852 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2853 2854 lockdep_set_current_reclaim_state(GFP_KERNEL); 2855 2856 if (!cpumask_empty(cpumask)) 2857 set_cpus_allowed_ptr(tsk, cpumask); 2858 current->reclaim_state = &reclaim_state; 2859 2860 /* 2861 * Tell the memory management that we're a "memory allocator", 2862 * and that if we need more memory we should get access to it 2863 * regardless (see "__alloc_pages()"). "kswapd" should 2864 * never get caught in the normal page freeing logic. 2865 * 2866 * (Kswapd normally doesn't need memory anyway, but sometimes 2867 * you need a small amount of memory in order to be able to 2868 * page out something else, and this flag essentially protects 2869 * us from recursively trying to free more memory as we're 2870 * trying to free the first piece of memory in the first place). 
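 *
 * (Assumed background, for reference: PF_MEMALLOC lets kswapd dip into
 * the emergency reserves and keeps the allocator from recursing back
 * into reclaim on kswapd's behalf, PF_SWAPWRITE allows writing pages
 * to swap, and PF_KSWAPD identifies the task as kswapd to the rest of
 * the VM.)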
2871 */ 2872 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; 2873 set_freezable(); 2874 2875 order = new_order = 0; 2876 balanced_order = 0; 2877 classzone_idx = new_classzone_idx = pgdat->nr_zones - 1; 2878 balanced_classzone_idx = classzone_idx; 2879 for ( ; ; ) { 2880 int ret; 2881 2882 /* 2883 * If the last balance_pgdat was unsuccessful it's unlikely a 2884 * new request of a similar or harder type will succeed soon 2885 * so consider going to sleep on the basis we reclaimed at 2886 */ 2887 if (balanced_classzone_idx >= new_classzone_idx && 2888 balanced_order == new_order) { 2889 new_order = pgdat->kswapd_max_order; 2890 new_classzone_idx = pgdat->classzone_idx; 2891 pgdat->kswapd_max_order = 0; 2892 pgdat->classzone_idx = pgdat->nr_zones - 1; 2893 } 2894 2895 if (order < new_order || classzone_idx > new_classzone_idx) { 2896 /* 2897 * Don't sleep if someone wants a larger 'order' 2898 * allocation or has tigher zone constraints 2899 */ 2900 order = new_order; 2901 classzone_idx = new_classzone_idx; 2902 } else { 2903 kswapd_try_to_sleep(pgdat, balanced_order, 2904 balanced_classzone_idx); 2905 order = pgdat->kswapd_max_order; 2906 classzone_idx = pgdat->classzone_idx; 2907 new_order = order; 2908 new_classzone_idx = classzone_idx; 2909 pgdat->kswapd_max_order = 0; 2910 pgdat->classzone_idx = pgdat->nr_zones - 1; 2911 } 2912 2913 ret = try_to_freeze(); 2914 if (kthread_should_stop()) 2915 break; 2916 2917 /* 2918 * We can speed up thawing tasks if we don't call balance_pgdat 2919 * after returning from the refrigerator 2920 */ 2921 if (!ret) { 2922 trace_mm_vmscan_kswapd_wake(pgdat->node_id, order); 2923 balanced_classzone_idx = classzone_idx; 2924 balanced_order = balance_pgdat(pgdat, order, 2925 &balanced_classzone_idx); 2926 } 2927 } 2928 return 0; 2929 } 2930 2931 /* 2932 * A zone is low on free memory, so wake its kswapd task to service it. 2933 */ 2934 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) 2935 { 2936 pg_data_t *pgdat; 2937 2938 if (!populated_zone(zone)) 2939 return; 2940 2941 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2942 return; 2943 pgdat = zone->zone_pgdat; 2944 if (pgdat->kswapd_max_order < order) { 2945 pgdat->kswapd_max_order = order; 2946 pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx); 2947 } 2948 if (!waitqueue_active(&pgdat->kswapd_wait)) 2949 return; 2950 if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0)) 2951 return; 2952 2953 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order); 2954 wake_up_interruptible(&pgdat->kswapd_wait); 2955 } 2956 2957 /* 2958 * The reclaimable count would be mostly accurate. 
2959 * The less reclaimable pages may be 2960 * - mlocked pages, which will be moved to unevictable list when encountered 2961 * - mapped pages, which may require several travels to be reclaimed 2962 * - dirty pages, which is not "instantly" reclaimable 2963 */ 2964 unsigned long global_reclaimable_pages(void) 2965 { 2966 int nr; 2967 2968 nr = global_page_state(NR_ACTIVE_FILE) + 2969 global_page_state(NR_INACTIVE_FILE); 2970 2971 if (nr_swap_pages > 0) 2972 nr += global_page_state(NR_ACTIVE_ANON) + 2973 global_page_state(NR_INACTIVE_ANON); 2974 2975 return nr; 2976 } 2977 2978 unsigned long zone_reclaimable_pages(struct zone *zone) 2979 { 2980 int nr; 2981 2982 nr = zone_page_state(zone, NR_ACTIVE_FILE) + 2983 zone_page_state(zone, NR_INACTIVE_FILE); 2984 2985 if (nr_swap_pages > 0) 2986 nr += zone_page_state(zone, NR_ACTIVE_ANON) + 2987 zone_page_state(zone, NR_INACTIVE_ANON); 2988 2989 return nr; 2990 } 2991 2992 #ifdef CONFIG_HIBERNATION 2993 /* 2994 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of 2995 * freed pages. 2996 * 2997 * Rather than trying to age LRUs the aim is to preserve the overall 2998 * LRU order by reclaiming preferentially 2999 * inactive > active > active referenced > active mapped 3000 */ 3001 unsigned long shrink_all_memory(unsigned long nr_to_reclaim) 3002 { 3003 struct reclaim_state reclaim_state; 3004 struct scan_control sc = { 3005 .gfp_mask = GFP_HIGHUSER_MOVABLE, 3006 .may_swap = 1, 3007 .may_unmap = 1, 3008 .may_writepage = 1, 3009 .nr_to_reclaim = nr_to_reclaim, 3010 .hibernation_mode = 1, 3011 .order = 0, 3012 }; 3013 struct shrink_control shrink = { 3014 .gfp_mask = sc.gfp_mask, 3015 }; 3016 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 3017 struct task_struct *p = current; 3018 unsigned long nr_reclaimed; 3019 3020 p->flags |= PF_MEMALLOC; 3021 lockdep_set_current_reclaim_state(sc.gfp_mask); 3022 reclaim_state.reclaimed_slab = 0; 3023 p->reclaim_state = &reclaim_state; 3024 3025 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 3026 3027 p->reclaim_state = NULL; 3028 lockdep_clear_current_reclaim_state(); 3029 p->flags &= ~PF_MEMALLOC; 3030 3031 return nr_reclaimed; 3032 } 3033 #endif /* CONFIG_HIBERNATION */ 3034 3035 /* It's optimal to keep kswapds on the same CPUs as their memory, but 3036 not required for correctness. So if the last cpu in a node goes 3037 away, we get changed to run anywhere: as the first one comes back, 3038 restore their cpu bindings. */ 3039 static int __devinit cpu_callback(struct notifier_block *nfb, 3040 unsigned long action, void *hcpu) 3041 { 3042 int nid; 3043 3044 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) { 3045 for_each_node_state(nid, N_HIGH_MEMORY) { 3046 pg_data_t *pgdat = NODE_DATA(nid); 3047 const struct cpumask *mask; 3048 3049 mask = cpumask_of_node(pgdat->node_id); 3050 3051 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 3052 /* One of our CPUs online: restore mask */ 3053 set_cpus_allowed_ptr(pgdat->kswapd, mask); 3054 } 3055 } 3056 return NOTIFY_OK; 3057 } 3058 3059 /* 3060 * This kswapd start function will be called by init and node-hot-add. 3061 * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added. 
3062 */ 3063 int kswapd_run(int nid) 3064 { 3065 pg_data_t *pgdat = NODE_DATA(nid); 3066 int ret = 0; 3067 3068 if (pgdat->kswapd) 3069 return 0; 3070 3071 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); 3072 if (IS_ERR(pgdat->kswapd)) { 3073 /* failure at boot is fatal */ 3074 BUG_ON(system_state == SYSTEM_BOOTING); 3075 printk("Failed to start kswapd on node %d\n",nid); 3076 ret = -1; 3077 } 3078 return ret; 3079 } 3080 3081 /* 3082 * Called by memory hotplug when all memory in a node is offlined. 3083 */ 3084 void kswapd_stop(int nid) 3085 { 3086 struct task_struct *kswapd = NODE_DATA(nid)->kswapd; 3087 3088 if (kswapd) 3089 kthread_stop(kswapd); 3090 } 3091 3092 static int __init kswapd_init(void) 3093 { 3094 int nid; 3095 3096 swap_setup(); 3097 for_each_node_state(nid, N_HIGH_MEMORY) 3098 kswapd_run(nid); 3099 hotcpu_notifier(cpu_callback, 0); 3100 return 0; 3101 } 3102 3103 module_init(kswapd_init) 3104 3105 #ifdef CONFIG_NUMA 3106 /* 3107 * Zone reclaim mode 3108 * 3109 * If non-zero call zone_reclaim when the number of free pages falls below 3110 * the watermarks. 3111 */ 3112 int zone_reclaim_mode __read_mostly; 3113 3114 #define RECLAIM_OFF 0 3115 #define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */ 3116 #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ 3117 #define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */ 3118 3119 /* 3120 * Priority for ZONE_RECLAIM. This determines the fraction of pages 3121 * of a node considered for each zone_reclaim. 4 scans 1/16th of 3122 * a zone. 3123 */ 3124 #define ZONE_RECLAIM_PRIORITY 4 3125 3126 /* 3127 * Percentage of pages in a zone that must be unmapped for zone_reclaim to 3128 * occur. 3129 */ 3130 int sysctl_min_unmapped_ratio = 1; 3131 3132 /* 3133 * If the number of slab pages in a zone grows beyond this percentage then 3134 * slab reclaim needs to occur. 3135 */ 3136 int sysctl_min_slab_ratio = 5; 3137 3138 static inline unsigned long zone_unmapped_file_pages(struct zone *zone) 3139 { 3140 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED); 3141 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) + 3142 zone_page_state(zone, NR_ACTIVE_FILE); 3143 3144 /* 3145 * It's possible for there to be more file mapped pages than 3146 * accounted for by the pages on the file LRU lists because 3147 * tmpfs pages accounted for as ANON can also be FILE_MAPPED 3148 */ 3149 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; 3150 } 3151 3152 /* Work out how many page cache pages we can reclaim in this reclaim_mode */ 3153 static long zone_pagecache_reclaimable(struct zone *zone) 3154 { 3155 long nr_pagecache_reclaimable; 3156 long delta = 0; 3157 3158 /* 3159 * If RECLAIM_SWAP is set, then all file pages are considered 3160 * potentially reclaimable. 
Otherwise, we have to worry about 3161 * pages like swapcache and zone_unmapped_file_pages() provides 3162 * a better estimate 3163 */ 3164 if (zone_reclaim_mode & RECLAIM_SWAP) 3165 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES); 3166 else 3167 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone); 3168 3169 /* If we can't clean pages, remove dirty pages from consideration */ 3170 if (!(zone_reclaim_mode & RECLAIM_WRITE)) 3171 delta += zone_page_state(zone, NR_FILE_DIRTY); 3172 3173 /* Watch for any possible underflows due to delta */ 3174 if (unlikely(delta > nr_pagecache_reclaimable)) 3175 delta = nr_pagecache_reclaimable; 3176 3177 return nr_pagecache_reclaimable - delta; 3178 } 3179 3180 /* 3181 * Try to free up some pages from this zone through reclaim. 3182 */ 3183 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 3184 { 3185 /* Minimum pages needed in order to stay on node */ 3186 const unsigned long nr_pages = 1 << order; 3187 struct task_struct *p = current; 3188 struct reclaim_state reclaim_state; 3189 int priority; 3190 struct scan_control sc = { 3191 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), 3192 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP), 3193 .may_swap = 1, 3194 .nr_to_reclaim = max_t(unsigned long, nr_pages, 3195 SWAP_CLUSTER_MAX), 3196 .gfp_mask = gfp_mask, 3197 .order = order, 3198 }; 3199 struct shrink_control shrink = { 3200 .gfp_mask = sc.gfp_mask, 3201 }; 3202 unsigned long nr_slab_pages0, nr_slab_pages1; 3203 3204 cond_resched(); 3205 /* 3206 * We need to be able to allocate from the reserves for RECLAIM_SWAP 3207 * and we also need to be able to write out pages for RECLAIM_WRITE 3208 * and RECLAIM_SWAP. 3209 */ 3210 p->flags |= PF_MEMALLOC | PF_SWAPWRITE; 3211 lockdep_set_current_reclaim_state(gfp_mask); 3212 reclaim_state.reclaimed_slab = 0; 3213 p->reclaim_state = &reclaim_state; 3214 3215 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) { 3216 /* 3217 * Free memory by calling shrink zone with increasing 3218 * priorities until we have enough memory freed. 3219 */ 3220 priority = ZONE_RECLAIM_PRIORITY; 3221 do { 3222 shrink_zone(priority, zone, &sc); 3223 priority--; 3224 } while (priority >= 0 && sc.nr_reclaimed < nr_pages); 3225 } 3226 3227 nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); 3228 if (nr_slab_pages0 > zone->min_slab_pages) { 3229 /* 3230 * shrink_slab() does not currently allow us to determine how 3231 * many pages were freed in this zone. So we take the current 3232 * number of slab pages and shake the slab until it is reduced 3233 * by the same nr_pages that we used for reclaiming unmapped 3234 * pages. 3235 * 3236 * Note that shrink_slab will free memory on all zones and may 3237 * take a long time. 3238 */ 3239 for (;;) { 3240 unsigned long lru_pages = zone_reclaimable_pages(zone); 3241 3242 /* No reclaimable slab or very low memory pressure */ 3243 if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages)) 3244 break; 3245 3246 /* Freed enough memory */ 3247 nr_slab_pages1 = zone_page_state(zone, 3248 NR_SLAB_RECLAIMABLE); 3249 if (nr_slab_pages1 + nr_pages <= nr_slab_pages0) 3250 break; 3251 } 3252 3253 /* 3254 * Update nr_reclaimed by the number of slab pages we 3255 * reclaimed from this zone. 
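 * For example (illustrative): if NR_SLAB_RECLAIMABLE was 10000 pages
 * before the loop above and is 9200 now, the 800 page difference is
 * credited to sc.nr_reclaimed below.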
3256 */ 3257 nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); 3258 if (nr_slab_pages1 < nr_slab_pages0) 3259 sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1; 3260 } 3261 3262 p->reclaim_state = NULL; 3263 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); 3264 lockdep_clear_current_reclaim_state(); 3265 return sc.nr_reclaimed >= nr_pages; 3266 } 3267 3268 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 3269 { 3270 int node_id; 3271 int ret; 3272 3273 /* 3274 * Zone reclaim reclaims unmapped file backed pages and 3275 * slab pages if we are over the defined limits. 3276 * 3277 * A small portion of unmapped file backed pages is needed for 3278 * file I/O otherwise pages read by file I/O will be immediately 3279 * thrown out if the zone is overallocated. So we do not reclaim 3280 * if less than a specified percentage of the zone is used by 3281 * unmapped file backed pages. 3282 */ 3283 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages && 3284 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages) 3285 return ZONE_RECLAIM_FULL; 3286 3287 if (zone->all_unreclaimable) 3288 return ZONE_RECLAIM_FULL; 3289 3290 /* 3291 * Do not scan if the allocation should not be delayed. 3292 */ 3293 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC)) 3294 return ZONE_RECLAIM_NOSCAN; 3295 3296 /* 3297 * Only run zone reclaim on the local zone or on zones that do not 3298 * have associated processors. This will favor the local processor 3299 * over remote processors and spread off node memory allocations 3300 * as wide as possible. 3301 */ 3302 node_id = zone_to_nid(zone); 3303 if (node_state(node_id, N_CPU) && node_id != numa_node_id()) 3304 return ZONE_RECLAIM_NOSCAN; 3305 3306 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED)) 3307 return ZONE_RECLAIM_NOSCAN; 3308 3309 ret = __zone_reclaim(zone, gfp_mask, order); 3310 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED); 3311 3312 if (!ret) 3313 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); 3314 3315 return ret; 3316 } 3317 #endif 3318 3319 /* 3320 * page_evictable - test whether a page is evictable 3321 * @page: the page to test 3322 * @vma: the VMA in which the page is or will be mapped, may be NULL 3323 * 3324 * Test whether page is evictable--i.e., should be placed on active/inactive 3325 * lists vs unevictable list. The vma argument is !NULL when called from the 3326 * fault path to determine how to instantate a new page. 3327 * 3328 * Reasons page might not be evictable: 3329 * (1) page's mapping marked unevictable 3330 * (2) page is part of an mlocked VMA 3331 * 3332 */ 3333 int page_evictable(struct page *page, struct vm_area_struct *vma) 3334 { 3335 3336 if (mapping_unevictable(page_mapping(page))) 3337 return 0; 3338 3339 if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page))) 3340 return 0; 3341 3342 return 1; 3343 } 3344 3345 /** 3346 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list 3347 * @page: page to check evictability and move to appropriate lru list 3348 * @zone: zone page is in 3349 * 3350 * Checks a page for evictability and moves the page to the appropriate 3351 * zone lru list. 3352 * 3353 * Restrictions: zone->lru_lock must be held, page must be on LRU and must 3354 * have PageUnevictable set. 
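 *
 * If the page becomes evictable again only after it has been moved
 * back onto the unevictable list (e.g. a racing munlock), the recheck
 * below notices this and retries, so the page is not stranded on the
 * unevictable list.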
3355 */ 3356 static void check_move_unevictable_page(struct page *page, struct zone *zone) 3357 { 3358 VM_BUG_ON(PageActive(page)); 3359 3360 retry: 3361 ClearPageUnevictable(page); 3362 if (page_evictable(page, NULL)) { 3363 enum lru_list l = page_lru_base_type(page); 3364 3365 __dec_zone_state(zone, NR_UNEVICTABLE); 3366 list_move(&page->lru, &zone->lru[l].list); 3367 mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l); 3368 __inc_zone_state(zone, NR_INACTIVE_ANON + l); 3369 __count_vm_event(UNEVICTABLE_PGRESCUED); 3370 } else { 3371 /* 3372 * rotate unevictable list 3373 */ 3374 SetPageUnevictable(page); 3375 list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list); 3376 mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE); 3377 if (page_evictable(page, NULL)) 3378 goto retry; 3379 } 3380 } 3381 3382 /** 3383 * scan_mapping_unevictable_pages - scan an address space for evictable pages 3384 * @mapping: struct address_space to scan for evictable pages 3385 * 3386 * Scan all pages in mapping. Check unevictable pages for 3387 * evictability and move them to the appropriate zone lru list. 3388 */ 3389 void scan_mapping_unevictable_pages(struct address_space *mapping) 3390 { 3391 pgoff_t next = 0; 3392 pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >> 3393 PAGE_CACHE_SHIFT; 3394 struct zone *zone; 3395 struct pagevec pvec; 3396 3397 if (mapping->nrpages == 0) 3398 return; 3399 3400 pagevec_init(&pvec, 0); 3401 while (next < end && 3402 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { 3403 int i; 3404 int pg_scanned = 0; 3405 3406 zone = NULL; 3407 3408 for (i = 0; i < pagevec_count(&pvec); i++) { 3409 struct page *page = pvec.pages[i]; 3410 pgoff_t page_index = page->index; 3411 struct zone *pagezone = page_zone(page); 3412 3413 pg_scanned++; 3414 if (page_index > next) 3415 next = page_index; 3416 next++; 3417 3418 if (pagezone != zone) { 3419 if (zone) 3420 spin_unlock_irq(&zone->lru_lock); 3421 zone = pagezone; 3422 spin_lock_irq(&zone->lru_lock); 3423 } 3424 3425 if (PageLRU(page) && PageUnevictable(page)) 3426 check_move_unevictable_page(page, zone); 3427 } 3428 if (zone) 3429 spin_unlock_irq(&zone->lru_lock); 3430 pagevec_release(&pvec); 3431 3432 count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned); 3433 } 3434 3435 } 3436 3437 static void warn_scan_unevictable_pages(void) 3438 { 3439 printk_once(KERN_WARNING 3440 "%s: The scan_unevictable_pages sysctl/node-interface has been " 3441 "disabled for lack of a legitimate use case. If you have " 3442 "one, please send an email to linux-mm@kvack.org.\n", 3443 current->comm); 3444 } 3445 3446 /* 3447 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of 3448 * all nodes' unevictable lists for evictable pages 3449 */ 3450 unsigned long scan_unevictable_pages; 3451 3452 int scan_unevictable_handler(struct ctl_table *table, int write, 3453 void __user *buffer, 3454 size_t *length, loff_t *ppos) 3455 { 3456 warn_scan_unevictable_pages(); 3457 proc_doulongvec_minmax(table, write, buffer, length, ppos); 3458 scan_unevictable_pages = 0; 3459 return 0; 3460 } 3461 3462 #ifdef CONFIG_NUMA 3463 /* 3464 * per node 'scan_unevictable_pages' attribute. On demand re-scan of 3465 * a specified node's per zone unevictable lists for evictable pages. 3466 */ 3467 3468 static ssize_t read_scan_unevictable_node(struct device *dev, 3469 struct device_attribute *attr, 3470 char *buf) 3471 { 3472 warn_scan_unevictable_pages(); 3473 return sprintf(buf, "0\n"); /* always zero; should fit... 
*/ 3474 } 3475 3476 static ssize_t write_scan_unevictable_node(struct device *dev, 3477 struct device_attribute *attr, 3478 const char *buf, size_t count) 3479 { 3480 warn_scan_unevictable_pages(); 3481 return 1; 3482 } 3483 3484 3485 static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR, 3486 read_scan_unevictable_node, 3487 write_scan_unevictable_node); 3488 3489 int scan_unevictable_register_node(struct node *node) 3490 { 3491 return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages); 3492 } 3493 3494 void scan_unevictable_unregister_node(struct node *node) 3495 { 3496 device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages); 3497 } 3498 #endif 3499