1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 4 * 5 * Swap reorganised 29.12.95, Stephen Tweedie. 6 * kswapd added: 7.1.96 sct 7 * Removed kswapd_ctl limits, and swap out as many pages as needed 8 * to bring the system back to freepages.high: 2.4.97, Rik van Riel. 9 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com). 10 * Multiqueue VM started 5.8.00, Rik van Riel. 11 */ 12 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 15 #include <linux/mm.h> 16 #include <linux/sched/mm.h> 17 #include <linux/module.h> 18 #include <linux/gfp.h> 19 #include <linux/kernel_stat.h> 20 #include <linux/swap.h> 21 #include <linux/pagemap.h> 22 #include <linux/init.h> 23 #include <linux/highmem.h> 24 #include <linux/vmpressure.h> 25 #include <linux/vmstat.h> 26 #include <linux/file.h> 27 #include <linux/writeback.h> 28 #include <linux/blkdev.h> 29 #include <linux/buffer_head.h> /* for buffer_heads_over_limit */ 30 #include <linux/mm_inline.h> 31 #include <linux/backing-dev.h> 32 #include <linux/rmap.h> 33 #include <linux/topology.h> 34 #include <linux/cpu.h> 35 #include <linux/cpuset.h> 36 #include <linux/compaction.h> 37 #include <linux/notifier.h> 38 #include <linux/delay.h> 39 #include <linux/kthread.h> 40 #include <linux/freezer.h> 41 #include <linux/memcontrol.h> 42 #include <linux/migrate.h> 43 #include <linux/delayacct.h> 44 #include <linux/sysctl.h> 45 #include <linux/memory-tiers.h> 46 #include <linux/oom.h> 47 #include <linux/pagevec.h> 48 #include <linux/prefetch.h> 49 #include <linux/printk.h> 50 #include <linux/dax.h> 51 #include <linux/psi.h> 52 #include <linux/pagewalk.h> 53 #include <linux/shmem_fs.h> 54 #include <linux/ctype.h> 55 #include <linux/debugfs.h> 56 #include <linux/khugepaged.h> 57 #include <linux/rculist_nulls.h> 58 #include <linux/random.h> 59 #include <linux/mmu_notifier.h> 60 61 #include <asm/tlbflush.h> 62 #include <asm/div64.h> 63 64 #include <linux/swapops.h> 65 #include <linux/balloon_compaction.h> 66 #include <linux/sched/sysctl.h> 67 68 #include "internal.h" 69 #include "swap.h" 70 71 #define CREATE_TRACE_POINTS 72 #include <trace/events/vmscan.h> 73 74 struct scan_control { 75 /* How many pages shrink_list() should reclaim */ 76 unsigned long nr_to_reclaim; 77 78 /* 79 * Nodemask of nodes allowed by the caller. If NULL, all nodes 80 * are scanned. 81 */ 82 nodemask_t *nodemask; 83 84 /* 85 * The memory cgroup that hit its limit and as a result is the 86 * primary target of this reclaim invocation. 87 */ 88 struct mem_cgroup *target_mem_cgroup; 89 90 /* 91 * Scan pressure balancing between anon and file LRUs 92 */ 93 unsigned long anon_cost; 94 unsigned long file_cost; 95 96 #ifdef CONFIG_MEMCG 97 /* Swappiness value for proactive reclaim. Always use sc_swappiness()! */ 98 int *proactive_swappiness; 99 #endif 100 101 /* Can active folios be deactivated as part of reclaim? */ 102 #define DEACTIVATE_ANON 1 103 #define DEACTIVATE_FILE 2 104 unsigned int may_deactivate:2; 105 unsigned int force_deactivate:1; 106 unsigned int skipped_deactivate:1; 107 108 /* Writepage batching in laptop mode; RECLAIM_WRITE */ 109 unsigned int may_writepage:1; 110 111 /* Can mapped folios be reclaimed? */ 112 unsigned int may_unmap:1; 113 114 /* Can folios be swapped as part of reclaim? */ 115 unsigned int may_swap:1; 116 117 /* Not allow cache_trim_mode to be turned on as part of reclaim? */ 118 unsigned int no_cache_trim_mode:1; 119 120 /* Has cache_trim_mode failed at least once? 
*/ 121 unsigned int cache_trim_mode_failed:1; 122 123 /* Proactive reclaim invoked by userspace through memory.reclaim */ 124 unsigned int proactive:1; 125 126 /* 127 * Cgroup memory below memory.low is protected as long as we 128 * don't threaten to OOM. If any cgroup is reclaimed at 129 * reduced force or passed over entirely due to its memory.low 130 * setting (memcg_low_skipped), and nothing is reclaimed as a 131 * result, then go back for one more cycle that reclaims the protected 132 * memory (memcg_low_reclaim) to avert OOM. 133 */ 134 unsigned int memcg_low_reclaim:1; 135 unsigned int memcg_low_skipped:1; 136 137 /* Shared cgroup tree walk failed, rescan the whole tree */ 138 unsigned int memcg_full_walk:1; 139 140 unsigned int hibernation_mode:1; 141 142 /* One of the zones is ready for compaction */ 143 unsigned int compaction_ready:1; 144 145 /* There is easily reclaimable cold cache in the current node */ 146 unsigned int cache_trim_mode:1; 147 148 /* The file folios on the current node are dangerously low */ 149 unsigned int file_is_tiny:1; 150 151 /* Always discard instead of demoting to lower tier memory */ 152 unsigned int no_demotion:1; 153 154 /* Allocation order */ 155 s8 order; 156 157 /* Scan (total_size >> priority) pages at once */ 158 s8 priority; 159 160 /* The highest zone to isolate folios for reclaim from */ 161 s8 reclaim_idx; 162 163 /* This context's GFP mask */ 164 gfp_t gfp_mask; 165 166 /* Incremented by the number of inactive pages that were scanned */ 167 unsigned long nr_scanned; 168 169 /* Number of pages freed so far during a call to shrink_zones() */ 170 unsigned long nr_reclaimed; 171 172 struct { 173 unsigned int dirty; 174 unsigned int unqueued_dirty; 175 unsigned int congested; 176 unsigned int writeback; 177 unsigned int immediate; 178 unsigned int file_taken; 179 unsigned int taken; 180 } nr; 181 182 /* for recording the reclaimed slab by now */ 183 struct reclaim_state reclaim_state; 184 }; 185 186 #ifdef ARCH_HAS_PREFETCHW 187 #define prefetchw_prev_lru_folio(_folio, _base, _field) \ 188 do { \ 189 if ((_folio)->lru.prev != _base) { \ 190 struct folio *prev; \ 191 \ 192 prev = lru_to_folio(&(_folio->lru)); \ 193 prefetchw(&prev->_field); \ 194 } \ 195 } while (0) 196 #else 197 #define prefetchw_prev_lru_folio(_folio, _base, _field) do { } while (0) 198 #endif 199 200 /* 201 * From 0 .. MAX_SWAPPINESS. Higher means more swappy. 202 */ 203 int vm_swappiness = 60; 204 205 #ifdef CONFIG_MEMCG 206 207 /* Returns true for reclaim through cgroup limits or cgroup interfaces. */ 208 static bool cgroup_reclaim(struct scan_control *sc) 209 { 210 return sc->target_mem_cgroup; 211 } 212 213 /* 214 * Returns true for reclaim on the root cgroup. This is true for direct 215 * allocator reclaim and reclaim through cgroup interfaces on the root cgroup. 216 */ 217 static bool root_reclaim(struct scan_control *sc) 218 { 219 return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup); 220 } 221 222 /** 223 * writeback_throttling_sane - is the usual dirty throttling mechanism available? 224 * @sc: scan_control in question 225 * 226 * The normal page dirty throttling mechanism in balance_dirty_pages() is 227 * completely broken with the legacy memcg and direct stalling in 228 * shrink_folio_list() is used for throttling instead, which lacks all the 229 * niceties such as fairness, adaptive pausing, bandwidth proportional 230 * allocation and configurability. 
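 *
 * For example, global (non-cgroup) reclaim and cgroup v2 reclaim with
 * CONFIG_CGROUP_WRITEBACK enabled can rely on balance_dirty_pages();
 * legacy cgroup v1 limit reclaim cannot.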
231 * 232 * This function tests whether the vmscan currently in progress can assume 233 * that the normal dirty throttling mechanism is operational. 234 */ 235 static bool writeback_throttling_sane(struct scan_control *sc) 236 { 237 if (!cgroup_reclaim(sc)) 238 return true; 239 #ifdef CONFIG_CGROUP_WRITEBACK 240 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 241 return true; 242 #endif 243 return false; 244 } 245 246 static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg) 247 { 248 if (sc->proactive && sc->proactive_swappiness) 249 return *sc->proactive_swappiness; 250 return mem_cgroup_swappiness(memcg); 251 } 252 #else 253 static bool cgroup_reclaim(struct scan_control *sc) 254 { 255 return false; 256 } 257 258 static bool root_reclaim(struct scan_control *sc) 259 { 260 return true; 261 } 262 263 static bool writeback_throttling_sane(struct scan_control *sc) 264 { 265 return true; 266 } 267 268 static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg) 269 { 270 return READ_ONCE(vm_swappiness); 271 } 272 #endif 273 274 static void set_task_reclaim_state(struct task_struct *task, 275 struct reclaim_state *rs) 276 { 277 /* Check for an overwrite */ 278 WARN_ON_ONCE(rs && task->reclaim_state); 279 280 /* Check for the nulling of an already-nulled member */ 281 WARN_ON_ONCE(!rs && !task->reclaim_state); 282 283 task->reclaim_state = rs; 284 } 285 286 /* 287 * flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to 288 * scan_control->nr_reclaimed. 289 */ 290 static void flush_reclaim_state(struct scan_control *sc) 291 { 292 /* 293 * Currently, reclaim_state->reclaimed includes three types of pages 294 * freed outside of vmscan: 295 * (1) Slab pages. 296 * (2) Clean file pages from pruned inodes (on highmem systems). 297 * (3) XFS freed buffer pages. 298 * 299 * For all of these cases, we cannot universally link the pages to a 300 * single memcg. For example, a memcg-aware shrinker can free one object 301 * charged to the target memcg, causing an entire page to be freed. 302 * If we count the entire page as reclaimed from the memcg, we end up 303 * overestimating the reclaimed amount (potentially under-reclaiming). 304 * 305 * Only count such pages for global reclaim to prevent under-reclaiming 306 * from the target memcg; preventing unnecessary retries during memcg 307 * charging and false positives from proactive reclaim. 308 * 309 * For uncommon cases where the freed pages were actually mostly 310 * charged to the target memcg, we end up underestimating the reclaimed 311 * amount. This should be fine. The freed pages will be uncharged 312 * anyway, even if they are not counted here properly, and we will be 313 * able to make forward progress in charging (which is usually in a 314 * retry loop). 315 * 316 * We can go one step further, and report the uncharged objcg pages in 317 * memcg reclaim, to make reporting more accurate and reduce 318 * underestimation, but it's probably not worth the complexity for now. 
319 */ 320 if (current->reclaim_state && root_reclaim(sc)) { 321 sc->nr_reclaimed += current->reclaim_state->reclaimed; 322 current->reclaim_state->reclaimed = 0; 323 } 324 } 325 326 static bool can_demote(int nid, struct scan_control *sc) 327 { 328 if (!numa_demotion_enabled) 329 return false; 330 if (sc && sc->no_demotion) 331 return false; 332 if (next_demotion_node(nid) == NUMA_NO_NODE) 333 return false; 334 335 return true; 336 } 337 338 static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg, 339 int nid, 340 struct scan_control *sc) 341 { 342 if (memcg == NULL) { 343 /* 344 * For non-memcg reclaim, is there 345 * space in any swap device? 346 */ 347 if (get_nr_swap_pages() > 0) 348 return true; 349 } else { 350 /* Is the memcg below its swap limit? */ 351 if (mem_cgroup_get_nr_swap_pages(memcg) > 0) 352 return true; 353 } 354 355 /* 356 * The page can not be swapped. 357 * 358 * Can it be reclaimed from this node via demotion? 359 */ 360 return can_demote(nid, sc); 361 } 362 363 /* 364 * This misses isolated folios which are not accounted for to save counters. 365 * As the data only determines if reclaim or compaction continues, it is 366 * not expected that isolated folios will be a dominating factor. 367 */ 368 unsigned long zone_reclaimable_pages(struct zone *zone) 369 { 370 unsigned long nr; 371 372 nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) + 373 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE); 374 if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL)) 375 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) + 376 zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON); 377 378 return nr; 379 } 380 381 /** 382 * lruvec_lru_size - Returns the number of pages on the given LRU list. 383 * @lruvec: lru vector 384 * @lru: lru to use 385 * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list) 386 */ 387 static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, 388 int zone_idx) 389 { 390 unsigned long size = 0; 391 int zid; 392 393 for (zid = 0; zid <= zone_idx; zid++) { 394 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; 395 396 if (!managed_zone(zone)) 397 continue; 398 399 if (!mem_cgroup_disabled()) 400 size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid); 401 else 402 size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru); 403 } 404 return size; 405 } 406 407 static unsigned long drop_slab_node(int nid) 408 { 409 unsigned long freed = 0; 410 struct mem_cgroup *memcg = NULL; 411 412 memcg = mem_cgroup_iter(NULL, NULL, NULL); 413 do { 414 freed += shrink_slab(GFP_KERNEL, nid, memcg, 0); 415 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); 416 417 return freed; 418 } 419 420 void drop_slab(void) 421 { 422 int nid; 423 int shift = 0; 424 unsigned long freed; 425 426 do { 427 freed = 0; 428 for_each_online_node(nid) { 429 if (fatal_signal_pending(current)) 430 return; 431 432 freed += drop_slab_node(nid); 433 } 434 } while ((freed >> shift++) > 1); 435 } 436 437 static int reclaimer_offset(void) 438 { 439 BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != 440 PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD); 441 BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != 442 PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD); 443 BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != 444 PGSCAN_DIRECT - PGSCAN_KSWAPD); 445 BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != 446 PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD); 447 448 if (current_is_kswapd()) 449 return 0; 450 if (current_is_khugepaged()) 451 return PGSTEAL_KHUGEPAGED 
- PGSTEAL_KSWAPD; 452 return PGSTEAL_DIRECT - PGSTEAL_KSWAPD; 453 } 454 455 static inline int is_page_cache_freeable(struct folio *folio) 456 { 457 /* 458 * A freeable page cache folio is referenced only by the caller 459 * that isolated the folio, the page cache and optional filesystem 460 * private data at folio->private. 461 */ 462 return folio_ref_count(folio) - folio_test_private(folio) == 463 1 + folio_nr_pages(folio); 464 } 465 466 /* 467 * We detected a synchronous write error writing a folio out. Probably 468 * -ENOSPC. We need to propagate that into the address_space for a subsequent 469 * fsync(), msync() or close(). 470 * 471 * The tricky part is that after writepage we cannot touch the mapping: nothing 472 * prevents it from being freed up. But we have a ref on the folio and once 473 * that folio is locked, the mapping is pinned. 474 * 475 * We're allowed to run sleeping folio_lock() here because we know the caller has 476 * __GFP_FS. 477 */ 478 static void handle_write_error(struct address_space *mapping, 479 struct folio *folio, int error) 480 { 481 folio_lock(folio); 482 if (folio_mapping(folio) == mapping) 483 mapping_set_error(mapping, error); 484 folio_unlock(folio); 485 } 486 487 static bool skip_throttle_noprogress(pg_data_t *pgdat) 488 { 489 int reclaimable = 0, write_pending = 0; 490 int i; 491 492 /* 493 * If kswapd is disabled, reschedule if necessary but do not 494 * throttle as the system is likely near OOM. 495 */ 496 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) 497 return true; 498 499 /* 500 * If there are a lot of dirty/writeback folios then do not 501 * throttle as throttling will occur when the folios cycle 502 * towards the end of the LRU if still under writeback. 503 */ 504 for (i = 0; i < MAX_NR_ZONES; i++) { 505 struct zone *zone = pgdat->node_zones + i; 506 507 if (!managed_zone(zone)) 508 continue; 509 510 reclaimable += zone_reclaimable_pages(zone); 511 write_pending += zone_page_state_snapshot(zone, 512 NR_ZONE_WRITE_PENDING); 513 } 514 if (2 * write_pending <= reclaimable) 515 return true; 516 517 return false; 518 } 519 520 void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason) 521 { 522 wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason]; 523 long timeout, ret; 524 DEFINE_WAIT(wait); 525 526 /* 527 * Do not throttle user workers, kthreads other than kswapd or 528 * workqueues. They may be required for reclaim to make 529 * forward progress (e.g. journalling workqueues or kthreads). 530 */ 531 if (!current_is_kswapd() && 532 current->flags & (PF_USER_WORKER|PF_KTHREAD)) { 533 cond_resched(); 534 return; 535 } 536 537 /* 538 * These figures are pulled out of thin air. 539 * VMSCAN_THROTTLE_ISOLATED is a transient condition based on too many 540 * parallel reclaimers which is a short-lived event so the timeout is 541 * short. Failing to make progress or waiting on writeback are 542 * potentially long-lived events so use a longer timeout. This is shaky 543 * logic as a failure to make progress could be due to anything from 544 * writeback to a slow device to excessive referenced folios at the tail 545 * of the inactive LRU. 
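	 *
	 * Roughly: VMSCAN_THROTTLE_WRITEBACK sleeps for up to HZ/10,
	 * VMSCAN_THROTTLE_ISOLATED for up to HZ/50, and the CONGESTED and
	 * NOPROGRESS cases for a single jiffy, or not at all when
	 * skip_throttle_noprogress() decides throttling would be pointless.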
546 */ 547 switch(reason) { 548 case VMSCAN_THROTTLE_WRITEBACK: 549 timeout = HZ/10; 550 551 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) { 552 WRITE_ONCE(pgdat->nr_reclaim_start, 553 node_page_state(pgdat, NR_THROTTLED_WRITTEN)); 554 } 555 556 break; 557 case VMSCAN_THROTTLE_CONGESTED: 558 fallthrough; 559 case VMSCAN_THROTTLE_NOPROGRESS: 560 if (skip_throttle_noprogress(pgdat)) { 561 cond_resched(); 562 return; 563 } 564 565 timeout = 1; 566 567 break; 568 case VMSCAN_THROTTLE_ISOLATED: 569 timeout = HZ/50; 570 break; 571 default: 572 WARN_ON_ONCE(1); 573 timeout = HZ; 574 break; 575 } 576 577 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); 578 ret = schedule_timeout(timeout); 579 finish_wait(wqh, &wait); 580 581 if (reason == VMSCAN_THROTTLE_WRITEBACK) 582 atomic_dec(&pgdat->nr_writeback_throttled); 583 584 trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout), 585 jiffies_to_usecs(timeout - ret), 586 reason); 587 } 588 589 /* 590 * Account for folios written if tasks are throttled waiting on dirty 591 * folios to clean. If enough folios have been cleaned since throttling 592 * started then wakeup the throttled tasks. 593 */ 594 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio, 595 int nr_throttled) 596 { 597 unsigned long nr_written; 598 599 node_stat_add_folio(folio, NR_THROTTLED_WRITTEN); 600 601 /* 602 * This is an inaccurate read as the per-cpu deltas may not 603 * be synchronised. However, given that the system is 604 * writeback throttled, it is not worth taking the penalty 605 * of getting an accurate count. At worst, the throttle 606 * timeout guarantees forward progress. 607 */ 608 nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) - 609 READ_ONCE(pgdat->nr_reclaim_start); 610 611 if (nr_written > SWAP_CLUSTER_MAX * nr_throttled) 612 wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]); 613 } 614 615 /* possible outcome of pageout() */ 616 typedef enum { 617 /* failed to write folio out, folio is locked */ 618 PAGE_KEEP, 619 /* move folio to the active list, folio is locked */ 620 PAGE_ACTIVATE, 621 /* folio has been sent to the disk successfully, folio is unlocked */ 622 PAGE_SUCCESS, 623 /* folio is clean and locked */ 624 PAGE_CLEAN, 625 } pageout_t; 626 627 /* 628 * pageout is called by shrink_folio_list() for each dirty folio. 629 * Calls ->writepage(). 630 */ 631 static pageout_t pageout(struct folio *folio, struct address_space *mapping, 632 struct swap_iocb **plug, struct list_head *folio_list) 633 { 634 /* 635 * If the folio is dirty, only perform writeback if that write 636 * will be non-blocking. To prevent this allocation from being 637 * stalled by pagecache activity. But note that there may be 638 * stalls if we need to run get_block(). We could test 639 * PagePrivate for that. 640 * 641 * If this process is currently in __generic_file_write_iter() against 642 * this folio's queue, we can perform writeback even if that 643 * will block. 644 * 645 * If the folio is swapcache, write it back even if that would 646 * block, for some throttling. This happens by accident, because 647 * swap_backing_dev_info is bust: it doesn't reflect the 648 * congestion state of the swapdevs. Easy to fix, if needed. 649 */ 650 if (!is_page_cache_freeable(folio)) 651 return PAGE_KEEP; 652 if (!mapping) { 653 /* 654 * Some data journaling orphaned folios can have 655 * folio->mapping == NULL while being dirty with clean buffers. 
		 */
		if (folio_test_private(folio)) {
			if (try_to_free_buffers(folio)) {
				folio_clear_dirty(folio);
				pr_info("%s: orphaned folio\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;

	if (folio_clear_dirty_for_io(folio)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
			.swap_plug = plug,
		};

		/*
		 * The large shmem folio can be split if CONFIG_THP_SWAP is
		 * not enabled or contiguous swap entries cannot be
		 * allocated.
		 */
		if (shmem_mapping(mapping) && folio_test_large(folio))
			wbc.list = folio_list;

		folio_set_reclaim(folio);
		res = mapping->a_ops->writepage(&folio->page, &wbc);
		if (res < 0)
			handle_write_error(mapping, folio, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			folio_clear_reclaim(folio);
			return PAGE_ACTIVATE;
		}

		if (!folio_test_writeback(folio)) {
			/* synchronous write or broken a_ops? */
			folio_clear_reclaim(folio);
		}
		trace_mm_vmscan_write_folio(folio);
		node_stat_add_folio(folio, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Same as remove_mapping, but if the folio is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct folio *folio,
			    bool reclaimed, struct mem_cgroup *target_memcg)
{
	int refcount;
	void *shadow = NULL;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(mapping != folio_mapping(folio));

	if (!folio_test_swapcache(folio))
		spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	/*
	 * The non-racy check for a busy folio.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the folio, it may be possible that they dirty it then
	 * drop the reference. So if the dirty flag is tested before the
	 * refcount here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!folio_test_dirty(folio)    [good]
	 * folio_set_dirty(folio);
	 * folio_put(folio);
	 *				!refcount(folio)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the folio->flags
	 * load is not satisfied before that of folio->_refcount.
	 *
	 * Note that if the dirty flag is always set via folio_mark_dirty,
	 * and thus under the i_pages lock, then this ordering is not required.
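	 *
	 * The refcount frozen below, 1 + folio_nr_pages(), is what a folio
	 * referenced only by the page cache and the isolating caller is
	 * expected to have; compare is_page_cache_freeable().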
749 */ 750 refcount = 1 + folio_nr_pages(folio); 751 if (!folio_ref_freeze(folio, refcount)) 752 goto cannot_free; 753 /* note: atomic_cmpxchg in folio_ref_freeze provides the smp_rmb */ 754 if (unlikely(folio_test_dirty(folio))) { 755 folio_ref_unfreeze(folio, refcount); 756 goto cannot_free; 757 } 758 759 if (folio_test_swapcache(folio)) { 760 swp_entry_t swap = folio->swap; 761 762 if (reclaimed && !mapping_exiting(mapping)) 763 shadow = workingset_eviction(folio, target_memcg); 764 __delete_from_swap_cache(folio, swap, shadow); 765 mem_cgroup_swapout(folio, swap); 766 xa_unlock_irq(&mapping->i_pages); 767 put_swap_folio(folio, swap); 768 } else { 769 void (*free_folio)(struct folio *); 770 771 free_folio = mapping->a_ops->free_folio; 772 /* 773 * Remember a shadow entry for reclaimed file cache in 774 * order to detect refaults, thus thrashing, later on. 775 * 776 * But don't store shadows in an address space that is 777 * already exiting. This is not just an optimization, 778 * inode reclaim needs to empty out the radix tree or 779 * the nodes are lost. Don't plant shadows behind its 780 * back. 781 * 782 * We also don't store shadows for DAX mappings because the 783 * only page cache folios found in these are zero pages 784 * covering holes, and because we don't want to mix DAX 785 * exceptional entries and shadow exceptional entries in the 786 * same address_space. 787 */ 788 if (reclaimed && folio_is_file_lru(folio) && 789 !mapping_exiting(mapping) && !dax_mapping(mapping)) 790 shadow = workingset_eviction(folio, target_memcg); 791 __filemap_remove_folio(folio, shadow); 792 xa_unlock_irq(&mapping->i_pages); 793 if (mapping_shrinkable(mapping)) 794 inode_add_lru(mapping->host); 795 spin_unlock(&mapping->host->i_lock); 796 797 if (free_folio) 798 free_folio(folio); 799 } 800 801 return 1; 802 803 cannot_free: 804 xa_unlock_irq(&mapping->i_pages); 805 if (!folio_test_swapcache(folio)) 806 spin_unlock(&mapping->host->i_lock); 807 return 0; 808 } 809 810 /** 811 * remove_mapping() - Attempt to remove a folio from its mapping. 812 * @mapping: The address space. 813 * @folio: The folio to remove. 814 * 815 * If the folio is dirty, under writeback or if someone else has a ref 816 * on it, removal will fail. 817 * Return: The number of pages removed from the mapping. 0 if the folio 818 * could not be removed. 819 * Context: The caller should have a single refcount on the folio and 820 * hold its lock. 821 */ 822 long remove_mapping(struct address_space *mapping, struct folio *folio) 823 { 824 if (__remove_mapping(mapping, folio, false, NULL)) { 825 /* 826 * Unfreezing the refcount with 1 effectively 827 * drops the pagecache ref for us without requiring another 828 * atomic operation. 829 */ 830 folio_ref_unfreeze(folio, 1); 831 return folio_nr_pages(folio); 832 } 833 return 0; 834 } 835 836 /** 837 * folio_putback_lru - Put previously isolated folio onto appropriate LRU list. 838 * @folio: Folio to be returned to an LRU list. 839 * 840 * Add previously isolated @folio to appropriate LRU list. 841 * The folio may still be unevictable for other reasons. 842 * 843 * Context: lru_lock must not be held, interrupts must be enabled. 
844 */ 845 void folio_putback_lru(struct folio *folio) 846 { 847 folio_add_lru(folio); 848 folio_put(folio); /* drop ref from isolate */ 849 } 850 851 enum folio_references { 852 FOLIOREF_RECLAIM, 853 FOLIOREF_RECLAIM_CLEAN, 854 FOLIOREF_KEEP, 855 FOLIOREF_ACTIVATE, 856 }; 857 858 static enum folio_references folio_check_references(struct folio *folio, 859 struct scan_control *sc) 860 { 861 int referenced_ptes, referenced_folio; 862 unsigned long vm_flags; 863 864 referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup, 865 &vm_flags); 866 referenced_folio = folio_test_clear_referenced(folio); 867 868 /* 869 * The supposedly reclaimable folio was found to be in a VM_LOCKED vma. 870 * Let the folio, now marked Mlocked, be moved to the unevictable list. 871 */ 872 if (vm_flags & VM_LOCKED) 873 return FOLIOREF_ACTIVATE; 874 875 /* 876 * There are two cases to consider. 877 * 1) Rmap lock contention: rotate. 878 * 2) Skip the non-shared swapbacked folio mapped solely by 879 * the exiting or OOM-reaped process. 880 */ 881 if (referenced_ptes == -1) 882 return FOLIOREF_KEEP; 883 884 if (referenced_ptes) { 885 /* 886 * All mapped folios start out with page table 887 * references from the instantiating fault, so we need 888 * to look twice if a mapped file/anon folio is used more 889 * than once. 890 * 891 * Mark it and spare it for another trip around the 892 * inactive list. Another page table reference will 893 * lead to its activation. 894 * 895 * Note: the mark is set for activated folios as well 896 * so that recently deactivated but used folios are 897 * quickly recovered. 898 */ 899 folio_set_referenced(folio); 900 901 if (referenced_folio || referenced_ptes > 1) 902 return FOLIOREF_ACTIVATE; 903 904 /* 905 * Activate file-backed executable folios after first usage. 906 */ 907 if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) 908 return FOLIOREF_ACTIVATE; 909 910 return FOLIOREF_KEEP; 911 } 912 913 /* Reclaim if clean, defer dirty folios to writeback */ 914 if (referenced_folio && folio_is_file_lru(folio)) 915 return FOLIOREF_RECLAIM_CLEAN; 916 917 return FOLIOREF_RECLAIM; 918 } 919 920 /* Check if a folio is dirty or under writeback */ 921 static void folio_check_dirty_writeback(struct folio *folio, 922 bool *dirty, bool *writeback) 923 { 924 struct address_space *mapping; 925 926 /* 927 * Anonymous folios are not handled by flushers and must be written 928 * from reclaim context. Do not stall reclaim based on them. 929 * MADV_FREE anonymous folios are put into inactive file list too. 930 * They could be mistakenly treated as file lru. So further anon 931 * test is needed. 
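	 * Hence the check below: folios that are not on the file LRU, and
	 * anon folios that are not swap backed (MADV_FREE), are reported as
	 * neither dirty nor under writeback.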
932 */ 933 if (!folio_is_file_lru(folio) || 934 (folio_test_anon(folio) && !folio_test_swapbacked(folio))) { 935 *dirty = false; 936 *writeback = false; 937 return; 938 } 939 940 /* By default assume that the folio flags are accurate */ 941 *dirty = folio_test_dirty(folio); 942 *writeback = folio_test_writeback(folio); 943 944 /* Verify dirty/writeback state if the filesystem supports it */ 945 if (!folio_test_private(folio)) 946 return; 947 948 mapping = folio_mapping(folio); 949 if (mapping && mapping->a_ops->is_dirty_writeback) 950 mapping->a_ops->is_dirty_writeback(folio, dirty, writeback); 951 } 952 953 struct folio *alloc_migrate_folio(struct folio *src, unsigned long private) 954 { 955 struct folio *dst; 956 nodemask_t *allowed_mask; 957 struct migration_target_control *mtc; 958 959 mtc = (struct migration_target_control *)private; 960 961 allowed_mask = mtc->nmask; 962 /* 963 * make sure we allocate from the target node first also trying to 964 * demote or reclaim pages from the target node via kswapd if we are 965 * low on free memory on target node. If we don't do this and if 966 * we have free memory on the slower(lower) memtier, we would start 967 * allocating pages from slower(lower) memory tiers without even forcing 968 * a demotion of cold pages from the target memtier. This can result 969 * in the kernel placing hot pages in slower(lower) memory tiers. 970 */ 971 mtc->nmask = NULL; 972 mtc->gfp_mask |= __GFP_THISNODE; 973 dst = alloc_migration_target(src, (unsigned long)mtc); 974 if (dst) 975 return dst; 976 977 mtc->gfp_mask &= ~__GFP_THISNODE; 978 mtc->nmask = allowed_mask; 979 980 return alloc_migration_target(src, (unsigned long)mtc); 981 } 982 983 /* 984 * Take folios on @demote_folios and attempt to demote them to another node. 985 * Folios which are not demoted are left on @demote_folios. 986 */ 987 static unsigned int demote_folio_list(struct list_head *demote_folios, 988 struct pglist_data *pgdat) 989 { 990 int target_nid = next_demotion_node(pgdat->node_id); 991 unsigned int nr_succeeded; 992 nodemask_t allowed_mask; 993 994 struct migration_target_control mtc = { 995 /* 996 * Allocate from 'node', or fail quickly and quietly. 997 * When this happens, 'page' will likely just be discarded 998 * instead of migrated. 999 */ 1000 .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN | 1001 __GFP_NOMEMALLOC | GFP_NOWAIT, 1002 .nid = target_nid, 1003 .nmask = &allowed_mask, 1004 .reason = MR_DEMOTION, 1005 }; 1006 1007 if (list_empty(demote_folios)) 1008 return 0; 1009 1010 if (target_nid == NUMA_NO_NODE) 1011 return 0; 1012 1013 node_get_allowed_targets(pgdat, &allowed_mask); 1014 1015 /* Demotion ignores all cpuset and mempolicy settings */ 1016 migrate_pages(demote_folios, alloc_migrate_folio, NULL, 1017 (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION, 1018 &nr_succeeded); 1019 1020 return nr_succeeded; 1021 } 1022 1023 static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask) 1024 { 1025 if (gfp_mask & __GFP_FS) 1026 return true; 1027 if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO)) 1028 return false; 1029 /* 1030 * We can "enter_fs" for swap-cache with only __GFP_IO 1031 * providing this isn't SWP_FS_OPS. 1032 * ->flags can be updated non-atomicially (scan_swap_map_slots), 1033 * but that will never affect SWP_FS_OPS, so the data_race 1034 * is safe. 
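	 *
	 * In short: __GFP_FS always allows entering the filesystem;
	 * otherwise only swap cache folios may proceed, and only with
	 * __GFP_IO and a swap device that is not SWP_FS_OPS.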
1035 */ 1036 return !data_race(folio_swap_flags(folio) & SWP_FS_OPS); 1037 } 1038 1039 /* 1040 * shrink_folio_list() returns the number of reclaimed pages 1041 */ 1042 static unsigned int shrink_folio_list(struct list_head *folio_list, 1043 struct pglist_data *pgdat, struct scan_control *sc, 1044 struct reclaim_stat *stat, bool ignore_references) 1045 { 1046 struct folio_batch free_folios; 1047 LIST_HEAD(ret_folios); 1048 LIST_HEAD(demote_folios); 1049 unsigned int nr_reclaimed = 0; 1050 unsigned int pgactivate = 0; 1051 bool do_demote_pass; 1052 struct swap_iocb *plug = NULL; 1053 1054 folio_batch_init(&free_folios); 1055 memset(stat, 0, sizeof(*stat)); 1056 cond_resched(); 1057 do_demote_pass = can_demote(pgdat->node_id, sc); 1058 1059 retry: 1060 while (!list_empty(folio_list)) { 1061 struct address_space *mapping; 1062 struct folio *folio; 1063 enum folio_references references = FOLIOREF_RECLAIM; 1064 bool dirty, writeback; 1065 unsigned int nr_pages; 1066 1067 cond_resched(); 1068 1069 folio = lru_to_folio(folio_list); 1070 list_del(&folio->lru); 1071 1072 if (!folio_trylock(folio)) 1073 goto keep; 1074 1075 VM_BUG_ON_FOLIO(folio_test_active(folio), folio); 1076 1077 nr_pages = folio_nr_pages(folio); 1078 1079 /* Account the number of base pages */ 1080 sc->nr_scanned += nr_pages; 1081 1082 if (unlikely(!folio_evictable(folio))) 1083 goto activate_locked; 1084 1085 if (!sc->may_unmap && folio_mapped(folio)) 1086 goto keep_locked; 1087 1088 /* folio_update_gen() tried to promote this page? */ 1089 if (lru_gen_enabled() && !ignore_references && 1090 folio_mapped(folio) && folio_test_referenced(folio)) 1091 goto keep_locked; 1092 1093 /* 1094 * The number of dirty pages determines if a node is marked 1095 * reclaim_congested. kswapd will stall and start writing 1096 * folios if the tail of the LRU is all dirty unqueued folios. 1097 */ 1098 folio_check_dirty_writeback(folio, &dirty, &writeback); 1099 if (dirty || writeback) 1100 stat->nr_dirty += nr_pages; 1101 1102 if (dirty && !writeback) 1103 stat->nr_unqueued_dirty += nr_pages; 1104 1105 /* 1106 * Treat this folio as congested if folios are cycling 1107 * through the LRU so quickly that the folios marked 1108 * for immediate reclaim are making it to the end of 1109 * the LRU a second time. 1110 */ 1111 if (writeback && folio_test_reclaim(folio)) 1112 stat->nr_congested += nr_pages; 1113 1114 /* 1115 * If a folio at the tail of the LRU is under writeback, there 1116 * are three cases to consider. 1117 * 1118 * 1) If reclaim is encountering an excessive number 1119 * of folios under writeback and this folio has both 1120 * the writeback and reclaim flags set, then it 1121 * indicates that folios are being queued for I/O but 1122 * are being recycled through the LRU before the I/O 1123 * can complete. Waiting on the folio itself risks an 1124 * indefinite stall if it is impossible to writeback 1125 * the folio due to I/O error or disconnected storage 1126 * so instead note that the LRU is being scanned too 1127 * quickly and the caller can stall after the folio 1128 * list has been processed. 1129 * 1130 * 2) Global or new memcg reclaim encounters a folio that is 1131 * not marked for immediate reclaim, or the caller does not 1132 * have __GFP_FS (or __GFP_IO if it's simply going to swap, 1133 * not to fs). In this case mark the folio for immediate 1134 * reclaim and continue scanning. 1135 * 1136 * Require may_enter_fs() because we would wait on fs, which 1137 * may not have submitted I/O yet. 
And the loop driver might 1138 * enter reclaim, and deadlock if it waits on a folio for 1139 * which it is needed to do the write (loop masks off 1140 * __GFP_IO|__GFP_FS for this reason); but more thought 1141 * would probably show more reasons. 1142 * 1143 * 3) Legacy memcg encounters a folio that already has the 1144 * reclaim flag set. memcg does not have any dirty folio 1145 * throttling so we could easily OOM just because too many 1146 * folios are in writeback and there is nothing else to 1147 * reclaim. Wait for the writeback to complete. 1148 * 1149 * In cases 1) and 2) we activate the folios to get them out of 1150 * the way while we continue scanning for clean folios on the 1151 * inactive list and refilling from the active list. The 1152 * observation here is that waiting for disk writes is more 1153 * expensive than potentially causing reloads down the line. 1154 * Since they're marked for immediate reclaim, they won't put 1155 * memory pressure on the cache working set any longer than it 1156 * takes to write them to disk. 1157 */ 1158 if (folio_test_writeback(folio)) { 1159 /* Case 1 above */ 1160 if (current_is_kswapd() && 1161 folio_test_reclaim(folio) && 1162 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { 1163 stat->nr_immediate += nr_pages; 1164 goto activate_locked; 1165 1166 /* Case 2 above */ 1167 } else if (writeback_throttling_sane(sc) || 1168 !folio_test_reclaim(folio) || 1169 !may_enter_fs(folio, sc->gfp_mask)) { 1170 /* 1171 * This is slightly racy - 1172 * folio_end_writeback() might have 1173 * just cleared the reclaim flag, then 1174 * setting the reclaim flag here ends up 1175 * interpreted as the readahead flag - but 1176 * that does not matter enough to care. 1177 * What we do want is for this folio to 1178 * have the reclaim flag set next time 1179 * memcg reclaim reaches the tests above, 1180 * so it will then wait for writeback to 1181 * avoid OOM; and it's also appropriate 1182 * in global reclaim. 1183 */ 1184 folio_set_reclaim(folio); 1185 stat->nr_writeback += nr_pages; 1186 goto activate_locked; 1187 1188 /* Case 3 above */ 1189 } else { 1190 folio_unlock(folio); 1191 folio_wait_writeback(folio); 1192 /* then go back and try same folio again */ 1193 list_add_tail(&folio->lru, folio_list); 1194 continue; 1195 } 1196 } 1197 1198 if (!ignore_references) 1199 references = folio_check_references(folio, sc); 1200 1201 switch (references) { 1202 case FOLIOREF_ACTIVATE: 1203 goto activate_locked; 1204 case FOLIOREF_KEEP: 1205 stat->nr_ref_keep += nr_pages; 1206 goto keep_locked; 1207 case FOLIOREF_RECLAIM: 1208 case FOLIOREF_RECLAIM_CLEAN: 1209 ; /* try to reclaim the folio below */ 1210 } 1211 1212 /* 1213 * Before reclaiming the folio, try to relocate 1214 * its contents to another node. 1215 */ 1216 if (do_demote_pass && 1217 (thp_migration_supported() || !folio_test_large(folio))) { 1218 list_add(&folio->lru, &demote_folios); 1219 folio_unlock(folio); 1220 continue; 1221 } 1222 1223 /* 1224 * Anonymous process memory has backing store? 1225 * Try to allocate it some swap space here. 
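		 * Large folios are first added to swap as a whole; if
		 * contiguous swap entries cannot be allocated, the folio is
		 * split and the base pages are retried individually (counted
		 * as the *_SWPOUT_FALLBACK counters below).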
		 * Lazyfree folio could be freed directly
		 */
		if (folio_test_anon(folio) && folio_test_swapbacked(folio)) {
			if (!folio_test_swapcache(folio)) {
				if (!(sc->gfp_mask & __GFP_IO))
					goto keep_locked;
				if (folio_maybe_dma_pinned(folio))
					goto keep_locked;
				if (folio_test_large(folio)) {
					/* cannot split folio, skip it */
					if (!can_split_folio(folio, 1, NULL))
						goto activate_locked;
					/*
					 * Split partially mapped folios right away.
					 * We can free the unmapped pages without IO.
					 */
					if (data_race(!list_empty(&folio->_deferred_list) &&
					    folio_test_partially_mapped(folio)) &&
					    split_folio_to_list(folio, folio_list))
						goto activate_locked;
				}
				if (!add_to_swap(folio)) {
					int __maybe_unused order = folio_order(folio);

					if (!folio_test_large(folio))
						goto activate_locked_split;
					/* Fallback to swap normal pages */
					if (split_folio_to_list(folio, folio_list))
						goto activate_locked;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
					if (nr_pages >= HPAGE_PMD_NR) {
						count_memcg_folio_events(folio,
							THP_SWPOUT_FALLBACK, 1);
						count_vm_event(THP_SWPOUT_FALLBACK);
					}
#endif
					count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
					if (!add_to_swap(folio))
						goto activate_locked_split;
				}
			}
		}

		/*
		 * If the folio was split above, the tail pages will make
		 * their own pass through this function and be accounted
		 * then.
		 */
		if ((nr_pages > 1) && !folio_test_large(folio)) {
			sc->nr_scanned -= (nr_pages - 1);
			nr_pages = 1;
		}

		/*
		 * The folio is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (folio_mapped(folio)) {
			enum ttu_flags flags = TTU_BATCH_FLUSH;
			bool was_swapbacked = folio_test_swapbacked(folio);

			if (folio_test_pmd_mappable(folio))
				flags |= TTU_SPLIT_HUGE_PMD;
			/*
			 * Without TTU_SYNC, try_to_unmap will only begin to
			 * hold PTL from the first present PTE within a large
			 * folio. Some initial PTEs might be skipped due to
			 * races with parallel PTE writes in which PTEs can be
			 * cleared temporarily before new present values are
			 * written. This can leave a large folio still mapped
			 * while some of its subpages have already been
			 * unmapped by try_to_unmap; TTU_SYNC makes
			 * try_to_unmap acquire the PTL from the first PTE,
			 * eliminating the influence of temporary PTE values.
			 */
			if (folio_test_large(folio))
				flags |= TTU_SYNC;

			try_to_unmap(folio, flags);
			if (folio_mapped(folio)) {
				stat->nr_unmap_fail += nr_pages;
				if (!was_swapbacked &&
				    folio_test_swapbacked(folio))
					stat->nr_lazyfree_fail += nr_pages;
				goto activate_locked;
			}
		}

		/*
		 * Folio is unmapped now so it cannot be newly pinned anymore.
		 * No point in trying to reclaim folio if it is pinned.
		 * Furthermore we don't want to reclaim underlying fs metadata
		 * if the folio is pinned and thus potentially modified by the
		 * pinning process as that may upset the filesystem.
		 */
		if (folio_maybe_dma_pinned(folio))
			goto activate_locked;

		mapping = folio_mapping(folio);
		if (folio_test_dirty(folio)) {
			/*
			 * Only kswapd can write back filesystem folios,
			 * to avoid the risk of stack overflow.
But avoid 1329 * injecting inefficient single-folio I/O into 1330 * flusher writeback as much as possible: only 1331 * write folios when we've encountered many 1332 * dirty folios, and when we've already scanned 1333 * the rest of the LRU for clean folios and see 1334 * the same dirty folios again (with the reclaim 1335 * flag set). 1336 */ 1337 if (folio_is_file_lru(folio) && 1338 (!current_is_kswapd() || 1339 !folio_test_reclaim(folio) || 1340 !test_bit(PGDAT_DIRTY, &pgdat->flags))) { 1341 /* 1342 * Immediately reclaim when written back. 1343 * Similar in principle to folio_deactivate() 1344 * except we already have the folio isolated 1345 * and know it's dirty 1346 */ 1347 node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE, 1348 nr_pages); 1349 folio_set_reclaim(folio); 1350 1351 goto activate_locked; 1352 } 1353 1354 if (references == FOLIOREF_RECLAIM_CLEAN) 1355 goto keep_locked; 1356 if (!may_enter_fs(folio, sc->gfp_mask)) 1357 goto keep_locked; 1358 if (!sc->may_writepage) 1359 goto keep_locked; 1360 1361 /* 1362 * Folio is dirty. Flush the TLB if a writable entry 1363 * potentially exists to avoid CPU writes after I/O 1364 * starts and then write it out here. 1365 */ 1366 try_to_unmap_flush_dirty(); 1367 switch (pageout(folio, mapping, &plug, folio_list)) { 1368 case PAGE_KEEP: 1369 goto keep_locked; 1370 case PAGE_ACTIVATE: 1371 /* 1372 * If shmem folio is split when writeback to swap, 1373 * the tail pages will make their own pass through 1374 * this function and be accounted then. 1375 */ 1376 if (nr_pages > 1 && !folio_test_large(folio)) { 1377 sc->nr_scanned -= (nr_pages - 1); 1378 nr_pages = 1; 1379 } 1380 goto activate_locked; 1381 case PAGE_SUCCESS: 1382 if (nr_pages > 1 && !folio_test_large(folio)) { 1383 sc->nr_scanned -= (nr_pages - 1); 1384 nr_pages = 1; 1385 } 1386 stat->nr_pageout += nr_pages; 1387 1388 if (folio_test_writeback(folio)) 1389 goto keep; 1390 if (folio_test_dirty(folio)) 1391 goto keep; 1392 1393 /* 1394 * A synchronous write - probably a ramdisk. Go 1395 * ahead and try to reclaim the folio. 1396 */ 1397 if (!folio_trylock(folio)) 1398 goto keep; 1399 if (folio_test_dirty(folio) || 1400 folio_test_writeback(folio)) 1401 goto keep_locked; 1402 mapping = folio_mapping(folio); 1403 fallthrough; 1404 case PAGE_CLEAN: 1405 ; /* try to free the folio below */ 1406 } 1407 } 1408 1409 /* 1410 * If the folio has buffers, try to free the buffer 1411 * mappings associated with this folio. If we succeed 1412 * we try to free the folio as well. 1413 * 1414 * We do this even if the folio is dirty. 1415 * filemap_release_folio() does not perform I/O, but it 1416 * is possible for a folio to have the dirty flag set, 1417 * but it is actually clean (all its buffers are clean). 1418 * This happens if the buffers were written out directly, 1419 * with submit_bh(). ext3 will do this, as well as 1420 * the blockdev mapping. filemap_release_folio() will 1421 * discover that cleanness and will drop the buffers 1422 * and mark the folio clean - it can be freed. 1423 * 1424 * Rarely, folios can have buffers and no ->mapping. 1425 * These are the folios which were not successfully 1426 * invalidated in truncate_cleanup_folio(). We try to 1427 * drop those buffers here and if that worked, and the 1428 * folio is no longer mapped into process address space 1429 * (refcount == 1) it can be freed. Otherwise, leave 1430 * the folio on the LRU so it is swappable. 
1431 */ 1432 if (folio_needs_release(folio)) { 1433 if (!filemap_release_folio(folio, sc->gfp_mask)) 1434 goto activate_locked; 1435 if (!mapping && folio_ref_count(folio) == 1) { 1436 folio_unlock(folio); 1437 if (folio_put_testzero(folio)) 1438 goto free_it; 1439 else { 1440 /* 1441 * rare race with speculative reference. 1442 * the speculative reference will free 1443 * this folio shortly, so we may 1444 * increment nr_reclaimed here (and 1445 * leave it off the LRU). 1446 */ 1447 nr_reclaimed += nr_pages; 1448 continue; 1449 } 1450 } 1451 } 1452 1453 if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) { 1454 /* follow __remove_mapping for reference */ 1455 if (!folio_ref_freeze(folio, 1)) 1456 goto keep_locked; 1457 /* 1458 * The folio has only one reference left, which is 1459 * from the isolation. After the caller puts the 1460 * folio back on the lru and drops the reference, the 1461 * folio will be freed anyway. It doesn't matter 1462 * which lru it goes on. So we don't bother checking 1463 * the dirty flag here. 1464 */ 1465 count_vm_events(PGLAZYFREED, nr_pages); 1466 count_memcg_folio_events(folio, PGLAZYFREED, nr_pages); 1467 } else if (!mapping || !__remove_mapping(mapping, folio, true, 1468 sc->target_mem_cgroup)) 1469 goto keep_locked; 1470 1471 folio_unlock(folio); 1472 free_it: 1473 /* 1474 * Folio may get swapped out as a whole, need to account 1475 * all pages in it. 1476 */ 1477 nr_reclaimed += nr_pages; 1478 1479 folio_unqueue_deferred_split(folio); 1480 if (folio_batch_add(&free_folios, folio) == 0) { 1481 mem_cgroup_uncharge_folios(&free_folios); 1482 try_to_unmap_flush(); 1483 free_unref_folios(&free_folios); 1484 } 1485 continue; 1486 1487 activate_locked_split: 1488 /* 1489 * The tail pages that are failed to add into swap cache 1490 * reach here. Fixup nr_scanned and nr_pages. 1491 */ 1492 if (nr_pages > 1) { 1493 sc->nr_scanned -= (nr_pages - 1); 1494 nr_pages = 1; 1495 } 1496 activate_locked: 1497 /* Not a candidate for swapping, so reclaim swap space. */ 1498 if (folio_test_swapcache(folio) && 1499 (mem_cgroup_swap_full(folio) || folio_test_mlocked(folio))) 1500 folio_free_swap(folio); 1501 VM_BUG_ON_FOLIO(folio_test_active(folio), folio); 1502 if (!folio_test_mlocked(folio)) { 1503 int type = folio_is_file_lru(folio); 1504 folio_set_active(folio); 1505 stat->nr_activate[type] += nr_pages; 1506 count_memcg_folio_events(folio, PGACTIVATE, nr_pages); 1507 } 1508 keep_locked: 1509 folio_unlock(folio); 1510 keep: 1511 list_add(&folio->lru, &ret_folios); 1512 VM_BUG_ON_FOLIO(folio_test_lru(folio) || 1513 folio_test_unevictable(folio), folio); 1514 } 1515 /* 'folio_list' is always empty here */ 1516 1517 /* Migrate folios selected for demotion */ 1518 stat->nr_demoted = demote_folio_list(&demote_folios, pgdat); 1519 nr_reclaimed += stat->nr_demoted; 1520 /* Folios that could not be demoted are still in @demote_folios */ 1521 if (!list_empty(&demote_folios)) { 1522 /* Folios which weren't demoted go back on @folio_list */ 1523 list_splice_init(&demote_folios, folio_list); 1524 1525 /* 1526 * goto retry to reclaim the undemoted folios in folio_list if 1527 * desired. 1528 * 1529 * Reclaiming directly from top tier nodes is not often desired 1530 * due to it breaking the LRU ordering: in general memory 1531 * should be reclaimed from lower tier nodes and demoted from 1532 * top tier nodes. 
		 *
		 * However, disabling reclaim from top tier nodes entirely
		 * would cause ooms in edge scenarios where lower tier memory
		 * is unreclaimable for whatever reason, e.g. memory being
		 * mlocked or too hot to reclaim. We can disable reclaim
		 * from top tier nodes in proactive reclaim though as that is
		 * not real memory pressure.
		 */
		if (!sc->proactive) {
			do_demote_pass = false;
			goto retry;
		}
	}

	pgactivate = stat->nr_activate[0] + stat->nr_activate[1];

	mem_cgroup_uncharge_folios(&free_folios);
	try_to_unmap_flush();
	free_unref_folios(&free_folios);

	list_splice(&ret_folios, folio_list);
	count_vm_events(PGACTIVATE, pgactivate);

	if (plug)
		swap_write_unplug(plug);
	return nr_reclaimed;
}

unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					   struct list_head *folio_list)
{
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.may_unmap = 1,
	};
	struct reclaim_stat stat;
	unsigned int nr_reclaimed;
	struct folio *folio, *next;
	LIST_HEAD(clean_folios);
	unsigned int noreclaim_flag;

	list_for_each_entry_safe(folio, next, folio_list, lru) {
		if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) &&
		    !folio_test_dirty(folio) && !__folio_test_movable(folio) &&
		    !folio_test_unevictable(folio)) {
			folio_clear_active(folio);
			list_move(&folio->lru, &clean_folios);
		}
	}

	/*
	 * We should be safe here since we are only dealing with file pages and
	 * we are not kswapd and therefore cannot write dirty file pages. But
	 * call memalloc_noreclaim_save() anyway, just in case these conditions
	 * change in the future.
	 */
	noreclaim_flag = memalloc_noreclaim_save();
	nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc,
					 &stat, true);
	memalloc_noreclaim_restore(noreclaim_flag);

	list_splice(&clean_folios, folio_list);
	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
			    -(long)nr_reclaimed);
	/*
	 * Since lazyfree pages are isolated from the file LRU from the
	 * beginning, they will rotate back to the anonymous LRU in the end
	 * if the discard fails, so the isolated counts would be mismatched.
	 * Compensate the isolated counts for both LRU lists.
	 */
	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
			    stat.nr_lazyfree_fail);
	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
			    -(long)stat.nr_lazyfree_fail);
	return nr_reclaimed;
}

/*
 * Update LRU sizes after isolating pages. The LRU size updates must
 * be complete before mem_cgroup_update_lru_size due to a sanity check.
 */
static __always_inline void update_lru_sizes(struct lruvec *lruvec,
			enum lru_list lru, unsigned long *nr_zone_taken)
{
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		if (!nr_zone_taken[zid])
			continue;

		update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
	}

}

/*
 * Scan up to nr_to_scan pages on the lruvec and move the eligible folios
 * onto the @dst list.
 *
 * lruvec->lru_lock is heavily contended. Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
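 *
 * Folios that belong to zones above sc->reclaim_idx are not isolated:
 * they are counted as PGSCAN_SKIP and spliced back onto the LRU.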
1637 * 1638 * Lru_lock must be held before calling this function. 1639 * 1640 * @nr_to_scan: The number of eligible pages to look through on the list. 1641 * @lruvec: The LRU vector to pull pages from. 1642 * @dst: The temp list to put pages on to. 1643 * @nr_scanned: The number of pages that were scanned. 1644 * @sc: The scan_control struct for this reclaim session 1645 * @lru: LRU list id for isolating 1646 * 1647 * returns how many pages were moved onto *@dst. 1648 */ 1649 static unsigned long isolate_lru_folios(unsigned long nr_to_scan, 1650 struct lruvec *lruvec, struct list_head *dst, 1651 unsigned long *nr_scanned, struct scan_control *sc, 1652 enum lru_list lru) 1653 { 1654 struct list_head *src = &lruvec->lists[lru]; 1655 unsigned long nr_taken = 0; 1656 unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 }; 1657 unsigned long nr_skipped[MAX_NR_ZONES] = { 0, }; 1658 unsigned long skipped = 0; 1659 unsigned long scan, total_scan, nr_pages; 1660 LIST_HEAD(folios_skipped); 1661 1662 total_scan = 0; 1663 scan = 0; 1664 while (scan < nr_to_scan && !list_empty(src)) { 1665 struct list_head *move_to = src; 1666 struct folio *folio; 1667 1668 folio = lru_to_folio(src); 1669 prefetchw_prev_lru_folio(folio, src, flags); 1670 1671 nr_pages = folio_nr_pages(folio); 1672 total_scan += nr_pages; 1673 1674 if (folio_zonenum(folio) > sc->reclaim_idx) { 1675 nr_skipped[folio_zonenum(folio)] += nr_pages; 1676 move_to = &folios_skipped; 1677 goto move; 1678 } 1679 1680 /* 1681 * Do not count skipped folios because that makes the function 1682 * return with no isolated folios if the LRU mostly contains 1683 * ineligible folios. This causes the VM to not reclaim any 1684 * folios, triggering a premature OOM. 1685 * Account all pages in a folio. 1686 */ 1687 scan += nr_pages; 1688 1689 if (!folio_test_lru(folio)) 1690 goto move; 1691 if (!sc->may_unmap && folio_mapped(folio)) 1692 goto move; 1693 1694 /* 1695 * Be careful not to clear the lru flag until after we're 1696 * sure the folio is not being freed elsewhere -- the 1697 * folio release code relies on it. 1698 */ 1699 if (unlikely(!folio_try_get(folio))) 1700 goto move; 1701 1702 if (!folio_test_clear_lru(folio)) { 1703 /* Another thread is already isolating this folio */ 1704 folio_put(folio); 1705 goto move; 1706 } 1707 1708 nr_taken += nr_pages; 1709 nr_zone_taken[folio_zonenum(folio)] += nr_pages; 1710 move_to = dst; 1711 move: 1712 list_move(&folio->lru, move_to); 1713 } 1714 1715 /* 1716 * Splice any skipped folios to the start of the LRU list. Note that 1717 * this disrupts the LRU order when reclaiming for lower zones but 1718 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX 1719 * scanning would soon rescan the same folios to skip and waste lots 1720 * of cpu cycles. 1721 */ 1722 if (!list_empty(&folios_skipped)) { 1723 int zid; 1724 1725 list_splice(&folios_skipped, src); 1726 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 1727 if (!nr_skipped[zid]) 1728 continue; 1729 1730 __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]); 1731 skipped += nr_skipped[zid]; 1732 } 1733 } 1734 *nr_scanned = total_scan; 1735 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, 1736 total_scan, skipped, nr_taken, lru); 1737 update_lru_sizes(lruvec, lru, nr_zone_taken); 1738 return nr_taken; 1739 } 1740 1741 /** 1742 * folio_isolate_lru() - Try to isolate a folio from its LRU list. 1743 * @folio: Folio to isolate from its LRU list. 
1744 * 1745 * Isolate a @folio from an LRU list and adjust the vmstat statistic 1746 * corresponding to whatever LRU list the folio was on. 1747 * 1748 * The folio will have its LRU flag cleared. If it was found on the 1749 * active list, it will have the Active flag set. If it was found on the 1750 * unevictable list, it will have the Unevictable flag set. These flags 1751 * may need to be cleared by the caller before letting the page go. 1752 * 1753 * Context: 1754 * 1755 * (1) Must be called with an elevated refcount on the folio. This is a 1756 * fundamental difference from isolate_lru_folios() (which is called 1757 * without a stable reference). 1758 * (2) The lru_lock must not be held. 1759 * (3) Interrupts must be enabled. 1760 * 1761 * Return: true if the folio was removed from an LRU list. 1762 * false if the folio was not on an LRU list. 1763 */ 1764 bool folio_isolate_lru(struct folio *folio) 1765 { 1766 bool ret = false; 1767 1768 VM_BUG_ON_FOLIO(!folio_ref_count(folio), folio); 1769 1770 if (folio_test_clear_lru(folio)) { 1771 struct lruvec *lruvec; 1772 1773 folio_get(folio); 1774 lruvec = folio_lruvec_lock_irq(folio); 1775 lruvec_del_folio(lruvec, folio); 1776 unlock_page_lruvec_irq(lruvec); 1777 ret = true; 1778 } 1779 1780 return ret; 1781 } 1782 1783 /* 1784 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and 1785 * then get rescheduled. When there are massive number of tasks doing page 1786 * allocation, such sleeping direct reclaimers may keep piling up on each CPU, 1787 * the LRU list will go small and be scanned faster than necessary, leading to 1788 * unnecessary swapping, thrashing and OOM. 1789 */ 1790 static bool too_many_isolated(struct pglist_data *pgdat, int file, 1791 struct scan_control *sc) 1792 { 1793 unsigned long inactive, isolated; 1794 bool too_many; 1795 1796 if (current_is_kswapd()) 1797 return false; 1798 1799 if (!writeback_throttling_sane(sc)) 1800 return false; 1801 1802 if (file) { 1803 inactive = node_page_state(pgdat, NR_INACTIVE_FILE); 1804 isolated = node_page_state(pgdat, NR_ISOLATED_FILE); 1805 } else { 1806 inactive = node_page_state(pgdat, NR_INACTIVE_ANON); 1807 isolated = node_page_state(pgdat, NR_ISOLATED_ANON); 1808 } 1809 1810 /* 1811 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they 1812 * won't get blocked by normal direct-reclaimers, forming a circular 1813 * deadlock. 1814 */ 1815 if (gfp_has_io_fs(sc->gfp_mask)) 1816 inactive >>= 3; 1817 1818 too_many = isolated > inactive; 1819 1820 /* Wake up tasks throttled due to too_many_isolated. */ 1821 if (!too_many) 1822 wake_throttle_isolated(pgdat); 1823 1824 return too_many; 1825 } 1826 1827 /* 1828 * move_folios_to_lru() moves folios from private @list to appropriate LRU list. 1829 * 1830 * Returns the number of pages moved to the given lruvec. 1831 */ 1832 static unsigned int move_folios_to_lru(struct lruvec *lruvec, 1833 struct list_head *list) 1834 { 1835 int nr_pages, nr_moved = 0; 1836 struct folio_batch free_folios; 1837 1838 folio_batch_init(&free_folios); 1839 while (!list_empty(list)) { 1840 struct folio *folio = lru_to_folio(list); 1841 1842 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); 1843 list_del(&folio->lru); 1844 if (unlikely(!folio_evictable(folio))) { 1845 spin_unlock_irq(&lruvec->lru_lock); 1846 folio_putback_lru(folio); 1847 spin_lock_irq(&lruvec->lru_lock); 1848 continue; 1849 } 1850 1851 /* 1852 * The folio_set_lru needs to be kept here for list integrity. 
1853 * Otherwise: 1854 * #0 move_folios_to_lru #1 release_pages 1855 * if (!folio_put_testzero()) 1856 * if (folio_put_testzero()) 1857 * !lru //skip lru_lock 1858 * folio_set_lru() 1859 * list_add(&folio->lru,) 1860 * list_add(&folio->lru,) 1861 */ 1862 folio_set_lru(folio); 1863 1864 if (unlikely(folio_put_testzero(folio))) { 1865 __folio_clear_lru_flags(folio); 1866 1867 folio_unqueue_deferred_split(folio); 1868 if (folio_batch_add(&free_folios, folio) == 0) { 1869 spin_unlock_irq(&lruvec->lru_lock); 1870 mem_cgroup_uncharge_folios(&free_folios); 1871 free_unref_folios(&free_folios); 1872 spin_lock_irq(&lruvec->lru_lock); 1873 } 1874 1875 continue; 1876 } 1877 1878 /* 1879 * All pages were isolated from the same lruvec (and isolation 1880 * inhibits memcg migration). 1881 */ 1882 VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio); 1883 lruvec_add_folio(lruvec, folio); 1884 nr_pages = folio_nr_pages(folio); 1885 nr_moved += nr_pages; 1886 if (folio_test_active(folio)) 1887 workingset_age_nonresident(lruvec, nr_pages); 1888 } 1889 1890 if (free_folios.nr) { 1891 spin_unlock_irq(&lruvec->lru_lock); 1892 mem_cgroup_uncharge_folios(&free_folios); 1893 free_unref_folios(&free_folios); 1894 spin_lock_irq(&lruvec->lru_lock); 1895 } 1896 1897 return nr_moved; 1898 } 1899 1900 /* 1901 * If a kernel thread (such as nfsd for loop-back mounts) services a backing 1902 * device by writing to the page cache it sets PF_LOCAL_THROTTLE. In this case 1903 * we should not throttle. Otherwise it is safe to do so. 1904 */ 1905 static int current_may_throttle(void) 1906 { 1907 return !(current->flags & PF_LOCAL_THROTTLE); 1908 } 1909 1910 /* 1911 * shrink_inactive_list() is a helper for shrink_node(). It returns the number 1912 * of reclaimed pages 1913 */ 1914 static unsigned long shrink_inactive_list(unsigned long nr_to_scan, 1915 struct lruvec *lruvec, struct scan_control *sc, 1916 enum lru_list lru) 1917 { 1918 LIST_HEAD(folio_list); 1919 unsigned long nr_scanned; 1920 unsigned int nr_reclaimed = 0; 1921 unsigned long nr_taken; 1922 struct reclaim_stat stat; 1923 bool file = is_file_lru(lru); 1924 enum vm_event_item item; 1925 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 1926 bool stalled = false; 1927 1928 while (unlikely(too_many_isolated(pgdat, file, sc))) { 1929 if (stalled) 1930 return 0; 1931 1932 /* wait a bit for the reclaimer. */ 1933 stalled = true; 1934 reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED); 1935 1936 /* We are about to die and free our memory. Return now. 
*/ 1937 if (fatal_signal_pending(current)) 1938 return SWAP_CLUSTER_MAX; 1939 } 1940 1941 lru_add_drain(); 1942 1943 spin_lock_irq(&lruvec->lru_lock); 1944 1945 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list, 1946 &nr_scanned, sc, lru); 1947 1948 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); 1949 item = PGSCAN_KSWAPD + reclaimer_offset(); 1950 if (!cgroup_reclaim(sc)) 1951 __count_vm_events(item, nr_scanned); 1952 __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned); 1953 __count_vm_events(PGSCAN_ANON + file, nr_scanned); 1954 1955 spin_unlock_irq(&lruvec->lru_lock); 1956 1957 if (nr_taken == 0) 1958 return 0; 1959 1960 nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false); 1961 1962 spin_lock_irq(&lruvec->lru_lock); 1963 move_folios_to_lru(lruvec, &folio_list); 1964 1965 __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(), 1966 stat.nr_demoted); 1967 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); 1968 item = PGSTEAL_KSWAPD + reclaimer_offset(); 1969 if (!cgroup_reclaim(sc)) 1970 __count_vm_events(item, nr_reclaimed); 1971 __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed); 1972 __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed); 1973 spin_unlock_irq(&lruvec->lru_lock); 1974 1975 lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed); 1976 1977 /* 1978 * If dirty folios are scanned that are not queued for IO, it 1979 * implies that flushers are not doing their job. This can 1980 * happen when memory pressure pushes dirty folios to the end of 1981 * the LRU before the dirty limits are breached and the dirty 1982 * data has expired. It can also happen when the proportion of 1983 * dirty folios grows not through writes but through memory 1984 * pressure reclaiming all the clean cache. And in some cases, 1985 * the flushers simply cannot keep up with the allocation 1986 * rate. Nudge the flusher threads in case they are asleep. 1987 */ 1988 if (stat.nr_unqueued_dirty == nr_taken) { 1989 wakeup_flusher_threads(WB_REASON_VMSCAN); 1990 /* 1991 * For cgroupv1 dirty throttling is achieved by waking up 1992 * the kernel flusher here and later waiting on folios 1993 * which are in writeback to finish (see shrink_folio_list()). 1994 * 1995 * Flusher may not be able to issue writeback quickly 1996 * enough for cgroupv1 writeback throttling to work 1997 * on a large system. 1998 */ 1999 if (!writeback_throttling_sane(sc)) 2000 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); 2001 } 2002 2003 sc->nr.dirty += stat.nr_dirty; 2004 sc->nr.congested += stat.nr_congested; 2005 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; 2006 sc->nr.writeback += stat.nr_writeback; 2007 sc->nr.immediate += stat.nr_immediate; 2008 sc->nr.taken += nr_taken; 2009 if (file) 2010 sc->nr.file_taken += nr_taken; 2011 2012 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, 2013 nr_scanned, nr_reclaimed, &stat, sc->priority, file); 2014 return nr_reclaimed; 2015 } 2016 2017 /* 2018 * shrink_active_list() moves folios from the active LRU to the inactive LRU. 2019 * 2020 * We move them the other way if the folio is referenced by one or more 2021 * processes. 2022 * 2023 * If the folios are mostly unmapped, the processing is fast and it is 2024 * appropriate to hold lru_lock across the whole operation. But if 2025 * the folios are mapped, the processing is slow (folio_referenced()), so 2026 * we should drop lru_lock around each folio. 
It's impossible to balance 2027 * this, so instead we remove the folios from the LRU while processing them. 2028 * It is safe to rely on the active flag of the non-LRU folios here 2029 * because nobody will play with that bit on a non-LRU folio. 2030 * 2031 * The downside is that we have to touch folio->_refcount for each folio. 2032 * But we had to alter folio->flags anyway. 2033 */ 2034 static void shrink_active_list(unsigned long nr_to_scan, 2035 struct lruvec *lruvec, 2036 struct scan_control *sc, 2037 enum lru_list lru) 2038 { 2039 unsigned long nr_taken; 2040 unsigned long nr_scanned; 2041 unsigned long vm_flags; 2042 LIST_HEAD(l_hold); /* The folios which were snipped off */ 2043 LIST_HEAD(l_active); 2044 LIST_HEAD(l_inactive); 2045 unsigned nr_deactivate, nr_activate; 2046 unsigned nr_rotated = 0; 2047 bool file = is_file_lru(lru); 2048 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 2049 2050 lru_add_drain(); 2051 2052 spin_lock_irq(&lruvec->lru_lock); 2053 2054 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold, 2055 &nr_scanned, sc, lru); 2056 2057 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); 2058 2059 if (!cgroup_reclaim(sc)) 2060 __count_vm_events(PGREFILL, nr_scanned); 2061 __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned); 2062 2063 spin_unlock_irq(&lruvec->lru_lock); 2064 2065 while (!list_empty(&l_hold)) { 2066 struct folio *folio; 2067 2068 cond_resched(); 2069 folio = lru_to_folio(&l_hold); 2070 list_del(&folio->lru); 2071 2072 if (unlikely(!folio_evictable(folio))) { 2073 folio_putback_lru(folio); 2074 continue; 2075 } 2076 2077 if (unlikely(buffer_heads_over_limit)) { 2078 if (folio_needs_release(folio) && 2079 folio_trylock(folio)) { 2080 filemap_release_folio(folio, 0); 2081 folio_unlock(folio); 2082 } 2083 } 2084 2085 /* Referenced or rmap lock contention: rotate */ 2086 if (folio_referenced(folio, 0, sc->target_mem_cgroup, 2087 &vm_flags) != 0) { 2088 /* 2089 * Identify referenced, file-backed active folios and 2090 * give them one more trip around the active list, so 2091 * that executable code gets a better chance to stay in 2092 * memory under moderate memory pressure. Anon folios 2093 * are not likely to be evicted by use-once streaming 2094 * IO, plus JVM can create lots of anon VM_EXEC folios, 2095 * so we ignore them here. 2096 */ 2097 if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) { 2098 nr_rotated += folio_nr_pages(folio); 2099 list_add(&folio->lru, &l_active); 2100 continue; 2101 } 2102 } 2103 2104 folio_clear_active(folio); /* we are de-activating */ 2105 folio_set_workingset(folio); 2106 list_add(&folio->lru, &l_inactive); 2107 } 2108 2109 /* 2110 * Move folios back to the lru list.
2111 */ 2112 spin_lock_irq(&lruvec->lru_lock); 2113 2114 nr_activate = move_folios_to_lru(lruvec, &l_active); 2115 nr_deactivate = move_folios_to_lru(lruvec, &l_inactive); 2116 2117 __count_vm_events(PGDEACTIVATE, nr_deactivate); 2118 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate); 2119 2120 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); 2121 spin_unlock_irq(&lruvec->lru_lock); 2122 2123 if (nr_rotated) 2124 lru_note_cost(lruvec, file, 0, nr_rotated); 2125 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, 2126 nr_deactivate, nr_rotated, sc->priority, file); 2127 } 2128 2129 static unsigned int reclaim_folio_list(struct list_head *folio_list, 2130 struct pglist_data *pgdat) 2131 { 2132 struct reclaim_stat stat; 2133 unsigned int nr_reclaimed; 2134 struct folio *folio; 2135 struct scan_control sc = { 2136 .gfp_mask = GFP_KERNEL, 2137 .may_writepage = 1, 2138 .may_unmap = 1, 2139 .may_swap = 1, 2140 .no_demotion = 1, 2141 }; 2142 2143 nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &stat, true); 2144 while (!list_empty(folio_list)) { 2145 folio = lru_to_folio(folio_list); 2146 list_del(&folio->lru); 2147 folio_putback_lru(folio); 2148 } 2149 trace_mm_vmscan_reclaim_pages(pgdat->node_id, sc.nr_scanned, nr_reclaimed, &stat); 2150 2151 return nr_reclaimed; 2152 } 2153 2154 unsigned long reclaim_pages(struct list_head *folio_list) 2155 { 2156 int nid; 2157 unsigned int nr_reclaimed = 0; 2158 LIST_HEAD(node_folio_list); 2159 unsigned int noreclaim_flag; 2160 2161 if (list_empty(folio_list)) 2162 return nr_reclaimed; 2163 2164 noreclaim_flag = memalloc_noreclaim_save(); 2165 2166 nid = folio_nid(lru_to_folio(folio_list)); 2167 do { 2168 struct folio *folio = lru_to_folio(folio_list); 2169 2170 if (nid == folio_nid(folio)) { 2171 folio_clear_active(folio); 2172 list_move(&folio->lru, &node_folio_list); 2173 continue; 2174 } 2175 2176 nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid)); 2177 nid = folio_nid(lru_to_folio(folio_list)); 2178 } while (!list_empty(folio_list)); 2179 2180 nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid)); 2181 2182 memalloc_noreclaim_restore(noreclaim_flag); 2183 2184 return nr_reclaimed; 2185 } 2186 2187 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 2188 struct lruvec *lruvec, struct scan_control *sc) 2189 { 2190 if (is_active_lru(lru)) { 2191 if (sc->may_deactivate & (1 << is_file_lru(lru))) 2192 shrink_active_list(nr_to_scan, lruvec, sc, lru); 2193 else 2194 sc->skipped_deactivate = 1; 2195 return 0; 2196 } 2197 2198 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); 2199 } 2200 2201 /* 2202 * The inactive anon list should be small enough that the VM never has 2203 * to do too much work. 2204 * 2205 * The inactive file list should be small enough to leave most memory 2206 * to the established workingset on the scan-resistant active list, 2207 * but large enough to avoid thrashing the aggregate readahead window. 2208 * 2209 * Both inactive lists should also be large enough that each inactive 2210 * folio has a chance to be referenced again before it is reclaimed. 2211 * 2212 * If that fails and refaulting is observed, the inactive list grows. 2213 * 2214 * The inactive_ratio is the target ratio of ACTIVE to INACTIVE folios 2215 * on this LRU, maintained by the pageout code. An inactive_ratio 2216 * of 3 means 3:1 or 25% of the folios are kept on the inactive list. 
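 * The ratio below comes from inactive_is_low(): with gb the combined size of
 * the active and inactive lists in gigabytes, the target ratio is
 * int_sqrt(10 * gb), falling back to 1 when the lists total less than 1GB.
 * The table shows approximate values: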
2217 * 2218 * total target max 2219 * memory ratio inactive 2220 * ------------------------------------- 2221 * 10MB 1 5MB 2222 * 100MB 1 50MB 2223 * 1GB 3 250MB 2224 * 10GB 10 0.9GB 2225 * 100GB 31 3GB 2226 * 1TB 101 10GB 2227 * 10TB 320 32GB 2228 */ 2229 static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru) 2230 { 2231 enum lru_list active_lru = inactive_lru + LRU_ACTIVE; 2232 unsigned long inactive, active; 2233 unsigned long inactive_ratio; 2234 unsigned long gb; 2235 2236 inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru); 2237 active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru); 2238 2239 gb = (inactive + active) >> (30 - PAGE_SHIFT); 2240 if (gb) 2241 inactive_ratio = int_sqrt(10 * gb); 2242 else 2243 inactive_ratio = 1; 2244 2245 return inactive * inactive_ratio < active; 2246 } 2247 2248 enum scan_balance { 2249 SCAN_EQUAL, 2250 SCAN_FRACT, 2251 SCAN_ANON, 2252 SCAN_FILE, 2253 }; 2254 2255 static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc) 2256 { 2257 unsigned long file; 2258 struct lruvec *target_lruvec; 2259 2260 if (lru_gen_enabled()) 2261 return; 2262 2263 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); 2264 2265 /* 2266 * Flush the memory cgroup stats in rate-limited way as we don't need 2267 * most accurate stats here. We may switch to regular stats flushing 2268 * in the future once it is cheap enough. 2269 */ 2270 mem_cgroup_flush_stats_ratelimited(sc->target_mem_cgroup); 2271 2272 /* 2273 * Determine the scan balance between anon and file LRUs. 2274 */ 2275 spin_lock_irq(&target_lruvec->lru_lock); 2276 sc->anon_cost = target_lruvec->anon_cost; 2277 sc->file_cost = target_lruvec->file_cost; 2278 spin_unlock_irq(&target_lruvec->lru_lock); 2279 2280 /* 2281 * Target desirable inactive:active list ratios for the anon 2282 * and file LRU lists. 2283 */ 2284 if (!sc->force_deactivate) { 2285 unsigned long refaults; 2286 2287 /* 2288 * When refaults are being observed, it means a new 2289 * workingset is being established. Deactivate to get 2290 * rid of any stale active pages quickly. 2291 */ 2292 refaults = lruvec_page_state(target_lruvec, 2293 WORKINGSET_ACTIVATE_ANON); 2294 if (refaults != target_lruvec->refaults[WORKINGSET_ANON] || 2295 inactive_is_low(target_lruvec, LRU_INACTIVE_ANON)) 2296 sc->may_deactivate |= DEACTIVATE_ANON; 2297 else 2298 sc->may_deactivate &= ~DEACTIVATE_ANON; 2299 2300 refaults = lruvec_page_state(target_lruvec, 2301 WORKINGSET_ACTIVATE_FILE); 2302 if (refaults != target_lruvec->refaults[WORKINGSET_FILE] || 2303 inactive_is_low(target_lruvec, LRU_INACTIVE_FILE)) 2304 sc->may_deactivate |= DEACTIVATE_FILE; 2305 else 2306 sc->may_deactivate &= ~DEACTIVATE_FILE; 2307 } else 2308 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE; 2309 2310 /* 2311 * If we have plenty of inactive file pages that aren't 2312 * thrashing, try to reclaim those first before touching 2313 * anonymous pages. 2314 */ 2315 file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE); 2316 if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE) && 2317 !sc->no_cache_trim_mode) 2318 sc->cache_trim_mode = 1; 2319 else 2320 sc->cache_trim_mode = 0; 2321 2322 /* 2323 * Prevent the reclaimer from falling into the cache trap: as 2324 * cache pages start out inactive, every cache fault will tip 2325 * the scan balance towards the file LRU. And as the file LRU 2326 * shrinks, so does the window for rotation from references. 
2327 * This means we have a runaway feedback loop where a tiny 2328 * thrashing file LRU becomes infinitely more attractive than 2329 * anon pages. Try to detect this based on file LRU size. 2330 */ 2331 if (!cgroup_reclaim(sc)) { 2332 unsigned long total_high_wmark = 0; 2333 unsigned long free, anon; 2334 int z; 2335 2336 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); 2337 file = node_page_state(pgdat, NR_ACTIVE_FILE) + 2338 node_page_state(pgdat, NR_INACTIVE_FILE); 2339 2340 for (z = 0; z < MAX_NR_ZONES; z++) { 2341 struct zone *zone = &pgdat->node_zones[z]; 2342 2343 if (!managed_zone(zone)) 2344 continue; 2345 2346 total_high_wmark += high_wmark_pages(zone); 2347 } 2348 2349 /* 2350 * Consider anon: if that's low too, this isn't a 2351 * runaway file reclaim problem, but rather just 2352 * extreme pressure. Reclaim as per usual then. 2353 */ 2354 anon = node_page_state(pgdat, NR_INACTIVE_ANON); 2355 2356 sc->file_is_tiny = 2357 file + free <= total_high_wmark && 2358 !(sc->may_deactivate & DEACTIVATE_ANON) && 2359 anon >> sc->priority; 2360 } 2361 } 2362 2363 /* 2364 * Determine how aggressively the anon and file LRU lists should be 2365 * scanned. 2366 * 2367 * nr[0] = anon inactive folios to scan; nr[1] = anon active folios to scan 2368 * nr[2] = file inactive folios to scan; nr[3] = file active folios to scan 2369 */ 2370 static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, 2371 unsigned long *nr) 2372 { 2373 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 2374 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 2375 unsigned long anon_cost, file_cost, total_cost; 2376 int swappiness = sc_swappiness(sc, memcg); 2377 u64 fraction[ANON_AND_FILE]; 2378 u64 denominator = 0; /* gcc */ 2379 enum scan_balance scan_balance; 2380 unsigned long ap, fp; 2381 enum lru_list lru; 2382 2383 /* If we have no swap space, do not bother scanning anon folios. */ 2384 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) { 2385 scan_balance = SCAN_FILE; 2386 goto out; 2387 } 2388 2389 /* 2390 * Global reclaim will swap to prevent OOM even with no 2391 * swappiness, but memcg users want to use this knob to 2392 * disable swapping for individual groups completely when 2393 * using the memory controller's swap limit feature would be 2394 * too expensive. 2395 */ 2396 if (cgroup_reclaim(sc) && !swappiness) { 2397 scan_balance = SCAN_FILE; 2398 goto out; 2399 } 2400 2401 /* 2402 * Do not apply any pressure balancing cleverness when the 2403 * system is close to OOM; scan both anon and file equally 2404 * (unless the swappiness setting disagrees with swapping). 2405 */ 2406 if (!sc->priority && swappiness) { 2407 scan_balance = SCAN_EQUAL; 2408 goto out; 2409 } 2410 2411 /* 2412 * If the system is almost out of file pages, force-scan anon. 2413 */ 2414 if (sc->file_is_tiny) { 2415 scan_balance = SCAN_ANON; 2416 goto out; 2417 } 2418 2419 /* 2420 * If there is enough inactive page cache, we do not reclaim 2421 * anything from the anonymous working set right now. 2422 */ 2423 if (sc->cache_trim_mode) { 2424 scan_balance = SCAN_FILE; 2425 goto out; 2426 } 2427 2428 scan_balance = SCAN_FRACT; 2429 /* 2430 * Calculate the pressure balance between anon and file pages.
2431 * 2432 * The amount of pressure we put on each LRU is inversely 2433 * proportional to the cost of reclaiming each list, as 2434 * determined by the share of pages that are refaulting, times 2435 * the relative IO cost of bringing back a swapped out 2436 * anonymous page vs reloading a filesystem page (swappiness). 2437 * 2438 * Although we limit that influence to ensure no list gets 2439 * left behind completely: at least a third of the pressure is 2440 * applied, before swappiness. 2441 * 2442 * With swappiness at 100, anon and file have equal IO cost. 2443 */ 2444 total_cost = sc->anon_cost + sc->file_cost; 2445 anon_cost = total_cost + sc->anon_cost; 2446 file_cost = total_cost + sc->file_cost; 2447 total_cost = anon_cost + file_cost; 2448 2449 ap = swappiness * (total_cost + 1); 2450 ap /= anon_cost + 1; 2451 2452 fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1); 2453 fp /= file_cost + 1; 2454 2455 fraction[0] = ap; 2456 fraction[1] = fp; 2457 denominator = ap + fp; 2458 out: 2459 for_each_evictable_lru(lru) { 2460 bool file = is_file_lru(lru); 2461 unsigned long lruvec_size; 2462 unsigned long low, min; 2463 unsigned long scan; 2464 2465 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); 2466 mem_cgroup_protection(sc->target_mem_cgroup, memcg, 2467 &min, &low); 2468 2469 if (min || low) { 2470 /* 2471 * Scale a cgroup's reclaim pressure by proportioning 2472 * its current usage to its memory.low or memory.min 2473 * setting. 2474 * 2475 * This is important, as otherwise scanning aggression 2476 * becomes extremely binary -- from nothing as we 2477 * approach the memory protection threshold, to totally 2478 * nominal as we exceed it. This results in requiring 2479 * setting extremely liberal protection thresholds. It 2480 * also means we simply get no protection at all if we 2481 * set it too low, which is not ideal. 2482 * 2483 * If there is any protection in place, we reduce scan 2484 * pressure by how much of the total memory used is 2485 * within protection thresholds. 2486 * 2487 * There is one special case: in the first reclaim pass, 2488 * we skip over all groups that are within their low 2489 * protection. If that fails to reclaim enough pages to 2490 * satisfy the reclaim goal, we come back and override 2491 * the best-effort low protection. However, we still 2492 * ideally want to honor how well-behaved groups are in 2493 * that case instead of simply punishing them all 2494 * equally. As such, we reclaim them based on how much 2495 * memory they are using, reducing the scan pressure 2496 * again by how much of the total memory used is under 2497 * hard protection. 2498 */ 2499 unsigned long cgroup_size = mem_cgroup_size(memcg); 2500 unsigned long protection; 2501 2502 /* memory.low scaling, make sure we retry before OOM */ 2503 if (!sc->memcg_low_reclaim && low > min) { 2504 protection = low; 2505 sc->memcg_low_skipped = 1; 2506 } else { 2507 protection = min; 2508 } 2509 2510 /* Avoid TOCTOU with earlier protection check */ 2511 cgroup_size = max(cgroup_size, protection); 2512 2513 scan = lruvec_size - lruvec_size * protection / 2514 (cgroup_size + 1); 2515 2516 /* 2517 * Minimally target SWAP_CLUSTER_MAX pages to keep 2518 * reclaim moving forwards, avoiding decrementing 2519 * sc->priority further than desirable. 2520 */ 2521 scan = max(scan, SWAP_CLUSTER_MAX); 2522 } else { 2523 scan = lruvec_size; 2524 } 2525 2526 scan >>= sc->priority; 2527 2528 /* 2529 * If the cgroup's already been deleted, make sure to 2530 * scrape out the remaining cache. 
2531 */ 2532 if (!scan && !mem_cgroup_online(memcg)) 2533 scan = min(lruvec_size, SWAP_CLUSTER_MAX); 2534 2535 switch (scan_balance) { 2536 case SCAN_EQUAL: 2537 /* Scan lists relative to size */ 2538 break; 2539 case SCAN_FRACT: 2540 /* 2541 * Scan types proportional to swappiness and 2542 * their relative recent reclaim efficiency. 2543 * Make sure we don't miss the last page on 2544 * the offlined memory cgroups because of a 2545 * round-off error. 2546 */ 2547 scan = mem_cgroup_online(memcg) ? 2548 div64_u64(scan * fraction[file], denominator) : 2549 DIV64_U64_ROUND_UP(scan * fraction[file], 2550 denominator); 2551 break; 2552 case SCAN_FILE: 2553 case SCAN_ANON: 2554 /* Scan one type exclusively */ 2555 if ((scan_balance == SCAN_FILE) != file) 2556 scan = 0; 2557 break; 2558 default: 2559 /* Look ma, no brain */ 2560 BUG(); 2561 } 2562 2563 nr[lru] = scan; 2564 } 2565 } 2566 2567 /* 2568 * Anonymous LRU management is a waste if there is 2569 * ultimately no way to reclaim the memory. 2570 */ 2571 static bool can_age_anon_pages(struct pglist_data *pgdat, 2572 struct scan_control *sc) 2573 { 2574 /* Aging the anon LRU is valuable if swap is present: */ 2575 if (total_swap_pages > 0) 2576 return true; 2577 2578 /* Also valuable if anon pages can be demoted: */ 2579 return can_demote(pgdat->node_id, sc); 2580 } 2581 2582 #ifdef CONFIG_LRU_GEN 2583 2584 #ifdef CONFIG_LRU_GEN_ENABLED 2585 DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS); 2586 #define get_cap(cap) static_branch_likely(&lru_gen_caps[cap]) 2587 #else 2588 DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS); 2589 #define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap]) 2590 #endif 2591 2592 static bool should_walk_mmu(void) 2593 { 2594 return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK); 2595 } 2596 2597 static bool should_clear_pmd_young(void) 2598 { 2599 return arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG); 2600 } 2601 2602 /****************************************************************************** 2603 * shorthand helpers 2604 ******************************************************************************/ 2605 2606 #define DEFINE_MAX_SEQ(lruvec) \ 2607 unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq) 2608 2609 #define DEFINE_MIN_SEQ(lruvec) \ 2610 unsigned long min_seq[ANON_AND_FILE] = { \ 2611 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \ 2612 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \ 2613 } 2614 2615 #define for_each_gen_type_zone(gen, type, zone) \ 2616 for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \ 2617 for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \ 2618 for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++) 2619 2620 #define get_memcg_gen(seq) ((seq) % MEMCG_NR_GENS) 2621 #define get_memcg_bin(bin) ((bin) % MEMCG_NR_BINS) 2622 2623 static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid) 2624 { 2625 struct pglist_data *pgdat = NODE_DATA(nid); 2626 2627 #ifdef CONFIG_MEMCG 2628 if (memcg) { 2629 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec; 2630 2631 /* see the comment in mem_cgroup_lruvec() */ 2632 if (!lruvec->pgdat) 2633 lruvec->pgdat = pgdat; 2634 2635 return lruvec; 2636 } 2637 #endif 2638 VM_WARN_ON_ONCE(!mem_cgroup_disabled()); 2639 2640 return &pgdat->__lruvec; 2641 } 2642 2643 static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc) 2644 { 2645 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 2646 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 2647 2648 if (!sc->may_swap) 2649 return 
0; 2650 2651 if (!can_demote(pgdat->node_id, sc) && 2652 mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH) 2653 return 0; 2654 2655 return sc_swappiness(sc, memcg); 2656 } 2657 2658 static int get_nr_gens(struct lruvec *lruvec, int type) 2659 { 2660 return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1; 2661 } 2662 2663 static bool __maybe_unused seq_is_valid(struct lruvec *lruvec) 2664 { 2665 /* see the comment on lru_gen_folio */ 2666 return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS && 2667 get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) && 2668 get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS; 2669 } 2670 2671 /****************************************************************************** 2672 * Bloom filters 2673 ******************************************************************************/ 2674 2675 /* 2676 * Bloom filters with m=1<<15, k=2 and false positive rates of ~1/5 when 2677 * n=10,000 and ~1/2 when n=20,000, where, conventionally, m is the number of 2678 * bits in a bitmap, k is the number of hash functions and n is the number of 2679 * inserted items. 2680 * 2681 * Page table walkers use one of the two filters to reduce their search space. 2682 * To get rid of non-leaf entries that no longer have enough leaf entries, the 2683 * aging uses the double-buffering technique to flip to the other filter each 2684 * time it produces a new generation. For non-leaf entries that have enough 2685 * leaf entries, the aging carries them over to the next generation in 2686 * walk_pmd_range(); the eviction also reports them when walking the rmap 2687 * in lru_gen_look_around(). 2688 * 2689 * For future optimizations: 2690 * 1. It's not necessary to keep both filters all the time. The spare one can be 2691 * freed after the RCU grace period and reallocated if needed again. 2692 * 2. When reallocating, it's worth scaling its size according to the number 2693 * of inserted entries in the other filter, to reduce the memory overhead on 2694 * small systems and false positives on large systems. 2695 * 3. Jenkins' hash function is an alternative to Knuth's.
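 * As a sanity check on the rates quoted above: the standard Bloom filter
 * approximation (1 - e^(-k*n/m))^k with m = 1<<15 and k = 2 gives ~0.21 for
 * n = 10,000 and ~0.50 for n = 20,000.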
2696 */ 2697 #define BLOOM_FILTER_SHIFT 15 2698 2699 static inline int filter_gen_from_seq(unsigned long seq) 2700 { 2701 return seq % NR_BLOOM_FILTERS; 2702 } 2703 2704 static void get_item_key(void *item, int *key) 2705 { 2706 u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2); 2707 2708 BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32)); 2709 2710 key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1); 2711 key[1] = hash >> BLOOM_FILTER_SHIFT; 2712 } 2713 2714 static bool test_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq, 2715 void *item) 2716 { 2717 int key[2]; 2718 unsigned long *filter; 2719 int gen = filter_gen_from_seq(seq); 2720 2721 filter = READ_ONCE(mm_state->filters[gen]); 2722 if (!filter) 2723 return true; 2724 2725 get_item_key(item, key); 2726 2727 return test_bit(key[0], filter) && test_bit(key[1], filter); 2728 } 2729 2730 static void update_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq, 2731 void *item) 2732 { 2733 int key[2]; 2734 unsigned long *filter; 2735 int gen = filter_gen_from_seq(seq); 2736 2737 filter = READ_ONCE(mm_state->filters[gen]); 2738 if (!filter) 2739 return; 2740 2741 get_item_key(item, key); 2742 2743 if (!test_bit(key[0], filter)) 2744 set_bit(key[0], filter); 2745 if (!test_bit(key[1], filter)) 2746 set_bit(key[1], filter); 2747 } 2748 2749 static void reset_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq) 2750 { 2751 unsigned long *filter; 2752 int gen = filter_gen_from_seq(seq); 2753 2754 filter = mm_state->filters[gen]; 2755 if (filter) { 2756 bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT)); 2757 return; 2758 } 2759 2760 filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT), 2761 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); 2762 WRITE_ONCE(mm_state->filters[gen], filter); 2763 } 2764 2765 /****************************************************************************** 2766 * mm_struct list 2767 ******************************************************************************/ 2768 2769 #ifdef CONFIG_LRU_GEN_WALKS_MMU 2770 2771 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg) 2772 { 2773 static struct lru_gen_mm_list mm_list = { 2774 .fifo = LIST_HEAD_INIT(mm_list.fifo), 2775 .lock = __SPIN_LOCK_UNLOCKED(mm_list.lock), 2776 }; 2777 2778 #ifdef CONFIG_MEMCG 2779 if (memcg) 2780 return &memcg->mm_list; 2781 #endif 2782 VM_WARN_ON_ONCE(!mem_cgroup_disabled()); 2783 2784 return &mm_list; 2785 } 2786 2787 static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec) 2788 { 2789 return &lruvec->mm_state; 2790 } 2791 2792 static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk) 2793 { 2794 int key; 2795 struct mm_struct *mm; 2796 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); 2797 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); 2798 2799 mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); 2800 key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap); 2801 2802 if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap)) 2803 return NULL; 2804 2805 clear_bit(key, &mm->lru_gen.bitmap); 2806 2807 return mmget_not_zero(mm) ? 
mm : NULL; 2808 } 2809 2810 void lru_gen_add_mm(struct mm_struct *mm) 2811 { 2812 int nid; 2813 struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm); 2814 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 2815 2816 VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list)); 2817 #ifdef CONFIG_MEMCG 2818 VM_WARN_ON_ONCE(mm->lru_gen.memcg); 2819 mm->lru_gen.memcg = memcg; 2820 #endif 2821 spin_lock(&mm_list->lock); 2822 2823 for_each_node_state(nid, N_MEMORY) { 2824 struct lruvec *lruvec = get_lruvec(memcg, nid); 2825 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 2826 2827 /* the first addition since the last iteration */ 2828 if (mm_state->tail == &mm_list->fifo) 2829 mm_state->tail = &mm->lru_gen.list; 2830 } 2831 2832 list_add_tail(&mm->lru_gen.list, &mm_list->fifo); 2833 2834 spin_unlock(&mm_list->lock); 2835 } 2836 2837 void lru_gen_del_mm(struct mm_struct *mm) 2838 { 2839 int nid; 2840 struct lru_gen_mm_list *mm_list; 2841 struct mem_cgroup *memcg = NULL; 2842 2843 if (list_empty(&mm->lru_gen.list)) 2844 return; 2845 2846 #ifdef CONFIG_MEMCG 2847 memcg = mm->lru_gen.memcg; 2848 #endif 2849 mm_list = get_mm_list(memcg); 2850 2851 spin_lock(&mm_list->lock); 2852 2853 for_each_node(nid) { 2854 struct lruvec *lruvec = get_lruvec(memcg, nid); 2855 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 2856 2857 /* where the current iteration continues after */ 2858 if (mm_state->head == &mm->lru_gen.list) 2859 mm_state->head = mm_state->head->prev; 2860 2861 /* where the last iteration ended before */ 2862 if (mm_state->tail == &mm->lru_gen.list) 2863 mm_state->tail = mm_state->tail->next; 2864 } 2865 2866 list_del_init(&mm->lru_gen.list); 2867 2868 spin_unlock(&mm_list->lock); 2869 2870 #ifdef CONFIG_MEMCG 2871 mem_cgroup_put(mm->lru_gen.memcg); 2872 mm->lru_gen.memcg = NULL; 2873 #endif 2874 } 2875 2876 #ifdef CONFIG_MEMCG 2877 void lru_gen_migrate_mm(struct mm_struct *mm) 2878 { 2879 struct mem_cgroup *memcg; 2880 struct task_struct *task = rcu_dereference_protected(mm->owner, true); 2881 2882 VM_WARN_ON_ONCE(task->mm != mm); 2883 lockdep_assert_held(&task->alloc_lock); 2884 2885 /* for mm_update_next_owner() */ 2886 if (mem_cgroup_disabled()) 2887 return; 2888 2889 /* migration can happen before addition */ 2890 if (!mm->lru_gen.memcg) 2891 return; 2892 2893 rcu_read_lock(); 2894 memcg = mem_cgroup_from_task(task); 2895 rcu_read_unlock(); 2896 if (memcg == mm->lru_gen.memcg) 2897 return; 2898 2899 VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list)); 2900 2901 lru_gen_del_mm(mm); 2902 lru_gen_add_mm(mm); 2903 } 2904 #endif 2905 2906 #else /* !CONFIG_LRU_GEN_WALKS_MMU */ 2907 2908 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg) 2909 { 2910 return NULL; 2911 } 2912 2913 static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec) 2914 { 2915 return NULL; 2916 } 2917 2918 static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk) 2919 { 2920 return NULL; 2921 } 2922 2923 #endif 2924 2925 static void reset_mm_stats(struct lru_gen_mm_walk *walk, bool last) 2926 { 2927 int i; 2928 int hist; 2929 struct lruvec *lruvec = walk->lruvec; 2930 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 2931 2932 lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock); 2933 2934 hist = lru_hist_from_seq(walk->seq); 2935 2936 for (i = 0; i < NR_MM_STATS; i++) { 2937 WRITE_ONCE(mm_state->stats[hist][i], 2938 mm_state->stats[hist][i] + walk->mm_stats[i]); 2939 walk->mm_stats[i] = 0; 2940 } 2941 2942 if (NR_HIST_GENS > 1 && last) { 2943 hist = 
lru_hist_from_seq(walk->seq + 1); 2944 2945 for (i = 0; i < NR_MM_STATS; i++) 2946 WRITE_ONCE(mm_state->stats[hist][i], 0); 2947 } 2948 } 2949 2950 static bool iterate_mm_list(struct lru_gen_mm_walk *walk, struct mm_struct **iter) 2951 { 2952 bool first = false; 2953 bool last = false; 2954 struct mm_struct *mm = NULL; 2955 struct lruvec *lruvec = walk->lruvec; 2956 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 2957 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 2958 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 2959 2960 /* 2961 * mm_state->seq is incremented after each iteration of mm_list. There 2962 * are three interesting cases for this page table walker: 2963 * 1. It tries to start a new iteration with a stale max_seq: there is 2964 * nothing left to do. 2965 * 2. It started the next iteration: it needs to reset the Bloom filter 2966 * so that a fresh set of PTE tables can be recorded. 2967 * 3. It ended the current iteration: it needs to reset the mm stats 2968 * counters and tell its caller to increment max_seq. 2969 */ 2970 spin_lock(&mm_list->lock); 2971 2972 VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->seq); 2973 2974 if (walk->seq <= mm_state->seq) 2975 goto done; 2976 2977 if (!mm_state->head) 2978 mm_state->head = &mm_list->fifo; 2979 2980 if (mm_state->head == &mm_list->fifo) 2981 first = true; 2982 2983 do { 2984 mm_state->head = mm_state->head->next; 2985 if (mm_state->head == &mm_list->fifo) { 2986 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); 2987 last = true; 2988 break; 2989 } 2990 2991 /* force scan for those added after the last iteration */ 2992 if (!mm_state->tail || mm_state->tail == mm_state->head) { 2993 mm_state->tail = mm_state->head->next; 2994 walk->force_scan = true; 2995 } 2996 } while (!(mm = get_next_mm(walk))); 2997 done: 2998 if (*iter || last) 2999 reset_mm_stats(walk, last); 3000 3001 spin_unlock(&mm_list->lock); 3002 3003 if (mm && first) 3004 reset_bloom_filter(mm_state, walk->seq + 1); 3005 3006 if (*iter) 3007 mmput_async(*iter); 3008 3009 *iter = mm; 3010 3011 return last; 3012 } 3013 3014 static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long seq) 3015 { 3016 bool success = false; 3017 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 3018 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 3019 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 3020 3021 spin_lock(&mm_list->lock); 3022 3023 VM_WARN_ON_ONCE(mm_state->seq + 1 < seq); 3024 3025 if (seq > mm_state->seq) { 3026 mm_state->head = NULL; 3027 mm_state->tail = NULL; 3028 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); 3029 success = true; 3030 } 3031 3032 spin_unlock(&mm_list->lock); 3033 3034 return success; 3035 } 3036 3037 /****************************************************************************** 3038 * PID controller 3039 ******************************************************************************/ 3040 3041 /* 3042 * A feedback loop based on Proportional-Integral-Derivative (PID) controller. 3043 * 3044 * The P term is refaulted/(evicted+protected) from a tier in the generation 3045 * currently being evicted; the I term is the exponential moving average of the 3046 * P term over the generations previously evicted, using the smoothing factor 3047 * 1/2; the D term isn't supported. 3048 * 3049 * The setpoint (SP) is always the first tier of one type; the process variable 3050 * (PV) is either any tier of the other type or any other tier of the same 3051 * type. 
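 * For instance, one such comparison is between the first tier of file folios
 * (the SP) and a higher tier of file folios (the PV).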
3052 * 3053 * The error is the difference between the SP and the PV; the correction is to 3054 * turn off protection when SP>PV or turn on protection when SP<PV. 3055 * 3056 * For future optimizations: 3057 * 1. The D term may discount the other two terms over time so that long-lived 3058 * generations can resist stale information. 3059 */ 3060 struct ctrl_pos { 3061 unsigned long refaulted; 3062 unsigned long total; 3063 int gain; 3064 }; 3065 3066 static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain, 3067 struct ctrl_pos *pos) 3068 { 3069 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3070 int hist = lru_hist_from_seq(lrugen->min_seq[type]); 3071 3072 pos->refaulted = lrugen->avg_refaulted[type][tier] + 3073 atomic_long_read(&lrugen->refaulted[hist][type][tier]); 3074 pos->total = lrugen->avg_total[type][tier] + 3075 atomic_long_read(&lrugen->evicted[hist][type][tier]); 3076 if (tier) 3077 pos->total += lrugen->protected[hist][type][tier - 1]; 3078 pos->gain = gain; 3079 } 3080 3081 static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover) 3082 { 3083 int hist, tier; 3084 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3085 bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1; 3086 unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1; 3087 3088 lockdep_assert_held(&lruvec->lru_lock); 3089 3090 if (!carryover && !clear) 3091 return; 3092 3093 hist = lru_hist_from_seq(seq); 3094 3095 for (tier = 0; tier < MAX_NR_TIERS; tier++) { 3096 if (carryover) { 3097 unsigned long sum; 3098 3099 sum = lrugen->avg_refaulted[type][tier] + 3100 atomic_long_read(&lrugen->refaulted[hist][type][tier]); 3101 WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2); 3102 3103 sum = lrugen->avg_total[type][tier] + 3104 atomic_long_read(&lrugen->evicted[hist][type][tier]); 3105 if (tier) 3106 sum += lrugen->protected[hist][type][tier - 1]; 3107 WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2); 3108 } 3109 3110 if (clear) { 3111 atomic_long_set(&lrugen->refaulted[hist][type][tier], 0); 3112 atomic_long_set(&lrugen->evicted[hist][type][tier], 0); 3113 if (tier) 3114 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0); 3115 } 3116 } 3117 } 3118 3119 static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv) 3120 { 3121 /* 3122 * Return true if the PV has a limited number of refaults or a lower 3123 * refaulted/total than the SP. 3124 */ 3125 return pv->refaulted < MIN_LRU_BATCH || 3126 pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <= 3127 (sp->refaulted + 1) * pv->total * pv->gain; 3128 } 3129 3130 /****************************************************************************** 3131 * the aging 3132 ******************************************************************************/ 3133 3134 /* promote pages accessed through page tables */ 3135 static int folio_update_gen(struct folio *folio, int gen) 3136 { 3137 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); 3138 3139 VM_WARN_ON_ONCE(gen >= MAX_NR_GENS); 3140 3141 do { 3142 /* lru_gen_del_folio() has isolated this page? 
*/ 3143 if (!(old_flags & LRU_GEN_MASK)) { 3144 /* for shrink_folio_list() */ 3145 new_flags = old_flags | BIT(PG_referenced); 3146 continue; 3147 } 3148 3149 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); 3150 new_flags |= (gen + 1UL) << LRU_GEN_PGOFF; 3151 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); 3152 3153 return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; 3154 } 3155 3156 /* protect pages accessed multiple times through file descriptors */ 3157 static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming) 3158 { 3159 int type = folio_is_file_lru(folio); 3160 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3161 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); 3162 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); 3163 3164 VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio); 3165 3166 do { 3167 new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; 3168 /* folio_update_gen() has promoted this page? */ 3169 if (new_gen >= 0 && new_gen != old_gen) 3170 return new_gen; 3171 3172 new_gen = (old_gen + 1) % MAX_NR_GENS; 3173 3174 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); 3175 new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF; 3176 /* for folio_end_writeback() */ 3177 if (reclaiming) 3178 new_flags |= BIT(PG_reclaim); 3179 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); 3180 3181 lru_gen_update_size(lruvec, folio, old_gen, new_gen); 3182 3183 return new_gen; 3184 } 3185 3186 static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio, 3187 int old_gen, int new_gen) 3188 { 3189 int type = folio_is_file_lru(folio); 3190 int zone = folio_zonenum(folio); 3191 int delta = folio_nr_pages(folio); 3192 3193 VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS); 3194 VM_WARN_ON_ONCE(new_gen >= MAX_NR_GENS); 3195 3196 walk->batched++; 3197 3198 walk->nr_pages[old_gen][type][zone] -= delta; 3199 walk->nr_pages[new_gen][type][zone] += delta; 3200 } 3201 3202 static void reset_batch_size(struct lru_gen_mm_walk *walk) 3203 { 3204 int gen, type, zone; 3205 struct lruvec *lruvec = walk->lruvec; 3206 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3207 3208 walk->batched = 0; 3209 3210 for_each_gen_type_zone(gen, type, zone) { 3211 enum lru_list lru = type * LRU_INACTIVE_FILE; 3212 int delta = walk->nr_pages[gen][type][zone]; 3213 3214 if (!delta) 3215 continue; 3216 3217 walk->nr_pages[gen][type][zone] = 0; 3218 WRITE_ONCE(lrugen->nr_pages[gen][type][zone], 3219 lrugen->nr_pages[gen][type][zone] + delta); 3220 3221 if (lru_gen_is_active(lruvec, gen)) 3222 lru += LRU_ACTIVE; 3223 __update_lru_size(lruvec, lru, zone, delta); 3224 } 3225 } 3226 3227 static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *args) 3228 { 3229 struct address_space *mapping; 3230 struct vm_area_struct *vma = args->vma; 3231 struct lru_gen_mm_walk *walk = args->private; 3232 3233 if (!vma_is_accessible(vma)) 3234 return true; 3235 3236 if (is_vm_hugetlb_page(vma)) 3237 return true; 3238 3239 if (!vma_has_recency(vma)) 3240 return true; 3241 3242 if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) 3243 return true; 3244 3245 if (vma == get_gate_vma(vma->vm_mm)) 3246 return true; 3247 3248 if (vma_is_anonymous(vma)) 3249 return !walk->can_swap; 3250 3251 if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping)) 3252 return true; 3253 3254 mapping = vma->vm_file->f_mapping; 3255 if (mapping_unevictable(mapping)) 3256 return true; 3257 3258 if 
(shmem_mapping(mapping)) 3259 return !walk->can_swap; 3260 3261 /* to exclude special mappings like dax, etc. */ 3262 return !mapping->a_ops->read_folio; 3263 } 3264 3265 /* 3266 * Some userspace memory allocators map many single-page VMAs. Instead of 3267 * returning back to the PGD table for each of such VMAs, finish an entire PMD 3268 * table to reduce zigzags and improve cache performance. 3269 */ 3270 static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args, 3271 unsigned long *vm_start, unsigned long *vm_end) 3272 { 3273 unsigned long start = round_up(*vm_end, size); 3274 unsigned long end = (start | ~mask) + 1; 3275 VMA_ITERATOR(vmi, args->mm, start); 3276 3277 VM_WARN_ON_ONCE(mask & size); 3278 VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask)); 3279 3280 for_each_vma(vmi, args->vma) { 3281 if (end && end <= args->vma->vm_start) 3282 return false; 3283 3284 if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args)) 3285 continue; 3286 3287 *vm_start = max(start, args->vma->vm_start); 3288 *vm_end = min(end - 1, args->vma->vm_end - 1) + 1; 3289 3290 return true; 3291 } 3292 3293 return false; 3294 } 3295 3296 static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr, 3297 struct pglist_data *pgdat) 3298 { 3299 unsigned long pfn = pte_pfn(pte); 3300 3301 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); 3302 3303 if (!pte_present(pte) || is_zero_pfn(pfn)) 3304 return -1; 3305 3306 if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte))) 3307 return -1; 3308 3309 if (!pte_young(pte) && !mm_has_notifiers(vma->vm_mm)) 3310 return -1; 3311 3312 if (WARN_ON_ONCE(!pfn_valid(pfn))) 3313 return -1; 3314 3315 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) 3316 return -1; 3317 3318 return pfn; 3319 } 3320 3321 static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr, 3322 struct pglist_data *pgdat) 3323 { 3324 unsigned long pfn = pmd_pfn(pmd); 3325 3326 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); 3327 3328 if (!pmd_present(pmd) || is_huge_zero_pmd(pmd)) 3329 return -1; 3330 3331 if (WARN_ON_ONCE(pmd_devmap(pmd))) 3332 return -1; 3333 3334 if (!pmd_young(pmd) && !mm_has_notifiers(vma->vm_mm)) 3335 return -1; 3336 3337 if (WARN_ON_ONCE(!pfn_valid(pfn))) 3338 return -1; 3339 3340 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) 3341 return -1; 3342 3343 return pfn; 3344 } 3345 3346 static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg, 3347 struct pglist_data *pgdat, bool can_swap) 3348 { 3349 struct folio *folio; 3350 3351 folio = pfn_folio(pfn); 3352 if (folio_nid(folio) != pgdat->node_id) 3353 return NULL; 3354 3355 if (folio_memcg(folio) != memcg) 3356 return NULL; 3357 3358 /* file VMAs can contain anon pages from COW */ 3359 if (!folio_is_file_lru(folio) && !can_swap) 3360 return NULL; 3361 3362 return folio; 3363 } 3364 3365 static bool suitable_to_scan(int total, int young) 3366 { 3367 int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8); 3368 3369 /* suitable if the average number of young PTEs per cacheline is >=1 */ 3370 return young * n >= total; 3371 } 3372 3373 static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end, 3374 struct mm_walk *args) 3375 { 3376 int i; 3377 pte_t *pte; 3378 spinlock_t *ptl; 3379 unsigned long addr; 3380 int total = 0; 3381 int young = 0; 3382 struct lru_gen_mm_walk *walk = args->private; 3383 struct mem_cgroup *memcg = 
lruvec_memcg(walk->lruvec); 3384 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); 3385 DEFINE_MAX_SEQ(walk->lruvec); 3386 int old_gen, new_gen = lru_gen_from_seq(max_seq); 3387 pmd_t pmdval; 3388 3389 pte = pte_offset_map_rw_nolock(args->mm, pmd, start & PMD_MASK, &pmdval, 3390 &ptl); 3391 if (!pte) 3392 return false; 3393 if (!spin_trylock(ptl)) { 3394 pte_unmap(pte); 3395 return false; 3396 } 3397 3398 if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) { 3399 pte_unmap_unlock(pte, ptl); 3400 return false; 3401 } 3402 3403 arch_enter_lazy_mmu_mode(); 3404 restart: 3405 for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) { 3406 unsigned long pfn; 3407 struct folio *folio; 3408 pte_t ptent = ptep_get(pte + i); 3409 3410 total++; 3411 walk->mm_stats[MM_LEAF_TOTAL]++; 3412 3413 pfn = get_pte_pfn(ptent, args->vma, addr, pgdat); 3414 if (pfn == -1) 3415 continue; 3416 3417 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); 3418 if (!folio) 3419 continue; 3420 3421 if (!ptep_clear_young_notify(args->vma, addr, pte + i)) 3422 continue; 3423 3424 young++; 3425 walk->mm_stats[MM_LEAF_YOUNG]++; 3426 3427 if (pte_dirty(ptent) && !folio_test_dirty(folio) && 3428 !(folio_test_anon(folio) && folio_test_swapbacked(folio) && 3429 !folio_test_swapcache(folio))) 3430 folio_mark_dirty(folio); 3431 3432 old_gen = folio_update_gen(folio, new_gen); 3433 if (old_gen >= 0 && old_gen != new_gen) 3434 update_batch_size(walk, folio, old_gen, new_gen); 3435 } 3436 3437 if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end)) 3438 goto restart; 3439 3440 arch_leave_lazy_mmu_mode(); 3441 pte_unmap_unlock(pte, ptl); 3442 3443 return suitable_to_scan(total, young); 3444 } 3445 3446 static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma, 3447 struct mm_walk *args, unsigned long *bitmap, unsigned long *first) 3448 { 3449 int i; 3450 pmd_t *pmd; 3451 spinlock_t *ptl; 3452 struct lru_gen_mm_walk *walk = args->private; 3453 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); 3454 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); 3455 DEFINE_MAX_SEQ(walk->lruvec); 3456 int old_gen, new_gen = lru_gen_from_seq(max_seq); 3457 3458 VM_WARN_ON_ONCE(pud_leaf(*pud)); 3459 3460 /* try to batch at most 1+MIN_LRU_BATCH+1 entries */ 3461 if (*first == -1) { 3462 *first = addr; 3463 bitmap_zero(bitmap, MIN_LRU_BATCH); 3464 return; 3465 } 3466 3467 i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first); 3468 if (i && i <= MIN_LRU_BATCH) { 3469 __set_bit(i - 1, bitmap); 3470 return; 3471 } 3472 3473 pmd = pmd_offset(pud, *first); 3474 3475 ptl = pmd_lockptr(args->mm, pmd); 3476 if (!spin_trylock(ptl)) 3477 goto done; 3478 3479 arch_enter_lazy_mmu_mode(); 3480 3481 do { 3482 unsigned long pfn; 3483 struct folio *folio; 3484 3485 /* don't round down the first address */ 3486 addr = i ? 
(*first & PMD_MASK) + i * PMD_SIZE : *first; 3487 3488 if (!pmd_present(pmd[i])) 3489 goto next; 3490 3491 if (!pmd_trans_huge(pmd[i])) { 3492 if (!walk->force_scan && should_clear_pmd_young() && 3493 !mm_has_notifiers(args->mm)) 3494 pmdp_test_and_clear_young(vma, addr, pmd + i); 3495 goto next; 3496 } 3497 3498 pfn = get_pmd_pfn(pmd[i], vma, addr, pgdat); 3499 if (pfn == -1) 3500 goto next; 3501 3502 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); 3503 if (!folio) 3504 goto next; 3505 3506 if (!pmdp_clear_young_notify(vma, addr, pmd + i)) 3507 goto next; 3508 3509 walk->mm_stats[MM_LEAF_YOUNG]++; 3510 3511 if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) && 3512 !(folio_test_anon(folio) && folio_test_swapbacked(folio) && 3513 !folio_test_swapcache(folio))) 3514 folio_mark_dirty(folio); 3515 3516 old_gen = folio_update_gen(folio, new_gen); 3517 if (old_gen >= 0 && old_gen != new_gen) 3518 update_batch_size(walk, folio, old_gen, new_gen); 3519 next: 3520 i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1; 3521 } while (i <= MIN_LRU_BATCH); 3522 3523 arch_leave_lazy_mmu_mode(); 3524 spin_unlock(ptl); 3525 done: 3526 *first = -1; 3527 } 3528 3529 static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end, 3530 struct mm_walk *args) 3531 { 3532 int i; 3533 pmd_t *pmd; 3534 unsigned long next; 3535 unsigned long addr; 3536 struct vm_area_struct *vma; 3537 DECLARE_BITMAP(bitmap, MIN_LRU_BATCH); 3538 unsigned long first = -1; 3539 struct lru_gen_mm_walk *walk = args->private; 3540 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); 3541 3542 VM_WARN_ON_ONCE(pud_leaf(*pud)); 3543 3544 /* 3545 * Finish an entire PMD in two passes: the first only reaches to PTE 3546 * tables to avoid taking the PMD lock; the second, if necessary, takes 3547 * the PMD lock to clear the accessed bit in PMD entries. 
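 * The second pass is batched: walk_pmd_range_locked() remembers the first
 * PMD address of interest in *first, marks up to MIN_LRU_BATCH follow-on
 * entries in the bitmap, and then flushes the whole batch under a single
 * trylock of the PMD lock.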
3548 */ 3549 pmd = pmd_offset(pud, start & PUD_MASK); 3550 restart: 3551 /* walk_pte_range() may call get_next_vma() */ 3552 vma = args->vma; 3553 for (i = pmd_index(start), addr = start; addr != end; i++, addr = next) { 3554 pmd_t val = pmdp_get_lockless(pmd + i); 3555 3556 next = pmd_addr_end(addr, end); 3557 3558 if (!pmd_present(val) || is_huge_zero_pmd(val)) { 3559 walk->mm_stats[MM_LEAF_TOTAL]++; 3560 continue; 3561 } 3562 3563 if (pmd_trans_huge(val)) { 3564 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); 3565 unsigned long pfn = get_pmd_pfn(val, vma, addr, pgdat); 3566 3567 walk->mm_stats[MM_LEAF_TOTAL]++; 3568 3569 if (pfn != -1) 3570 walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first); 3571 continue; 3572 } 3573 3574 if (!walk->force_scan && should_clear_pmd_young() && 3575 !mm_has_notifiers(args->mm)) { 3576 if (!pmd_young(val)) 3577 continue; 3578 3579 walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first); 3580 } 3581 3582 if (!walk->force_scan && !test_bloom_filter(mm_state, walk->seq, pmd + i)) 3583 continue; 3584 3585 walk->mm_stats[MM_NONLEAF_FOUND]++; 3586 3587 if (!walk_pte_range(&val, addr, next, args)) 3588 continue; 3589 3590 walk->mm_stats[MM_NONLEAF_ADDED]++; 3591 3592 /* carry over to the next generation */ 3593 update_bloom_filter(mm_state, walk->seq + 1, pmd + i); 3594 } 3595 3596 walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first); 3597 3598 if (i < PTRS_PER_PMD && get_next_vma(PUD_MASK, PMD_SIZE, args, &start, &end)) 3599 goto restart; 3600 } 3601 3602 static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end, 3603 struct mm_walk *args) 3604 { 3605 int i; 3606 pud_t *pud; 3607 unsigned long addr; 3608 unsigned long next; 3609 struct lru_gen_mm_walk *walk = args->private; 3610 3611 VM_WARN_ON_ONCE(p4d_leaf(*p4d)); 3612 3613 pud = pud_offset(p4d, start & P4D_MASK); 3614 restart: 3615 for (i = pud_index(start), addr = start; addr != end; i++, addr = next) { 3616 pud_t val = READ_ONCE(pud[i]); 3617 3618 next = pud_addr_end(addr, end); 3619 3620 if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val))) 3621 continue; 3622 3623 walk_pmd_range(&val, addr, next, args); 3624 3625 if (need_resched() || walk->batched >= MAX_LRU_BATCH) { 3626 end = (addr | ~PUD_MASK) + 1; 3627 goto done; 3628 } 3629 } 3630 3631 if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end)) 3632 goto restart; 3633 3634 end = round_up(end, P4D_SIZE); 3635 done: 3636 if (!end || !args->vma) 3637 return 1; 3638 3639 walk->next_addr = max(end, args->vma->vm_start); 3640 3641 return -EAGAIN; 3642 } 3643 3644 static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk) 3645 { 3646 static const struct mm_walk_ops mm_walk_ops = { 3647 .test_walk = should_skip_vma, 3648 .p4d_entry = walk_pud_range, 3649 .walk_lock = PGWALK_RDLOCK, 3650 }; 3651 int err; 3652 struct lruvec *lruvec = walk->lruvec; 3653 3654 walk->next_addr = FIRST_USER_ADDRESS; 3655 3656 do { 3657 DEFINE_MAX_SEQ(lruvec); 3658 3659 err = -EBUSY; 3660 3661 /* another thread might have called inc_max_seq() */ 3662 if (walk->seq != max_seq) 3663 break; 3664 3665 /* the caller might be holding the lock for write */ 3666 if (mmap_read_trylock(mm)) { 3667 err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk); 3668 3669 mmap_read_unlock(mm); 3670 } 3671 3672 if (walk->batched) { 3673 spin_lock_irq(&lruvec->lru_lock); 3674 reset_batch_size(walk); 3675 spin_unlock_irq(&lruvec->lru_lock); 3676 } 3677 3678 cond_resched(); 3679 } while (err == -EAGAIN); 3680 } 
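/*
 * A rough sketch of how the walk above is driven during the aging: the caller
 * (see try_to_inc_max_seq() below) uses iterate_mm_list() to pick the next
 * mm_struct, then walk_mm() runs walk_page_range() with walk_pud_range() as
 * the p4d_entry callback, which descends through walk_pmd_range() and
 * walk_pte_range(). Young folios are promoted with folio_update_gen(), and the
 * resulting LRU size changes are accumulated by update_batch_size() until
 * reset_batch_size() applies them under lru_lock.
 */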
3681 3682 static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat, bool force_alloc) 3683 { 3684 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; 3685 3686 if (pgdat && current_is_kswapd()) { 3687 VM_WARN_ON_ONCE(walk); 3688 3689 walk = &pgdat->mm_walk; 3690 } else if (!walk && force_alloc) { 3691 VM_WARN_ON_ONCE(current_is_kswapd()); 3692 3693 walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); 3694 } 3695 3696 current->reclaim_state->mm_walk = walk; 3697 3698 return walk; 3699 } 3700 3701 static void clear_mm_walk(void) 3702 { 3703 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; 3704 3705 VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages))); 3706 VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats))); 3707 3708 current->reclaim_state->mm_walk = NULL; 3709 3710 if (!current_is_kswapd()) 3711 kfree(walk); 3712 } 3713 3714 static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap) 3715 { 3716 int zone; 3717 int remaining = MAX_LRU_BATCH; 3718 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3719 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); 3720 3721 if (type == LRU_GEN_ANON && !can_swap) 3722 goto done; 3723 3724 /* prevent cold/hot inversion if force_scan is true */ 3725 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 3726 struct list_head *head = &lrugen->folios[old_gen][type][zone]; 3727 3728 while (!list_empty(head)) { 3729 struct folio *folio = lru_to_folio(head); 3730 3731 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); 3732 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); 3733 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); 3734 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); 3735 3736 new_gen = folio_inc_gen(lruvec, folio, false); 3737 list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]); 3738 3739 if (!--remaining) 3740 return false; 3741 } 3742 } 3743 done: 3744 reset_ctrl_pos(lruvec, type, true); 3745 WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1); 3746 3747 return true; 3748 } 3749 3750 static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap) 3751 { 3752 int gen, type, zone; 3753 bool success = false; 3754 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3755 DEFINE_MIN_SEQ(lruvec); 3756 3757 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); 3758 3759 /* find the oldest populated generation */ 3760 for (type = !can_swap; type < ANON_AND_FILE; type++) { 3761 while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) { 3762 gen = lru_gen_from_seq(min_seq[type]); 3763 3764 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 3765 if (!list_empty(&lrugen->folios[gen][type][zone])) 3766 goto next; 3767 } 3768 3769 min_seq[type]++; 3770 } 3771 next: 3772 ; 3773 } 3774 3775 /* see the comment on lru_gen_folio */ 3776 if (can_swap) { 3777 min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]); 3778 min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]); 3779 } 3780 3781 for (type = !can_swap; type < ANON_AND_FILE; type++) { 3782 if (min_seq[type] == lrugen->min_seq[type]) 3783 continue; 3784 3785 reset_ctrl_pos(lruvec, type, true); 3786 WRITE_ONCE(lrugen->min_seq[type], min_seq[type]); 3787 success = true; 3788 } 3789 3790 return success; 3791 } 3792 3793 static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq, 3794 bool can_swap, bool force_scan) 3795 { 3796 bool success; 3797 int prev, next; 3798 int type, zone; 3799 
struct lru_gen_folio *lrugen = &lruvec->lrugen; 3800 restart: 3801 if (seq < READ_ONCE(lrugen->max_seq)) 3802 return false; 3803 3804 spin_lock_irq(&lruvec->lru_lock); 3805 3806 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); 3807 3808 success = seq == lrugen->max_seq; 3809 if (!success) 3810 goto unlock; 3811 3812 for (type = ANON_AND_FILE - 1; type >= 0; type--) { 3813 if (get_nr_gens(lruvec, type) != MAX_NR_GENS) 3814 continue; 3815 3816 VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap)); 3817 3818 if (inc_min_seq(lruvec, type, can_swap)) 3819 continue; 3820 3821 spin_unlock_irq(&lruvec->lru_lock); 3822 cond_resched(); 3823 goto restart; 3824 } 3825 3826 /* 3827 * Update the active/inactive LRU sizes for compatibility. Both sides of 3828 * the current max_seq need to be covered, since max_seq+1 can overlap 3829 * with min_seq[LRU_GEN_ANON] if swapping is constrained. And if they do 3830 * overlap, cold/hot inversion happens. 3831 */ 3832 prev = lru_gen_from_seq(lrugen->max_seq - 1); 3833 next = lru_gen_from_seq(lrugen->max_seq + 1); 3834 3835 for (type = 0; type < ANON_AND_FILE; type++) { 3836 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 3837 enum lru_list lru = type * LRU_INACTIVE_FILE; 3838 long delta = lrugen->nr_pages[prev][type][zone] - 3839 lrugen->nr_pages[next][type][zone]; 3840 3841 if (!delta) 3842 continue; 3843 3844 __update_lru_size(lruvec, lru, zone, delta); 3845 __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta); 3846 } 3847 } 3848 3849 for (type = 0; type < ANON_AND_FILE; type++) 3850 reset_ctrl_pos(lruvec, type, false); 3851 3852 WRITE_ONCE(lrugen->timestamps[next], jiffies); 3853 /* make sure preceding modifications appear */ 3854 smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1); 3855 unlock: 3856 spin_unlock_irq(&lruvec->lru_lock); 3857 3858 return success; 3859 } 3860 3861 static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq, 3862 bool can_swap, bool force_scan) 3863 { 3864 bool success; 3865 struct lru_gen_mm_walk *walk; 3866 struct mm_struct *mm = NULL; 3867 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3868 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 3869 3870 VM_WARN_ON_ONCE(seq > READ_ONCE(lrugen->max_seq)); 3871 3872 if (!mm_state) 3873 return inc_max_seq(lruvec, seq, can_swap, force_scan); 3874 3875 /* see the comment in iterate_mm_list() */ 3876 if (seq <= READ_ONCE(mm_state->seq)) 3877 return false; 3878 3879 /* 3880 * If the hardware doesn't automatically set the accessed bit, fallback 3881 * to lru_gen_look_around(), which only clears the accessed bit in a 3882 * handful of PTEs. Spreading the work out over a period of time usually 3883 * is less efficient, but it avoids bursty page faults. 
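 *
 * Roughly: should_walk_mmu() also fails when the LRU_GEN_MM_WALK
 * capability has been cleared (see enabled_store() below); either way,
 * max_seq is advanced via iterate_mm_list_nowalk() and access information
 * is gathered incrementally by lru_gen_look_around() instead.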
3884 */ 3885 if (!should_walk_mmu()) { 3886 success = iterate_mm_list_nowalk(lruvec, seq); 3887 goto done; 3888 } 3889 3890 walk = set_mm_walk(NULL, true); 3891 if (!walk) { 3892 success = iterate_mm_list_nowalk(lruvec, seq); 3893 goto done; 3894 } 3895 3896 walk->lruvec = lruvec; 3897 walk->seq = seq; 3898 walk->can_swap = can_swap; 3899 walk->force_scan = force_scan; 3900 3901 do { 3902 success = iterate_mm_list(walk, &mm); 3903 if (mm) 3904 walk_mm(mm, walk); 3905 } while (mm); 3906 done: 3907 if (success) { 3908 success = inc_max_seq(lruvec, seq, can_swap, force_scan); 3909 WARN_ON_ONCE(!success); 3910 } 3911 3912 return success; 3913 } 3914 3915 /****************************************************************************** 3916 * working set protection 3917 ******************************************************************************/ 3918 3919 static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc) 3920 { 3921 int priority; 3922 unsigned long reclaimable; 3923 3924 if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH) 3925 return; 3926 /* 3927 * Determine the initial priority based on 3928 * (total >> priority) * reclaimed_to_scanned_ratio = nr_to_reclaim, 3929 * where reclaimed_to_scanned_ratio = inactive / total. 3930 */ 3931 reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE); 3932 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) 3933 reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON); 3934 3935 /* round down reclaimable and round up sc->nr_to_reclaim */ 3936 priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1); 3937 3938 /* 3939 * The estimation is based on LRU pages only, so cap it to prevent 3940 * overshoots of shrinker objects by large margins. 3941 */ 3942 sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY); 3943 } 3944 3945 static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc) 3946 { 3947 int gen, type, zone; 3948 unsigned long total = 0; 3949 bool can_swap = get_swappiness(lruvec, sc); 3950 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3951 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 3952 DEFINE_MAX_SEQ(lruvec); 3953 DEFINE_MIN_SEQ(lruvec); 3954 3955 for (type = !can_swap; type < ANON_AND_FILE; type++) { 3956 unsigned long seq; 3957 3958 for (seq = min_seq[type]; seq <= max_seq; seq++) { 3959 gen = lru_gen_from_seq(seq); 3960 3961 for (zone = 0; zone < MAX_NR_ZONES; zone++) 3962 total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); 3963 } 3964 } 3965 3966 /* whether the size is big enough to be helpful */ 3967 return mem_cgroup_online(memcg) ? 
(total >> sc->priority) : total; 3968 } 3969 3970 static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc, 3971 unsigned long min_ttl) 3972 { 3973 int gen; 3974 unsigned long birth; 3975 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 3976 DEFINE_MIN_SEQ(lruvec); 3977 3978 if (mem_cgroup_below_min(NULL, memcg)) 3979 return false; 3980 3981 if (!lruvec_is_sizable(lruvec, sc)) 3982 return false; 3983 3984 /* see the comment on lru_gen_folio */ 3985 gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]); 3986 birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); 3987 3988 return time_is_before_jiffies(birth + min_ttl); 3989 } 3990 3991 /* to protect the working set of the last N jiffies */ 3992 static unsigned long lru_gen_min_ttl __read_mostly; 3993 3994 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) 3995 { 3996 struct mem_cgroup *memcg; 3997 unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl); 3998 bool reclaimable = !min_ttl; 3999 4000 VM_WARN_ON_ONCE(!current_is_kswapd()); 4001 4002 set_initial_priority(pgdat, sc); 4003 4004 memcg = mem_cgroup_iter(NULL, NULL, NULL); 4005 do { 4006 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); 4007 4008 mem_cgroup_calculate_protection(NULL, memcg); 4009 4010 if (!reclaimable) 4011 reclaimable = lruvec_is_reclaimable(lruvec, sc, min_ttl); 4012 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); 4013 4014 /* 4015 * The main goal is to OOM kill if every generation from all memcgs is 4016 * younger than min_ttl. However, another possibility is all memcgs are 4017 * either too small or below min. 4018 */ 4019 if (!reclaimable && mutex_trylock(&oom_lock)) { 4020 struct oom_control oc = { 4021 .gfp_mask = sc->gfp_mask, 4022 }; 4023 4024 out_of_memory(&oc); 4025 4026 mutex_unlock(&oom_lock); 4027 } 4028 } 4029 4030 /****************************************************************************** 4031 * rmap/PT walk feedback 4032 ******************************************************************************/ 4033 4034 /* 4035 * This function exploits spatial locality when shrink_folio_list() walks the 4036 * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. If 4037 * the scan was done cacheline efficiently, it adds the PMD entry pointing to 4038 * the PTE table to the Bloom filter. This forms a feedback loop between the 4039 * eviction and the aging. 
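 *
 * Illustratively: the look-around window is capped at MIN_LRU_BATCH pages
 * around the faulting address, and suitable_to_scan() deems the window
 * worth remembering roughly when the young PTEs found amount to at least
 * one per cacheline's worth of PTEs scanned.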
4040 */ 4041 bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw) 4042 { 4043 int i; 4044 unsigned long start; 4045 unsigned long end; 4046 struct lru_gen_mm_walk *walk; 4047 int young = 1; 4048 pte_t *pte = pvmw->pte; 4049 unsigned long addr = pvmw->address; 4050 struct vm_area_struct *vma = pvmw->vma; 4051 struct folio *folio = pfn_folio(pvmw->pfn); 4052 bool can_swap = !folio_is_file_lru(folio); 4053 struct mem_cgroup *memcg = folio_memcg(folio); 4054 struct pglist_data *pgdat = folio_pgdat(folio); 4055 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); 4056 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 4057 DEFINE_MAX_SEQ(lruvec); 4058 int old_gen, new_gen = lru_gen_from_seq(max_seq); 4059 4060 lockdep_assert_held(pvmw->ptl); 4061 VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio); 4062 4063 if (!ptep_clear_young_notify(vma, addr, pte)) 4064 return false; 4065 4066 if (spin_is_contended(pvmw->ptl)) 4067 return true; 4068 4069 /* exclude special VMAs containing anon pages from COW */ 4070 if (vma->vm_flags & VM_SPECIAL) 4071 return true; 4072 4073 /* avoid taking the LRU lock under the PTL when possible */ 4074 walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL; 4075 4076 start = max(addr & PMD_MASK, vma->vm_start); 4077 end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1; 4078 4079 if (end - start == PAGE_SIZE) 4080 return true; 4081 4082 if (end - start > MIN_LRU_BATCH * PAGE_SIZE) { 4083 if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2) 4084 end = start + MIN_LRU_BATCH * PAGE_SIZE; 4085 else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2) 4086 start = end - MIN_LRU_BATCH * PAGE_SIZE; 4087 else { 4088 start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2; 4089 end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2; 4090 } 4091 } 4092 4093 arch_enter_lazy_mmu_mode(); 4094 4095 pte -= (addr - start) / PAGE_SIZE; 4096 4097 for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) { 4098 unsigned long pfn; 4099 pte_t ptent = ptep_get(pte + i); 4100 4101 pfn = get_pte_pfn(ptent, vma, addr, pgdat); 4102 if (pfn == -1) 4103 continue; 4104 4105 folio = get_pfn_folio(pfn, memcg, pgdat, can_swap); 4106 if (!folio) 4107 continue; 4108 4109 if (!ptep_clear_young_notify(vma, addr, pte + i)) 4110 continue; 4111 4112 young++; 4113 4114 if (pte_dirty(ptent) && !folio_test_dirty(folio) && 4115 !(folio_test_anon(folio) && folio_test_swapbacked(folio) && 4116 !folio_test_swapcache(folio))) 4117 folio_mark_dirty(folio); 4118 4119 if (walk) { 4120 old_gen = folio_update_gen(folio, new_gen); 4121 if (old_gen >= 0 && old_gen != new_gen) 4122 update_batch_size(walk, folio, old_gen, new_gen); 4123 4124 continue; 4125 } 4126 4127 old_gen = folio_lru_gen(folio); 4128 if (old_gen < 0) 4129 folio_set_referenced(folio); 4130 else if (old_gen != new_gen) { 4131 folio_clear_lru_refs(folio); 4132 folio_activate(folio); 4133 } 4134 } 4135 4136 arch_leave_lazy_mmu_mode(); 4137 4138 /* feedback from rmap walkers to page table walkers */ 4139 if (mm_state && suitable_to_scan(i, young)) 4140 update_bloom_filter(mm_state, max_seq, pvmw->pmd); 4141 4142 return true; 4143 } 4144 4145 /****************************************************************************** 4146 * memcg LRU 4147 ******************************************************************************/ 4148 4149 /* see the comment on MEMCG_NR_GENS */ 4150 enum { 4151 MEMCG_LRU_NOP, 4152 MEMCG_LRU_HEAD, 4153 MEMCG_LRU_TAIL, 4154 MEMCG_LRU_OLD, 4155 MEMCG_LRU_YOUNG, 4156 }; 4157 4158 static void lru_gen_rotate_memcg(struct lruvec *lruvec, 
int op) 4159 { 4160 int seg; 4161 int old, new; 4162 unsigned long flags; 4163 int bin = get_random_u32_below(MEMCG_NR_BINS); 4164 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 4165 4166 spin_lock_irqsave(&pgdat->memcg_lru.lock, flags); 4167 4168 VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); 4169 4170 seg = 0; 4171 new = old = lruvec->lrugen.gen; 4172 4173 /* see the comment on MEMCG_NR_GENS */ 4174 if (op == MEMCG_LRU_HEAD) 4175 seg = MEMCG_LRU_HEAD; 4176 else if (op == MEMCG_LRU_TAIL) 4177 seg = MEMCG_LRU_TAIL; 4178 else if (op == MEMCG_LRU_OLD) 4179 new = get_memcg_gen(pgdat->memcg_lru.seq); 4180 else if (op == MEMCG_LRU_YOUNG) 4181 new = get_memcg_gen(pgdat->memcg_lru.seq + 1); 4182 else 4183 VM_WARN_ON_ONCE(true); 4184 4185 WRITE_ONCE(lruvec->lrugen.seg, seg); 4186 WRITE_ONCE(lruvec->lrugen.gen, new); 4187 4188 hlist_nulls_del_rcu(&lruvec->lrugen.list); 4189 4190 if (op == MEMCG_LRU_HEAD || op == MEMCG_LRU_OLD) 4191 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); 4192 else 4193 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); 4194 4195 pgdat->memcg_lru.nr_memcgs[old]--; 4196 pgdat->memcg_lru.nr_memcgs[new]++; 4197 4198 if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq)) 4199 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); 4200 4201 spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags); 4202 } 4203 4204 #ifdef CONFIG_MEMCG 4205 4206 void lru_gen_online_memcg(struct mem_cgroup *memcg) 4207 { 4208 int gen; 4209 int nid; 4210 int bin = get_random_u32_below(MEMCG_NR_BINS); 4211 4212 for_each_node(nid) { 4213 struct pglist_data *pgdat = NODE_DATA(nid); 4214 struct lruvec *lruvec = get_lruvec(memcg, nid); 4215 4216 spin_lock_irq(&pgdat->memcg_lru.lock); 4217 4218 VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list)); 4219 4220 gen = get_memcg_gen(pgdat->memcg_lru.seq); 4221 4222 lruvec->lrugen.gen = gen; 4223 4224 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]); 4225 pgdat->memcg_lru.nr_memcgs[gen]++; 4226 4227 spin_unlock_irq(&pgdat->memcg_lru.lock); 4228 } 4229 } 4230 4231 void lru_gen_offline_memcg(struct mem_cgroup *memcg) 4232 { 4233 int nid; 4234 4235 for_each_node(nid) { 4236 struct lruvec *lruvec = get_lruvec(memcg, nid); 4237 4238 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_OLD); 4239 } 4240 } 4241 4242 void lru_gen_release_memcg(struct mem_cgroup *memcg) 4243 { 4244 int gen; 4245 int nid; 4246 4247 for_each_node(nid) { 4248 struct pglist_data *pgdat = NODE_DATA(nid); 4249 struct lruvec *lruvec = get_lruvec(memcg, nid); 4250 4251 spin_lock_irq(&pgdat->memcg_lru.lock); 4252 4253 if (hlist_nulls_unhashed(&lruvec->lrugen.list)) 4254 goto unlock; 4255 4256 gen = lruvec->lrugen.gen; 4257 4258 hlist_nulls_del_init_rcu(&lruvec->lrugen.list); 4259 pgdat->memcg_lru.nr_memcgs[gen]--; 4260 4261 if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq)) 4262 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); 4263 unlock: 4264 spin_unlock_irq(&pgdat->memcg_lru.lock); 4265 } 4266 } 4267 4268 void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid) 4269 { 4270 struct lruvec *lruvec = get_lruvec(memcg, nid); 4271 4272 /* see the comment on MEMCG_NR_GENS */ 4273 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_HEAD) 4274 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD); 4275 } 4276 4277 #endif /* CONFIG_MEMCG */ 4278 4279 /****************************************************************************** 4280 
* the eviction 4281 ******************************************************************************/ 4282 4283 static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc, 4284 int tier_idx) 4285 { 4286 bool success; 4287 int gen = folio_lru_gen(folio); 4288 int type = folio_is_file_lru(folio); 4289 int zone = folio_zonenum(folio); 4290 int delta = folio_nr_pages(folio); 4291 int refs = folio_lru_refs(folio); 4292 int tier = lru_tier_from_refs(refs); 4293 struct lru_gen_folio *lrugen = &lruvec->lrugen; 4294 4295 VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio); 4296 4297 /* unevictable */ 4298 if (!folio_evictable(folio)) { 4299 success = lru_gen_del_folio(lruvec, folio, true); 4300 VM_WARN_ON_ONCE_FOLIO(!success, folio); 4301 folio_set_unevictable(folio); 4302 lruvec_add_folio(lruvec, folio); 4303 __count_vm_events(UNEVICTABLE_PGCULLED, delta); 4304 return true; 4305 } 4306 4307 /* promoted */ 4308 if (gen != lru_gen_from_seq(lrugen->min_seq[type])) { 4309 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); 4310 return true; 4311 } 4312 4313 /* protected */ 4314 if (tier > tier_idx || refs == BIT(LRU_REFS_WIDTH)) { 4315 int hist = lru_hist_from_seq(lrugen->min_seq[type]); 4316 4317 gen = folio_inc_gen(lruvec, folio, false); 4318 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); 4319 4320 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 4321 lrugen->protected[hist][type][tier - 1] + delta); 4322 return true; 4323 } 4324 4325 /* ineligible */ 4326 if (!folio_test_lru(folio) || zone > sc->reclaim_idx) { 4327 gen = folio_inc_gen(lruvec, folio, false); 4328 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); 4329 return true; 4330 } 4331 4332 /* waiting for writeback */ 4333 if (folio_test_locked(folio) || folio_test_writeback(folio) || 4334 (type == LRU_GEN_FILE && folio_test_dirty(folio))) { 4335 gen = folio_inc_gen(lruvec, folio, true); 4336 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); 4337 return true; 4338 } 4339 4340 return false; 4341 } 4342 4343 static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc) 4344 { 4345 bool success; 4346 4347 /* swap constrained */ 4348 if (!(sc->gfp_mask & __GFP_IO) && 4349 (folio_test_dirty(folio) || 4350 (folio_test_anon(folio) && !folio_test_swapcache(folio)))) 4351 return false; 4352 4353 /* raced with release_pages() */ 4354 if (!folio_try_get(folio)) 4355 return false; 4356 4357 /* raced with another isolation */ 4358 if (!folio_test_clear_lru(folio)) { 4359 folio_put(folio); 4360 return false; 4361 } 4362 4363 /* see the comment on MAX_NR_TIERS */ 4364 if (!folio_test_referenced(folio)) 4365 folio_clear_lru_refs(folio); 4366 4367 /* for shrink_folio_list() */ 4368 folio_clear_reclaim(folio); 4369 folio_clear_referenced(folio); 4370 4371 success = lru_gen_del_folio(lruvec, folio, true); 4372 VM_WARN_ON_ONCE_FOLIO(!success, folio); 4373 4374 return true; 4375 } 4376 4377 static int scan_folios(struct lruvec *lruvec, struct scan_control *sc, 4378 int type, int tier, struct list_head *list) 4379 { 4380 int i; 4381 int gen; 4382 enum vm_event_item item; 4383 int sorted = 0; 4384 int scanned = 0; 4385 int isolated = 0; 4386 int skipped = 0; 4387 int remaining = MAX_LRU_BATCH; 4388 struct lru_gen_folio *lrugen = &lruvec->lrugen; 4389 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 4390 4391 VM_WARN_ON_ONCE(!list_empty(list)); 4392 4393 if (get_nr_gens(lruvec, type) == MIN_NR_GENS) 4394 return 0; 4395 4396 gen = 
lru_gen_from_seq(lrugen->min_seq[type]); 4397 4398 for (i = MAX_NR_ZONES; i > 0; i--) { 4399 LIST_HEAD(moved); 4400 int skipped_zone = 0; 4401 int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES; 4402 struct list_head *head = &lrugen->folios[gen][type][zone]; 4403 4404 while (!list_empty(head)) { 4405 struct folio *folio = lru_to_folio(head); 4406 int delta = folio_nr_pages(folio); 4407 4408 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); 4409 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); 4410 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); 4411 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); 4412 4413 scanned += delta; 4414 4415 if (sort_folio(lruvec, folio, sc, tier)) 4416 sorted += delta; 4417 else if (isolate_folio(lruvec, folio, sc)) { 4418 list_add(&folio->lru, list); 4419 isolated += delta; 4420 } else { 4421 list_move(&folio->lru, &moved); 4422 skipped_zone += delta; 4423 } 4424 4425 if (!--remaining || max(isolated, skipped_zone) >= MIN_LRU_BATCH) 4426 break; 4427 } 4428 4429 if (skipped_zone) { 4430 list_splice(&moved, head); 4431 __count_zid_vm_events(PGSCAN_SKIP, zone, skipped_zone); 4432 skipped += skipped_zone; 4433 } 4434 4435 if (!remaining || isolated >= MIN_LRU_BATCH) 4436 break; 4437 } 4438 4439 item = PGSCAN_KSWAPD + reclaimer_offset(); 4440 if (!cgroup_reclaim(sc)) { 4441 __count_vm_events(item, isolated); 4442 __count_vm_events(PGREFILL, sorted); 4443 } 4444 __count_memcg_events(memcg, item, isolated); 4445 __count_memcg_events(memcg, PGREFILL, sorted); 4446 __count_vm_events(PGSCAN_ANON + type, isolated); 4447 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, MAX_LRU_BATCH, 4448 scanned, skipped, isolated, 4449 type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON); 4450 4451 /* 4452 * There might not be eligible folios due to reclaim_idx. Check the 4453 * remaining to prevent livelock if it's not making progress. 4454 */ 4455 return isolated || !remaining ? scanned : 0; 4456 } 4457 4458 static int get_tier_idx(struct lruvec *lruvec, int type) 4459 { 4460 int tier; 4461 struct ctrl_pos sp, pv; 4462 4463 /* 4464 * To leave a margin for fluctuations, use a larger gain factor (1:2). 4465 * This value is chosen because any other tier would have at least twice 4466 * as many refaults as the first tier. 4467 */ 4468 read_ctrl_pos(lruvec, type, 0, 1, &sp); 4469 for (tier = 1; tier < MAX_NR_TIERS; tier++) { 4470 read_ctrl_pos(lruvec, type, tier, 2, &pv); 4471 if (!positive_ctrl_err(&sp, &pv)) 4472 break; 4473 } 4474 4475 return tier - 1; 4476 } 4477 4478 static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx) 4479 { 4480 int type, tier; 4481 struct ctrl_pos sp, pv; 4482 int gain[ANON_AND_FILE] = { swappiness, MAX_SWAPPINESS - swappiness }; 4483 4484 /* 4485 * Compare the first tier of anon with that of file to determine which 4486 * type to scan. Also need to compare other tiers of the selected type 4487 * with the first tier of the other type to determine the last tier (of 4488 * the selected type) to evict. 
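 *
 * Roughly, ignoring the slack terms in positive_ctrl_err(): each type's
 * first-tier refault rate is weighted by its gain (swappiness for anon,
 * MAX_SWAPPINESS - swappiness for file), and the type that refaults less
 * after weighting is evicted from; the loop below then picks the last tier
 * of that type still refaulting less than the other type's first tier, and
 * higher tiers are protected by sort_folio().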
4489 */ 4490 read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp); 4491 read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv); 4492 type = positive_ctrl_err(&sp, &pv); 4493 4494 read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp); 4495 for (tier = 1; tier < MAX_NR_TIERS; tier++) { 4496 read_ctrl_pos(lruvec, type, tier, gain[type], &pv); 4497 if (!positive_ctrl_err(&sp, &pv)) 4498 break; 4499 } 4500 4501 *tier_idx = tier - 1; 4502 4503 return type; 4504 } 4505 4506 static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness, 4507 int *type_scanned, struct list_head *list) 4508 { 4509 int i; 4510 int type; 4511 int scanned; 4512 int tier = -1; 4513 DEFINE_MIN_SEQ(lruvec); 4514 4515 /* 4516 * Try to make the obvious choice first, and if anon and file are both 4517 * available from the same generation, 4518 * 1. Interpret swappiness 1 as file first and MAX_SWAPPINESS as anon 4519 * first. 4520 * 2. If !__GFP_IO, file first since clean pagecache is more likely to 4521 * exist than clean swapcache. 4522 */ 4523 if (!swappiness) 4524 type = LRU_GEN_FILE; 4525 else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE]) 4526 type = LRU_GEN_ANON; 4527 else if (swappiness == 1) 4528 type = LRU_GEN_FILE; 4529 else if (swappiness == MAX_SWAPPINESS) 4530 type = LRU_GEN_ANON; 4531 else if (!(sc->gfp_mask & __GFP_IO)) 4532 type = LRU_GEN_FILE; 4533 else 4534 type = get_type_to_scan(lruvec, swappiness, &tier); 4535 4536 for (i = !swappiness; i < ANON_AND_FILE; i++) { 4537 if (tier < 0) 4538 tier = get_tier_idx(lruvec, type); 4539 4540 scanned = scan_folios(lruvec, sc, type, tier, list); 4541 if (scanned) 4542 break; 4543 4544 type = !type; 4545 tier = -1; 4546 } 4547 4548 *type_scanned = type; 4549 4550 return scanned; 4551 } 4552 4553 static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness) 4554 { 4555 int type; 4556 int scanned; 4557 int reclaimed; 4558 LIST_HEAD(list); 4559 LIST_HEAD(clean); 4560 struct folio *folio; 4561 struct folio *next; 4562 enum vm_event_item item; 4563 struct reclaim_stat stat; 4564 struct lru_gen_mm_walk *walk; 4565 bool skip_retry = false; 4566 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 4567 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 4568 4569 spin_lock_irq(&lruvec->lru_lock); 4570 4571 scanned = isolate_folios(lruvec, sc, swappiness, &type, &list); 4572 4573 scanned += try_to_inc_min_seq(lruvec, swappiness); 4574 4575 if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS) 4576 scanned = 0; 4577 4578 spin_unlock_irq(&lruvec->lru_lock); 4579 4580 if (list_empty(&list)) 4581 return scanned; 4582 retry: 4583 reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false); 4584 sc->nr_reclaimed += reclaimed; 4585 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, 4586 scanned, reclaimed, &stat, sc->priority, 4587 type ? 
LRU_INACTIVE_FILE : LRU_INACTIVE_ANON); 4588 4589 list_for_each_entry_safe_reverse(folio, next, &list, lru) { 4590 if (!folio_evictable(folio)) { 4591 list_del(&folio->lru); 4592 folio_putback_lru(folio); 4593 continue; 4594 } 4595 4596 if (folio_test_reclaim(folio) && 4597 (folio_test_dirty(folio) || folio_test_writeback(folio))) { 4598 /* restore LRU_REFS_FLAGS cleared by isolate_folio() */ 4599 if (folio_test_workingset(folio)) 4600 folio_set_referenced(folio); 4601 continue; 4602 } 4603 4604 if (skip_retry || folio_test_active(folio) || folio_test_referenced(folio) || 4605 folio_mapped(folio) || folio_test_locked(folio) || 4606 folio_test_dirty(folio) || folio_test_writeback(folio)) { 4607 /* don't add rejected folios to the oldest generation */ 4608 set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 4609 BIT(PG_active)); 4610 continue; 4611 } 4612 4613 /* retry folios that may have missed folio_rotate_reclaimable() */ 4614 list_move(&folio->lru, &clean); 4615 } 4616 4617 spin_lock_irq(&lruvec->lru_lock); 4618 4619 move_folios_to_lru(lruvec, &list); 4620 4621 walk = current->reclaim_state->mm_walk; 4622 if (walk && walk->batched) { 4623 walk->lruvec = lruvec; 4624 reset_batch_size(walk); 4625 } 4626 4627 item = PGSTEAL_KSWAPD + reclaimer_offset(); 4628 if (!cgroup_reclaim(sc)) 4629 __count_vm_events(item, reclaimed); 4630 __count_memcg_events(memcg, item, reclaimed); 4631 __count_vm_events(PGSTEAL_ANON + type, reclaimed); 4632 4633 spin_unlock_irq(&lruvec->lru_lock); 4634 4635 list_splice_init(&clean, &list); 4636 4637 if (!list_empty(&list)) { 4638 skip_retry = true; 4639 goto retry; 4640 } 4641 4642 return scanned; 4643 } 4644 4645 static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, 4646 bool can_swap, unsigned long *nr_to_scan) 4647 { 4648 int gen, type, zone; 4649 unsigned long old = 0; 4650 unsigned long young = 0; 4651 unsigned long total = 0; 4652 struct lru_gen_folio *lrugen = &lruvec->lrugen; 4653 DEFINE_MIN_SEQ(lruvec); 4654 4655 /* whether this lruvec is completely out of cold folios */ 4656 if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) { 4657 *nr_to_scan = 0; 4658 return true; 4659 } 4660 4661 for (type = !can_swap; type < ANON_AND_FILE; type++) { 4662 unsigned long seq; 4663 4664 for (seq = min_seq[type]; seq <= max_seq; seq++) { 4665 unsigned long size = 0; 4666 4667 gen = lru_gen_from_seq(seq); 4668 4669 for (zone = 0; zone < MAX_NR_ZONES; zone++) 4670 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); 4671 4672 total += size; 4673 if (seq == max_seq) 4674 young += size; 4675 else if (seq + MIN_NR_GENS == max_seq) 4676 old += size; 4677 } 4678 } 4679 4680 *nr_to_scan = total; 4681 4682 /* 4683 * The aging tries to be lazy to reduce the overhead, while the eviction 4684 * stalls when the number of generations reaches MIN_NR_GENS. Hence, the 4685 * ideal number of generations is MIN_NR_GENS+1. 4686 */ 4687 if (min_seq[!can_swap] + MIN_NR_GENS < max_seq) 4688 return false; 4689 4690 /* 4691 * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1) 4692 * of the total number of pages for each generation. A reasonable range 4693 * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The 4694 * aging cares about the upper bound of hot pages, while the eviction 4695 * cares about the lower bound of cold pages. 
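 *
 * Worked example, assuming MIN_NR_GENS == 2: aging is requested when the
 * youngest generation holds more than 1/2 of the pages (young * 2 > total)
 * or when the generation MIN_NR_GENS behind max_seq holds less than 1/4
 * (old * 4 < total); anywhere in between, the spread is considered good
 * enough and the eviction proceeds without aging.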
4696 */ 4697 if (young * MIN_NR_GENS > total) 4698 return true; 4699 if (old * (MIN_NR_GENS + 2) < total) 4700 return true; 4701 4702 return false; 4703 } 4704 4705 /* 4706 * For future optimizations: 4707 * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg 4708 * reclaim. 4709 */ 4710 static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool can_swap) 4711 { 4712 bool success; 4713 unsigned long nr_to_scan; 4714 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 4715 DEFINE_MAX_SEQ(lruvec); 4716 4717 if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg)) 4718 return -1; 4719 4720 success = should_run_aging(lruvec, max_seq, can_swap, &nr_to_scan); 4721 4722 /* try to scrape all its memory if this memcg was deleted */ 4723 if (nr_to_scan && !mem_cgroup_online(memcg)) 4724 return nr_to_scan; 4725 4726 /* try to get away with not aging at the default priority */ 4727 if (!success || sc->priority == DEF_PRIORITY) 4728 return nr_to_scan >> sc->priority; 4729 4730 /* stop scanning this lruvec as it's low on cold folios */ 4731 return try_to_inc_max_seq(lruvec, max_seq, can_swap, false) ? -1 : 0; 4732 } 4733 4734 static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc) 4735 { 4736 int i; 4737 enum zone_watermarks mark; 4738 4739 /* don't abort memcg reclaim to ensure fairness */ 4740 if (!root_reclaim(sc)) 4741 return false; 4742 4743 if (sc->nr_reclaimed >= max(sc->nr_to_reclaim, compact_gap(sc->order))) 4744 return true; 4745 4746 /* check the order to exclude compaction-induced reclaim */ 4747 if (!current_is_kswapd() || sc->order) 4748 return false; 4749 4750 mark = sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING ? 4751 WMARK_PROMO : WMARK_HIGH; 4752 4753 for (i = 0; i <= sc->reclaim_idx; i++) { 4754 struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i; 4755 unsigned long size = wmark_pages(zone, mark) + MIN_LRU_BATCH; 4756 4757 if (managed_zone(zone) && !zone_watermark_ok(zone, 0, size, sc->reclaim_idx, 0)) 4758 return false; 4759 } 4760 4761 /* kswapd should abort if all eligible zones are safe */ 4762 return true; 4763 } 4764 4765 static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) 4766 { 4767 long nr_to_scan; 4768 unsigned long scanned = 0; 4769 int swappiness = get_swappiness(lruvec, sc); 4770 4771 while (true) { 4772 int delta; 4773 4774 nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness); 4775 if (nr_to_scan <= 0) 4776 break; 4777 4778 delta = evict_folios(lruvec, sc, swappiness); 4779 if (!delta) 4780 break; 4781 4782 scanned += delta; 4783 if (scanned >= nr_to_scan) 4784 break; 4785 4786 if (should_abort_scan(lruvec, sc)) 4787 break; 4788 4789 cond_resched(); 4790 } 4791 4792 /* whether this lruvec should be rotated */ 4793 return nr_to_scan < 0; 4794 } 4795 4796 static int shrink_one(struct lruvec *lruvec, struct scan_control *sc) 4797 { 4798 bool success; 4799 unsigned long scanned = sc->nr_scanned; 4800 unsigned long reclaimed = sc->nr_reclaimed; 4801 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 4802 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 4803 4804 /* lru_gen_age_node() called mem_cgroup_calculate_protection() */ 4805 if (mem_cgroup_below_min(NULL, memcg)) 4806 return MEMCG_LRU_YOUNG; 4807 4808 if (mem_cgroup_below_low(NULL, memcg)) { 4809 /* see the comment on MEMCG_NR_GENS */ 4810 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL) 4811 return MEMCG_LRU_TAIL; 4812 4813 memcg_memory_event(memcg, MEMCG_LOW); 4814 } 4815 4816 success = 
try_to_shrink_lruvec(lruvec, sc); 4817 4818 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority); 4819 4820 if (!sc->proactive) 4821 vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned, 4822 sc->nr_reclaimed - reclaimed); 4823 4824 flush_reclaim_state(sc); 4825 4826 if (success && mem_cgroup_online(memcg)) 4827 return MEMCG_LRU_YOUNG; 4828 4829 if (!success && lruvec_is_sizable(lruvec, sc)) 4830 return 0; 4831 4832 /* one retry if offlined or too small */ 4833 return READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL ? 4834 MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG; 4835 } 4836 4837 static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc) 4838 { 4839 int op; 4840 int gen; 4841 int bin; 4842 int first_bin; 4843 struct lruvec *lruvec; 4844 struct lru_gen_folio *lrugen; 4845 struct mem_cgroup *memcg; 4846 struct hlist_nulls_node *pos; 4847 4848 gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq)); 4849 bin = first_bin = get_random_u32_below(MEMCG_NR_BINS); 4850 restart: 4851 op = 0; 4852 memcg = NULL; 4853 4854 rcu_read_lock(); 4855 4856 hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) { 4857 if (op) { 4858 lru_gen_rotate_memcg(lruvec, op); 4859 op = 0; 4860 } 4861 4862 mem_cgroup_put(memcg); 4863 memcg = NULL; 4864 4865 if (gen != READ_ONCE(lrugen->gen)) 4866 continue; 4867 4868 lruvec = container_of(lrugen, struct lruvec, lrugen); 4869 memcg = lruvec_memcg(lruvec); 4870 4871 if (!mem_cgroup_tryget(memcg)) { 4872 lru_gen_release_memcg(memcg); 4873 memcg = NULL; 4874 continue; 4875 } 4876 4877 rcu_read_unlock(); 4878 4879 op = shrink_one(lruvec, sc); 4880 4881 rcu_read_lock(); 4882 4883 if (should_abort_scan(lruvec, sc)) 4884 break; 4885 } 4886 4887 rcu_read_unlock(); 4888 4889 if (op) 4890 lru_gen_rotate_memcg(lruvec, op); 4891 4892 mem_cgroup_put(memcg); 4893 4894 if (!is_a_nulls(pos)) 4895 return; 4896 4897 /* restart if raced with lru_gen_rotate_memcg() */ 4898 if (gen != get_nulls_value(pos)) 4899 goto restart; 4900 4901 /* try the rest of the bins of the current generation */ 4902 bin = get_memcg_bin(bin + 1); 4903 if (bin != first_bin) 4904 goto restart; 4905 } 4906 4907 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) 4908 { 4909 struct blk_plug plug; 4910 4911 VM_WARN_ON_ONCE(root_reclaim(sc)); 4912 VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap); 4913 4914 lru_add_drain(); 4915 4916 blk_start_plug(&plug); 4917 4918 set_mm_walk(NULL, sc->proactive); 4919 4920 if (try_to_shrink_lruvec(lruvec, sc)) 4921 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG); 4922 4923 clear_mm_walk(); 4924 4925 blk_finish_plug(&plug); 4926 } 4927 4928 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) 4929 { 4930 struct blk_plug plug; 4931 unsigned long reclaimed = sc->nr_reclaimed; 4932 4933 VM_WARN_ON_ONCE(!root_reclaim(sc)); 4934 4935 /* 4936 * Unmapped clean folios are already prioritized. Scanning for more of 4937 * them is likely futile and can cause high reclaim latency when there 4938 * is a large number of memcgs. 
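 *
 * In other words, if the caller cleared may_writepage or may_unmap, skip
 * the per-memcg walk entirely; only the kswapd_failures bookkeeping at
 * the done label still runs.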
4939 */ 4940 if (!sc->may_writepage || !sc->may_unmap) 4941 goto done; 4942 4943 lru_add_drain(); 4944 4945 blk_start_plug(&plug); 4946 4947 set_mm_walk(pgdat, sc->proactive); 4948 4949 set_initial_priority(pgdat, sc); 4950 4951 if (current_is_kswapd()) 4952 sc->nr_reclaimed = 0; 4953 4954 if (mem_cgroup_disabled()) 4955 shrink_one(&pgdat->__lruvec, sc); 4956 else 4957 shrink_many(pgdat, sc); 4958 4959 if (current_is_kswapd()) 4960 sc->nr_reclaimed += reclaimed; 4961 4962 clear_mm_walk(); 4963 4964 blk_finish_plug(&plug); 4965 done: 4966 if (sc->nr_reclaimed > reclaimed) 4967 pgdat->kswapd_failures = 0; 4968 } 4969 4970 /****************************************************************************** 4971 * state change 4972 ******************************************************************************/ 4973 4974 static bool __maybe_unused state_is_valid(struct lruvec *lruvec) 4975 { 4976 struct lru_gen_folio *lrugen = &lruvec->lrugen; 4977 4978 if (lrugen->enabled) { 4979 enum lru_list lru; 4980 4981 for_each_evictable_lru(lru) { 4982 if (!list_empty(&lruvec->lists[lru])) 4983 return false; 4984 } 4985 } else { 4986 int gen, type, zone; 4987 4988 for_each_gen_type_zone(gen, type, zone) { 4989 if (!list_empty(&lrugen->folios[gen][type][zone])) 4990 return false; 4991 } 4992 } 4993 4994 return true; 4995 } 4996 4997 static bool fill_evictable(struct lruvec *lruvec) 4998 { 4999 enum lru_list lru; 5000 int remaining = MAX_LRU_BATCH; 5001 5002 for_each_evictable_lru(lru) { 5003 int type = is_file_lru(lru); 5004 bool active = is_active_lru(lru); 5005 struct list_head *head = &lruvec->lists[lru]; 5006 5007 while (!list_empty(head)) { 5008 bool success; 5009 struct folio *folio = lru_to_folio(head); 5010 5011 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); 5012 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio) != active, folio); 5013 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); 5014 VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio); 5015 5016 lruvec_del_folio(lruvec, folio); 5017 success = lru_gen_add_folio(lruvec, folio, false); 5018 VM_WARN_ON_ONCE(!success); 5019 5020 if (!--remaining) 5021 return false; 5022 } 5023 } 5024 5025 return true; 5026 } 5027 5028 static bool drain_evictable(struct lruvec *lruvec) 5029 { 5030 int gen, type, zone; 5031 int remaining = MAX_LRU_BATCH; 5032 5033 for_each_gen_type_zone(gen, type, zone) { 5034 struct list_head *head = &lruvec->lrugen.folios[gen][type][zone]; 5035 5036 while (!list_empty(head)) { 5037 bool success; 5038 struct folio *folio = lru_to_folio(head); 5039 5040 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); 5041 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); 5042 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); 5043 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); 5044 5045 success = lru_gen_del_folio(lruvec, folio, false); 5046 VM_WARN_ON_ONCE(!success); 5047 lruvec_add_folio(lruvec, folio); 5048 5049 if (!--remaining) 5050 return false; 5051 } 5052 } 5053 5054 return true; 5055 } 5056 5057 static void lru_gen_change_state(bool enabled) 5058 { 5059 static DEFINE_MUTEX(state_mutex); 5060 5061 struct mem_cgroup *memcg; 5062 5063 cgroup_lock(); 5064 cpus_read_lock(); 5065 get_online_mems(); 5066 mutex_lock(&state_mutex); 5067 5068 if (enabled == lru_gen_enabled()) 5069 goto unlock; 5070 5071 if (enabled) 5072 static_branch_enable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]); 5073 else 5074 static_branch_disable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]); 5075 5076 memcg = 
mem_cgroup_iter(NULL, NULL, NULL); 5077 do { 5078 int nid; 5079 5080 for_each_node(nid) { 5081 struct lruvec *lruvec = get_lruvec(memcg, nid); 5082 5083 spin_lock_irq(&lruvec->lru_lock); 5084 5085 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); 5086 VM_WARN_ON_ONCE(!state_is_valid(lruvec)); 5087 5088 lruvec->lrugen.enabled = enabled; 5089 5090 while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) { 5091 spin_unlock_irq(&lruvec->lru_lock); 5092 cond_resched(); 5093 spin_lock_irq(&lruvec->lru_lock); 5094 } 5095 5096 spin_unlock_irq(&lruvec->lru_lock); 5097 } 5098 5099 cond_resched(); 5100 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); 5101 unlock: 5102 mutex_unlock(&state_mutex); 5103 put_online_mems(); 5104 cpus_read_unlock(); 5105 cgroup_unlock(); 5106 } 5107 5108 /****************************************************************************** 5109 * sysfs interface 5110 ******************************************************************************/ 5111 5112 static ssize_t min_ttl_ms_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 5113 { 5114 return sysfs_emit(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl))); 5115 } 5116 5117 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ 5118 static ssize_t min_ttl_ms_store(struct kobject *kobj, struct kobj_attribute *attr, 5119 const char *buf, size_t len) 5120 { 5121 unsigned int msecs; 5122 5123 if (kstrtouint(buf, 0, &msecs)) 5124 return -EINVAL; 5125 5126 WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs)); 5127 5128 return len; 5129 } 5130 5131 static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR_RW(min_ttl_ms); 5132 5133 static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 5134 { 5135 unsigned int caps = 0; 5136 5137 if (get_cap(LRU_GEN_CORE)) 5138 caps |= BIT(LRU_GEN_CORE); 5139 5140 if (should_walk_mmu()) 5141 caps |= BIT(LRU_GEN_MM_WALK); 5142 5143 if (should_clear_pmd_young()) 5144 caps |= BIT(LRU_GEN_NONLEAF_YOUNG); 5145 5146 return sysfs_emit(buf, "0x%04x\n", caps); 5147 } 5148 5149 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ 5150 static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, 5151 const char *buf, size_t len) 5152 { 5153 int i; 5154 unsigned int caps; 5155 5156 if (tolower(*buf) == 'n') 5157 caps = 0; 5158 else if (tolower(*buf) == 'y') 5159 caps = -1; 5160 else if (kstrtouint(buf, 0, &caps)) 5161 return -EINVAL; 5162 5163 for (i = 0; i < NR_LRU_GEN_CAPS; i++) { 5164 bool enabled = caps & BIT(i); 5165 5166 if (i == LRU_GEN_CORE) 5167 lru_gen_change_state(enabled); 5168 else if (enabled) 5169 static_branch_enable(&lru_gen_caps[i]); 5170 else 5171 static_branch_disable(&lru_gen_caps[i]); 5172 } 5173 5174 return len; 5175 } 5176 5177 static struct kobj_attribute lru_gen_enabled_attr = __ATTR_RW(enabled); 5178 5179 static struct attribute *lru_gen_attrs[] = { 5180 &lru_gen_min_ttl_attr.attr, 5181 &lru_gen_enabled_attr.attr, 5182 NULL 5183 }; 5184 5185 static const struct attribute_group lru_gen_attr_group = { 5186 .name = "lru_gen", 5187 .attrs = lru_gen_attrs, 5188 }; 5189 5190 /****************************************************************************** 5191 * debugfs interface 5192 ******************************************************************************/ 5193 5194 static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos) 5195 { 5196 struct mem_cgroup *memcg; 5197 loff_t nr_to_skip = *pos; 5198 5199 m->private = kvmalloc(PATH_MAX, GFP_KERNEL); 5200 if (!m->private) 
5201 return ERR_PTR(-ENOMEM); 5202 5203 memcg = mem_cgroup_iter(NULL, NULL, NULL); 5204 do { 5205 int nid; 5206 5207 for_each_node_state(nid, N_MEMORY) { 5208 if (!nr_to_skip--) 5209 return get_lruvec(memcg, nid); 5210 } 5211 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); 5212 5213 return NULL; 5214 } 5215 5216 static void lru_gen_seq_stop(struct seq_file *m, void *v) 5217 { 5218 if (!IS_ERR_OR_NULL(v)) 5219 mem_cgroup_iter_break(NULL, lruvec_memcg(v)); 5220 5221 kvfree(m->private); 5222 m->private = NULL; 5223 } 5224 5225 static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos) 5226 { 5227 int nid = lruvec_pgdat(v)->node_id; 5228 struct mem_cgroup *memcg = lruvec_memcg(v); 5229 5230 ++*pos; 5231 5232 nid = next_memory_node(nid); 5233 if (nid == MAX_NUMNODES) { 5234 memcg = mem_cgroup_iter(NULL, memcg, NULL); 5235 if (!memcg) 5236 return NULL; 5237 5238 nid = first_memory_node; 5239 } 5240 5241 return get_lruvec(memcg, nid); 5242 } 5243 5244 static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec, 5245 unsigned long max_seq, unsigned long *min_seq, 5246 unsigned long seq) 5247 { 5248 int i; 5249 int type, tier; 5250 int hist = lru_hist_from_seq(seq); 5251 struct lru_gen_folio *lrugen = &lruvec->lrugen; 5252 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 5253 5254 for (tier = 0; tier < MAX_NR_TIERS; tier++) { 5255 seq_printf(m, " %10d", tier); 5256 for (type = 0; type < ANON_AND_FILE; type++) { 5257 const char *s = "xxx"; 5258 unsigned long n[3] = {}; 5259 5260 if (seq == max_seq) { 5261 s = "RTx"; 5262 n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]); 5263 n[1] = READ_ONCE(lrugen->avg_total[type][tier]); 5264 } else if (seq == min_seq[type] || NR_HIST_GENS > 1) { 5265 s = "rep"; 5266 n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]); 5267 n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]); 5268 if (tier) 5269 n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]); 5270 } 5271 5272 for (i = 0; i < 3; i++) 5273 seq_printf(m, " %10lu%c", n[i], s[i]); 5274 } 5275 seq_putc(m, '\n'); 5276 } 5277 5278 if (!mm_state) 5279 return; 5280 5281 seq_puts(m, " "); 5282 for (i = 0; i < NR_MM_STATS; i++) { 5283 const char *s = "xxxx"; 5284 unsigned long n = 0; 5285 5286 if (seq == max_seq && NR_HIST_GENS == 1) { 5287 s = "TYFA"; 5288 n = READ_ONCE(mm_state->stats[hist][i]); 5289 } else if (seq != max_seq && NR_HIST_GENS > 1) { 5290 s = "tyfa"; 5291 n = READ_ONCE(mm_state->stats[hist][i]); 5292 } 5293 5294 seq_printf(m, " %10lu%c", n, s[i]); 5295 } 5296 seq_putc(m, '\n'); 5297 } 5298 5299 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ 5300 static int lru_gen_seq_show(struct seq_file *m, void *v) 5301 { 5302 unsigned long seq; 5303 bool full = !debugfs_real_fops(m->file)->write; 5304 struct lruvec *lruvec = v; 5305 struct lru_gen_folio *lrugen = &lruvec->lrugen; 5306 int nid = lruvec_pgdat(lruvec)->node_id; 5307 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 5308 DEFINE_MAX_SEQ(lruvec); 5309 DEFINE_MIN_SEQ(lruvec); 5310 5311 if (nid == first_memory_node) { 5312 const char *path = memcg ? 
m->private : ""; 5313 5314 #ifdef CONFIG_MEMCG 5315 if (memcg) 5316 cgroup_path(memcg->css.cgroup, m->private, PATH_MAX); 5317 #endif 5318 seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path); 5319 } 5320 5321 seq_printf(m, " node %5d\n", nid); 5322 5323 if (!full) 5324 seq = min_seq[LRU_GEN_ANON]; 5325 else if (max_seq >= MAX_NR_GENS) 5326 seq = max_seq - MAX_NR_GENS + 1; 5327 else 5328 seq = 0; 5329 5330 for (; seq <= max_seq; seq++) { 5331 int type, zone; 5332 int gen = lru_gen_from_seq(seq); 5333 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); 5334 5335 seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth)); 5336 5337 for (type = 0; type < ANON_AND_FILE; type++) { 5338 unsigned long size = 0; 5339 char mark = full && seq < min_seq[type] ? 'x' : ' '; 5340 5341 for (zone = 0; zone < MAX_NR_ZONES; zone++) 5342 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); 5343 5344 seq_printf(m, " %10lu%c", size, mark); 5345 } 5346 5347 seq_putc(m, '\n'); 5348 5349 if (full) 5350 lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq); 5351 } 5352 5353 return 0; 5354 } 5355 5356 static const struct seq_operations lru_gen_seq_ops = { 5357 .start = lru_gen_seq_start, 5358 .stop = lru_gen_seq_stop, 5359 .next = lru_gen_seq_next, 5360 .show = lru_gen_seq_show, 5361 }; 5362 5363 static int run_aging(struct lruvec *lruvec, unsigned long seq, 5364 bool can_swap, bool force_scan) 5365 { 5366 DEFINE_MAX_SEQ(lruvec); 5367 DEFINE_MIN_SEQ(lruvec); 5368 5369 if (seq < max_seq) 5370 return 0; 5371 5372 if (seq > max_seq) 5373 return -EINVAL; 5374 5375 if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq) 5376 return -ERANGE; 5377 5378 try_to_inc_max_seq(lruvec, max_seq, can_swap, force_scan); 5379 5380 return 0; 5381 } 5382 5383 static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc, 5384 int swappiness, unsigned long nr_to_reclaim) 5385 { 5386 DEFINE_MAX_SEQ(lruvec); 5387 5388 if (seq + MIN_NR_GENS > max_seq) 5389 return -EINVAL; 5390 5391 sc->nr_reclaimed = 0; 5392 5393 while (!signal_pending(current)) { 5394 DEFINE_MIN_SEQ(lruvec); 5395 5396 if (seq < min_seq[!swappiness]) 5397 return 0; 5398 5399 if (sc->nr_reclaimed >= nr_to_reclaim) 5400 return 0; 5401 5402 if (!evict_folios(lruvec, sc, swappiness)) 5403 return 0; 5404 5405 cond_resched(); 5406 } 5407 5408 return -EINTR; 5409 } 5410 5411 static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq, 5412 struct scan_control *sc, int swappiness, unsigned long opt) 5413 { 5414 struct lruvec *lruvec; 5415 int err = -EINVAL; 5416 struct mem_cgroup *memcg = NULL; 5417 5418 if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY)) 5419 return -EINVAL; 5420 5421 if (!mem_cgroup_disabled()) { 5422 rcu_read_lock(); 5423 5424 memcg = mem_cgroup_from_id(memcg_id); 5425 if (!mem_cgroup_tryget(memcg)) 5426 memcg = NULL; 5427 5428 rcu_read_unlock(); 5429 5430 if (!memcg) 5431 return -EINVAL; 5432 } 5433 5434 if (memcg_id != mem_cgroup_id(memcg)) 5435 goto done; 5436 5437 lruvec = get_lruvec(memcg, nid); 5438 5439 if (swappiness < MIN_SWAPPINESS) 5440 swappiness = get_swappiness(lruvec, sc); 5441 else if (swappiness > MAX_SWAPPINESS) 5442 goto done; 5443 5444 switch (cmd) { 5445 case '+': 5446 err = run_aging(lruvec, seq, swappiness, opt); 5447 break; 5448 case '-': 5449 err = run_eviction(lruvec, seq, sc, swappiness, opt); 5450 break; 5451 } 5452 done: 5453 mem_cgroup_put(memcg); 5454 5455 return err; 5456 } 5457 5458 /* see 
Documentation/admin-guide/mm/multigen_lru.rst for details */ 5459 static ssize_t lru_gen_seq_write(struct file *file, const char __user *src, 5460 size_t len, loff_t *pos) 5461 { 5462 void *buf; 5463 char *cur, *next; 5464 unsigned int flags; 5465 struct blk_plug plug; 5466 int err = -EINVAL; 5467 struct scan_control sc = { 5468 .may_writepage = true, 5469 .may_unmap = true, 5470 .may_swap = true, 5471 .reclaim_idx = MAX_NR_ZONES - 1, 5472 .gfp_mask = GFP_KERNEL, 5473 }; 5474 5475 buf = kvmalloc(len + 1, GFP_KERNEL); 5476 if (!buf) 5477 return -ENOMEM; 5478 5479 if (copy_from_user(buf, src, len)) { 5480 kvfree(buf); 5481 return -EFAULT; 5482 } 5483 5484 set_task_reclaim_state(current, &sc.reclaim_state); 5485 flags = memalloc_noreclaim_save(); 5486 blk_start_plug(&plug); 5487 if (!set_mm_walk(NULL, true)) { 5488 err = -ENOMEM; 5489 goto done; 5490 } 5491 5492 next = buf; 5493 next[len] = '\0'; 5494 5495 while ((cur = strsep(&next, ",;\n"))) { 5496 int n; 5497 int end; 5498 char cmd; 5499 unsigned int memcg_id; 5500 unsigned int nid; 5501 unsigned long seq; 5502 unsigned int swappiness = -1; 5503 unsigned long opt = -1; 5504 5505 cur = skip_spaces(cur); 5506 if (!*cur) 5507 continue; 5508 5509 n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid, 5510 &seq, &end, &swappiness, &end, &opt, &end); 5511 if (n < 4 || cur[end]) { 5512 err = -EINVAL; 5513 break; 5514 } 5515 5516 err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt); 5517 if (err) 5518 break; 5519 } 5520 done: 5521 clear_mm_walk(); 5522 blk_finish_plug(&plug); 5523 memalloc_noreclaim_restore(flags); 5524 set_task_reclaim_state(current, NULL); 5525 5526 kvfree(buf); 5527 5528 return err ? : len; 5529 } 5530 5531 static int lru_gen_seq_open(struct inode *inode, struct file *file) 5532 { 5533 return seq_open(file, &lru_gen_seq_ops); 5534 } 5535 5536 static const struct file_operations lru_gen_rw_fops = { 5537 .open = lru_gen_seq_open, 5538 .read = seq_read, 5539 .write = lru_gen_seq_write, 5540 .llseek = seq_lseek, 5541 .release = seq_release, 5542 }; 5543 5544 static const struct file_operations lru_gen_ro_fops = { 5545 .open = lru_gen_seq_open, 5546 .read = seq_read, 5547 .llseek = seq_lseek, 5548 .release = seq_release, 5549 }; 5550 5551 /****************************************************************************** 5552 * initialization 5553 ******************************************************************************/ 5554 5555 void lru_gen_init_pgdat(struct pglist_data *pgdat) 5556 { 5557 int i, j; 5558 5559 spin_lock_init(&pgdat->memcg_lru.lock); 5560 5561 for (i = 0; i < MEMCG_NR_GENS; i++) { 5562 for (j = 0; j < MEMCG_NR_BINS; j++) 5563 INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i); 5564 } 5565 } 5566 5567 void lru_gen_init_lruvec(struct lruvec *lruvec) 5568 { 5569 int i; 5570 int gen, type, zone; 5571 struct lru_gen_folio *lrugen = &lruvec->lrugen; 5572 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 5573 5574 lrugen->max_seq = MIN_NR_GENS + 1; 5575 lrugen->enabled = lru_gen_enabled(); 5576 5577 for (i = 0; i <= MIN_NR_GENS + 1; i++) 5578 lrugen->timestamps[i] = jiffies; 5579 5580 for_each_gen_type_zone(gen, type, zone) 5581 INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]); 5582 5583 if (mm_state) 5584 mm_state->seq = MIN_NR_GENS; 5585 } 5586 5587 #ifdef CONFIG_MEMCG 5588 5589 void lru_gen_init_memcg(struct mem_cgroup *memcg) 5590 { 5591 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 5592 5593 if (!mm_list) 5594 return; 5595 5596 INIT_LIST_HEAD(&mm_list->fifo); 5597 
spin_lock_init(&mm_list->lock); 5598 } 5599 5600 void lru_gen_exit_memcg(struct mem_cgroup *memcg) 5601 { 5602 int i; 5603 int nid; 5604 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 5605 5606 VM_WARN_ON_ONCE(mm_list && !list_empty(&mm_list->fifo)); 5607 5608 for_each_node(nid) { 5609 struct lruvec *lruvec = get_lruvec(memcg, nid); 5610 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 5611 5612 VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, 5613 sizeof(lruvec->lrugen.nr_pages))); 5614 5615 lruvec->lrugen.list.next = LIST_POISON1; 5616 5617 if (!mm_state) 5618 continue; 5619 5620 for (i = 0; i < NR_BLOOM_FILTERS; i++) { 5621 bitmap_free(mm_state->filters[i]); 5622 mm_state->filters[i] = NULL; 5623 } 5624 } 5625 } 5626 5627 #endif /* CONFIG_MEMCG */ 5628 5629 static int __init init_lru_gen(void) 5630 { 5631 BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS); 5632 BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS); 5633 5634 if (sysfs_create_group(mm_kobj, &lru_gen_attr_group)) 5635 pr_err("lru_gen: failed to create sysfs group\n"); 5636 5637 debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops); 5638 debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops); 5639 5640 return 0; 5641 }; 5642 late_initcall(init_lru_gen); 5643 5644 #else /* !CONFIG_LRU_GEN */ 5645 5646 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) 5647 { 5648 BUILD_BUG(); 5649 } 5650 5651 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) 5652 { 5653 BUILD_BUG(); 5654 } 5655 5656 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) 5657 { 5658 BUILD_BUG(); 5659 } 5660 5661 #endif /* CONFIG_LRU_GEN */ 5662 5663 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) 5664 { 5665 unsigned long nr[NR_LRU_LISTS]; 5666 unsigned long targets[NR_LRU_LISTS]; 5667 unsigned long nr_to_scan; 5668 enum lru_list lru; 5669 unsigned long nr_reclaimed = 0; 5670 unsigned long nr_to_reclaim = sc->nr_to_reclaim; 5671 bool proportional_reclaim; 5672 struct blk_plug plug; 5673 5674 if (lru_gen_enabled() && !root_reclaim(sc)) { 5675 lru_gen_shrink_lruvec(lruvec, sc); 5676 return; 5677 } 5678 5679 get_scan_count(lruvec, sc, nr); 5680 5681 /* Record the original scan target for proportional adjustments later */ 5682 memcpy(targets, nr, sizeof(nr)); 5683 5684 /* 5685 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal 5686 * event that can occur when there is little memory pressure e.g. 5687 * multiple streaming readers/writers. Hence, we do not abort scanning 5688 * when the requested number of pages are reclaimed when scanning at 5689 * DEF_PRIORITY on the assumption that the fact we are direct 5690 * reclaiming implies that kswapd is not keeping up and it is best to 5691 * do a batch of work at once. For memcg reclaim one check is made to 5692 * abort proportional reclaim if either the file or anon lru has already 5693 * dropped to zero at the first pass. 
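 *
 * Concretely: global direct reclaim at DEF_PRIORITY scans both types to
 * their full targets regardless of nr_to_reclaim, whereas kswapd, memcg
 * reclaim and lower-priority direct reclaim stop the smaller LRU type once
 * nr_to_reclaim is met and trim the other type's remaining target
 * proportionally, as done below.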
5694 */ 5695 proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() && 5696 sc->priority == DEF_PRIORITY); 5697 5698 blk_start_plug(&plug); 5699 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 5700 nr[LRU_INACTIVE_FILE]) { 5701 unsigned long nr_anon, nr_file, percentage; 5702 unsigned long nr_scanned; 5703 5704 for_each_evictable_lru(lru) { 5705 if (nr[lru]) { 5706 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX); 5707 nr[lru] -= nr_to_scan; 5708 5709 nr_reclaimed += shrink_list(lru, nr_to_scan, 5710 lruvec, sc); 5711 } 5712 } 5713 5714 cond_resched(); 5715 5716 if (nr_reclaimed < nr_to_reclaim || proportional_reclaim) 5717 continue; 5718 5719 /* 5720 * For kswapd and memcg, reclaim at least the number of pages 5721 * requested. Ensure that the anon and file LRUs are scanned 5722 * proportionally what was requested by get_scan_count(). We 5723 * stop reclaiming one LRU and reduce the amount scanning 5724 * proportional to the original scan target. 5725 */ 5726 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE]; 5727 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON]; 5728 5729 /* 5730 * It's just vindictive to attack the larger once the smaller 5731 * has gone to zero. And given the way we stop scanning the 5732 * smaller below, this makes sure that we only make one nudge 5733 * towards proportionality once we've got nr_to_reclaim. 5734 */ 5735 if (!nr_file || !nr_anon) 5736 break; 5737 5738 if (nr_file > nr_anon) { 5739 unsigned long scan_target = targets[LRU_INACTIVE_ANON] + 5740 targets[LRU_ACTIVE_ANON] + 1; 5741 lru = LRU_BASE; 5742 percentage = nr_anon * 100 / scan_target; 5743 } else { 5744 unsigned long scan_target = targets[LRU_INACTIVE_FILE] + 5745 targets[LRU_ACTIVE_FILE] + 1; 5746 lru = LRU_FILE; 5747 percentage = nr_file * 100 / scan_target; 5748 } 5749 5750 /* Stop scanning the smaller of the LRU */ 5751 nr[lru] = 0; 5752 nr[lru + LRU_ACTIVE] = 0; 5753 5754 /* 5755 * Recalculate the other LRU scan count based on its original 5756 * scan target and the percentage scanning already complete 5757 */ 5758 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE; 5759 nr_scanned = targets[lru] - nr[lru]; 5760 nr[lru] = targets[lru] * (100 - percentage) / 100; 5761 nr[lru] -= min(nr[lru], nr_scanned); 5762 5763 lru += LRU_ACTIVE; 5764 nr_scanned = targets[lru] - nr[lru]; 5765 nr[lru] = targets[lru] * (100 - percentage) / 100; 5766 nr[lru] -= min(nr[lru], nr_scanned); 5767 } 5768 blk_finish_plug(&plug); 5769 sc->nr_reclaimed += nr_reclaimed; 5770 5771 /* 5772 * Even if we did not try to evict anon pages at all, we want to 5773 * rebalance the anon lru active/inactive ratio. 5774 */ 5775 if (can_age_anon_pages(lruvec_pgdat(lruvec), sc) && 5776 inactive_is_low(lruvec, LRU_INACTIVE_ANON)) 5777 shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 5778 sc, LRU_ACTIVE_ANON); 5779 } 5780 5781 /* Use reclaim/compaction for costly allocs or under memory pressure */ 5782 static bool in_reclaim_compaction(struct scan_control *sc) 5783 { 5784 if (gfp_compaction_allowed(sc->gfp_mask) && sc->order && 5785 (sc->order > PAGE_ALLOC_COSTLY_ORDER || 5786 sc->priority < DEF_PRIORITY - 2)) 5787 return true; 5788 5789 return false; 5790 } 5791 5792 /* 5793 * Reclaim/compaction is used for high-order allocation requests. It reclaims 5794 * order-0 pages before compacting the zone. should_continue_reclaim() returns 5795 * true if more pages should be reclaimed such that when the page allocator 5796 * calls try_to_compact_pages() that it will have enough free pages to succeed. 
5797 * It will give up earlier than that if there is difficulty reclaiming pages. 5798 */ 5799 static inline bool should_continue_reclaim(struct pglist_data *pgdat, 5800 unsigned long nr_reclaimed, 5801 struct scan_control *sc) 5802 { 5803 unsigned long pages_for_compaction; 5804 unsigned long inactive_lru_pages; 5805 int z; 5806 5807 /* If not in reclaim/compaction mode, stop */ 5808 if (!in_reclaim_compaction(sc)) 5809 return false; 5810 5811 /* 5812 * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX 5813 * number of pages that were scanned. This will return to the caller 5814 * with the risk reclaim/compaction and the resulting allocation attempt 5815 * fails. In the past we have tried harder for __GFP_RETRY_MAYFAIL 5816 * allocations through requiring that the full LRU list has been scanned 5817 * first, by assuming that zero delta of sc->nr_scanned means full LRU 5818 * scan, but that approximation was wrong, and there were corner cases 5819 * where always a non-zero amount of pages were scanned. 5820 */ 5821 if (!nr_reclaimed) 5822 return false; 5823 5824 /* If compaction would go ahead or the allocation would succeed, stop */ 5825 for (z = 0; z <= sc->reclaim_idx; z++) { 5826 struct zone *zone = &pgdat->node_zones[z]; 5827 if (!managed_zone(zone)) 5828 continue; 5829 5830 /* Allocation can already succeed, nothing to do */ 5831 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), 5832 sc->reclaim_idx, 0)) 5833 return false; 5834 5835 if (compaction_suitable(zone, sc->order, sc->reclaim_idx)) 5836 return false; 5837 } 5838 5839 /* 5840 * If we have not reclaimed enough pages for compaction and the 5841 * inactive lists are large enough, continue reclaiming 5842 */ 5843 pages_for_compaction = compact_gap(sc->order); 5844 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE); 5845 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) 5846 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON); 5847 5848 return inactive_lru_pages > pages_for_compaction; 5849 } 5850 5851 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) 5852 { 5853 struct mem_cgroup *target_memcg = sc->target_mem_cgroup; 5854 struct mem_cgroup_reclaim_cookie reclaim = { 5855 .pgdat = pgdat, 5856 }; 5857 struct mem_cgroup_reclaim_cookie *partial = &reclaim; 5858 struct mem_cgroup *memcg; 5859 5860 /* 5861 * In most cases, direct reclaimers can do partial walks 5862 * through the cgroup tree, using an iterator state that 5863 * persists across invocations. This strikes a balance between 5864 * fairness and allocation latency. 5865 * 5866 * For kswapd, reliable forward progress is more important 5867 * than a quick return to idle. Always do full walks. 5868 */ 5869 if (current_is_kswapd() || sc->memcg_full_walk) 5870 partial = NULL; 5871 5872 memcg = mem_cgroup_iter(target_memcg, NULL, partial); 5873 do { 5874 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); 5875 unsigned long reclaimed; 5876 unsigned long scanned; 5877 5878 /* 5879 * This loop can become CPU-bound when target memcgs 5880 * aren't eligible for reclaim - either because they 5881 * don't have any reclaimable pages, or because their 5882 * memory is explicitly protected. Avoid soft lockups. 5883 */ 5884 cond_resched(); 5885 5886 mem_cgroup_calculate_protection(target_memcg, memcg); 5887 5888 if (mem_cgroup_below_min(target_memcg, memcg)) { 5889 /* 5890 * Hard protection. 5891 * If there is no reclaimable memory, OOM. 
5892 */ 5893 continue; 5894 } else if (mem_cgroup_below_low(target_memcg, memcg)) { 5895 /* 5896 * Soft protection. 5897 * Respect the protection only as long as 5898 * there is an unprotected supply 5899 * of reclaimable memory from other cgroups. 5900 */ 5901 if (!sc->memcg_low_reclaim) { 5902 sc->memcg_low_skipped = 1; 5903 continue; 5904 } 5905 memcg_memory_event(memcg, MEMCG_LOW); 5906 } 5907 5908 reclaimed = sc->nr_reclaimed; 5909 scanned = sc->nr_scanned; 5910 5911 shrink_lruvec(lruvec, sc); 5912 5913 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, 5914 sc->priority); 5915 5916 /* Record the group's reclaim efficiency */ 5917 if (!sc->proactive) 5918 vmpressure(sc->gfp_mask, memcg, false, 5919 sc->nr_scanned - scanned, 5920 sc->nr_reclaimed - reclaimed); 5921 5922 /* If partial walks are allowed, bail once goal is reached */ 5923 if (partial && sc->nr_reclaimed >= sc->nr_to_reclaim) { 5924 mem_cgroup_iter_break(target_memcg, memcg); 5925 break; 5926 } 5927 } while ((memcg = mem_cgroup_iter(target_memcg, memcg, partial))); 5928 } 5929 5930 static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) 5931 { 5932 unsigned long nr_reclaimed, nr_scanned, nr_node_reclaimed; 5933 struct lruvec *target_lruvec; 5934 bool reclaimable = false; 5935 5936 if (lru_gen_enabled() && root_reclaim(sc)) { 5937 lru_gen_shrink_node(pgdat, sc); 5938 return; 5939 } 5940 5941 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); 5942 5943 again: 5944 memset(&sc->nr, 0, sizeof(sc->nr)); 5945 5946 nr_reclaimed = sc->nr_reclaimed; 5947 nr_scanned = sc->nr_scanned; 5948 5949 prepare_scan_control(pgdat, sc); 5950 5951 shrink_node_memcgs(pgdat, sc); 5952 5953 flush_reclaim_state(sc); 5954 5955 nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed; 5956 5957 /* Record the subtree's reclaim efficiency */ 5958 if (!sc->proactive) 5959 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, 5960 sc->nr_scanned - nr_scanned, nr_node_reclaimed); 5961 5962 if (nr_node_reclaimed) 5963 reclaimable = true; 5964 5965 if (current_is_kswapd()) { 5966 /* 5967 * If reclaim is isolating dirty pages under writeback, 5968 * it implies that the long-lived page allocation rate 5969 * is exceeding the page laundering rate. Either the 5970 * global limits are not being effective at throttling 5971 * processes due to the page distribution throughout 5972 * zones or there is heavy usage of a slow backing 5973 * device. The only option is to throttle from reclaim 5974 * context which is not ideal as there is no guarantee 5975 * the dirtying process is throttled in the same way 5976 * balance_dirty_pages() manages. 5977 * 5978 * Once a node is flagged PGDAT_WRITEBACK, kswapd will 5979 * count the number of pages under pages flagged for 5980 * immediate reclaim and stall if any are encountered 5981 * in the nr_immediate check below. 5982 */ 5983 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) 5984 set_bit(PGDAT_WRITEBACK, &pgdat->flags); 5985 5986 /* Allow kswapd to start writing pages during reclaim.*/ 5987 if (sc->nr.unqueued_dirty && 5988 sc->nr.unqueued_dirty == sc->nr.file_taken) 5989 set_bit(PGDAT_DIRTY, &pgdat->flags); 5990 5991 /* 5992 * If kswapd scans pages marked for immediate 5993 * reclaim and under writeback (nr_immediate), it 5994 * implies that pages are cycling through the LRU 5995 * faster than they are written so forcibly stall 5996 * until some pages complete writeback. 
5997 */ 5998 if (sc->nr.immediate) 5999 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); 6000 } 6001 6002 /* 6003 * Tag a node/memcg as congested if all the dirty pages were marked 6004 * for writeback and immediate reclaim (counted in nr.congested). 6005 * 6006 * Legacy memcg will stall in page writeback so avoid forcibly 6007 * stalling in reclaim_throttle(). 6008 */ 6009 if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) { 6010 if (cgroup_reclaim(sc) && writeback_throttling_sane(sc)) 6011 set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags); 6012 6013 if (current_is_kswapd()) 6014 set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags); 6015 } 6016 6017 /* 6018 * Stall direct reclaim for IO completions if the lruvec is 6019 * node is congested. Allow kswapd to continue until it 6020 * starts encountering unqueued dirty pages or cycling through 6021 * the LRU too quickly. 6022 */ 6023 if (!current_is_kswapd() && current_may_throttle() && 6024 !sc->hibernation_mode && 6025 (test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) || 6026 test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags))) 6027 reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED); 6028 6029 if (should_continue_reclaim(pgdat, nr_node_reclaimed, sc)) 6030 goto again; 6031 6032 /* 6033 * Kswapd gives up on balancing particular nodes after too 6034 * many failures to reclaim anything from them and goes to 6035 * sleep. On reclaim progress, reset the failure counter. A 6036 * successful direct reclaim run will revive a dormant kswapd. 6037 */ 6038 if (reclaimable) 6039 pgdat->kswapd_failures = 0; 6040 else if (sc->cache_trim_mode) 6041 sc->cache_trim_mode_failed = 1; 6042 } 6043 6044 /* 6045 * Returns true if compaction should go ahead for a costly-order request, or 6046 * the allocation would already succeed without compaction. Return false if we 6047 * should reclaim first. 6048 */ 6049 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) 6050 { 6051 unsigned long watermark; 6052 6053 if (!gfp_compaction_allowed(sc->gfp_mask)) 6054 return false; 6055 6056 /* Allocation can already succeed, nothing to do */ 6057 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), 6058 sc->reclaim_idx, 0)) 6059 return true; 6060 6061 /* Compaction cannot yet proceed. Do reclaim. */ 6062 if (!compaction_suitable(zone, sc->order, sc->reclaim_idx)) 6063 return false; 6064 6065 /* 6066 * Compaction is already possible, but it takes time to run and there 6067 * are potentially other callers using the pages just freed. So proceed 6068 * with reclaim to make a buffer of free pages available to give 6069 * compaction a reasonable chance of completing and allocating the page. 6070 * Note that we won't actually reclaim the whole buffer in one attempt 6071 * as the target watermark in should_continue_reclaim() is lower. But if 6072 * we are already above the high+gap watermark, don't reclaim at all. 6073 */ 6074 watermark = high_wmark_pages(zone) + compact_gap(sc->order); 6075 6076 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx); 6077 } 6078 6079 static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc) 6080 { 6081 /* 6082 * If reclaim is making progress greater than 12% efficiency then 6083 * wake all the NOPROGRESS throttled tasks. 
6084 */ 6085 if (sc->nr_reclaimed > (sc->nr_scanned >> 3)) { 6086 wait_queue_head_t *wqh; 6087 6088 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS]; 6089 if (waitqueue_active(wqh)) 6090 wake_up(wqh); 6091 6092 return; 6093 } 6094 6095 /* 6096 * Do not throttle kswapd or cgroup reclaim on NOPROGRESS as it will 6097 * throttle on VMSCAN_THROTTLE_WRITEBACK if there are too many pages 6098 * under writeback and marked for immediate reclaim at the tail of the 6099 * LRU. 6100 */ 6101 if (current_is_kswapd() || cgroup_reclaim(sc)) 6102 return; 6103 6104 /* Throttle if making no progress at high prioities. */ 6105 if (sc->priority == 1 && !sc->nr_reclaimed) 6106 reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS); 6107 } 6108 6109 /* 6110 * This is the direct reclaim path, for page-allocating processes. We only 6111 * try to reclaim pages from zones which will satisfy the caller's allocation 6112 * request. 6113 * 6114 * If a zone is deemed to be full of pinned pages then just give it a light 6115 * scan then give up on it. 6116 */ 6117 static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc) 6118 { 6119 struct zoneref *z; 6120 struct zone *zone; 6121 unsigned long nr_soft_reclaimed; 6122 unsigned long nr_soft_scanned; 6123 gfp_t orig_mask; 6124 pg_data_t *last_pgdat = NULL; 6125 pg_data_t *first_pgdat = NULL; 6126 6127 /* 6128 * If the number of buffer_heads in the machine exceeds the maximum 6129 * allowed level, force direct reclaim to scan the highmem zone as 6130 * highmem pages could be pinning lowmem pages storing buffer_heads 6131 */ 6132 orig_mask = sc->gfp_mask; 6133 if (buffer_heads_over_limit) { 6134 sc->gfp_mask |= __GFP_HIGHMEM; 6135 sc->reclaim_idx = gfp_zone(sc->gfp_mask); 6136 } 6137 6138 for_each_zone_zonelist_nodemask(zone, z, zonelist, 6139 sc->reclaim_idx, sc->nodemask) { 6140 /* 6141 * Take care memory controller reclaiming has small influence 6142 * to global LRU. 6143 */ 6144 if (!cgroup_reclaim(sc)) { 6145 if (!cpuset_zone_allowed(zone, 6146 GFP_KERNEL | __GFP_HARDWALL)) 6147 continue; 6148 6149 /* 6150 * If we already have plenty of memory free for 6151 * compaction in this zone, don't free any more. 6152 * Even though compaction is invoked for any 6153 * non-zero order, only frequent costly order 6154 * reclamation is disruptive enough to become a 6155 * noticeable problem, like transparent huge 6156 * page allocations. 6157 */ 6158 if (IS_ENABLED(CONFIG_COMPACTION) && 6159 sc->order > PAGE_ALLOC_COSTLY_ORDER && 6160 compaction_ready(zone, sc)) { 6161 sc->compaction_ready = true; 6162 continue; 6163 } 6164 6165 /* 6166 * Shrink each node in the zonelist once. If the 6167 * zonelist is ordered by zone (not the default) then a 6168 * node may be shrunk multiple times but in that case 6169 * the user prefers lower zones being preserved. 6170 */ 6171 if (zone->zone_pgdat == last_pgdat) 6172 continue; 6173 6174 /* 6175 * This steals pages from memory cgroups over softlimit 6176 * and returns the number of reclaimed pages and 6177 * scanned pages. This works for global memory pressure 6178 * and balancing, not for a memcg's limit. 
6179 */ 6180 nr_soft_scanned = 0; 6181 nr_soft_reclaimed = memcg1_soft_limit_reclaim(zone->zone_pgdat, 6182 sc->order, sc->gfp_mask, 6183 &nr_soft_scanned); 6184 sc->nr_reclaimed += nr_soft_reclaimed; 6185 sc->nr_scanned += nr_soft_scanned; 6186 /* need some check for avoid more shrink_zone() */ 6187 } 6188 6189 if (!first_pgdat) 6190 first_pgdat = zone->zone_pgdat; 6191 6192 /* See comment about same check for global reclaim above */ 6193 if (zone->zone_pgdat == last_pgdat) 6194 continue; 6195 last_pgdat = zone->zone_pgdat; 6196 shrink_node(zone->zone_pgdat, sc); 6197 } 6198 6199 if (first_pgdat) 6200 consider_reclaim_throttle(first_pgdat, sc); 6201 6202 /* 6203 * Restore to original mask to avoid the impact on the caller if we 6204 * promoted it to __GFP_HIGHMEM. 6205 */ 6206 sc->gfp_mask = orig_mask; 6207 } 6208 6209 static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat) 6210 { 6211 struct lruvec *target_lruvec; 6212 unsigned long refaults; 6213 6214 if (lru_gen_enabled()) 6215 return; 6216 6217 target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat); 6218 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON); 6219 target_lruvec->refaults[WORKINGSET_ANON] = refaults; 6220 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE); 6221 target_lruvec->refaults[WORKINGSET_FILE] = refaults; 6222 } 6223 6224 /* 6225 * This is the main entry point to direct page reclaim. 6226 * 6227 * If a full scan of the inactive list fails to free enough memory then we 6228 * are "out of memory" and something needs to be killed. 6229 * 6230 * If the caller is !__GFP_FS then the probability of a failure is reasonably 6231 * high - the zone may be full of dirty or under-writeback pages, which this 6232 * caller can't do much about. We kick the writeback threads and take explicit 6233 * naps in the hope that some of these pages can be written. But if the 6234 * allocating task holds filesystem locks which prevent writeout this might not 6235 * work, and the allocation attempt will fail. 6236 * 6237 * returns: 0, if no pages reclaimed 6238 * else, the number of pages reclaimed 6239 */ 6240 static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 6241 struct scan_control *sc) 6242 { 6243 int initial_priority = sc->priority; 6244 pg_data_t *last_pgdat; 6245 struct zoneref *z; 6246 struct zone *zone; 6247 retry: 6248 delayacct_freepages_start(); 6249 6250 if (!cgroup_reclaim(sc)) 6251 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); 6252 6253 do { 6254 if (!sc->proactive) 6255 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, 6256 sc->priority); 6257 sc->nr_scanned = 0; 6258 shrink_zones(zonelist, sc); 6259 6260 if (sc->nr_reclaimed >= sc->nr_to_reclaim) 6261 break; 6262 6263 if (sc->compaction_ready) 6264 break; 6265 6266 /* 6267 * If we're getting trouble reclaiming, start doing 6268 * writepage even in laptop mode. 
6269 */ 6270 if (sc->priority < DEF_PRIORITY - 2) 6271 sc->may_writepage = 1; 6272 } while (--sc->priority >= 0); 6273 6274 last_pgdat = NULL; 6275 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, 6276 sc->nodemask) { 6277 if (zone->zone_pgdat == last_pgdat) 6278 continue; 6279 last_pgdat = zone->zone_pgdat; 6280 6281 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); 6282 6283 if (cgroup_reclaim(sc)) { 6284 struct lruvec *lruvec; 6285 6286 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, 6287 zone->zone_pgdat); 6288 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); 6289 } 6290 } 6291 6292 delayacct_freepages_end(); 6293 6294 if (sc->nr_reclaimed) 6295 return sc->nr_reclaimed; 6296 6297 /* Aborted reclaim to try compaction? don't OOM, then */ 6298 if (sc->compaction_ready) 6299 return 1; 6300 6301 /* 6302 * In most cases, direct reclaimers can do partial walks 6303 * through the cgroup tree to meet the reclaim goal while 6304 * keeping latency low. Since the iterator state is shared 6305 * among all direct reclaim invocations (to retain fairness 6306 * among cgroups), though, high concurrency can result in 6307 * individual threads not seeing enough cgroups to make 6308 * meaningful forward progress. Avoid false OOMs in this case. 6309 */ 6310 if (!sc->memcg_full_walk) { 6311 sc->priority = initial_priority; 6312 sc->memcg_full_walk = 1; 6313 goto retry; 6314 } 6315 6316 /* 6317 * We make inactive:active ratio decisions based on the node's 6318 * composition of memory, but a restrictive reclaim_idx or a 6319 * memory.low cgroup setting can exempt large amounts of 6320 * memory from reclaim. Neither of which are very common, so 6321 * instead of doing costly eligibility calculations of the 6322 * entire cgroup subtree up front, we assume the estimates are 6323 * good, and retry with forcible deactivation if that fails. 6324 */ 6325 if (sc->skipped_deactivate) { 6326 sc->priority = initial_priority; 6327 sc->force_deactivate = 1; 6328 sc->skipped_deactivate = 0; 6329 goto retry; 6330 } 6331 6332 /* Untapped cgroup reserves? Don't OOM, retry. 
*/ 6333 if (sc->memcg_low_skipped) { 6334 sc->priority = initial_priority; 6335 sc->force_deactivate = 0; 6336 sc->memcg_low_reclaim = 1; 6337 sc->memcg_low_skipped = 0; 6338 goto retry; 6339 } 6340 6341 return 0; 6342 } 6343 6344 static bool allow_direct_reclaim(pg_data_t *pgdat) 6345 { 6346 struct zone *zone; 6347 unsigned long pfmemalloc_reserve = 0; 6348 unsigned long free_pages = 0; 6349 int i; 6350 bool wmark_ok; 6351 6352 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) 6353 return true; 6354 6355 for (i = 0; i <= ZONE_NORMAL; i++) { 6356 zone = &pgdat->node_zones[i]; 6357 if (!managed_zone(zone)) 6358 continue; 6359 6360 if (!zone_reclaimable_pages(zone)) 6361 continue; 6362 6363 pfmemalloc_reserve += min_wmark_pages(zone); 6364 free_pages += zone_page_state_snapshot(zone, NR_FREE_PAGES); 6365 } 6366 6367 /* If there are no reserves (unexpected config) then do not throttle */ 6368 if (!pfmemalloc_reserve) 6369 return true; 6370 6371 wmark_ok = free_pages > pfmemalloc_reserve / 2; 6372 6373 /* kswapd must be awake if processes are being throttled */ 6374 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { 6375 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) 6376 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); 6377 6378 wake_up_interruptible(&pgdat->kswapd_wait); 6379 } 6380 6381 return wmark_ok; 6382 } 6383 6384 /* 6385 * Throttle direct reclaimers if backing storage is backed by the network 6386 * and the PFMEMALLOC reserve for the preferred node is getting dangerously 6387 * depleted. kswapd will continue to make progress and wake the processes 6388 * when the low watermark is reached. 6389 * 6390 * Returns true if a fatal signal was delivered during throttling. If this 6391 * happens, the page allocator should not consider triggering the OOM killer. 6392 */ 6393 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, 6394 nodemask_t *nodemask) 6395 { 6396 struct zoneref *z; 6397 struct zone *zone; 6398 pg_data_t *pgdat = NULL; 6399 6400 /* 6401 * Kernel threads should not be throttled as they may be indirectly 6402 * responsible for cleaning pages necessary for reclaim to make forward 6403 * progress. kjournald for example may enter direct reclaim while 6404 * committing a transaction where throttling it could forcing other 6405 * processes to block on log_wait_commit(). 6406 */ 6407 if (current->flags & PF_KTHREAD) 6408 goto out; 6409 6410 /* 6411 * If a fatal signal is pending, this process should not throttle. 6412 * It should return quickly so it can exit and free its memory 6413 */ 6414 if (fatal_signal_pending(current)) 6415 goto out; 6416 6417 /* 6418 * Check if the pfmemalloc reserves are ok by finding the first node 6419 * with a usable ZONE_NORMAL or lower zone. The expectation is that 6420 * GFP_KERNEL will be required for allocating network buffers when 6421 * swapping over the network so ZONE_HIGHMEM is unusable. 6422 * 6423 * Throttling is based on the first usable node and throttled processes 6424 * wait on a queue until kswapd makes progress and wakes them. There 6425 * is an affinity then between processes waking up and where reclaim 6426 * progress has been made assuming the process wakes on the same node. 6427 * More importantly, processes running on remote nodes will not compete 6428 * for remote pfmemalloc reserves and processes on different nodes 6429 * should make reasonable progress. 
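 * Note that allow_direct_reclaim() only reports trouble once free
 * pages in the node's ZONE_NORMAL and lower zones drop below half of
 * their combined min watermarks; e.g. with 64MB worth of min
 * watermarks, throttling starts when those zones fall under roughly
 * 32MB free.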
6430 */ 6431 for_each_zone_zonelist_nodemask(zone, z, zonelist, 6432 gfp_zone(gfp_mask), nodemask) { 6433 if (zone_idx(zone) > ZONE_NORMAL) 6434 continue; 6435 6436 /* Throttle based on the first usable node */ 6437 pgdat = zone->zone_pgdat; 6438 if (allow_direct_reclaim(pgdat)) 6439 goto out; 6440 break; 6441 } 6442 6443 /* If no zone was usable by the allocation flags then do not throttle */ 6444 if (!pgdat) 6445 goto out; 6446 6447 /* Account for the throttling */ 6448 count_vm_event(PGSCAN_DIRECT_THROTTLE); 6449 6450 /* 6451 * If the caller cannot enter the filesystem, it's possible that it 6452 * is due to the caller holding an FS lock or performing a journal 6453 * transaction in the case of a filesystem like ext[3|4]. In this case, 6454 * it is not safe to block on pfmemalloc_wait as kswapd could be 6455 * blocked waiting on the same lock. Instead, throttle for up to a 6456 * second before continuing. 6457 */ 6458 if (!(gfp_mask & __GFP_FS)) 6459 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, 6460 allow_direct_reclaim(pgdat), HZ); 6461 else 6462 /* Throttle until kswapd wakes the process */ 6463 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, 6464 allow_direct_reclaim(pgdat)); 6465 6466 if (fatal_signal_pending(current)) 6467 return true; 6468 6469 out: 6470 return false; 6471 } 6472 6473 unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 6474 gfp_t gfp_mask, nodemask_t *nodemask) 6475 { 6476 unsigned long nr_reclaimed; 6477 struct scan_control sc = { 6478 .nr_to_reclaim = SWAP_CLUSTER_MAX, 6479 .gfp_mask = current_gfp_context(gfp_mask), 6480 .reclaim_idx = gfp_zone(gfp_mask), 6481 .order = order, 6482 .nodemask = nodemask, 6483 .priority = DEF_PRIORITY, 6484 .may_writepage = !laptop_mode, 6485 .may_unmap = 1, 6486 .may_swap = 1, 6487 }; 6488 6489 /* 6490 * scan_control uses s8 fields for order, priority, and reclaim_idx. 6491 * Confirm they are large enough for max values. 6492 */ 6493 BUILD_BUG_ON(MAX_PAGE_ORDER >= S8_MAX); 6494 BUILD_BUG_ON(DEF_PRIORITY > S8_MAX); 6495 BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX); 6496 6497 /* 6498 * Do not enter reclaim if fatal signal was delivered while throttled. 6499 * 1 is returned so that the page allocator does not OOM kill at this 6500 * point. 6501 */ 6502 if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask)) 6503 return 1; 6504 6505 set_task_reclaim_state(current, &sc.reclaim_state); 6506 trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask); 6507 6508 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 6509 6510 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); 6511 set_task_reclaim_state(current, NULL); 6512 6513 return nr_reclaimed; 6514 } 6515 6516 #ifdef CONFIG_MEMCG 6517 6518 /* Only used by soft limit reclaim. Do not reuse for anything else. 
*/ 6519 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, 6520 gfp_t gfp_mask, bool noswap, 6521 pg_data_t *pgdat, 6522 unsigned long *nr_scanned) 6523 { 6524 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); 6525 struct scan_control sc = { 6526 .nr_to_reclaim = SWAP_CLUSTER_MAX, 6527 .target_mem_cgroup = memcg, 6528 .may_writepage = !laptop_mode, 6529 .may_unmap = 1, 6530 .reclaim_idx = MAX_NR_ZONES - 1, 6531 .may_swap = !noswap, 6532 }; 6533 6534 WARN_ON_ONCE(!current->reclaim_state); 6535 6536 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 6537 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 6538 6539 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order, 6540 sc.gfp_mask); 6541 6542 /* 6543 * NOTE: Although we can get the priority field, using it 6544 * here is not a good idea, since it limits the pages we can scan. 6545 * if we don't reclaim here, the shrink_node from balance_pgdat 6546 * will pick up pages from other mem cgroup's as well. We hack 6547 * the priority and make it zero. 6548 */ 6549 shrink_lruvec(lruvec, &sc); 6550 6551 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 6552 6553 *nr_scanned = sc.nr_scanned; 6554 6555 return sc.nr_reclaimed; 6556 } 6557 6558 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, 6559 unsigned long nr_pages, 6560 gfp_t gfp_mask, 6561 unsigned int reclaim_options, 6562 int *swappiness) 6563 { 6564 unsigned long nr_reclaimed; 6565 unsigned int noreclaim_flag; 6566 struct scan_control sc = { 6567 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), 6568 .proactive_swappiness = swappiness, 6569 .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) | 6570 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), 6571 .reclaim_idx = MAX_NR_ZONES - 1, 6572 .target_mem_cgroup = memcg, 6573 .priority = DEF_PRIORITY, 6574 .may_writepage = !laptop_mode, 6575 .may_unmap = 1, 6576 .may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP), 6577 .proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE), 6578 }; 6579 /* 6580 * Traverse the ZONELIST_FALLBACK zonelist of the current node to put 6581 * equal pressure on all the nodes. This is based on the assumption that 6582 * the reclaim does not bail out early. 
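 * (do_try_to_free_pages() stops as soon as sc.nr_to_reclaim is met,
 * so for small requests most of the pressure can in practice land on
 * the nodes that come first in this zonelist.)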
6583 */ 6584 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 6585 6586 set_task_reclaim_state(current, &sc.reclaim_state); 6587 trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask); 6588 noreclaim_flag = memalloc_noreclaim_save(); 6589 6590 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 6591 6592 memalloc_noreclaim_restore(noreclaim_flag); 6593 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 6594 set_task_reclaim_state(current, NULL); 6595 6596 return nr_reclaimed; 6597 } 6598 #endif 6599 6600 static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc) 6601 { 6602 struct mem_cgroup *memcg; 6603 struct lruvec *lruvec; 6604 6605 if (lru_gen_enabled()) { 6606 lru_gen_age_node(pgdat, sc); 6607 return; 6608 } 6609 6610 if (!can_age_anon_pages(pgdat, sc)) 6611 return; 6612 6613 lruvec = mem_cgroup_lruvec(NULL, pgdat); 6614 if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON)) 6615 return; 6616 6617 memcg = mem_cgroup_iter(NULL, NULL, NULL); 6618 do { 6619 lruvec = mem_cgroup_lruvec(memcg, pgdat); 6620 shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 6621 sc, LRU_ACTIVE_ANON); 6622 memcg = mem_cgroup_iter(NULL, memcg, NULL); 6623 } while (memcg); 6624 } 6625 6626 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx) 6627 { 6628 int i; 6629 struct zone *zone; 6630 6631 /* 6632 * Check for watermark boosts top-down as the higher zones 6633 * are more likely to be boosted. Both watermarks and boosts 6634 * should not be checked at the same time as reclaim would 6635 * start prematurely when there is no boosting and a lower 6636 * zone is balanced. 6637 */ 6638 for (i = highest_zoneidx; i >= 0; i--) { 6639 zone = pgdat->node_zones + i; 6640 if (!managed_zone(zone)) 6641 continue; 6642 6643 if (zone->watermark_boost) 6644 return true; 6645 } 6646 6647 return false; 6648 } 6649 6650 /* 6651 * Returns true if there is an eligible zone balanced for the request order 6652 * and highest_zoneidx 6653 */ 6654 static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) 6655 { 6656 int i; 6657 unsigned long mark = -1; 6658 struct zone *zone; 6659 6660 /* 6661 * Check watermarks bottom-up as lower zones are more likely to 6662 * meet watermarks. 6663 */ 6664 for (i = 0; i <= highest_zoneidx; i++) { 6665 zone = pgdat->node_zones + i; 6666 6667 if (!managed_zone(zone)) 6668 continue; 6669 6670 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) 6671 mark = promo_wmark_pages(zone); 6672 else 6673 mark = high_wmark_pages(zone); 6674 if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx)) 6675 return true; 6676 } 6677 6678 /* 6679 * If a node has no managed zone within highest_zoneidx, it does not 6680 * need balancing by definition. This can happen if a zone-restricted 6681 * allocation tries to wake a remote kswapd. 6682 */ 6683 if (mark == -1) 6684 return true; 6685 6686 return false; 6687 } 6688 6689 /* Clear pgdat state for congested, dirty or under writeback. */ 6690 static void clear_pgdat_congested(pg_data_t *pgdat) 6691 { 6692 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); 6693 6694 clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags); 6695 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); 6696 clear_bit(PGDAT_DIRTY, &pgdat->flags); 6697 clear_bit(PGDAT_WRITEBACK, &pgdat->flags); 6698 } 6699 6700 /* 6701 * Prepare kswapd for sleeping. This verifies that there are no processes 6702 * waiting in throttle_direct_reclaim() and that watermarks have been met. 
6703 * 6704 * Returns true if kswapd is ready to sleep 6705 */ 6706 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, 6707 int highest_zoneidx) 6708 { 6709 /* 6710 * The throttled processes are normally woken up in balance_pgdat() as 6711 * soon as allow_direct_reclaim() is true. But there is a potential 6712 * race between when kswapd checks the watermarks and a process gets 6713 * throttled. There is also a potential race if processes get 6714 * throttled, kswapd wakes, a large process exits thereby balancing the 6715 * zones, which causes kswapd to exit balance_pgdat() before reaching 6716 * the wake up checks. If kswapd is going to sleep, no process should 6717 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If 6718 * the wake up is premature, processes will wake kswapd and get 6719 * throttled again. The difference from wake ups in balance_pgdat() is 6720 * that here we are under prepare_to_wait(). 6721 */ 6722 if (waitqueue_active(&pgdat->pfmemalloc_wait)) 6723 wake_up_all(&pgdat->pfmemalloc_wait); 6724 6725 /* Hopeless node, leave it to direct reclaim */ 6726 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) 6727 return true; 6728 6729 if (pgdat_balanced(pgdat, order, highest_zoneidx)) { 6730 clear_pgdat_congested(pgdat); 6731 return true; 6732 } 6733 6734 return false; 6735 } 6736 6737 /* 6738 * kswapd shrinks a node of pages that are at or below the highest usable 6739 * zone that is currently unbalanced. 6740 * 6741 * Returns true if kswapd scanned at least the requested number of pages to 6742 * reclaim or if the lack of progress was due to pages under writeback. 6743 * This is used to determine if the scanning priority needs to be raised. 6744 */ 6745 static bool kswapd_shrink_node(pg_data_t *pgdat, 6746 struct scan_control *sc) 6747 { 6748 struct zone *zone; 6749 int z; 6750 unsigned long nr_reclaimed = sc->nr_reclaimed; 6751 6752 /* Reclaim a number of pages proportional to the number of zones */ 6753 sc->nr_to_reclaim = 0; 6754 for (z = 0; z <= sc->reclaim_idx; z++) { 6755 zone = pgdat->node_zones + z; 6756 if (!managed_zone(zone)) 6757 continue; 6758 6759 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); 6760 } 6761 6762 /* 6763 * Historically care was taken to put equal pressure on all zones but 6764 * now pressure is applied based on node LRU order. 6765 */ 6766 shrink_node(pgdat, sc); 6767 6768 /* 6769 * Fragmentation may mean that the system cannot be rebalanced for 6770 * high-order allocations. If twice the allocation size has been 6771 * reclaimed then recheck watermarks only at order-0 to prevent 6772 * excessive reclaim. Assume that a process requested a high-order 6773 * can direct reclaim/compact. 6774 */ 6775 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order)) 6776 sc->order = 0; 6777 6778 /* account for progress from mm_account_reclaimed_pages() */ 6779 return max(sc->nr_scanned, sc->nr_reclaimed - nr_reclaimed) >= sc->nr_to_reclaim; 6780 } 6781 6782 /* Page allocator PCP high watermark is lowered if reclaim is active. 
*/ 6783 static inline void 6784 update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active) 6785 { 6786 int i; 6787 struct zone *zone; 6788 6789 for (i = 0; i <= highest_zoneidx; i++) { 6790 zone = pgdat->node_zones + i; 6791 6792 if (!managed_zone(zone)) 6793 continue; 6794 6795 if (active) 6796 set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); 6797 else 6798 clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); 6799 } 6800 } 6801 6802 static inline void 6803 set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) 6804 { 6805 update_reclaim_active(pgdat, highest_zoneidx, true); 6806 } 6807 6808 static inline void 6809 clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) 6810 { 6811 update_reclaim_active(pgdat, highest_zoneidx, false); 6812 } 6813 6814 /* 6815 * For kswapd, balance_pgdat() will reclaim pages across a node from zones 6816 * that are eligible for use by the caller until at least one zone is 6817 * balanced. 6818 * 6819 * Returns the order kswapd finished reclaiming at. 6820 * 6821 * kswapd scans the zones in the highmem->normal->dma direction. It skips 6822 * zones which have free_pages > high_wmark_pages(zone), but once a zone is 6823 * found to have free_pages <= high_wmark_pages(zone), any page in that zone 6824 * or lower is eligible for reclaim until at least one usable zone is 6825 * balanced. 6826 */ 6827 static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx) 6828 { 6829 int i; 6830 unsigned long nr_soft_reclaimed; 6831 unsigned long nr_soft_scanned; 6832 unsigned long pflags; 6833 unsigned long nr_boost_reclaim; 6834 unsigned long zone_boosts[MAX_NR_ZONES] = { 0, }; 6835 bool boosted; 6836 struct zone *zone; 6837 struct scan_control sc = { 6838 .gfp_mask = GFP_KERNEL, 6839 .order = order, 6840 .may_unmap = 1, 6841 }; 6842 6843 set_task_reclaim_state(current, &sc.reclaim_state); 6844 psi_memstall_enter(&pflags); 6845 __fs_reclaim_acquire(_THIS_IP_); 6846 6847 count_vm_event(PAGEOUTRUN); 6848 6849 /* 6850 * Account for the reclaim boost. Note that the zone boost is left in 6851 * place so that parallel allocations that are near the watermark will 6852 * stall or direct reclaim until kswapd is finished. 6853 */ 6854 nr_boost_reclaim = 0; 6855 for (i = 0; i <= highest_zoneidx; i++) { 6856 zone = pgdat->node_zones + i; 6857 if (!managed_zone(zone)) 6858 continue; 6859 6860 nr_boost_reclaim += zone->watermark_boost; 6861 zone_boosts[i] = zone->watermark_boost; 6862 } 6863 boosted = nr_boost_reclaim; 6864 6865 restart: 6866 set_reclaim_active(pgdat, highest_zoneidx); 6867 sc.priority = DEF_PRIORITY; 6868 do { 6869 unsigned long nr_reclaimed = sc.nr_reclaimed; 6870 bool raise_priority = true; 6871 bool balanced; 6872 bool ret; 6873 bool was_frozen; 6874 6875 sc.reclaim_idx = highest_zoneidx; 6876 6877 /* 6878 * If the number of buffer_heads exceeds the maximum allowed 6879 * then consider reclaiming from all zones. This has a dual 6880 * purpose -- on 64-bit systems it is expected that 6881 * buffer_heads are stripped during active rotation. On 32-bit 6882 * systems, highmem pages can pin lowmem memory and shrinking 6883 * buffers can relieve lowmem pressure. Reclaim may still not 6884 * go ahead if all eligible zones for the original allocation 6885 * request are balanced to avoid excessive reclaim from kswapd. 
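 * All the loop below does is lift sc.reclaim_idx to the highest
 * managed zone for this pass; the balanced check further down still
 * decides whether any reclaim actually happens.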
6886 */ 6887 if (buffer_heads_over_limit) { 6888 for (i = MAX_NR_ZONES - 1; i >= 0; i--) { 6889 zone = pgdat->node_zones + i; 6890 if (!managed_zone(zone)) 6891 continue; 6892 6893 sc.reclaim_idx = i; 6894 break; 6895 } 6896 } 6897 6898 /* 6899 * If the pgdat is imbalanced then ignore boosting and preserve 6900 * the watermarks for a later time and restart. Note that the 6901 * zone watermarks will be still reset at the end of balancing 6902 * on the grounds that the normal reclaim should be enough to 6903 * re-evaluate if boosting is required when kswapd next wakes. 6904 */ 6905 balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx); 6906 if (!balanced && nr_boost_reclaim) { 6907 nr_boost_reclaim = 0; 6908 goto restart; 6909 } 6910 6911 /* 6912 * If boosting is not active then only reclaim if there are no 6913 * eligible zones. Note that sc.reclaim_idx is not used as 6914 * buffer_heads_over_limit may have adjusted it. 6915 */ 6916 if (!nr_boost_reclaim && balanced) 6917 goto out; 6918 6919 /* Limit the priority of boosting to avoid reclaim writeback */ 6920 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2) 6921 raise_priority = false; 6922 6923 /* 6924 * Do not writeback or swap pages for boosted reclaim. The 6925 * intent is to relieve pressure not issue sub-optimal IO 6926 * from reclaim context. If no pages are reclaimed, the 6927 * reclaim will be aborted. 6928 */ 6929 sc.may_writepage = !laptop_mode && !nr_boost_reclaim; 6930 sc.may_swap = !nr_boost_reclaim; 6931 6932 /* 6933 * Do some background aging, to give pages a chance to be 6934 * referenced before reclaiming. All pages are rotated 6935 * regardless of classzone as this is about consistent aging. 6936 */ 6937 kswapd_age_node(pgdat, &sc); 6938 6939 /* 6940 * If we're getting trouble reclaiming, start doing writepage 6941 * even in laptop mode. 6942 */ 6943 if (sc.priority < DEF_PRIORITY - 2) 6944 sc.may_writepage = 1; 6945 6946 /* Call soft limit reclaim before calling shrink_node. */ 6947 sc.nr_scanned = 0; 6948 nr_soft_scanned = 0; 6949 nr_soft_reclaimed = memcg1_soft_limit_reclaim(pgdat, sc.order, 6950 sc.gfp_mask, &nr_soft_scanned); 6951 sc.nr_reclaimed += nr_soft_reclaimed; 6952 6953 /* 6954 * There should be no need to raise the scanning priority if 6955 * enough pages are already being scanned that that high 6956 * watermark would be met at 100% efficiency. 6957 */ 6958 if (kswapd_shrink_node(pgdat, &sc)) 6959 raise_priority = false; 6960 6961 /* 6962 * If the low watermark is met there is no need for processes 6963 * to be throttled on pfmemalloc_wait as they should not be 6964 * able to safely make forward progress. Wake them 6965 */ 6966 if (waitqueue_active(&pgdat->pfmemalloc_wait) && 6967 allow_direct_reclaim(pgdat)) 6968 wake_up_all(&pgdat->pfmemalloc_wait); 6969 6970 /* Check if kswapd should be suspending */ 6971 __fs_reclaim_release(_THIS_IP_); 6972 ret = kthread_freezable_should_stop(&was_frozen); 6973 __fs_reclaim_acquire(_THIS_IP_); 6974 if (was_frozen || ret) 6975 break; 6976 6977 /* 6978 * Raise priority if scanning rate is too low or there was no 6979 * progress in reclaiming pages 6980 */ 6981 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; 6982 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed); 6983 6984 /* 6985 * If reclaim made no progress for a boost, stop reclaim as 6986 * IO cannot be queued and it could be an infinite loop in 6987 * extreme circumstances. 
6988 */ 6989 if (nr_boost_reclaim && !nr_reclaimed) 6990 break; 6991 6992 if (raise_priority || !nr_reclaimed) 6993 sc.priority--; 6994 } while (sc.priority >= 1); 6995 6996 /* 6997 * Restart only if it went through the priority loop all the way, 6998 * but cache_trim_mode didn't work. 6999 */ 7000 if (!sc.nr_reclaimed && sc.priority < 1 && 7001 !sc.no_cache_trim_mode && sc.cache_trim_mode_failed) { 7002 sc.no_cache_trim_mode = 1; 7003 goto restart; 7004 } 7005 7006 if (!sc.nr_reclaimed) 7007 pgdat->kswapd_failures++; 7008 7009 out: 7010 clear_reclaim_active(pgdat, highest_zoneidx); 7011 7012 /* If reclaim was boosted, account for the reclaim done in this pass */ 7013 if (boosted) { 7014 unsigned long flags; 7015 7016 for (i = 0; i <= highest_zoneidx; i++) { 7017 if (!zone_boosts[i]) 7018 continue; 7019 7020 /* Increments are under the zone lock */ 7021 zone = pgdat->node_zones + i; 7022 spin_lock_irqsave(&zone->lock, flags); 7023 zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]); 7024 spin_unlock_irqrestore(&zone->lock, flags); 7025 } 7026 7027 /* 7028 * As there is now likely space, wakeup kcompact to defragment 7029 * pageblocks. 7030 */ 7031 wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx); 7032 } 7033 7034 snapshot_refaults(NULL, pgdat); 7035 __fs_reclaim_release(_THIS_IP_); 7036 psi_memstall_leave(&pflags); 7037 set_task_reclaim_state(current, NULL); 7038 7039 /* 7040 * Return the order kswapd stopped reclaiming at as 7041 * prepare_kswapd_sleep() takes it into account. If another caller 7042 * entered the allocator slow path while kswapd was awake, order will 7043 * remain at the higher level. 7044 */ 7045 return sc.order; 7046 } 7047 7048 /* 7049 * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to 7050 * be reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is 7051 * not a valid index then either kswapd runs for first time or kswapd couldn't 7052 * sleep after previous reclaim attempt (node is still unbalanced). In that 7053 * case return the zone index of the previous kswapd reclaim cycle. 7054 */ 7055 static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat, 7056 enum zone_type prev_highest_zoneidx) 7057 { 7058 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); 7059 7060 return curr_idx == MAX_NR_ZONES ? prev_highest_zoneidx : curr_idx; 7061 } 7062 7063 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, 7064 unsigned int highest_zoneidx) 7065 { 7066 long remaining = 0; 7067 DEFINE_WAIT(wait); 7068 7069 if (freezing(current) || kthread_should_stop()) 7070 return; 7071 7072 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 7073 7074 /* 7075 * Try to sleep for a short interval. Note that kcompactd will only be 7076 * woken if it is possible to sleep for a short interval. This is 7077 * deliberate on the assumption that if reclaim cannot keep an 7078 * eligible zone balanced that it's also unlikely that compaction will 7079 * succeed. 7080 */ 7081 if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { 7082 /* 7083 * Compaction records what page blocks it recently failed to 7084 * isolate pages from and skips them in the future scanning. 7085 * When kswapd is going to sleep, it is reasonable to assume 7086 * that pages and compaction may succeed so reset the cache. 
7087 */ 7088 reset_isolation_suitable(pgdat); 7089 7090 /* 7091 * We have freed the memory, now we should compact it to make 7092 * allocation of the requested order possible. 7093 */ 7094 wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx); 7095 7096 remaining = schedule_timeout(HZ/10); 7097 7098 /* 7099 * If woken prematurely then reset kswapd_highest_zoneidx and 7100 * order. The values will either be from a wakeup request or 7101 * the previous request that slept prematurely. 7102 */ 7103 if (remaining) { 7104 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, 7105 kswapd_highest_zoneidx(pgdat, 7106 highest_zoneidx)); 7107 7108 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) 7109 WRITE_ONCE(pgdat->kswapd_order, reclaim_order); 7110 } 7111 7112 finish_wait(&pgdat->kswapd_wait, &wait); 7113 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 7114 } 7115 7116 /* 7117 * After a short sleep, check if it was a premature sleep. If not, then 7118 * go fully to sleep until explicitly woken up. 7119 */ 7120 if (!remaining && 7121 prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { 7122 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); 7123 7124 /* 7125 * vmstat counters are not perfectly accurate and the estimated 7126 * value for counters such as NR_FREE_PAGES can deviate from the 7127 * true value by nr_online_cpus * threshold. To avoid the zone 7128 * watermarks being breached while under pressure, we reduce the 7129 * per-cpu vmstat threshold while kswapd is awake and restore 7130 * them before going back to sleep. 7131 */ 7132 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 7133 7134 if (!kthread_should_stop()) 7135 schedule(); 7136 7137 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); 7138 } else { 7139 if (remaining) 7140 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); 7141 else 7142 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); 7143 } 7144 finish_wait(&pgdat->kswapd_wait, &wait); 7145 } 7146 7147 /* 7148 * The background pageout daemon, started as a kernel thread 7149 * from the init process. 7150 * 7151 * This basically trickles out pages so that we have _some_ 7152 * free memory available even if there is no other activity 7153 * that frees anything up. This is needed for things like routing 7154 * etc, where we otherwise might have all activity going on in 7155 * asynchronous contexts that cannot page things out. 7156 * 7157 * If there are applications that are active memory-allocators 7158 * (most normal use), this basically shouldn't matter. 7159 */ 7160 static int kswapd(void *p) 7161 { 7162 unsigned int alloc_order, reclaim_order; 7163 unsigned int highest_zoneidx = MAX_NR_ZONES - 1; 7164 pg_data_t *pgdat = (pg_data_t *)p; 7165 struct task_struct *tsk = current; 7166 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 7167 7168 if (!cpumask_empty(cpumask)) 7169 set_cpus_allowed_ptr(tsk, cpumask); 7170 7171 /* 7172 * Tell the memory management that we're a "memory allocator", 7173 * and that if we need more memory we should get access to it 7174 * regardless (see "__alloc_pages()"). "kswapd" should 7175 * never get caught in the normal page freeing logic. 7176 * 7177 * (Kswapd normally doesn't need memory anyway, but sometimes 7178 * you need a small amount of memory in order to be able to 7179 * page out something else, and this flag essentially protects 7180 * us from recursively trying to free more memory as we're 7181 * trying to free the first piece of memory in the first place). 
7182 */ 7183 tsk->flags |= PF_MEMALLOC | PF_KSWAPD; 7184 set_freezable(); 7185 7186 WRITE_ONCE(pgdat->kswapd_order, 0); 7187 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); 7188 atomic_set(&pgdat->nr_writeback_throttled, 0); 7189 for ( ; ; ) { 7190 bool was_frozen; 7191 7192 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); 7193 highest_zoneidx = kswapd_highest_zoneidx(pgdat, 7194 highest_zoneidx); 7195 7196 kswapd_try_sleep: 7197 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, 7198 highest_zoneidx); 7199 7200 /* Read the new order and highest_zoneidx */ 7201 alloc_order = READ_ONCE(pgdat->kswapd_order); 7202 highest_zoneidx = kswapd_highest_zoneidx(pgdat, 7203 highest_zoneidx); 7204 WRITE_ONCE(pgdat->kswapd_order, 0); 7205 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); 7206 7207 if (kthread_freezable_should_stop(&was_frozen)) 7208 break; 7209 7210 /* 7211 * We can speed up thawing tasks if we don't call balance_pgdat 7212 * after returning from the refrigerator 7213 */ 7214 if (was_frozen) 7215 continue; 7216 7217 /* 7218 * Reclaim begins at the requested order but if a high-order 7219 * reclaim fails then kswapd falls back to reclaiming for 7220 * order-0. If that happens, kswapd will consider sleeping 7221 * for the order it finished reclaiming at (reclaim_order) 7222 * but kcompactd is woken to compact for the original 7223 * request (alloc_order). 7224 */ 7225 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, 7226 alloc_order); 7227 reclaim_order = balance_pgdat(pgdat, alloc_order, 7228 highest_zoneidx); 7229 if (reclaim_order < alloc_order) 7230 goto kswapd_try_sleep; 7231 } 7232 7233 tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD); 7234 7235 return 0; 7236 } 7237 7238 /* 7239 * A zone is low on free memory or too fragmented for high-order memory. If 7240 * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's 7241 * pgdat. It will wake up kcompactd after reclaiming memory. If kswapd reclaim 7242 * has failed or is not needed, still wake up kcompactd if only compaction is 7243 * needed. 7244 */ 7245 void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, 7246 enum zone_type highest_zoneidx) 7247 { 7248 pg_data_t *pgdat; 7249 enum zone_type curr_idx; 7250 7251 if (!managed_zone(zone)) 7252 return; 7253 7254 if (!cpuset_zone_allowed(zone, gfp_flags)) 7255 return; 7256 7257 pgdat = zone->zone_pgdat; 7258 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); 7259 7260 if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx) 7261 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); 7262 7263 if (READ_ONCE(pgdat->kswapd_order) < order) 7264 WRITE_ONCE(pgdat->kswapd_order, order); 7265 7266 if (!waitqueue_active(&pgdat->kswapd_wait)) 7267 return; 7268 7269 /* Hopeless node, leave it to direct reclaim if possible */ 7270 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || 7271 (pgdat_balanced(pgdat, order, highest_zoneidx) && 7272 !pgdat_watermark_boosted(pgdat, highest_zoneidx))) { 7273 /* 7274 * There may be plenty of free memory available, but it's too 7275 * fragmented for high-order allocations. Wake up kcompactd 7276 * and rely on compaction_suitable() to determine if it's 7277 * needed. If it fails, it will defer subsequent attempts to 7278 * ratelimit its work. 
7279 */ 7280 if (!(gfp_flags & __GFP_DIRECT_RECLAIM)) 7281 wakeup_kcompactd(pgdat, order, highest_zoneidx); 7282 return; 7283 } 7284 7285 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order, 7286 gfp_flags); 7287 wake_up_interruptible(&pgdat->kswapd_wait); 7288 } 7289 7290 #ifdef CONFIG_HIBERNATION 7291 /* 7292 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of 7293 * freed pages. 7294 * 7295 * Rather than trying to age LRUs the aim is to preserve the overall 7296 * LRU order by reclaiming preferentially 7297 * inactive > active > active referenced > active mapped 7298 */ 7299 unsigned long shrink_all_memory(unsigned long nr_to_reclaim) 7300 { 7301 struct scan_control sc = { 7302 .nr_to_reclaim = nr_to_reclaim, 7303 .gfp_mask = GFP_HIGHUSER_MOVABLE, 7304 .reclaim_idx = MAX_NR_ZONES - 1, 7305 .priority = DEF_PRIORITY, 7306 .may_writepage = 1, 7307 .may_unmap = 1, 7308 .may_swap = 1, 7309 .hibernation_mode = 1, 7310 }; 7311 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 7312 unsigned long nr_reclaimed; 7313 unsigned int noreclaim_flag; 7314 7315 fs_reclaim_acquire(sc.gfp_mask); 7316 noreclaim_flag = memalloc_noreclaim_save(); 7317 set_task_reclaim_state(current, &sc.reclaim_state); 7318 7319 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 7320 7321 set_task_reclaim_state(current, NULL); 7322 memalloc_noreclaim_restore(noreclaim_flag); 7323 fs_reclaim_release(sc.gfp_mask); 7324 7325 return nr_reclaimed; 7326 } 7327 #endif /* CONFIG_HIBERNATION */ 7328 7329 /* 7330 * This kswapd start function will be called by init and node-hot-add. 7331 */ 7332 void __meminit kswapd_run(int nid) 7333 { 7334 pg_data_t *pgdat = NODE_DATA(nid); 7335 7336 pgdat_kswapd_lock(pgdat); 7337 if (!pgdat->kswapd) { 7338 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); 7339 if (IS_ERR(pgdat->kswapd)) { 7340 /* failure at boot is fatal */ 7341 pr_err("Failed to start kswapd on node %d,ret=%ld\n", 7342 nid, PTR_ERR(pgdat->kswapd)); 7343 BUG_ON(system_state < SYSTEM_RUNNING); 7344 pgdat->kswapd = NULL; 7345 } 7346 } 7347 pgdat_kswapd_unlock(pgdat); 7348 } 7349 7350 /* 7351 * Called by memory hotplug when all memory in a node is offlined. Caller must 7352 * be holding mem_hotplug_begin/done(). 7353 */ 7354 void __meminit kswapd_stop(int nid) 7355 { 7356 pg_data_t *pgdat = NODE_DATA(nid); 7357 struct task_struct *kswapd; 7358 7359 pgdat_kswapd_lock(pgdat); 7360 kswapd = pgdat->kswapd; 7361 if (kswapd) { 7362 kthread_stop(kswapd); 7363 pgdat->kswapd = NULL; 7364 } 7365 pgdat_kswapd_unlock(pgdat); 7366 } 7367 7368 static int __init kswapd_init(void) 7369 { 7370 int nid; 7371 7372 swap_setup(); 7373 for_each_node_state(nid, N_MEMORY) 7374 kswapd_run(nid); 7375 return 0; 7376 } 7377 7378 module_init(kswapd_init) 7379 7380 #ifdef CONFIG_NUMA 7381 /* 7382 * Node reclaim mode 7383 * 7384 * If non-zero call node_reclaim when the number of free pages falls below 7385 * the watermarks. 7386 */ 7387 int node_reclaim_mode __read_mostly; 7388 7389 /* 7390 * Priority for NODE_RECLAIM. This determines the fraction of pages 7391 * of a node considered for each zone_reclaim. 4 scans 1/16th of 7392 * a zone. 7393 */ 7394 #define NODE_RECLAIM_PRIORITY 4 7395 7396 /* 7397 * Percentage of pages in a zone that must be unmapped for node_reclaim to 7398 * occur. 7399 */ 7400 int sysctl_min_unmapped_ratio = 1; 7401 7402 /* 7403 * If the number of slab pages in a zone grows beyond this percentage then 7404 * slab reclaim needs to occur. 
/*
 * If the number of slab pages on a node grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;

static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
{
	unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
	unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
		node_page_state(pgdat, NR_ACTIVE_FILE);

	/*
	 * It's possible for there to be more file mapped pages than
	 * accounted for by the pages on the file LRU lists because
	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
	 */
	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}

/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
{
	unsigned long nr_pagecache_reclaimable;
	unsigned long delta = 0;

	/*
	 * If RECLAIM_UNMAP is set, then all file pages are considered
	 * potentially reclaimable. Otherwise, we have to worry about
	 * pages like swapcache and node_unmapped_file_pages() provides
	 * a better estimate
	 */
	if (node_reclaim_mode & RECLAIM_UNMAP)
		nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
	else
		nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);

	/* If we can't clean pages, remove dirty pages from consideration */
	if (!(node_reclaim_mode & RECLAIM_WRITE))
		delta += node_page_state(pgdat, NR_FILE_DIRTY);

	/* Watch for any possible underflows due to delta */
	if (unlikely(delta > nr_pagecache_reclaimable))
		delta = nr_pagecache_reclaimable;

	return nr_pagecache_reclaimable - delta;
}
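
/*
 * Example with illustrative numbers (editorial): assume a node whose file
 * LRUs hold 1,000,000 pages, of which 300,000 are mapped into page tables
 * and 50,000 are dirty.  With reclaim merely enabled (RECLAIM_WRITE and
 * RECLAIM_UNMAP clear in node_reclaim_mode), node_pagecache_reclaimable()
 * estimates 700,000 unmapped file pages and subtracts the 50,000 dirty
 * pages, returning 650,000.  If RECLAIM_UNMAP and RECLAIM_WRITE are also
 * enabled via /proc/sys/vm/zone_reclaim_mode, all file pages are counted
 * and nothing is subtracted.
 */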
/*
 * Try to free up some pages from this node through reclaim.
 */
static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	unsigned int noreclaim_flag;
	struct scan_control sc = {
		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
		.gfp_mask = current_gfp_context(gfp_mask),
		.order = order,
		.priority = NODE_RECLAIM_PRIORITY,
		.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
		.may_swap = 1,
		.reclaim_idx = gfp_zone(gfp_mask),
	};
	unsigned long pflags;

	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
					   sc.gfp_mask);

	cond_resched();
	psi_memstall_enter(&pflags);
	delayacct_freepages_start();
	fs_reclaim_acquire(sc.gfp_mask);
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
	 */
	noreclaim_flag = memalloc_noreclaim_save();
	set_task_reclaim_state(p, &sc.reclaim_state);

	if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages ||
	    node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) {
		/*
		 * Free memory by calling shrink node with increasing
		 * priorities until we have enough memory freed.
		 */
		do {
			shrink_node(pgdat, &sc);
		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
	}

	set_task_reclaim_state(p, NULL);
	memalloc_noreclaim_restore(noreclaim_flag);
	fs_reclaim_release(sc.gfp_mask);
	psi_memstall_leave(&pflags);
	delayacct_freepages_end();

	trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);

	return sc.nr_reclaimed >= nr_pages;
}

int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
{
	int ret;

	/*
	 * Node reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O otherwise pages read by file I/O will be immediately
	 * thrown out if the node is overallocated. So we do not reclaim
	 * if less than a specified percentage of the node is used by
	 * unmapped file backed pages.
	 */
	if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
	    node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
	    pgdat->min_slab_pages)
		return NODE_RECLAIM_FULL;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
		return NODE_RECLAIM_NOSCAN;

	/*
	 * Only run node reclaim on the local node or on nodes that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off node memory allocations
	 * as wide as possible.
	 */
	if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
		return NODE_RECLAIM_NOSCAN;

	if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
		return NODE_RECLAIM_NOSCAN;

	ret = __node_reclaim(pgdat, gfp_mask, order);
	clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);

	if (ret)
		count_vm_event(PGSCAN_ZONE_RECLAIM_SUCCESS);
	else
		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

	return ret;
}
#endif
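
/*
 * Illustrative caller sketch (editorial; the real logic lives in
 * mm/page_alloc.c, get_page_from_freelist()): when a zone fails its
 * watermark check and node reclaim is enabled, the allocator does roughly
 *
 *	switch (node_reclaim(zone->zone_pgdat, gfp_mask, order)) {
 *	case NODE_RECLAIM_NOSCAN:
 *	case NODE_RECLAIM_FULL:
 *		continue;	(try the next zone in the zonelist)
 *	default:
 *		(some progress was made; re-check the zone watermark)
 *	}
 *
 * i.e. NODE_RECLAIM_NOSCAN and NODE_RECLAIM_FULL simply move on to another
 * zone, while any other return value triggers a watermark re-check.
 */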
/**
 * check_move_unevictable_folios - Move evictable folios to the appropriate
 * lru list
 * @fbatch: Batch of lru folios to check.
 *
 * Checks folios for evictability and, if an evictable folio is on the
 * unevictable lru list, moves it to the appropriate evictable lru list.
 * This function should only be used for lru folios.
 */
void check_move_unevictable_folios(struct folio_batch *fbatch)
{
	struct lruvec *lruvec = NULL;
	int pgscanned = 0;
	int pgrescued = 0;
	int i;

	for (i = 0; i < fbatch->nr; i++) {
		struct folio *folio = fbatch->folios[i];
		int nr_pages = folio_nr_pages(folio);

		pgscanned += nr_pages;

		/* block memcg migration while the folio moves between lrus */
		if (!folio_test_clear_lru(folio))
			continue;

		lruvec = folio_lruvec_relock_irq(folio, lruvec);
		if (folio_evictable(folio) && folio_test_unevictable(folio)) {
			lruvec_del_folio(lruvec, folio);
			folio_clear_unevictable(folio);
			lruvec_add_folio(lruvec, folio);
			pgrescued += nr_pages;
		}
		folio_set_lru(folio);
	}

	if (lruvec) {
		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
		unlock_page_lruvec_irq(lruvec);
	} else if (pgscanned) {
		count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
	}
}
EXPORT_SYMBOL_GPL(check_move_unevictable_folios);
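
/*
 * Illustrative usage sketch (editorial; see e.g. shmem_unlock_mapping() in
 * mm/shmem.c for a real caller): callers gather lru folios into a
 * folio_batch and rescue them in batches, roughly:
 *
 *	struct folio_batch fbatch;
 *
 *	folio_batch_init(&fbatch);
 *	while (<gather the mapping's folios into fbatch>) {
 *		check_move_unevictable_folios(&fbatch);
 *		folio_batch_release(&fbatch);
 *		cond_resched();
 *	}
 *
 * Each batched folio that has become evictable again (e.g. after SHM_UNLOCK)
 * is moved off the unevictable list, and UNEVICTABLE_PGRESCUED /
 * UNEVICTABLE_PGSCANNED are bumped accordingly.
 */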