1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 4 * 5 * Swap reorganised 29.12.95, Stephen Tweedie. 6 * kswapd added: 7.1.96 sct 7 * Removed kswapd_ctl limits, and swap out as many pages as needed 8 * to bring the system back to freepages.high: 2.4.97, Rik van Riel. 9 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com). 10 * Multiqueue VM started 5.8.00, Rik van Riel. 11 */ 12 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 15 #include <linux/mm.h> 16 #include <linux/sched/mm.h> 17 #include <linux/module.h> 18 #include <linux/gfp.h> 19 #include <linux/kernel_stat.h> 20 #include <linux/swap.h> 21 #include <linux/pagemap.h> 22 #include <linux/init.h> 23 #include <linux/highmem.h> 24 #include <linux/vmpressure.h> 25 #include <linux/vmstat.h> 26 #include <linux/file.h> 27 #include <linux/writeback.h> 28 #include <linux/blkdev.h> 29 #include <linux/buffer_head.h> /* for buffer_heads_over_limit */ 30 #include <linux/mm_inline.h> 31 #include <linux/backing-dev.h> 32 #include <linux/rmap.h> 33 #include <linux/topology.h> 34 #include <linux/cpu.h> 35 #include <linux/cpuset.h> 36 #include <linux/compaction.h> 37 #include <linux/notifier.h> 38 #include <linux/delay.h> 39 #include <linux/kthread.h> 40 #include <linux/freezer.h> 41 #include <linux/memcontrol.h> 42 #include <linux/migrate.h> 43 #include <linux/delayacct.h> 44 #include <linux/sysctl.h> 45 #include <linux/memory-tiers.h> 46 #include <linux/oom.h> 47 #include <linux/pagevec.h> 48 #include <linux/prefetch.h> 49 #include <linux/printk.h> 50 #include <linux/dax.h> 51 #include <linux/psi.h> 52 #include <linux/pagewalk.h> 53 #include <linux/shmem_fs.h> 54 #include <linux/ctype.h> 55 #include <linux/debugfs.h> 56 #include <linux/khugepaged.h> 57 #include <linux/rculist_nulls.h> 58 #include <linux/random.h> 59 #include <linux/mmu_notifier.h> 60 61 #include <asm/tlbflush.h> 62 #include <asm/div64.h> 63 64 #include <linux/swapops.h> 65 #include <linux/balloon_compaction.h> 66 #include <linux/sched/sysctl.h> 67 68 #include "internal.h" 69 #include "swap.h" 70 71 #define CREATE_TRACE_POINTS 72 #include <trace/events/vmscan.h> 73 74 struct scan_control { 75 /* How many pages shrink_list() should reclaim */ 76 unsigned long nr_to_reclaim; 77 78 /* 79 * Nodemask of nodes allowed by the caller. If NULL, all nodes 80 * are scanned. 81 */ 82 nodemask_t *nodemask; 83 84 /* 85 * The memory cgroup that hit its limit and as a result is the 86 * primary target of this reclaim invocation. 87 */ 88 struct mem_cgroup *target_mem_cgroup; 89 90 /* 91 * Scan pressure balancing between anon and file LRUs 92 */ 93 unsigned long anon_cost; 94 unsigned long file_cost; 95 96 #ifdef CONFIG_MEMCG 97 /* Swappiness value for proactive reclaim. Always use sc_swappiness()! */ 98 int *proactive_swappiness; 99 #endif 100 101 /* Can active folios be deactivated as part of reclaim? */ 102 #define DEACTIVATE_ANON 1 103 #define DEACTIVATE_FILE 2 104 unsigned int may_deactivate:2; 105 unsigned int force_deactivate:1; 106 unsigned int skipped_deactivate:1; 107 108 /* Writepage batching in laptop mode; RECLAIM_WRITE */ 109 unsigned int may_writepage:1; 110 111 /* Can mapped folios be reclaimed? */ 112 unsigned int may_unmap:1; 113 114 /* Can folios be swapped as part of reclaim? */ 115 unsigned int may_swap:1; 116 117 /* Not allow cache_trim_mode to be turned on as part of reclaim? */ 118 unsigned int no_cache_trim_mode:1; 119 120 /* Has cache_trim_mode failed at least once? 
*/ 121 unsigned int cache_trim_mode_failed:1; 122 123 /* Proactive reclaim invoked by userspace through memory.reclaim */ 124 unsigned int proactive:1; 125 126 /* 127 * Cgroup memory below memory.low is protected as long as we 128 * don't threaten to OOM. If any cgroup is reclaimed at 129 * reduced force or passed over entirely due to its memory.low 130 * setting (memcg_low_skipped), and nothing is reclaimed as a 131 * result, then go back for one more cycle that reclaims the protected 132 * memory (memcg_low_reclaim) to avert OOM. 133 */ 134 unsigned int memcg_low_reclaim:1; 135 unsigned int memcg_low_skipped:1; 136 137 /* Shared cgroup tree walk failed, rescan the whole tree */ 138 unsigned int memcg_full_walk:1; 139 140 unsigned int hibernation_mode:1; 141 142 /* One of the zones is ready for compaction */ 143 unsigned int compaction_ready:1; 144 145 /* There is easily reclaimable cold cache in the current node */ 146 unsigned int cache_trim_mode:1; 147 148 /* The file folios on the current node are dangerously low */ 149 unsigned int file_is_tiny:1; 150 151 /* Always discard instead of demoting to lower tier memory */ 152 unsigned int no_demotion:1; 153 154 /* Allocation order */ 155 s8 order; 156 157 /* Scan (total_size >> priority) pages at once */ 158 s8 priority; 159 160 /* The highest zone to isolate folios for reclaim from */ 161 s8 reclaim_idx; 162 163 /* This context's GFP mask */ 164 gfp_t gfp_mask; 165 166 /* Incremented by the number of inactive pages that were scanned */ 167 unsigned long nr_scanned; 168 169 /* Number of pages freed so far during a call to shrink_zones() */ 170 unsigned long nr_reclaimed; 171 172 struct { 173 unsigned int dirty; 174 unsigned int unqueued_dirty; 175 unsigned int congested; 176 unsigned int writeback; 177 unsigned int immediate; 178 unsigned int file_taken; 179 unsigned int taken; 180 } nr; 181 182 /* for recording the reclaimed slab by now */ 183 struct reclaim_state reclaim_state; 184 }; 185 186 #ifdef ARCH_HAS_PREFETCHW 187 #define prefetchw_prev_lru_folio(_folio, _base, _field) \ 188 do { \ 189 if ((_folio)->lru.prev != _base) { \ 190 struct folio *prev; \ 191 \ 192 prev = lru_to_folio(&(_folio->lru)); \ 193 prefetchw(&prev->_field); \ 194 } \ 195 } while (0) 196 #else 197 #define prefetchw_prev_lru_folio(_folio, _base, _field) do { } while (0) 198 #endif 199 200 /* 201 * From 0 .. MAX_SWAPPINESS. Higher means more swappy. 202 */ 203 int vm_swappiness = 60; 204 205 #ifdef CONFIG_MEMCG 206 207 /* Returns true for reclaim through cgroup limits or cgroup interfaces. */ 208 static bool cgroup_reclaim(struct scan_control *sc) 209 { 210 return sc->target_mem_cgroup; 211 } 212 213 /* 214 * Returns true for reclaim on the root cgroup. This is true for direct 215 * allocator reclaim and reclaim through cgroup interfaces on the root cgroup. 216 */ 217 static bool root_reclaim(struct scan_control *sc) 218 { 219 return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup); 220 } 221 222 /** 223 * writeback_throttling_sane - is the usual dirty throttling mechanism available? 224 * @sc: scan_control in question 225 * 226 * The normal page dirty throttling mechanism in balance_dirty_pages() is 227 * completely broken with the legacy memcg and direct stalling in 228 * shrink_folio_list() is used for throttling instead, which lacks all the 229 * niceties such as fairness, adaptive pausing, bandwidth proportional 230 * allocation and configurability. 
231 * 232 * This function tests whether the vmscan currently in progress can assume 233 * that the normal dirty throttling mechanism is operational. 234 */ 235 static bool writeback_throttling_sane(struct scan_control *sc) 236 { 237 if (!cgroup_reclaim(sc)) 238 return true; 239 #ifdef CONFIG_CGROUP_WRITEBACK 240 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 241 return true; 242 #endif 243 return false; 244 } 245 246 static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg) 247 { 248 if (sc->proactive && sc->proactive_swappiness) 249 return *sc->proactive_swappiness; 250 return mem_cgroup_swappiness(memcg); 251 } 252 #else 253 static bool cgroup_reclaim(struct scan_control *sc) 254 { 255 return false; 256 } 257 258 static bool root_reclaim(struct scan_control *sc) 259 { 260 return true; 261 } 262 263 static bool writeback_throttling_sane(struct scan_control *sc) 264 { 265 return true; 266 } 267 268 static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg) 269 { 270 return READ_ONCE(vm_swappiness); 271 } 272 #endif 273 274 static void set_task_reclaim_state(struct task_struct *task, 275 struct reclaim_state *rs) 276 { 277 /* Check for an overwrite */ 278 WARN_ON_ONCE(rs && task->reclaim_state); 279 280 /* Check for the nulling of an already-nulled member */ 281 WARN_ON_ONCE(!rs && !task->reclaim_state); 282 283 task->reclaim_state = rs; 284 } 285 286 /* 287 * flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to 288 * scan_control->nr_reclaimed. 289 */ 290 static void flush_reclaim_state(struct scan_control *sc) 291 { 292 /* 293 * Currently, reclaim_state->reclaimed includes three types of pages 294 * freed outside of vmscan: 295 * (1) Slab pages. 296 * (2) Clean file pages from pruned inodes (on highmem systems). 297 * (3) XFS freed buffer pages. 298 * 299 * For all of these cases, we cannot universally link the pages to a 300 * single memcg. For example, a memcg-aware shrinker can free one object 301 * charged to the target memcg, causing an entire page to be freed. 302 * If we count the entire page as reclaimed from the memcg, we end up 303 * overestimating the reclaimed amount (potentially under-reclaiming). 304 * 305 * Only count such pages for global reclaim to prevent under-reclaiming 306 * from the target memcg; preventing unnecessary retries during memcg 307 * charging and false positives from proactive reclaim. 308 * 309 * For uncommon cases where the freed pages were actually mostly 310 * charged to the target memcg, we end up underestimating the reclaimed 311 * amount. This should be fine. The freed pages will be uncharged 312 * anyway, even if they are not counted here properly, and we will be 313 * able to make forward progress in charging (which is usually in a 314 * retry loop). 315 * 316 * We can go one step further, and report the uncharged objcg pages in 317 * memcg reclaim, to make reporting more accurate and reduce 318 * underestimation, but it's probably not worth the complexity for now. 
	 */
	if (current->reclaim_state && root_reclaim(sc)) {
		sc->nr_reclaimed += current->reclaim_state->reclaimed;
		current->reclaim_state->reclaimed = 0;
	}
}

static bool can_demote(int nid, struct scan_control *sc)
{
	if (!numa_demotion_enabled)
		return false;
	if (sc && sc->no_demotion)
		return false;
	if (next_demotion_node(nid) == NUMA_NO_NODE)
		return false;

	return true;
}

static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
					  int nid,
					  struct scan_control *sc)
{
	if (memcg == NULL) {
		/*
		 * For non-memcg reclaim, is there
		 * space in any swap device?
		 */
		if (get_nr_swap_pages() > 0)
			return true;
	} else {
		/* Is the memcg below its swap limit? */
		if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
			return true;
	}

	/*
	 * The page can not be swapped.
	 *
	 * Can it be reclaimed from this node via demotion?
	 */
	return can_demote(nid, sc);
}

/*
 * This misses isolated folios which are not accounted for to save counters.
 * As the data only determines if reclaim or compaction continues, it is
 * not expected that isolated folios will be a dominating factor.
 */
unsigned long zone_reclaimable_pages(struct zone *zone)
{
	unsigned long nr;

	nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
		zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
	if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
	/*
	 * If there are no reclaimable file-backed or anonymous pages,
	 * ensure zones with sufficient free pages are not skipped.
	 * This prevents zones like DMA32 from being ignored in reclaim
	 * scenarios where they can still help alleviate memory pressure.
	 */
	if (nr == 0)
		nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);
	return nr;
}
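/*
 * Illustrative example, not part of the original file: consider a node
 * whose swap devices are full but which has a demotion target (say a
 * hypothetical slower-tier node backed by CXL memory) and
 * numa_demotion_enabled set. can_reclaim_anon_pages(NULL, nid, NULL)
 * still returns true via can_demote(), so zone_reclaimable_pages()
 * keeps counting the anonymous LRU pages of that node's zones: anon
 * memory that cannot be swapped is still "reclaimable" by demoting it
 * to the lower tier.
 */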
/**
 * lruvec_lru_size - Returns the number of pages on the given LRU list.
 * @lruvec: lru vector
 * @lru: lru to use
 * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list)
 */
static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
				     int zone_idx)
{
	unsigned long size = 0;
	int zid;

	for (zid = 0; zid <= zone_idx; zid++) {
		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];

		if (!managed_zone(zone))
			continue;

		if (!mem_cgroup_disabled())
			size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
		else
			size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
	}
	return size;
}

static unsigned long drop_slab_node(int nid)
{
	unsigned long freed = 0;
	struct mem_cgroup *memcg = NULL;

	memcg = mem_cgroup_iter(NULL, NULL, NULL);
	do {
		freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);

	return freed;
}

void drop_slab(void)
{
	int nid;
	int shift = 0;
	unsigned long freed;

	do {
		freed = 0;
		for_each_online_node(nid) {
			if (fatal_signal_pending(current))
				return;

			freed += drop_slab_node(nid);
		}
	} while ((freed >> shift++) > 1);
}

static int reclaimer_offset(void)
{
	BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
			PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD);
	BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
			PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD);
	BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
			PGSCAN_DIRECT - PGSCAN_KSWAPD);
	BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
			PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD);

	if (current_is_kswapd())
		return 0;
	if (current_is_khugepaged())
		return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD;
	return PGSTEAL_DIRECT - PGSTEAL_KSWAPD;
}

static inline int is_page_cache_freeable(struct folio *folio)
{
	/*
	 * A freeable page cache folio is referenced only by the caller
	 * that isolated the folio, the page cache and optional filesystem
	 * private data at folio->private.
	 */
	return folio_ref_count(folio) - folio_test_private(folio) ==
		1 + folio_nr_pages(folio);
}

/*
 * We detected a synchronous write error writing a folio out. Probably
 * -ENOSPC. We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up. But we have a ref on the folio and once
 * that folio is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping folio_lock() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct folio *folio, int error)
{
	folio_lock(folio);
	if (folio_mapping(folio) == mapping)
		mapping_set_error(mapping, error);
	folio_unlock(folio);
}
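/*
 * Worked example, not part of the original file: for an order-2 page
 * cache folio (4 pages) that still carries filesystem private data
 * (folio_test_private() != 0), is_page_cache_freeable() expects exactly
 *
 *	1 (isolating caller) + 4 (page cache) + 1 (private) = 6
 *
 * references, i.e. folio_ref_count() - 1 == 1 + folio_nr_pages(). Any
 * extra reference, such as a concurrent GUP pin or page cache lookup,
 * makes the folio non-freeable for now and pageout() will return
 * PAGE_KEEP.
 */

static bool skip_throttle_noprogress(pg_data_t *pgdat)
{
	int reclaimable = 0, write_pending = 0;
	int i;

	/*
	 * If kswapd is disabled, reschedule if necessary but do not
	 * throttle as the system is likely near OOM.
	 */
	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
		return true;

	/*
	 * If there are a lot of dirty/writeback folios then do not
	 * throttle as throttling will occur when the folios cycle
	 * towards the end of the LRU if still under writeback.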
510 */ 511 for (i = 0; i < MAX_NR_ZONES; i++) { 512 struct zone *zone = pgdat->node_zones + i; 513 514 if (!managed_zone(zone)) 515 continue; 516 517 reclaimable += zone_reclaimable_pages(zone); 518 write_pending += zone_page_state_snapshot(zone, 519 NR_ZONE_WRITE_PENDING); 520 } 521 if (2 * write_pending <= reclaimable) 522 return true; 523 524 return false; 525 } 526 527 void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason) 528 { 529 wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason]; 530 long timeout, ret; 531 DEFINE_WAIT(wait); 532 533 /* 534 * Do not throttle user workers, kthreads other than kswapd or 535 * workqueues. They may be required for reclaim to make 536 * forward progress (e.g. journalling workqueues or kthreads). 537 */ 538 if (!current_is_kswapd() && 539 current->flags & (PF_USER_WORKER|PF_KTHREAD)) { 540 cond_resched(); 541 return; 542 } 543 544 /* 545 * These figures are pulled out of thin air. 546 * VMSCAN_THROTTLE_ISOLATED is a transient condition based on too many 547 * parallel reclaimers which is a short-lived event so the timeout is 548 * short. Failing to make progress or waiting on writeback are 549 * potentially long-lived events so use a longer timeout. This is shaky 550 * logic as a failure to make progress could be due to anything from 551 * writeback to a slow device to excessive referenced folios at the tail 552 * of the inactive LRU. 553 */ 554 switch(reason) { 555 case VMSCAN_THROTTLE_WRITEBACK: 556 timeout = HZ/10; 557 558 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) { 559 WRITE_ONCE(pgdat->nr_reclaim_start, 560 node_page_state(pgdat, NR_THROTTLED_WRITTEN)); 561 } 562 563 break; 564 case VMSCAN_THROTTLE_CONGESTED: 565 fallthrough; 566 case VMSCAN_THROTTLE_NOPROGRESS: 567 if (skip_throttle_noprogress(pgdat)) { 568 cond_resched(); 569 return; 570 } 571 572 timeout = 1; 573 574 break; 575 case VMSCAN_THROTTLE_ISOLATED: 576 timeout = HZ/50; 577 break; 578 default: 579 WARN_ON_ONCE(1); 580 timeout = HZ; 581 break; 582 } 583 584 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); 585 ret = schedule_timeout(timeout); 586 finish_wait(wqh, &wait); 587 588 if (reason == VMSCAN_THROTTLE_WRITEBACK) 589 atomic_dec(&pgdat->nr_writeback_throttled); 590 591 trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout), 592 jiffies_to_usecs(timeout - ret), 593 reason); 594 } 595 596 /* 597 * Account for folios written if tasks are throttled waiting on dirty 598 * folios to clean. If enough folios have been cleaned since throttling 599 * started then wakeup the throttled tasks. 600 */ 601 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio, 602 int nr_throttled) 603 { 604 unsigned long nr_written; 605 606 node_stat_add_folio(folio, NR_THROTTLED_WRITTEN); 607 608 /* 609 * This is an inaccurate read as the per-cpu deltas may not 610 * be synchronised. However, given that the system is 611 * writeback throttled, it is not worth taking the penalty 612 * of getting an accurate count. At worst, the throttle 613 * timeout guarantees forward progress. 
	 */
	nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) -
		READ_ONCE(pgdat->nr_reclaim_start);

	if (nr_written > SWAP_CLUSTER_MAX * nr_throttled)
		wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write folio out, folio is locked */
	PAGE_KEEP,
	/* move folio to the active list, folio is locked */
	PAGE_ACTIVATE,
	/* folio has been sent to the disk successfully, folio is unlocked */
	PAGE_SUCCESS,
	/* folio is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_folio_list() for each dirty folio.
 * Calls ->writepage().
 */
static pageout_t pageout(struct folio *folio, struct address_space *mapping,
			 struct swap_iocb **plug, struct list_head *folio_list)
{
	/*
	 * If the folio is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity. But note that there may be
	 * stalls if we need to run get_block(). We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_write_iter() against
	 * this folio's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the folio is swapcache, write it back even if that would
	 * block, for some throttling. This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs. Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(folio))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned folios can have
		 * folio->mapping == NULL while being dirty with clean buffers.
		 */
		if (folio_test_private(folio)) {
			if (try_to_free_buffers(folio)) {
				folio_clear_dirty(folio);
				pr_info("%s: orphaned folio\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;

	if (folio_clear_dirty_for_io(folio)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
			.swap_plug = plug,
		};

		/*
		 * A large shmem folio can be split if CONFIG_THP_SWAP is
		 * not enabled or if contiguous swap entries cannot be
		 * allocated.
		 */
		if (shmem_mapping(mapping) && folio_test_large(folio))
			wbc.list = folio_list;

		folio_set_reclaim(folio);
		res = mapping->a_ops->writepage(&folio->page, &wbc);
		if (res < 0)
			handle_write_error(mapping, folio, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			folio_clear_reclaim(folio);
			return PAGE_ACTIVATE;
		}

		if (!folio_test_writeback(folio)) {
			/* synchronous write or broken a_ops? */
			folio_clear_reclaim(folio);
		}
		trace_mm_vmscan_write_folio(folio);
		node_stat_add_folio(folio, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}
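/*
 * Illustrative trace, not part of the original file: for a dirty
 * swapcache folio written to an ordinary block-backed swap device,
 * folio_clear_dirty_for_io() succeeds, ->writepage() queues the I/O and
 * returns 0, and the writeback flag is still set when pageout() returns
 * PAGE_SUCCESS, so shrink_folio_list() keeps the folio for now and frees
 * it on a later pass once writeback completes. A ramdisk-style device
 * that completes the write synchronously comes back with the writeback
 * flag already clear, and shrink_folio_list() may free the folio
 * immediately (see its PAGE_SUCCESS handling).
 */

/*
 * Same as remove_mapping, but if the folio is removed from the mapping, it
 * gets returned with a refcount of 0.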
719 */ 720 static int __remove_mapping(struct address_space *mapping, struct folio *folio, 721 bool reclaimed, struct mem_cgroup *target_memcg) 722 { 723 int refcount; 724 void *shadow = NULL; 725 726 BUG_ON(!folio_test_locked(folio)); 727 BUG_ON(mapping != folio_mapping(folio)); 728 729 if (!folio_test_swapcache(folio)) 730 spin_lock(&mapping->host->i_lock); 731 xa_lock_irq(&mapping->i_pages); 732 /* 733 * The non racy check for a busy folio. 734 * 735 * Must be careful with the order of the tests. When someone has 736 * a ref to the folio, it may be possible that they dirty it then 737 * drop the reference. So if the dirty flag is tested before the 738 * refcount here, then the following race may occur: 739 * 740 * get_user_pages(&page); 741 * [user mapping goes away] 742 * write_to(page); 743 * !folio_test_dirty(folio) [good] 744 * folio_set_dirty(folio); 745 * folio_put(folio); 746 * !refcount(folio) [good, discard it] 747 * 748 * [oops, our write_to data is lost] 749 * 750 * Reversing the order of the tests ensures such a situation cannot 751 * escape unnoticed. The smp_rmb is needed to ensure the folio->flags 752 * load is not satisfied before that of folio->_refcount. 753 * 754 * Note that if the dirty flag is always set via folio_mark_dirty, 755 * and thus under the i_pages lock, then this ordering is not required. 756 */ 757 refcount = 1 + folio_nr_pages(folio); 758 if (!folio_ref_freeze(folio, refcount)) 759 goto cannot_free; 760 /* note: atomic_cmpxchg in folio_ref_freeze provides the smp_rmb */ 761 if (unlikely(folio_test_dirty(folio))) { 762 folio_ref_unfreeze(folio, refcount); 763 goto cannot_free; 764 } 765 766 if (folio_test_swapcache(folio)) { 767 swp_entry_t swap = folio->swap; 768 769 if (reclaimed && !mapping_exiting(mapping)) 770 shadow = workingset_eviction(folio, target_memcg); 771 __delete_from_swap_cache(folio, swap, shadow); 772 mem_cgroup_swapout(folio, swap); 773 xa_unlock_irq(&mapping->i_pages); 774 put_swap_folio(folio, swap); 775 } else { 776 void (*free_folio)(struct folio *); 777 778 free_folio = mapping->a_ops->free_folio; 779 /* 780 * Remember a shadow entry for reclaimed file cache in 781 * order to detect refaults, thus thrashing, later on. 782 * 783 * But don't store shadows in an address space that is 784 * already exiting. This is not just an optimization, 785 * inode reclaim needs to empty out the radix tree or 786 * the nodes are lost. Don't plant shadows behind its 787 * back. 788 * 789 * We also don't store shadows for DAX mappings because the 790 * only page cache folios found in these are zero pages 791 * covering holes, and because we don't want to mix DAX 792 * exceptional entries and shadow exceptional entries in the 793 * same address_space. 794 */ 795 if (reclaimed && folio_is_file_lru(folio) && 796 !mapping_exiting(mapping) && !dax_mapping(mapping)) 797 shadow = workingset_eviction(folio, target_memcg); 798 __filemap_remove_folio(folio, shadow); 799 xa_unlock_irq(&mapping->i_pages); 800 if (mapping_shrinkable(mapping)) 801 inode_add_lru(mapping->host); 802 spin_unlock(&mapping->host->i_lock); 803 804 if (free_folio) 805 free_folio(folio); 806 } 807 808 return 1; 809 810 cannot_free: 811 xa_unlock_irq(&mapping->i_pages); 812 if (!folio_test_swapcache(folio)) 813 spin_unlock(&mapping->host->i_lock); 814 return 0; 815 } 816 817 /** 818 * remove_mapping() - Attempt to remove a folio from its mapping. 819 * @mapping: The address space. 820 * @folio: The folio to remove. 
821 * 822 * If the folio is dirty, under writeback or if someone else has a ref 823 * on it, removal will fail. 824 * Return: The number of pages removed from the mapping. 0 if the folio 825 * could not be removed. 826 * Context: The caller should have a single refcount on the folio and 827 * hold its lock. 828 */ 829 long remove_mapping(struct address_space *mapping, struct folio *folio) 830 { 831 if (__remove_mapping(mapping, folio, false, NULL)) { 832 /* 833 * Unfreezing the refcount with 1 effectively 834 * drops the pagecache ref for us without requiring another 835 * atomic operation. 836 */ 837 folio_ref_unfreeze(folio, 1); 838 return folio_nr_pages(folio); 839 } 840 return 0; 841 } 842 843 /** 844 * folio_putback_lru - Put previously isolated folio onto appropriate LRU list. 845 * @folio: Folio to be returned to an LRU list. 846 * 847 * Add previously isolated @folio to appropriate LRU list. 848 * The folio may still be unevictable for other reasons. 849 * 850 * Context: lru_lock must not be held, interrupts must be enabled. 851 */ 852 void folio_putback_lru(struct folio *folio) 853 { 854 folio_add_lru(folio); 855 folio_put(folio); /* drop ref from isolate */ 856 } 857 858 enum folio_references { 859 FOLIOREF_RECLAIM, 860 FOLIOREF_RECLAIM_CLEAN, 861 FOLIOREF_KEEP, 862 FOLIOREF_ACTIVATE, 863 }; 864 865 static enum folio_references folio_check_references(struct folio *folio, 866 struct scan_control *sc) 867 { 868 int referenced_ptes, referenced_folio; 869 unsigned long vm_flags; 870 871 referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup, 872 &vm_flags); 873 referenced_folio = folio_test_clear_referenced(folio); 874 875 /* 876 * The supposedly reclaimable folio was found to be in a VM_LOCKED vma. 877 * Let the folio, now marked Mlocked, be moved to the unevictable list. 878 */ 879 if (vm_flags & VM_LOCKED) 880 return FOLIOREF_ACTIVATE; 881 882 /* 883 * There are two cases to consider. 884 * 1) Rmap lock contention: rotate. 885 * 2) Skip the non-shared swapbacked folio mapped solely by 886 * the exiting or OOM-reaped process. 887 */ 888 if (referenced_ptes == -1) 889 return FOLIOREF_KEEP; 890 891 if (referenced_ptes) { 892 /* 893 * All mapped folios start out with page table 894 * references from the instantiating fault, so we need 895 * to look twice if a mapped file/anon folio is used more 896 * than once. 897 * 898 * Mark it and spare it for another trip around the 899 * inactive list. Another page table reference will 900 * lead to its activation. 901 * 902 * Note: the mark is set for activated folios as well 903 * so that recently deactivated but used folios are 904 * quickly recovered. 905 */ 906 folio_set_referenced(folio); 907 908 if (referenced_folio || referenced_ptes > 1) 909 return FOLIOREF_ACTIVATE; 910 911 /* 912 * Activate file-backed executable folios after first usage. 913 */ 914 if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) 915 return FOLIOREF_ACTIVATE; 916 917 return FOLIOREF_KEEP; 918 } 919 920 /* Reclaim if clean, defer dirty folios to writeback */ 921 if (referenced_folio && folio_is_file_lru(folio)) 922 return FOLIOREF_RECLAIM_CLEAN; 923 924 return FOLIOREF_RECLAIM; 925 } 926 927 /* Check if a folio is dirty or under writeback */ 928 static void folio_check_dirty_writeback(struct folio *folio, 929 bool *dirty, bool *writeback) 930 { 931 struct address_space *mapping; 932 933 /* 934 * Anonymous folios are not handled by flushers and must be written 935 * from reclaim context. Do not stall reclaim based on them. 
936 * MADV_FREE anonymous folios are put into inactive file list too. 937 * They could be mistakenly treated as file lru. So further anon 938 * test is needed. 939 */ 940 if (!folio_is_file_lru(folio) || 941 (folio_test_anon(folio) && !folio_test_swapbacked(folio))) { 942 *dirty = false; 943 *writeback = false; 944 return; 945 } 946 947 /* By default assume that the folio flags are accurate */ 948 *dirty = folio_test_dirty(folio); 949 *writeback = folio_test_writeback(folio); 950 951 /* Verify dirty/writeback state if the filesystem supports it */ 952 if (!folio_test_private(folio)) 953 return; 954 955 mapping = folio_mapping(folio); 956 if (mapping && mapping->a_ops->is_dirty_writeback) 957 mapping->a_ops->is_dirty_writeback(folio, dirty, writeback); 958 } 959 960 struct folio *alloc_migrate_folio(struct folio *src, unsigned long private) 961 { 962 struct folio *dst; 963 nodemask_t *allowed_mask; 964 struct migration_target_control *mtc; 965 966 mtc = (struct migration_target_control *)private; 967 968 allowed_mask = mtc->nmask; 969 /* 970 * make sure we allocate from the target node first also trying to 971 * demote or reclaim pages from the target node via kswapd if we are 972 * low on free memory on target node. If we don't do this and if 973 * we have free memory on the slower(lower) memtier, we would start 974 * allocating pages from slower(lower) memory tiers without even forcing 975 * a demotion of cold pages from the target memtier. This can result 976 * in the kernel placing hot pages in slower(lower) memory tiers. 977 */ 978 mtc->nmask = NULL; 979 mtc->gfp_mask |= __GFP_THISNODE; 980 dst = alloc_migration_target(src, (unsigned long)mtc); 981 if (dst) 982 return dst; 983 984 mtc->gfp_mask &= ~__GFP_THISNODE; 985 mtc->nmask = allowed_mask; 986 987 return alloc_migration_target(src, (unsigned long)mtc); 988 } 989 990 /* 991 * Take folios on @demote_folios and attempt to demote them to another node. 992 * Folios which are not demoted are left on @demote_folios. 993 */ 994 static unsigned int demote_folio_list(struct list_head *demote_folios, 995 struct pglist_data *pgdat) 996 { 997 int target_nid = next_demotion_node(pgdat->node_id); 998 unsigned int nr_succeeded; 999 nodemask_t allowed_mask; 1000 1001 struct migration_target_control mtc = { 1002 /* 1003 * Allocate from 'node', or fail quickly and quietly. 1004 * When this happens, 'page' will likely just be discarded 1005 * instead of migrated. 1006 */ 1007 .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN | 1008 __GFP_NOMEMALLOC | GFP_NOWAIT, 1009 .nid = target_nid, 1010 .nmask = &allowed_mask, 1011 .reason = MR_DEMOTION, 1012 }; 1013 1014 if (list_empty(demote_folios)) 1015 return 0; 1016 1017 if (target_nid == NUMA_NO_NODE) 1018 return 0; 1019 1020 node_get_allowed_targets(pgdat, &allowed_mask); 1021 1022 /* Demotion ignores all cpuset and mempolicy settings */ 1023 migrate_pages(demote_folios, alloc_migrate_folio, NULL, 1024 (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION, 1025 &nr_succeeded); 1026 1027 return nr_succeeded; 1028 } 1029 1030 static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask) 1031 { 1032 if (gfp_mask & __GFP_FS) 1033 return true; 1034 if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO)) 1035 return false; 1036 /* 1037 * We can "enter_fs" for swap-cache with only __GFP_IO 1038 * providing this isn't SWP_FS_OPS. 1039 * ->flags can be updated non-atomicially (scan_swap_map_slots), 1040 * but that will never affect SWP_FS_OPS, so the data_race 1041 * is safe. 
1042 */ 1043 return !data_race(folio_swap_flags(folio) & SWP_FS_OPS); 1044 } 1045 1046 /* 1047 * shrink_folio_list() returns the number of reclaimed pages 1048 */ 1049 static unsigned int shrink_folio_list(struct list_head *folio_list, 1050 struct pglist_data *pgdat, struct scan_control *sc, 1051 struct reclaim_stat *stat, bool ignore_references) 1052 { 1053 struct folio_batch free_folios; 1054 LIST_HEAD(ret_folios); 1055 LIST_HEAD(demote_folios); 1056 unsigned int nr_reclaimed = 0; 1057 unsigned int pgactivate = 0; 1058 bool do_demote_pass; 1059 struct swap_iocb *plug = NULL; 1060 1061 folio_batch_init(&free_folios); 1062 memset(stat, 0, sizeof(*stat)); 1063 cond_resched(); 1064 do_demote_pass = can_demote(pgdat->node_id, sc); 1065 1066 retry: 1067 while (!list_empty(folio_list)) { 1068 struct address_space *mapping; 1069 struct folio *folio; 1070 enum folio_references references = FOLIOREF_RECLAIM; 1071 bool dirty, writeback; 1072 unsigned int nr_pages; 1073 1074 cond_resched(); 1075 1076 folio = lru_to_folio(folio_list); 1077 list_del(&folio->lru); 1078 1079 if (!folio_trylock(folio)) 1080 goto keep; 1081 1082 VM_BUG_ON_FOLIO(folio_test_active(folio), folio); 1083 1084 nr_pages = folio_nr_pages(folio); 1085 1086 /* Account the number of base pages */ 1087 sc->nr_scanned += nr_pages; 1088 1089 if (unlikely(!folio_evictable(folio))) 1090 goto activate_locked; 1091 1092 if (!sc->may_unmap && folio_mapped(folio)) 1093 goto keep_locked; 1094 1095 /* folio_update_gen() tried to promote this page? */ 1096 if (lru_gen_enabled() && !ignore_references && 1097 folio_mapped(folio) && folio_test_referenced(folio)) 1098 goto keep_locked; 1099 1100 /* 1101 * The number of dirty pages determines if a node is marked 1102 * reclaim_congested. kswapd will stall and start writing 1103 * folios if the tail of the LRU is all dirty unqueued folios. 1104 */ 1105 folio_check_dirty_writeback(folio, &dirty, &writeback); 1106 if (dirty || writeback) 1107 stat->nr_dirty += nr_pages; 1108 1109 if (dirty && !writeback) 1110 stat->nr_unqueued_dirty += nr_pages; 1111 1112 /* 1113 * Treat this folio as congested if folios are cycling 1114 * through the LRU so quickly that the folios marked 1115 * for immediate reclaim are making it to the end of 1116 * the LRU a second time. 1117 */ 1118 if (writeback && folio_test_reclaim(folio)) 1119 stat->nr_congested += nr_pages; 1120 1121 /* 1122 * If a folio at the tail of the LRU is under writeback, there 1123 * are three cases to consider. 1124 * 1125 * 1) If reclaim is encountering an excessive number 1126 * of folios under writeback and this folio has both 1127 * the writeback and reclaim flags set, then it 1128 * indicates that folios are being queued for I/O but 1129 * are being recycled through the LRU before the I/O 1130 * can complete. Waiting on the folio itself risks an 1131 * indefinite stall if it is impossible to writeback 1132 * the folio due to I/O error or disconnected storage 1133 * so instead note that the LRU is being scanned too 1134 * quickly and the caller can stall after the folio 1135 * list has been processed. 1136 * 1137 * 2) Global or new memcg reclaim encounters a folio that is 1138 * not marked for immediate reclaim, or the caller does not 1139 * have __GFP_FS (or __GFP_IO if it's simply going to swap, 1140 * not to fs). In this case mark the folio for immediate 1141 * reclaim and continue scanning. 1142 * 1143 * Require may_enter_fs() because we would wait on fs, which 1144 * may not have submitted I/O yet. 
And the loop driver might 1145 * enter reclaim, and deadlock if it waits on a folio for 1146 * which it is needed to do the write (loop masks off 1147 * __GFP_IO|__GFP_FS for this reason); but more thought 1148 * would probably show more reasons. 1149 * 1150 * 3) Legacy memcg encounters a folio that already has the 1151 * reclaim flag set. memcg does not have any dirty folio 1152 * throttling so we could easily OOM just because too many 1153 * folios are in writeback and there is nothing else to 1154 * reclaim. Wait for the writeback to complete. 1155 * 1156 * In cases 1) and 2) we activate the folios to get them out of 1157 * the way while we continue scanning for clean folios on the 1158 * inactive list and refilling from the active list. The 1159 * observation here is that waiting for disk writes is more 1160 * expensive than potentially causing reloads down the line. 1161 * Since they're marked for immediate reclaim, they won't put 1162 * memory pressure on the cache working set any longer than it 1163 * takes to write them to disk. 1164 */ 1165 if (folio_test_writeback(folio)) { 1166 /* Case 1 above */ 1167 if (current_is_kswapd() && 1168 folio_test_reclaim(folio) && 1169 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { 1170 stat->nr_immediate += nr_pages; 1171 goto activate_locked; 1172 1173 /* Case 2 above */ 1174 } else if (writeback_throttling_sane(sc) || 1175 !folio_test_reclaim(folio) || 1176 !may_enter_fs(folio, sc->gfp_mask)) { 1177 /* 1178 * This is slightly racy - 1179 * folio_end_writeback() might have 1180 * just cleared the reclaim flag, then 1181 * setting the reclaim flag here ends up 1182 * interpreted as the readahead flag - but 1183 * that does not matter enough to care. 1184 * What we do want is for this folio to 1185 * have the reclaim flag set next time 1186 * memcg reclaim reaches the tests above, 1187 * so it will then wait for writeback to 1188 * avoid OOM; and it's also appropriate 1189 * in global reclaim. 1190 */ 1191 folio_set_reclaim(folio); 1192 stat->nr_writeback += nr_pages; 1193 goto activate_locked; 1194 1195 /* Case 3 above */ 1196 } else { 1197 folio_unlock(folio); 1198 folio_wait_writeback(folio); 1199 /* then go back and try same folio again */ 1200 list_add_tail(&folio->lru, folio_list); 1201 continue; 1202 } 1203 } 1204 1205 if (!ignore_references) 1206 references = folio_check_references(folio, sc); 1207 1208 switch (references) { 1209 case FOLIOREF_ACTIVATE: 1210 goto activate_locked; 1211 case FOLIOREF_KEEP: 1212 stat->nr_ref_keep += nr_pages; 1213 goto keep_locked; 1214 case FOLIOREF_RECLAIM: 1215 case FOLIOREF_RECLAIM_CLEAN: 1216 ; /* try to reclaim the folio below */ 1217 } 1218 1219 /* 1220 * Before reclaiming the folio, try to relocate 1221 * its contents to another node. 1222 */ 1223 if (do_demote_pass && 1224 (thp_migration_supported() || !folio_test_large(folio))) { 1225 list_add(&folio->lru, &demote_folios); 1226 folio_unlock(folio); 1227 continue; 1228 } 1229 1230 /* 1231 * Anonymous process memory has backing store? 1232 * Try to allocate it some swap space here. 
		 * Lazyfree folios can be freed directly.
		 */
		if (folio_test_anon(folio) && folio_test_swapbacked(folio)) {
			if (!folio_test_swapcache(folio)) {
				if (!(sc->gfp_mask & __GFP_IO))
					goto keep_locked;
				if (folio_maybe_dma_pinned(folio))
					goto keep_locked;
				if (folio_test_large(folio)) {
					/* cannot split folio, skip it */
					if (!can_split_folio(folio, 1, NULL))
						goto activate_locked;
					/*
					 * Split partially mapped folios right away.
					 * We can free the unmapped pages without IO.
					 */
					if (data_race(!list_empty(&folio->_deferred_list) &&
					    folio_test_partially_mapped(folio)) &&
					    split_folio_to_list(folio, folio_list))
						goto activate_locked;
				}
				if (!add_to_swap(folio)) {
					int __maybe_unused order = folio_order(folio);

					if (!folio_test_large(folio))
						goto activate_locked_split;
					/* Fall back to swapping normal pages */
					if (split_folio_to_list(folio, folio_list))
						goto activate_locked;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
					if (nr_pages >= HPAGE_PMD_NR) {
						count_memcg_folio_events(folio,
							THP_SWPOUT_FALLBACK, 1);
						count_vm_event(THP_SWPOUT_FALLBACK);
					}
#endif
					count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
					if (!add_to_swap(folio))
						goto activate_locked_split;
				}
			}
		}

		/*
		 * If the folio was split above, the tail pages will make
		 * their own pass through this function and be accounted
		 * then.
		 */
		if ((nr_pages > 1) && !folio_test_large(folio)) {
			sc->nr_scanned -= (nr_pages - 1);
			nr_pages = 1;
		}

		/*
		 * The folio is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (folio_mapped(folio)) {
			enum ttu_flags flags = TTU_BATCH_FLUSH;
			bool was_swapbacked = folio_test_swapbacked(folio);

			if (folio_test_pmd_mappable(folio))
				flags |= TTU_SPLIT_HUGE_PMD;
			/*
			 * Without TTU_SYNC, try_to_unmap will only begin to
			 * hold the PTL from the first present PTE within a
			 * large folio. Some initial PTEs might be skipped due
			 * to races with parallel PTE writes in which PTEs can
			 * be cleared temporarily before being written new
			 * present values. This can leave a large folio still
			 * mapped while some of its subpages have already been
			 * unmapped by try_to_unmap; TTU_SYNC makes try_to_unmap
			 * acquire the PTL from the first PTE, eliminating the
			 * influence of temporary PTE values.
			 */
			if (folio_test_large(folio))
				flags |= TTU_SYNC;

			try_to_unmap(folio, flags);
			if (folio_mapped(folio)) {
				stat->nr_unmap_fail += nr_pages;
				if (!was_swapbacked &&
				    folio_test_swapbacked(folio))
					stat->nr_lazyfree_fail += nr_pages;
				goto activate_locked;
			}
		}

		/*
		 * Folio is unmapped now so it cannot be newly pinned anymore.
		 * No point in trying to reclaim folio if it is pinned.
		 * Furthermore we don't want to reclaim underlying fs metadata
		 * if the folio is pinned and thus potentially modified by the
		 * pinning process as that may upset the filesystem.
		 */
		if (folio_maybe_dma_pinned(folio))
			goto activate_locked;

		mapping = folio_mapping(folio);
		if (folio_test_dirty(folio)) {
			/*
			 * Only kswapd can write back filesystem folios
			 * to avoid risk of stack overflow.
But avoid 1336 * injecting inefficient single-folio I/O into 1337 * flusher writeback as much as possible: only 1338 * write folios when we've encountered many 1339 * dirty folios, and when we've already scanned 1340 * the rest of the LRU for clean folios and see 1341 * the same dirty folios again (with the reclaim 1342 * flag set). 1343 */ 1344 if (folio_is_file_lru(folio) && 1345 (!current_is_kswapd() || 1346 !folio_test_reclaim(folio) || 1347 !test_bit(PGDAT_DIRTY, &pgdat->flags))) { 1348 /* 1349 * Immediately reclaim when written back. 1350 * Similar in principle to folio_deactivate() 1351 * except we already have the folio isolated 1352 * and know it's dirty 1353 */ 1354 node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE, 1355 nr_pages); 1356 folio_set_reclaim(folio); 1357 1358 goto activate_locked; 1359 } 1360 1361 if (references == FOLIOREF_RECLAIM_CLEAN) 1362 goto keep_locked; 1363 if (!may_enter_fs(folio, sc->gfp_mask)) 1364 goto keep_locked; 1365 if (!sc->may_writepage) 1366 goto keep_locked; 1367 1368 /* 1369 * Folio is dirty. Flush the TLB if a writable entry 1370 * potentially exists to avoid CPU writes after I/O 1371 * starts and then write it out here. 1372 */ 1373 try_to_unmap_flush_dirty(); 1374 switch (pageout(folio, mapping, &plug, folio_list)) { 1375 case PAGE_KEEP: 1376 goto keep_locked; 1377 case PAGE_ACTIVATE: 1378 /* 1379 * If shmem folio is split when writeback to swap, 1380 * the tail pages will make their own pass through 1381 * this function and be accounted then. 1382 */ 1383 if (nr_pages > 1 && !folio_test_large(folio)) { 1384 sc->nr_scanned -= (nr_pages - 1); 1385 nr_pages = 1; 1386 } 1387 goto activate_locked; 1388 case PAGE_SUCCESS: 1389 if (nr_pages > 1 && !folio_test_large(folio)) { 1390 sc->nr_scanned -= (nr_pages - 1); 1391 nr_pages = 1; 1392 } 1393 stat->nr_pageout += nr_pages; 1394 1395 if (folio_test_writeback(folio)) 1396 goto keep; 1397 if (folio_test_dirty(folio)) 1398 goto keep; 1399 1400 /* 1401 * A synchronous write - probably a ramdisk. Go 1402 * ahead and try to reclaim the folio. 1403 */ 1404 if (!folio_trylock(folio)) 1405 goto keep; 1406 if (folio_test_dirty(folio) || 1407 folio_test_writeback(folio)) 1408 goto keep_locked; 1409 mapping = folio_mapping(folio); 1410 fallthrough; 1411 case PAGE_CLEAN: 1412 ; /* try to free the folio below */ 1413 } 1414 } 1415 1416 /* 1417 * If the folio has buffers, try to free the buffer 1418 * mappings associated with this folio. If we succeed 1419 * we try to free the folio as well. 1420 * 1421 * We do this even if the folio is dirty. 1422 * filemap_release_folio() does not perform I/O, but it 1423 * is possible for a folio to have the dirty flag set, 1424 * but it is actually clean (all its buffers are clean). 1425 * This happens if the buffers were written out directly, 1426 * with submit_bh(). ext3 will do this, as well as 1427 * the blockdev mapping. filemap_release_folio() will 1428 * discover that cleanness and will drop the buffers 1429 * and mark the folio clean - it can be freed. 1430 * 1431 * Rarely, folios can have buffers and no ->mapping. 1432 * These are the folios which were not successfully 1433 * invalidated in truncate_cleanup_folio(). We try to 1434 * drop those buffers here and if that worked, and the 1435 * folio is no longer mapped into process address space 1436 * (refcount == 1) it can be freed. Otherwise, leave 1437 * the folio on the LRU so it is swappable. 
1438 */ 1439 if (folio_needs_release(folio)) { 1440 if (!filemap_release_folio(folio, sc->gfp_mask)) 1441 goto activate_locked; 1442 if (!mapping && folio_ref_count(folio) == 1) { 1443 folio_unlock(folio); 1444 if (folio_put_testzero(folio)) 1445 goto free_it; 1446 else { 1447 /* 1448 * rare race with speculative reference. 1449 * the speculative reference will free 1450 * this folio shortly, so we may 1451 * increment nr_reclaimed here (and 1452 * leave it off the LRU). 1453 */ 1454 nr_reclaimed += nr_pages; 1455 continue; 1456 } 1457 } 1458 } 1459 1460 if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) { 1461 /* follow __remove_mapping for reference */ 1462 if (!folio_ref_freeze(folio, 1)) 1463 goto keep_locked; 1464 /* 1465 * The folio has only one reference left, which is 1466 * from the isolation. After the caller puts the 1467 * folio back on the lru and drops the reference, the 1468 * folio will be freed anyway. It doesn't matter 1469 * which lru it goes on. So we don't bother checking 1470 * the dirty flag here. 1471 */ 1472 count_vm_events(PGLAZYFREED, nr_pages); 1473 count_memcg_folio_events(folio, PGLAZYFREED, nr_pages); 1474 } else if (!mapping || !__remove_mapping(mapping, folio, true, 1475 sc->target_mem_cgroup)) 1476 goto keep_locked; 1477 1478 folio_unlock(folio); 1479 free_it: 1480 /* 1481 * Folio may get swapped out as a whole, need to account 1482 * all pages in it. 1483 */ 1484 nr_reclaimed += nr_pages; 1485 1486 folio_unqueue_deferred_split(folio); 1487 if (folio_batch_add(&free_folios, folio) == 0) { 1488 mem_cgroup_uncharge_folios(&free_folios); 1489 try_to_unmap_flush(); 1490 free_unref_folios(&free_folios); 1491 } 1492 continue; 1493 1494 activate_locked_split: 1495 /* 1496 * The tail pages that are failed to add into swap cache 1497 * reach here. Fixup nr_scanned and nr_pages. 1498 */ 1499 if (nr_pages > 1) { 1500 sc->nr_scanned -= (nr_pages - 1); 1501 nr_pages = 1; 1502 } 1503 activate_locked: 1504 /* Not a candidate for swapping, so reclaim swap space. */ 1505 if (folio_test_swapcache(folio) && 1506 (mem_cgroup_swap_full(folio) || folio_test_mlocked(folio))) 1507 folio_free_swap(folio); 1508 VM_BUG_ON_FOLIO(folio_test_active(folio), folio); 1509 if (!folio_test_mlocked(folio)) { 1510 int type = folio_is_file_lru(folio); 1511 folio_set_active(folio); 1512 stat->nr_activate[type] += nr_pages; 1513 count_memcg_folio_events(folio, PGACTIVATE, nr_pages); 1514 } 1515 keep_locked: 1516 folio_unlock(folio); 1517 keep: 1518 list_add(&folio->lru, &ret_folios); 1519 VM_BUG_ON_FOLIO(folio_test_lru(folio) || 1520 folio_test_unevictable(folio), folio); 1521 } 1522 /* 'folio_list' is always empty here */ 1523 1524 /* Migrate folios selected for demotion */ 1525 stat->nr_demoted = demote_folio_list(&demote_folios, pgdat); 1526 nr_reclaimed += stat->nr_demoted; 1527 /* Folios that could not be demoted are still in @demote_folios */ 1528 if (!list_empty(&demote_folios)) { 1529 /* Folios which weren't demoted go back on @folio_list */ 1530 list_splice_init(&demote_folios, folio_list); 1531 1532 /* 1533 * goto retry to reclaim the undemoted folios in folio_list if 1534 * desired. 1535 * 1536 * Reclaiming directly from top tier nodes is not often desired 1537 * due to it breaking the LRU ordering: in general memory 1538 * should be reclaimed from lower tier nodes and demoted from 1539 * top tier nodes. 
1540 * 1541 * However, disabling reclaim from top tier nodes entirely 1542 * would cause ooms in edge scenarios where lower tier memory 1543 * is unreclaimable for whatever reason, eg memory being 1544 * mlocked or too hot to reclaim. We can disable reclaim 1545 * from top tier nodes in proactive reclaim though as that is 1546 * not real memory pressure. 1547 */ 1548 if (!sc->proactive) { 1549 do_demote_pass = false; 1550 goto retry; 1551 } 1552 } 1553 1554 pgactivate = stat->nr_activate[0] + stat->nr_activate[1]; 1555 1556 mem_cgroup_uncharge_folios(&free_folios); 1557 try_to_unmap_flush(); 1558 free_unref_folios(&free_folios); 1559 1560 list_splice(&ret_folios, folio_list); 1561 count_vm_events(PGACTIVATE, pgactivate); 1562 1563 if (plug) 1564 swap_write_unplug(plug); 1565 return nr_reclaimed; 1566 } 1567 1568 unsigned int reclaim_clean_pages_from_list(struct zone *zone, 1569 struct list_head *folio_list) 1570 { 1571 struct scan_control sc = { 1572 .gfp_mask = GFP_KERNEL, 1573 .may_unmap = 1, 1574 }; 1575 struct reclaim_stat stat; 1576 unsigned int nr_reclaimed; 1577 struct folio *folio, *next; 1578 LIST_HEAD(clean_folios); 1579 unsigned int noreclaim_flag; 1580 1581 list_for_each_entry_safe(folio, next, folio_list, lru) { 1582 if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) && 1583 !folio_test_dirty(folio) && !__folio_test_movable(folio) && 1584 !folio_test_unevictable(folio)) { 1585 folio_clear_active(folio); 1586 list_move(&folio->lru, &clean_folios); 1587 } 1588 } 1589 1590 /* 1591 * We should be safe here since we are only dealing with file pages and 1592 * we are not kswapd and therefore cannot write dirty file pages. But 1593 * call memalloc_noreclaim_save() anyway, just in case these conditions 1594 * change in the future. 1595 */ 1596 noreclaim_flag = memalloc_noreclaim_save(); 1597 nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc, 1598 &stat, true); 1599 memalloc_noreclaim_restore(noreclaim_flag); 1600 1601 list_splice(&clean_folios, folio_list); 1602 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, 1603 -(long)nr_reclaimed); 1604 /* 1605 * Since lazyfree pages are isolated from file LRU from the beginning, 1606 * they will rotate back to anonymous LRU in the end if it failed to 1607 * discard so isolated count will be mismatched. 1608 * Compensate the isolated count for both LRU lists. 1609 */ 1610 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, 1611 stat.nr_lazyfree_fail); 1612 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, 1613 -(long)stat.nr_lazyfree_fail); 1614 return nr_reclaimed; 1615 } 1616 1617 /* 1618 * Update LRU sizes after isolating pages. The LRU size updates must 1619 * be complete before mem_cgroup_update_lru_size due to a sanity check. 1620 */ 1621 static __always_inline void update_lru_sizes(struct lruvec *lruvec, 1622 enum lru_list lru, unsigned long *nr_zone_taken) 1623 { 1624 int zid; 1625 1626 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 1627 if (!nr_zone_taken[zid]) 1628 continue; 1629 1630 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); 1631 } 1632 1633 } 1634 1635 /* 1636 * Isolating page from the lruvec to fill in @dst list by nr_to_scan times. 1637 * 1638 * lruvec->lru_lock is heavily contended. Some of the functions that 1639 * shrink the lists perform better by taking out a batch of pages 1640 * and working on them outside the LRU lock. 1641 * 1642 * For pagecache intensive workloads, this function is the hottest 1643 * spot in the kernel (apart from copy_*_user functions). 
1644 * 1645 * Lru_lock must be held before calling this function. 1646 * 1647 * @nr_to_scan: The number of eligible pages to look through on the list. 1648 * @lruvec: The LRU vector to pull pages from. 1649 * @dst: The temp list to put pages on to. 1650 * @nr_scanned: The number of pages that were scanned. 1651 * @sc: The scan_control struct for this reclaim session 1652 * @lru: LRU list id for isolating 1653 * 1654 * returns how many pages were moved onto *@dst. 1655 */ 1656 static unsigned long isolate_lru_folios(unsigned long nr_to_scan, 1657 struct lruvec *lruvec, struct list_head *dst, 1658 unsigned long *nr_scanned, struct scan_control *sc, 1659 enum lru_list lru) 1660 { 1661 struct list_head *src = &lruvec->lists[lru]; 1662 unsigned long nr_taken = 0; 1663 unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 }; 1664 unsigned long nr_skipped[MAX_NR_ZONES] = { 0, }; 1665 unsigned long skipped = 0; 1666 unsigned long scan, total_scan, nr_pages; 1667 LIST_HEAD(folios_skipped); 1668 1669 total_scan = 0; 1670 scan = 0; 1671 while (scan < nr_to_scan && !list_empty(src)) { 1672 struct list_head *move_to = src; 1673 struct folio *folio; 1674 1675 folio = lru_to_folio(src); 1676 prefetchw_prev_lru_folio(folio, src, flags); 1677 1678 nr_pages = folio_nr_pages(folio); 1679 total_scan += nr_pages; 1680 1681 if (folio_zonenum(folio) > sc->reclaim_idx) { 1682 nr_skipped[folio_zonenum(folio)] += nr_pages; 1683 move_to = &folios_skipped; 1684 goto move; 1685 } 1686 1687 /* 1688 * Do not count skipped folios because that makes the function 1689 * return with no isolated folios if the LRU mostly contains 1690 * ineligible folios. This causes the VM to not reclaim any 1691 * folios, triggering a premature OOM. 1692 * Account all pages in a folio. 1693 */ 1694 scan += nr_pages; 1695 1696 if (!folio_test_lru(folio)) 1697 goto move; 1698 if (!sc->may_unmap && folio_mapped(folio)) 1699 goto move; 1700 1701 /* 1702 * Be careful not to clear the lru flag until after we're 1703 * sure the folio is not being freed elsewhere -- the 1704 * folio release code relies on it. 1705 */ 1706 if (unlikely(!folio_try_get(folio))) 1707 goto move; 1708 1709 if (!folio_test_clear_lru(folio)) { 1710 /* Another thread is already isolating this folio */ 1711 folio_put(folio); 1712 goto move; 1713 } 1714 1715 nr_taken += nr_pages; 1716 nr_zone_taken[folio_zonenum(folio)] += nr_pages; 1717 move_to = dst; 1718 move: 1719 list_move(&folio->lru, move_to); 1720 } 1721 1722 /* 1723 * Splice any skipped folios to the start of the LRU list. Note that 1724 * this disrupts the LRU order when reclaiming for lower zones but 1725 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX 1726 * scanning would soon rescan the same folios to skip and waste lots 1727 * of cpu cycles. 1728 */ 1729 if (!list_empty(&folios_skipped)) { 1730 int zid; 1731 1732 list_splice(&folios_skipped, src); 1733 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 1734 if (!nr_skipped[zid]) 1735 continue; 1736 1737 __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]); 1738 skipped += nr_skipped[zid]; 1739 } 1740 } 1741 *nr_scanned = total_scan; 1742 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, 1743 total_scan, skipped, nr_taken, lru); 1744 update_lru_sizes(lruvec, lru, nr_zone_taken); 1745 return nr_taken; 1746 } 1747 1748 /** 1749 * folio_isolate_lru() - Try to isolate a folio from its LRU list. 1750 * @folio: Folio to isolate from its LRU list. 
 *
 * Isolate a @folio from an LRU list and adjust the vmstat statistic
 * corresponding to whatever LRU list the folio was on.
 *
 * The folio will have its LRU flag cleared. If it was found on the
 * active list, it will have the Active flag set. If it was found on the
 * unevictable list, it will have the Unevictable flag set. These flags
 * may need to be cleared by the caller before letting the page go.
 *
 * Context:
 *
 * (1) Must be called with an elevated refcount on the folio. This is a
 *     fundamental difference from isolate_lru_folios() (which is called
 *     without a stable reference).
 * (2) The lru_lock must not be held.
 * (3) Interrupts must be enabled.
 *
 * Return: true if the folio was removed from an LRU list.
 * false if the folio was not on an LRU list.
 */
bool folio_isolate_lru(struct folio *folio)
{
	bool ret = false;

	VM_BUG_ON_FOLIO(!folio_ref_count(folio), folio);

	if (folio_test_clear_lru(folio)) {
		struct lruvec *lruvec;

		folio_get(folio);
		lruvec = folio_lruvec_lock_irq(folio);
		lruvec_del_folio(lruvec, folio);
		unlock_page_lruvec_irq(lruvec);
		ret = true;
	}

	return ret;
}

/*
 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
 * then get rescheduled. When a massive number of tasks are doing page
 * allocation, such sleeping direct reclaimers may keep piling up on each CPU;
 * the LRU list will go small and be scanned faster than necessary, leading to
 * unnecessary swapping, thrashing and OOM.
 */
static bool too_many_isolated(struct pglist_data *pgdat, int file,
			      struct scan_control *sc)
{
	unsigned long inactive, isolated;
	bool too_many;

	if (current_is_kswapd())
		return false;

	if (!writeback_throttling_sane(sc))
		return false;

	if (file) {
		inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
		isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
	} else {
		inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
		isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
	}

	/*
	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
	 * won't get blocked by normal direct-reclaimers, forming a circular
	 * deadlock.
	 */
	if (gfp_has_io_fs(sc->gfp_mask))
		inactive >>= 3;

	too_many = isolated > inactive;

	/* Wake up tasks throttled due to too_many_isolated. */
	if (!too_many)
		wake_throttle_isolated(pgdat);

	return too_many;
}
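/*
 * Worked example, not part of the original file (the numbers are made up
 * for illustration): suppose a node has 80,000 inactive file pages. A
 * direct reclaimer using GFP_KERNEL (both __GFP_IO and __GFP_FS set)
 * compares NR_ISOLATED_FILE against 80,000 >> 3 = 10,000, so it is
 * throttled in shrink_inactive_list() once more than 10,000 file pages
 * are isolated. A GFP_NOFS reclaimer compares against the full 80,000
 * and may therefore isolate far more before being considered "too many",
 * which keeps it from deadlocking behind normal direct reclaimers.
 */

/*
 * move_folios_to_lru() moves folios from private @list to appropriate LRU list.
 *
 * Returns the number of pages moved to the given lruvec.
 */
static unsigned int move_folios_to_lru(struct lruvec *lruvec,
				       struct list_head *list)
{
	int nr_pages, nr_moved = 0;
	struct folio_batch free_folios;

	folio_batch_init(&free_folios);
	while (!list_empty(list)) {
		struct folio *folio = lru_to_folio(list);

		VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
		list_del(&folio->lru);
		if (unlikely(!folio_evictable(folio))) {
			spin_unlock_irq(&lruvec->lru_lock);
			folio_putback_lru(folio);
			spin_lock_irq(&lruvec->lru_lock);
			continue;
		}

		/*
		 * The folio_set_lru needs to be kept here for list integrity.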
1860 * Otherwise: 1861 * #0 move_folios_to_lru #1 release_pages 1862 * if (!folio_put_testzero()) 1863 * if (folio_put_testzero()) 1864 * !lru //skip lru_lock 1865 * folio_set_lru() 1866 * list_add(&folio->lru,) 1867 * list_add(&folio->lru,) 1868 */ 1869 folio_set_lru(folio); 1870 1871 if (unlikely(folio_put_testzero(folio))) { 1872 __folio_clear_lru_flags(folio); 1873 1874 folio_unqueue_deferred_split(folio); 1875 if (folio_batch_add(&free_folios, folio) == 0) { 1876 spin_unlock_irq(&lruvec->lru_lock); 1877 mem_cgroup_uncharge_folios(&free_folios); 1878 free_unref_folios(&free_folios); 1879 spin_lock_irq(&lruvec->lru_lock); 1880 } 1881 1882 continue; 1883 } 1884 1885 /* 1886 * All pages were isolated from the same lruvec (and isolation 1887 * inhibits memcg migration). 1888 */ 1889 VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio); 1890 lruvec_add_folio(lruvec, folio); 1891 nr_pages = folio_nr_pages(folio); 1892 nr_moved += nr_pages; 1893 if (folio_test_active(folio)) 1894 workingset_age_nonresident(lruvec, nr_pages); 1895 } 1896 1897 if (free_folios.nr) { 1898 spin_unlock_irq(&lruvec->lru_lock); 1899 mem_cgroup_uncharge_folios(&free_folios); 1900 free_unref_folios(&free_folios); 1901 spin_lock_irq(&lruvec->lru_lock); 1902 } 1903 1904 return nr_moved; 1905 } 1906 1907 /* 1908 * If a kernel thread (such as nfsd for loop-back mounts) services a backing 1909 * device by writing to the page cache it sets PF_LOCAL_THROTTLE. In this case 1910 * we should not throttle. Otherwise it is safe to do so. 1911 */ 1912 static int current_may_throttle(void) 1913 { 1914 return !(current->flags & PF_LOCAL_THROTTLE); 1915 } 1916 1917 /* 1918 * shrink_inactive_list() is a helper for shrink_node(). It returns the number 1919 * of reclaimed pages 1920 */ 1921 static unsigned long shrink_inactive_list(unsigned long nr_to_scan, 1922 struct lruvec *lruvec, struct scan_control *sc, 1923 enum lru_list lru) 1924 { 1925 LIST_HEAD(folio_list); 1926 unsigned long nr_scanned; 1927 unsigned int nr_reclaimed = 0; 1928 unsigned long nr_taken; 1929 struct reclaim_stat stat; 1930 bool file = is_file_lru(lru); 1931 enum vm_event_item item; 1932 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 1933 bool stalled = false; 1934 1935 while (unlikely(too_many_isolated(pgdat, file, sc))) { 1936 if (stalled) 1937 return 0; 1938 1939 /* wait a bit for the reclaimer. */ 1940 stalled = true; 1941 reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED); 1942 1943 /* We are about to die and free our memory. Return now. 
*/ 1944 if (fatal_signal_pending(current)) 1945 return SWAP_CLUSTER_MAX; 1946 } 1947 1948 lru_add_drain(); 1949 1950 spin_lock_irq(&lruvec->lru_lock); 1951 1952 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list, 1953 &nr_scanned, sc, lru); 1954 1955 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); 1956 item = PGSCAN_KSWAPD + reclaimer_offset(); 1957 if (!cgroup_reclaim(sc)) 1958 __count_vm_events(item, nr_scanned); 1959 __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned); 1960 __count_vm_events(PGSCAN_ANON + file, nr_scanned); 1961 1962 spin_unlock_irq(&lruvec->lru_lock); 1963 1964 if (nr_taken == 0) 1965 return 0; 1966 1967 nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false); 1968 1969 spin_lock_irq(&lruvec->lru_lock); 1970 move_folios_to_lru(lruvec, &folio_list); 1971 1972 __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(), 1973 stat.nr_demoted); 1974 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); 1975 item = PGSTEAL_KSWAPD + reclaimer_offset(); 1976 if (!cgroup_reclaim(sc)) 1977 __count_vm_events(item, nr_reclaimed); 1978 __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed); 1979 __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed); 1980 spin_unlock_irq(&lruvec->lru_lock); 1981 1982 lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed); 1983 1984 /* 1985 * If dirty folios are scanned that are not queued for IO, it 1986 * implies that flushers are not doing their job. This can 1987 * happen when memory pressure pushes dirty folios to the end of 1988 * the LRU before the dirty limits are breached and the dirty 1989 * data has expired. It can also happen when the proportion of 1990 * dirty folios grows not through writes but through memory 1991 * pressure reclaiming all the clean cache. And in some cases, 1992 * the flushers simply cannot keep up with the allocation 1993 * rate. Nudge the flusher threads in case they are asleep. 1994 */ 1995 if (stat.nr_unqueued_dirty == nr_taken) { 1996 wakeup_flusher_threads(WB_REASON_VMSCAN); 1997 /* 1998 * For cgroupv1 dirty throttling is achieved by waking up 1999 * the kernel flusher here and later waiting on folios 2000 * which are in writeback to finish (see shrink_folio_list()). 2001 * 2002 * Flusher may not be able to issue writeback quickly 2003 * enough for cgroupv1 writeback throttling to work 2004 * on a large system. 2005 */ 2006 if (!writeback_throttling_sane(sc)) 2007 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); 2008 } 2009 2010 sc->nr.dirty += stat.nr_dirty; 2011 sc->nr.congested += stat.nr_congested; 2012 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; 2013 sc->nr.writeback += stat.nr_writeback; 2014 sc->nr.immediate += stat.nr_immediate; 2015 sc->nr.taken += nr_taken; 2016 if (file) 2017 sc->nr.file_taken += nr_taken; 2018 2019 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, 2020 nr_scanned, nr_reclaimed, &stat, sc->priority, file); 2021 return nr_reclaimed; 2022 } 2023 2024 /* 2025 * shrink_active_list() moves folios from the active LRU to the inactive LRU. 2026 * 2027 * We move them the other way if the folio is referenced by one or more 2028 * processes. 2029 * 2030 * If the folios are mostly unmapped, the processing is fast and it is 2031 * appropriate to hold lru_lock across the whole operation. But if 2032 * the folios are mapped, the processing is slow (folio_referenced()), so 2033 * we should drop lru_lock around each folio. 
It's impossible to balance
2034 * this, so instead we remove the folios from the LRU while processing them.
2035 * It is safe to rely on the active flag on non-LRU folios here
2036 * because nobody will play with that bit on a non-LRU folio.
2037 *
2038 * The downside is that we have to touch folio->_refcount of each folio.
2039 * But we had to alter folio->flags anyway.
2040 */
2041 static void shrink_active_list(unsigned long nr_to_scan,
2042 struct lruvec *lruvec,
2043 struct scan_control *sc,
2044 enum lru_list lru)
2045 {
2046 unsigned long nr_taken;
2047 unsigned long nr_scanned;
2048 unsigned long vm_flags;
2049 LIST_HEAD(l_hold); /* The folios which were snipped off */
2050 LIST_HEAD(l_active);
2051 LIST_HEAD(l_inactive);
2052 unsigned nr_deactivate, nr_activate;
2053 unsigned nr_rotated = 0;
2054 bool file = is_file_lru(lru);
2055 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2056
2057 lru_add_drain();
2058
2059 spin_lock_irq(&lruvec->lru_lock);
2060
2061 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold,
2062 &nr_scanned, sc, lru);
2063
2064 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2065
2066 if (!cgroup_reclaim(sc))
2067 __count_vm_events(PGREFILL, nr_scanned);
2068 __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
2069
2070 spin_unlock_irq(&lruvec->lru_lock);
2071
2072 while (!list_empty(&l_hold)) {
2073 struct folio *folio;
2074
2075 cond_resched();
2076 folio = lru_to_folio(&l_hold);
2077 list_del(&folio->lru);
2078
2079 if (unlikely(!folio_evictable(folio))) {
2080 folio_putback_lru(folio);
2081 continue;
2082 }
2083
2084 if (unlikely(buffer_heads_over_limit)) {
2085 if (folio_needs_release(folio) &&
2086 folio_trylock(folio)) {
2087 filemap_release_folio(folio, 0);
2088 folio_unlock(folio);
2089 }
2090 }
2091
2092 /* Referenced or rmap lock contention: rotate */
2093 if (folio_referenced(folio, 0, sc->target_mem_cgroup,
2094 &vm_flags) != 0) {
2095 /*
2096 * Identify referenced, file-backed active folios and
2097 * give them one more trip around the active list, so
2098 * that executable code gets a better chance to stay in
2099 * memory under moderate memory pressure. Anon folios
2100 * are not likely to be evicted by use-once streaming
2101 * IO, plus JVM can create lots of anon VM_EXEC folios,
2102 * so we ignore them here.
2103 */
2104 if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) {
2105 nr_rotated += folio_nr_pages(folio);
2106 list_add(&folio->lru, &l_active);
2107 continue;
2108 }
2109 }
2110
2111 folio_clear_active(folio); /* we are de-activating */
2112 folio_set_workingset(folio);
2113 list_add(&folio->lru, &l_inactive);
2114 }
2115
2116 /*
2117 * Move folios back to the lru list.
2118 */ 2119 spin_lock_irq(&lruvec->lru_lock); 2120 2121 nr_activate = move_folios_to_lru(lruvec, &l_active); 2122 nr_deactivate = move_folios_to_lru(lruvec, &l_inactive); 2123 2124 __count_vm_events(PGDEACTIVATE, nr_deactivate); 2125 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate); 2126 2127 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); 2128 spin_unlock_irq(&lruvec->lru_lock); 2129 2130 if (nr_rotated) 2131 lru_note_cost(lruvec, file, 0, nr_rotated); 2132 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, 2133 nr_deactivate, nr_rotated, sc->priority, file); 2134 } 2135 2136 static unsigned int reclaim_folio_list(struct list_head *folio_list, 2137 struct pglist_data *pgdat) 2138 { 2139 struct reclaim_stat stat; 2140 unsigned int nr_reclaimed; 2141 struct folio *folio; 2142 struct scan_control sc = { 2143 .gfp_mask = GFP_KERNEL, 2144 .may_writepage = 1, 2145 .may_unmap = 1, 2146 .may_swap = 1, 2147 .no_demotion = 1, 2148 }; 2149 2150 nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &stat, true); 2151 while (!list_empty(folio_list)) { 2152 folio = lru_to_folio(folio_list); 2153 list_del(&folio->lru); 2154 folio_putback_lru(folio); 2155 } 2156 trace_mm_vmscan_reclaim_pages(pgdat->node_id, sc.nr_scanned, nr_reclaimed, &stat); 2157 2158 return nr_reclaimed; 2159 } 2160 2161 unsigned long reclaim_pages(struct list_head *folio_list) 2162 { 2163 int nid; 2164 unsigned int nr_reclaimed = 0; 2165 LIST_HEAD(node_folio_list); 2166 unsigned int noreclaim_flag; 2167 2168 if (list_empty(folio_list)) 2169 return nr_reclaimed; 2170 2171 noreclaim_flag = memalloc_noreclaim_save(); 2172 2173 nid = folio_nid(lru_to_folio(folio_list)); 2174 do { 2175 struct folio *folio = lru_to_folio(folio_list); 2176 2177 if (nid == folio_nid(folio)) { 2178 folio_clear_active(folio); 2179 list_move(&folio->lru, &node_folio_list); 2180 continue; 2181 } 2182 2183 nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid)); 2184 nid = folio_nid(lru_to_folio(folio_list)); 2185 } while (!list_empty(folio_list)); 2186 2187 nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid)); 2188 2189 memalloc_noreclaim_restore(noreclaim_flag); 2190 2191 return nr_reclaimed; 2192 } 2193 2194 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 2195 struct lruvec *lruvec, struct scan_control *sc) 2196 { 2197 if (is_active_lru(lru)) { 2198 if (sc->may_deactivate & (1 << is_file_lru(lru))) 2199 shrink_active_list(nr_to_scan, lruvec, sc, lru); 2200 else 2201 sc->skipped_deactivate = 1; 2202 return 0; 2203 } 2204 2205 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); 2206 } 2207 2208 /* 2209 * The inactive anon list should be small enough that the VM never has 2210 * to do too much work. 2211 * 2212 * The inactive file list should be small enough to leave most memory 2213 * to the established workingset on the scan-resistant active list, 2214 * but large enough to avoid thrashing the aggregate readahead window. 2215 * 2216 * Both inactive lists should also be large enough that each inactive 2217 * folio has a chance to be referenced again before it is reclaimed. 2218 * 2219 * If that fails and refaulting is observed, the inactive list grows. 2220 * 2221 * The inactive_ratio is the target ratio of ACTIVE to INACTIVE folios 2222 * on this LRU, maintained by the pageout code. An inactive_ratio 2223 * of 3 means 3:1 or 25% of the folios are kept on the inactive list. 
2224 * 2225 * total target max 2226 * memory ratio inactive 2227 * ------------------------------------- 2228 * 10MB 1 5MB 2229 * 100MB 1 50MB 2230 * 1GB 3 250MB 2231 * 10GB 10 0.9GB 2232 * 100GB 31 3GB 2233 * 1TB 101 10GB 2234 * 10TB 320 32GB 2235 */ 2236 static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru) 2237 { 2238 enum lru_list active_lru = inactive_lru + LRU_ACTIVE; 2239 unsigned long inactive, active; 2240 unsigned long inactive_ratio; 2241 unsigned long gb; 2242 2243 inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru); 2244 active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru); 2245 2246 gb = (inactive + active) >> (30 - PAGE_SHIFT); 2247 if (gb) 2248 inactive_ratio = int_sqrt(10 * gb); 2249 else 2250 inactive_ratio = 1; 2251 2252 return inactive * inactive_ratio < active; 2253 } 2254 2255 enum scan_balance { 2256 SCAN_EQUAL, 2257 SCAN_FRACT, 2258 SCAN_ANON, 2259 SCAN_FILE, 2260 }; 2261 2262 static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc) 2263 { 2264 unsigned long file; 2265 struct lruvec *target_lruvec; 2266 2267 if (lru_gen_enabled()) 2268 return; 2269 2270 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); 2271 2272 /* 2273 * Flush the memory cgroup stats in rate-limited way as we don't need 2274 * most accurate stats here. We may switch to regular stats flushing 2275 * in the future once it is cheap enough. 2276 */ 2277 mem_cgroup_flush_stats_ratelimited(sc->target_mem_cgroup); 2278 2279 /* 2280 * Determine the scan balance between anon and file LRUs. 2281 */ 2282 spin_lock_irq(&target_lruvec->lru_lock); 2283 sc->anon_cost = target_lruvec->anon_cost; 2284 sc->file_cost = target_lruvec->file_cost; 2285 spin_unlock_irq(&target_lruvec->lru_lock); 2286 2287 /* 2288 * Target desirable inactive:active list ratios for the anon 2289 * and file LRU lists. 2290 */ 2291 if (!sc->force_deactivate) { 2292 unsigned long refaults; 2293 2294 /* 2295 * When refaults are being observed, it means a new 2296 * workingset is being established. Deactivate to get 2297 * rid of any stale active pages quickly. 2298 */ 2299 refaults = lruvec_page_state(target_lruvec, 2300 WORKINGSET_ACTIVATE_ANON); 2301 if (refaults != target_lruvec->refaults[WORKINGSET_ANON] || 2302 inactive_is_low(target_lruvec, LRU_INACTIVE_ANON)) 2303 sc->may_deactivate |= DEACTIVATE_ANON; 2304 else 2305 sc->may_deactivate &= ~DEACTIVATE_ANON; 2306 2307 refaults = lruvec_page_state(target_lruvec, 2308 WORKINGSET_ACTIVATE_FILE); 2309 if (refaults != target_lruvec->refaults[WORKINGSET_FILE] || 2310 inactive_is_low(target_lruvec, LRU_INACTIVE_FILE)) 2311 sc->may_deactivate |= DEACTIVATE_FILE; 2312 else 2313 sc->may_deactivate &= ~DEACTIVATE_FILE; 2314 } else 2315 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE; 2316 2317 /* 2318 * If we have plenty of inactive file pages that aren't 2319 * thrashing, try to reclaim those first before touching 2320 * anonymous pages. 2321 */ 2322 file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE); 2323 if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE) && 2324 !sc->no_cache_trim_mode) 2325 sc->cache_trim_mode = 1; 2326 else 2327 sc->cache_trim_mode = 0; 2328 2329 /* 2330 * Prevent the reclaimer from falling into the cache trap: as 2331 * cache pages start out inactive, every cache fault will tip 2332 * the scan balance towards the file LRU. And as the file LRU 2333 * shrinks, so does the window for rotation from references. 
2334 * This means we have a runaway feedback loop where a tiny
2335 * thrashing file LRU becomes infinitely more attractive than
2336 * anon pages. Try to detect this based on file LRU size.
2337 */
2338 if (!cgroup_reclaim(sc)) {
2339 unsigned long total_high_wmark = 0;
2340 unsigned long free, anon;
2341 int z;
2342
2343 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2344 file = node_page_state(pgdat, NR_ACTIVE_FILE) +
2345 node_page_state(pgdat, NR_INACTIVE_FILE);
2346
2347 for (z = 0; z < MAX_NR_ZONES; z++) {
2348 struct zone *zone = &pgdat->node_zones[z];
2349
2350 if (!managed_zone(zone))
2351 continue;
2352
2353 total_high_wmark += high_wmark_pages(zone);
2354 }
2355
2356 /*
2357 * Consider anon: if that's low too, this isn't a
2358 * runaway file reclaim problem, but rather just
2359 * extreme pressure. Reclaim as per usual then.
2360 */
2361 anon = node_page_state(pgdat, NR_INACTIVE_ANON);
2362
2363 sc->file_is_tiny =
2364 file + free <= total_high_wmark &&
2365 !(sc->may_deactivate & DEACTIVATE_ANON) &&
2366 anon >> sc->priority;
2367 }
2368 }
2369
2370 /*
2371 * Determine how aggressively the anon and file LRU lists should be
2372 * scanned.
2373 *
2374 * nr[0] = anon inactive folios to scan; nr[1] = anon active folios to scan
2375 * nr[2] = file inactive folios to scan; nr[3] = file active folios to scan
2376 */
2377 static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
2378 unsigned long *nr)
2379 {
2380 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2381 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2382 unsigned long anon_cost, file_cost, total_cost;
2383 int swappiness = sc_swappiness(sc, memcg);
2384 u64 fraction[ANON_AND_FILE];
2385 u64 denominator = 0; /* gcc */
2386 enum scan_balance scan_balance;
2387 unsigned long ap, fp;
2388 enum lru_list lru;
2389
2390 /* If we have no swap space, do not bother scanning anon folios. */
2391 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) {
2392 scan_balance = SCAN_FILE;
2393 goto out;
2394 }
2395
2396 /*
2397 * Global reclaim will swap to prevent OOM even with no
2398 * swappiness, but memcg users want to use this knob to
2399 * disable swapping for individual groups completely when
2400 * using the memory controller's swap limit feature would be
2401 * too expensive.
2402 */
2403 if (cgroup_reclaim(sc) && !swappiness) {
2404 scan_balance = SCAN_FILE;
2405 goto out;
2406 }
2407
2408 /*
2409 * Do not apply any pressure balancing cleverness when the
2410 * system is close to OOM; scan both anon and file equally
2411 * (unless the swappiness setting disagrees with swapping).
2412 */
2413 if (!sc->priority && swappiness) {
2414 scan_balance = SCAN_EQUAL;
2415 goto out;
2416 }
2417
2418 /*
2419 * If the system is almost out of file pages, force-scan anon.
2420 */
2421 if (sc->file_is_tiny) {
2422 scan_balance = SCAN_ANON;
2423 goto out;
2424 }
2425
2426 /*
2427 * If there is enough inactive page cache, we do not reclaim
2428 * anything from the anonymous working set right now.
2429 */
2430 if (sc->cache_trim_mode) {
2431 scan_balance = SCAN_FILE;
2432 goto out;
2433 }
2434
2435 scan_balance = SCAN_FRACT;
2436 /*
2437 * Calculate the pressure balance between anon and file pages.
2438 * 2439 * The amount of pressure we put on each LRU is inversely 2440 * proportional to the cost of reclaiming each list, as 2441 * determined by the share of pages that are refaulting, times 2442 * the relative IO cost of bringing back a swapped out 2443 * anonymous page vs reloading a filesystem page (swappiness). 2444 * 2445 * Although we limit that influence to ensure no list gets 2446 * left behind completely: at least a third of the pressure is 2447 * applied, before swappiness. 2448 * 2449 * With swappiness at 100, anon and file have equal IO cost. 2450 */ 2451 total_cost = sc->anon_cost + sc->file_cost; 2452 anon_cost = total_cost + sc->anon_cost; 2453 file_cost = total_cost + sc->file_cost; 2454 total_cost = anon_cost + file_cost; 2455 2456 ap = swappiness * (total_cost + 1); 2457 ap /= anon_cost + 1; 2458 2459 fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1); 2460 fp /= file_cost + 1; 2461 2462 fraction[0] = ap; 2463 fraction[1] = fp; 2464 denominator = ap + fp; 2465 out: 2466 for_each_evictable_lru(lru) { 2467 bool file = is_file_lru(lru); 2468 unsigned long lruvec_size; 2469 unsigned long low, min; 2470 unsigned long scan; 2471 2472 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); 2473 mem_cgroup_protection(sc->target_mem_cgroup, memcg, 2474 &min, &low); 2475 2476 if (min || low) { 2477 /* 2478 * Scale a cgroup's reclaim pressure by proportioning 2479 * its current usage to its memory.low or memory.min 2480 * setting. 2481 * 2482 * This is important, as otherwise scanning aggression 2483 * becomes extremely binary -- from nothing as we 2484 * approach the memory protection threshold, to totally 2485 * nominal as we exceed it. This results in requiring 2486 * setting extremely liberal protection thresholds. It 2487 * also means we simply get no protection at all if we 2488 * set it too low, which is not ideal. 2489 * 2490 * If there is any protection in place, we reduce scan 2491 * pressure by how much of the total memory used is 2492 * within protection thresholds. 2493 * 2494 * There is one special case: in the first reclaim pass, 2495 * we skip over all groups that are within their low 2496 * protection. If that fails to reclaim enough pages to 2497 * satisfy the reclaim goal, we come back and override 2498 * the best-effort low protection. However, we still 2499 * ideally want to honor how well-behaved groups are in 2500 * that case instead of simply punishing them all 2501 * equally. As such, we reclaim them based on how much 2502 * memory they are using, reducing the scan pressure 2503 * again by how much of the total memory used is under 2504 * hard protection. 2505 */ 2506 unsigned long cgroup_size = mem_cgroup_size(memcg); 2507 unsigned long protection; 2508 2509 /* memory.low scaling, make sure we retry before OOM */ 2510 if (!sc->memcg_low_reclaim && low > min) { 2511 protection = low; 2512 sc->memcg_low_skipped = 1; 2513 } else { 2514 protection = min; 2515 } 2516 2517 /* Avoid TOCTOU with earlier protection check */ 2518 cgroup_size = max(cgroup_size, protection); 2519 2520 scan = lruvec_size - lruvec_size * protection / 2521 (cgroup_size + 1); 2522 2523 /* 2524 * Minimally target SWAP_CLUSTER_MAX pages to keep 2525 * reclaim moving forwards, avoiding decrementing 2526 * sc->priority further than desirable. 2527 */ 2528 scan = max(scan, SWAP_CLUSTER_MAX); 2529 } else { 2530 scan = lruvec_size; 2531 } 2532 2533 scan >>= sc->priority; 2534 2535 /* 2536 * If the cgroup's already been deleted, make sure to 2537 * scrape out the remaining cache. 
2538 */ 2539 if (!scan && !mem_cgroup_online(memcg)) 2540 scan = min(lruvec_size, SWAP_CLUSTER_MAX); 2541 2542 switch (scan_balance) { 2543 case SCAN_EQUAL: 2544 /* Scan lists relative to size */ 2545 break; 2546 case SCAN_FRACT: 2547 /* 2548 * Scan types proportional to swappiness and 2549 * their relative recent reclaim efficiency. 2550 * Make sure we don't miss the last page on 2551 * the offlined memory cgroups because of a 2552 * round-off error. 2553 */ 2554 scan = mem_cgroup_online(memcg) ? 2555 div64_u64(scan * fraction[file], denominator) : 2556 DIV64_U64_ROUND_UP(scan * fraction[file], 2557 denominator); 2558 break; 2559 case SCAN_FILE: 2560 case SCAN_ANON: 2561 /* Scan one type exclusively */ 2562 if ((scan_balance == SCAN_FILE) != file) 2563 scan = 0; 2564 break; 2565 default: 2566 /* Look ma, no brain */ 2567 BUG(); 2568 } 2569 2570 nr[lru] = scan; 2571 } 2572 } 2573 2574 /* 2575 * Anonymous LRU management is a waste if there is 2576 * ultimately no way to reclaim the memory. 2577 */ 2578 static bool can_age_anon_pages(struct pglist_data *pgdat, 2579 struct scan_control *sc) 2580 { 2581 /* Aging the anon LRU is valuable if swap is present: */ 2582 if (total_swap_pages > 0) 2583 return true; 2584 2585 /* Also valuable if anon pages can be demoted: */ 2586 return can_demote(pgdat->node_id, sc); 2587 } 2588 2589 #ifdef CONFIG_LRU_GEN 2590 2591 #ifdef CONFIG_LRU_GEN_ENABLED 2592 DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS); 2593 #define get_cap(cap) static_branch_likely(&lru_gen_caps[cap]) 2594 #else 2595 DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS); 2596 #define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap]) 2597 #endif 2598 2599 static bool should_walk_mmu(void) 2600 { 2601 return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK); 2602 } 2603 2604 static bool should_clear_pmd_young(void) 2605 { 2606 return arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG); 2607 } 2608 2609 /****************************************************************************** 2610 * shorthand helpers 2611 ******************************************************************************/ 2612 2613 #define DEFINE_MAX_SEQ(lruvec) \ 2614 unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq) 2615 2616 #define DEFINE_MIN_SEQ(lruvec) \ 2617 unsigned long min_seq[ANON_AND_FILE] = { \ 2618 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \ 2619 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \ 2620 } 2621 2622 #define for_each_gen_type_zone(gen, type, zone) \ 2623 for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \ 2624 for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \ 2625 for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++) 2626 2627 #define get_memcg_gen(seq) ((seq) % MEMCG_NR_GENS) 2628 #define get_memcg_bin(bin) ((bin) % MEMCG_NR_BINS) 2629 2630 static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid) 2631 { 2632 struct pglist_data *pgdat = NODE_DATA(nid); 2633 2634 #ifdef CONFIG_MEMCG 2635 if (memcg) { 2636 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec; 2637 2638 /* see the comment in mem_cgroup_lruvec() */ 2639 if (!lruvec->pgdat) 2640 lruvec->pgdat = pgdat; 2641 2642 return lruvec; 2643 } 2644 #endif 2645 VM_WARN_ON_ONCE(!mem_cgroup_disabled()); 2646 2647 return &pgdat->__lruvec; 2648 } 2649 2650 static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc) 2651 { 2652 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 2653 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 2654 2655 if (!sc->may_swap) 2656 return 
0;
2657
2658 if (!can_demote(pgdat->node_id, sc) &&
2659 mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
2660 return 0;
2661
2662 return sc_swappiness(sc, memcg);
2663 }
2664
2665 static int get_nr_gens(struct lruvec *lruvec, int type)
2666 {
2667 return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1;
2668 }
2669
2670 static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
2671 {
2672 /* see the comment on lru_gen_folio */
2673 return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
2674 get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
2675 get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
2676 }
2677
2678 /******************************************************************************
2679 * Bloom filters
2680 ******************************************************************************/
2681
2682 /*
2683 * Bloom filters with m=1<<15, k=2 and false positive rates of ~1/5 when
2684 * n=10,000 and ~1/2 when n=20,000, where, conventionally, m is the number of
2685 * bits in a bitmap, k is the number of hash functions and n is the number of
2686 * inserted items.
2687 *
2688 * Page table walkers use one of the two filters to reduce their search space.
2689 * To get rid of non-leaf entries that no longer have enough leaf entries, the
2690 * aging uses the double-buffering technique to flip to the other filter each
2691 * time it produces a new generation. For non-leaf entries that have enough
2692 * leaf entries, the aging carries them over to the next generation in
2693 * walk_pmd_range(); the eviction also reports them when walking the rmap
2694 * in lru_gen_look_around().
2695 *
2696 * For future optimizations:
2697 * 1. It's not necessary to keep both filters all the time. The spare one can be
2698 * freed after the RCU grace period and reallocated if needed again.
2699 * 2. When reallocating, it's worth scaling its size according to the number
2700 * of inserted entries in the other filter, to reduce the memory overhead on
2701 * small systems and false positives on large systems.
2702 * 3. Jenkins' hash function is an alternative to Knuth's.
2703 */ 2704 #define BLOOM_FILTER_SHIFT 15 2705 2706 static inline int filter_gen_from_seq(unsigned long seq) 2707 { 2708 return seq % NR_BLOOM_FILTERS; 2709 } 2710 2711 static void get_item_key(void *item, int *key) 2712 { 2713 u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2); 2714 2715 BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32)); 2716 2717 key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1); 2718 key[1] = hash >> BLOOM_FILTER_SHIFT; 2719 } 2720 2721 static bool test_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq, 2722 void *item) 2723 { 2724 int key[2]; 2725 unsigned long *filter; 2726 int gen = filter_gen_from_seq(seq); 2727 2728 filter = READ_ONCE(mm_state->filters[gen]); 2729 if (!filter) 2730 return true; 2731 2732 get_item_key(item, key); 2733 2734 return test_bit(key[0], filter) && test_bit(key[1], filter); 2735 } 2736 2737 static void update_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq, 2738 void *item) 2739 { 2740 int key[2]; 2741 unsigned long *filter; 2742 int gen = filter_gen_from_seq(seq); 2743 2744 filter = READ_ONCE(mm_state->filters[gen]); 2745 if (!filter) 2746 return; 2747 2748 get_item_key(item, key); 2749 2750 if (!test_bit(key[0], filter)) 2751 set_bit(key[0], filter); 2752 if (!test_bit(key[1], filter)) 2753 set_bit(key[1], filter); 2754 } 2755 2756 static void reset_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq) 2757 { 2758 unsigned long *filter; 2759 int gen = filter_gen_from_seq(seq); 2760 2761 filter = mm_state->filters[gen]; 2762 if (filter) { 2763 bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT)); 2764 return; 2765 } 2766 2767 filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT), 2768 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); 2769 WRITE_ONCE(mm_state->filters[gen], filter); 2770 } 2771 2772 /****************************************************************************** 2773 * mm_struct list 2774 ******************************************************************************/ 2775 2776 #ifdef CONFIG_LRU_GEN_WALKS_MMU 2777 2778 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg) 2779 { 2780 static struct lru_gen_mm_list mm_list = { 2781 .fifo = LIST_HEAD_INIT(mm_list.fifo), 2782 .lock = __SPIN_LOCK_UNLOCKED(mm_list.lock), 2783 }; 2784 2785 #ifdef CONFIG_MEMCG 2786 if (memcg) 2787 return &memcg->mm_list; 2788 #endif 2789 VM_WARN_ON_ONCE(!mem_cgroup_disabled()); 2790 2791 return &mm_list; 2792 } 2793 2794 static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec) 2795 { 2796 return &lruvec->mm_state; 2797 } 2798 2799 static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk) 2800 { 2801 int key; 2802 struct mm_struct *mm; 2803 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); 2804 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); 2805 2806 mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); 2807 key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap); 2808 2809 if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap)) 2810 return NULL; 2811 2812 clear_bit(key, &mm->lru_gen.bitmap); 2813 2814 return mmget_not_zero(mm) ? 
mm : NULL; 2815 } 2816 2817 void lru_gen_add_mm(struct mm_struct *mm) 2818 { 2819 int nid; 2820 struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm); 2821 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 2822 2823 VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list)); 2824 #ifdef CONFIG_MEMCG 2825 VM_WARN_ON_ONCE(mm->lru_gen.memcg); 2826 mm->lru_gen.memcg = memcg; 2827 #endif 2828 spin_lock(&mm_list->lock); 2829 2830 for_each_node_state(nid, N_MEMORY) { 2831 struct lruvec *lruvec = get_lruvec(memcg, nid); 2832 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 2833 2834 /* the first addition since the last iteration */ 2835 if (mm_state->tail == &mm_list->fifo) 2836 mm_state->tail = &mm->lru_gen.list; 2837 } 2838 2839 list_add_tail(&mm->lru_gen.list, &mm_list->fifo); 2840 2841 spin_unlock(&mm_list->lock); 2842 } 2843 2844 void lru_gen_del_mm(struct mm_struct *mm) 2845 { 2846 int nid; 2847 struct lru_gen_mm_list *mm_list; 2848 struct mem_cgroup *memcg = NULL; 2849 2850 if (list_empty(&mm->lru_gen.list)) 2851 return; 2852 2853 #ifdef CONFIG_MEMCG 2854 memcg = mm->lru_gen.memcg; 2855 #endif 2856 mm_list = get_mm_list(memcg); 2857 2858 spin_lock(&mm_list->lock); 2859 2860 for_each_node(nid) { 2861 struct lruvec *lruvec = get_lruvec(memcg, nid); 2862 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 2863 2864 /* where the current iteration continues after */ 2865 if (mm_state->head == &mm->lru_gen.list) 2866 mm_state->head = mm_state->head->prev; 2867 2868 /* where the last iteration ended before */ 2869 if (mm_state->tail == &mm->lru_gen.list) 2870 mm_state->tail = mm_state->tail->next; 2871 } 2872 2873 list_del_init(&mm->lru_gen.list); 2874 2875 spin_unlock(&mm_list->lock); 2876 2877 #ifdef CONFIG_MEMCG 2878 mem_cgroup_put(mm->lru_gen.memcg); 2879 mm->lru_gen.memcg = NULL; 2880 #endif 2881 } 2882 2883 #ifdef CONFIG_MEMCG 2884 void lru_gen_migrate_mm(struct mm_struct *mm) 2885 { 2886 struct mem_cgroup *memcg; 2887 struct task_struct *task = rcu_dereference_protected(mm->owner, true); 2888 2889 VM_WARN_ON_ONCE(task->mm != mm); 2890 lockdep_assert_held(&task->alloc_lock); 2891 2892 /* for mm_update_next_owner() */ 2893 if (mem_cgroup_disabled()) 2894 return; 2895 2896 /* migration can happen before addition */ 2897 if (!mm->lru_gen.memcg) 2898 return; 2899 2900 rcu_read_lock(); 2901 memcg = mem_cgroup_from_task(task); 2902 rcu_read_unlock(); 2903 if (memcg == mm->lru_gen.memcg) 2904 return; 2905 2906 VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list)); 2907 2908 lru_gen_del_mm(mm); 2909 lru_gen_add_mm(mm); 2910 } 2911 #endif 2912 2913 #else /* !CONFIG_LRU_GEN_WALKS_MMU */ 2914 2915 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg) 2916 { 2917 return NULL; 2918 } 2919 2920 static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec) 2921 { 2922 return NULL; 2923 } 2924 2925 static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk) 2926 { 2927 return NULL; 2928 } 2929 2930 #endif 2931 2932 static void reset_mm_stats(struct lru_gen_mm_walk *walk, bool last) 2933 { 2934 int i; 2935 int hist; 2936 struct lruvec *lruvec = walk->lruvec; 2937 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 2938 2939 lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock); 2940 2941 hist = lru_hist_from_seq(walk->seq); 2942 2943 for (i = 0; i < NR_MM_STATS; i++) { 2944 WRITE_ONCE(mm_state->stats[hist][i], 2945 mm_state->stats[hist][i] + walk->mm_stats[i]); 2946 walk->mm_stats[i] = 0; 2947 } 2948 2949 if (NR_HIST_GENS > 1 && last) { 2950 hist = 
lru_hist_from_seq(walk->seq + 1); 2951 2952 for (i = 0; i < NR_MM_STATS; i++) 2953 WRITE_ONCE(mm_state->stats[hist][i], 0); 2954 } 2955 } 2956 2957 static bool iterate_mm_list(struct lru_gen_mm_walk *walk, struct mm_struct **iter) 2958 { 2959 bool first = false; 2960 bool last = false; 2961 struct mm_struct *mm = NULL; 2962 struct lruvec *lruvec = walk->lruvec; 2963 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 2964 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 2965 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 2966 2967 /* 2968 * mm_state->seq is incremented after each iteration of mm_list. There 2969 * are three interesting cases for this page table walker: 2970 * 1. It tries to start a new iteration with a stale max_seq: there is 2971 * nothing left to do. 2972 * 2. It started the next iteration: it needs to reset the Bloom filter 2973 * so that a fresh set of PTE tables can be recorded. 2974 * 3. It ended the current iteration: it needs to reset the mm stats 2975 * counters and tell its caller to increment max_seq. 2976 */ 2977 spin_lock(&mm_list->lock); 2978 2979 VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->seq); 2980 2981 if (walk->seq <= mm_state->seq) 2982 goto done; 2983 2984 if (!mm_state->head) 2985 mm_state->head = &mm_list->fifo; 2986 2987 if (mm_state->head == &mm_list->fifo) 2988 first = true; 2989 2990 do { 2991 mm_state->head = mm_state->head->next; 2992 if (mm_state->head == &mm_list->fifo) { 2993 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); 2994 last = true; 2995 break; 2996 } 2997 2998 /* force scan for those added after the last iteration */ 2999 if (!mm_state->tail || mm_state->tail == mm_state->head) { 3000 mm_state->tail = mm_state->head->next; 3001 walk->force_scan = true; 3002 } 3003 } while (!(mm = get_next_mm(walk))); 3004 done: 3005 if (*iter || last) 3006 reset_mm_stats(walk, last); 3007 3008 spin_unlock(&mm_list->lock); 3009 3010 if (mm && first) 3011 reset_bloom_filter(mm_state, walk->seq + 1); 3012 3013 if (*iter) 3014 mmput_async(*iter); 3015 3016 *iter = mm; 3017 3018 return last; 3019 } 3020 3021 static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long seq) 3022 { 3023 bool success = false; 3024 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 3025 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 3026 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 3027 3028 spin_lock(&mm_list->lock); 3029 3030 VM_WARN_ON_ONCE(mm_state->seq + 1 < seq); 3031 3032 if (seq > mm_state->seq) { 3033 mm_state->head = NULL; 3034 mm_state->tail = NULL; 3035 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); 3036 success = true; 3037 } 3038 3039 spin_unlock(&mm_list->lock); 3040 3041 return success; 3042 } 3043 3044 /****************************************************************************** 3045 * PID controller 3046 ******************************************************************************/ 3047 3048 /* 3049 * A feedback loop based on Proportional-Integral-Derivative (PID) controller. 3050 * 3051 * The P term is refaulted/(evicted+protected) from a tier in the generation 3052 * currently being evicted; the I term is the exponential moving average of the 3053 * P term over the generations previously evicted, using the smoothing factor 3054 * 1/2; the D term isn't supported. 3055 * 3056 * The setpoint (SP) is always the first tier of one type; the process variable 3057 * (PV) is either any tier of the other type or any other tier of the same 3058 * type. 
3059 * 3060 * The error is the difference between the SP and the PV; the correction is to 3061 * turn off protection when SP>PV or turn on protection when SP<PV. 3062 * 3063 * For future optimizations: 3064 * 1. The D term may discount the other two terms over time so that long-lived 3065 * generations can resist stale information. 3066 */ 3067 struct ctrl_pos { 3068 unsigned long refaulted; 3069 unsigned long total; 3070 int gain; 3071 }; 3072 3073 static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain, 3074 struct ctrl_pos *pos) 3075 { 3076 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3077 int hist = lru_hist_from_seq(lrugen->min_seq[type]); 3078 3079 pos->refaulted = lrugen->avg_refaulted[type][tier] + 3080 atomic_long_read(&lrugen->refaulted[hist][type][tier]); 3081 pos->total = lrugen->avg_total[type][tier] + 3082 atomic_long_read(&lrugen->evicted[hist][type][tier]); 3083 if (tier) 3084 pos->total += lrugen->protected[hist][type][tier - 1]; 3085 pos->gain = gain; 3086 } 3087 3088 static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover) 3089 { 3090 int hist, tier; 3091 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3092 bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1; 3093 unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1; 3094 3095 lockdep_assert_held(&lruvec->lru_lock); 3096 3097 if (!carryover && !clear) 3098 return; 3099 3100 hist = lru_hist_from_seq(seq); 3101 3102 for (tier = 0; tier < MAX_NR_TIERS; tier++) { 3103 if (carryover) { 3104 unsigned long sum; 3105 3106 sum = lrugen->avg_refaulted[type][tier] + 3107 atomic_long_read(&lrugen->refaulted[hist][type][tier]); 3108 WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2); 3109 3110 sum = lrugen->avg_total[type][tier] + 3111 atomic_long_read(&lrugen->evicted[hist][type][tier]); 3112 if (tier) 3113 sum += lrugen->protected[hist][type][tier - 1]; 3114 WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2); 3115 } 3116 3117 if (clear) { 3118 atomic_long_set(&lrugen->refaulted[hist][type][tier], 0); 3119 atomic_long_set(&lrugen->evicted[hist][type][tier], 0); 3120 if (tier) 3121 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0); 3122 } 3123 } 3124 } 3125 3126 static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv) 3127 { 3128 /* 3129 * Return true if the PV has a limited number of refaults or a lower 3130 * refaulted/total than the SP. 3131 */ 3132 return pv->refaulted < MIN_LRU_BATCH || 3133 pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <= 3134 (sp->refaulted + 1) * pv->total * pv->gain; 3135 } 3136 3137 /****************************************************************************** 3138 * the aging 3139 ******************************************************************************/ 3140 3141 /* promote pages accessed through page tables */ 3142 static int folio_update_gen(struct folio *folio, int gen) 3143 { 3144 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); 3145 3146 VM_WARN_ON_ONCE(gen >= MAX_NR_GENS); 3147 3148 do { 3149 /* lru_gen_del_folio() has isolated this page? 
*/ 3150 if (!(old_flags & LRU_GEN_MASK)) { 3151 /* for shrink_folio_list() */ 3152 new_flags = old_flags | BIT(PG_referenced); 3153 continue; 3154 } 3155 3156 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); 3157 new_flags |= (gen + 1UL) << LRU_GEN_PGOFF; 3158 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); 3159 3160 return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; 3161 } 3162 3163 /* protect pages accessed multiple times through file descriptors */ 3164 static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming) 3165 { 3166 int type = folio_is_file_lru(folio); 3167 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3168 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); 3169 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); 3170 3171 VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio); 3172 3173 do { 3174 new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; 3175 /* folio_update_gen() has promoted this page? */ 3176 if (new_gen >= 0 && new_gen != old_gen) 3177 return new_gen; 3178 3179 new_gen = (old_gen + 1) % MAX_NR_GENS; 3180 3181 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); 3182 new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF; 3183 /* for folio_end_writeback() */ 3184 if (reclaiming) 3185 new_flags |= BIT(PG_reclaim); 3186 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); 3187 3188 lru_gen_update_size(lruvec, folio, old_gen, new_gen); 3189 3190 return new_gen; 3191 } 3192 3193 static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio, 3194 int old_gen, int new_gen) 3195 { 3196 int type = folio_is_file_lru(folio); 3197 int zone = folio_zonenum(folio); 3198 int delta = folio_nr_pages(folio); 3199 3200 VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS); 3201 VM_WARN_ON_ONCE(new_gen >= MAX_NR_GENS); 3202 3203 walk->batched++; 3204 3205 walk->nr_pages[old_gen][type][zone] -= delta; 3206 walk->nr_pages[new_gen][type][zone] += delta; 3207 } 3208 3209 static void reset_batch_size(struct lru_gen_mm_walk *walk) 3210 { 3211 int gen, type, zone; 3212 struct lruvec *lruvec = walk->lruvec; 3213 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3214 3215 walk->batched = 0; 3216 3217 for_each_gen_type_zone(gen, type, zone) { 3218 enum lru_list lru = type * LRU_INACTIVE_FILE; 3219 int delta = walk->nr_pages[gen][type][zone]; 3220 3221 if (!delta) 3222 continue; 3223 3224 walk->nr_pages[gen][type][zone] = 0; 3225 WRITE_ONCE(lrugen->nr_pages[gen][type][zone], 3226 lrugen->nr_pages[gen][type][zone] + delta); 3227 3228 if (lru_gen_is_active(lruvec, gen)) 3229 lru += LRU_ACTIVE; 3230 __update_lru_size(lruvec, lru, zone, delta); 3231 } 3232 } 3233 3234 static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *args) 3235 { 3236 struct address_space *mapping; 3237 struct vm_area_struct *vma = args->vma; 3238 struct lru_gen_mm_walk *walk = args->private; 3239 3240 if (!vma_is_accessible(vma)) 3241 return true; 3242 3243 if (is_vm_hugetlb_page(vma)) 3244 return true; 3245 3246 if (!vma_has_recency(vma)) 3247 return true; 3248 3249 if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) 3250 return true; 3251 3252 if (vma == get_gate_vma(vma->vm_mm)) 3253 return true; 3254 3255 if (vma_is_anonymous(vma)) 3256 return !walk->can_swap; 3257 3258 if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping)) 3259 return true; 3260 3261 mapping = vma->vm_file->f_mapping; 3262 if (mapping_unevictable(mapping)) 3263 return true; 3264 3265 if 
(shmem_mapping(mapping)) 3266 return !walk->can_swap; 3267 3268 /* to exclude special mappings like dax, etc. */ 3269 return !mapping->a_ops->read_folio; 3270 } 3271 3272 /* 3273 * Some userspace memory allocators map many single-page VMAs. Instead of 3274 * returning back to the PGD table for each of such VMAs, finish an entire PMD 3275 * table to reduce zigzags and improve cache performance. 3276 */ 3277 static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args, 3278 unsigned long *vm_start, unsigned long *vm_end) 3279 { 3280 unsigned long start = round_up(*vm_end, size); 3281 unsigned long end = (start | ~mask) + 1; 3282 VMA_ITERATOR(vmi, args->mm, start); 3283 3284 VM_WARN_ON_ONCE(mask & size); 3285 VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask)); 3286 3287 for_each_vma(vmi, args->vma) { 3288 if (end && end <= args->vma->vm_start) 3289 return false; 3290 3291 if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args)) 3292 continue; 3293 3294 *vm_start = max(start, args->vma->vm_start); 3295 *vm_end = min(end - 1, args->vma->vm_end - 1) + 1; 3296 3297 return true; 3298 } 3299 3300 return false; 3301 } 3302 3303 static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr, 3304 struct pglist_data *pgdat) 3305 { 3306 unsigned long pfn = pte_pfn(pte); 3307 3308 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); 3309 3310 if (!pte_present(pte) || is_zero_pfn(pfn)) 3311 return -1; 3312 3313 if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte))) 3314 return -1; 3315 3316 if (!pte_young(pte) && !mm_has_notifiers(vma->vm_mm)) 3317 return -1; 3318 3319 if (WARN_ON_ONCE(!pfn_valid(pfn))) 3320 return -1; 3321 3322 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) 3323 return -1; 3324 3325 return pfn; 3326 } 3327 3328 static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr, 3329 struct pglist_data *pgdat) 3330 { 3331 unsigned long pfn = pmd_pfn(pmd); 3332 3333 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); 3334 3335 if (!pmd_present(pmd) || is_huge_zero_pmd(pmd)) 3336 return -1; 3337 3338 if (WARN_ON_ONCE(pmd_devmap(pmd))) 3339 return -1; 3340 3341 if (!pmd_young(pmd) && !mm_has_notifiers(vma->vm_mm)) 3342 return -1; 3343 3344 if (WARN_ON_ONCE(!pfn_valid(pfn))) 3345 return -1; 3346 3347 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) 3348 return -1; 3349 3350 return pfn; 3351 } 3352 3353 static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg, 3354 struct pglist_data *pgdat, bool can_swap) 3355 { 3356 struct folio *folio; 3357 3358 folio = pfn_folio(pfn); 3359 if (folio_nid(folio) != pgdat->node_id) 3360 return NULL; 3361 3362 if (folio_memcg(folio) != memcg) 3363 return NULL; 3364 3365 /* file VMAs can contain anon pages from COW */ 3366 if (!folio_is_file_lru(folio) && !can_swap) 3367 return NULL; 3368 3369 return folio; 3370 } 3371 3372 static bool suitable_to_scan(int total, int young) 3373 { 3374 int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8); 3375 3376 /* suitable if the average number of young PTEs per cacheline is >=1 */ 3377 return young * n >= total; 3378 } 3379 3380 static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end, 3381 struct mm_walk *args) 3382 { 3383 int i; 3384 pte_t *pte; 3385 spinlock_t *ptl; 3386 unsigned long addr; 3387 int total = 0; 3388 int young = 0; 3389 struct lru_gen_mm_walk *walk = args->private; 3390 struct mem_cgroup *memcg = 
lruvec_memcg(walk->lruvec); 3391 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); 3392 DEFINE_MAX_SEQ(walk->lruvec); 3393 int old_gen, new_gen = lru_gen_from_seq(max_seq); 3394 pmd_t pmdval; 3395 3396 pte = pte_offset_map_rw_nolock(args->mm, pmd, start & PMD_MASK, &pmdval, 3397 &ptl); 3398 if (!pte) 3399 return false; 3400 if (!spin_trylock(ptl)) { 3401 pte_unmap(pte); 3402 return false; 3403 } 3404 3405 if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) { 3406 pte_unmap_unlock(pte, ptl); 3407 return false; 3408 } 3409 3410 arch_enter_lazy_mmu_mode(); 3411 restart: 3412 for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) { 3413 unsigned long pfn; 3414 struct folio *folio; 3415 pte_t ptent = ptep_get(pte + i); 3416 3417 total++; 3418 walk->mm_stats[MM_LEAF_TOTAL]++; 3419 3420 pfn = get_pte_pfn(ptent, args->vma, addr, pgdat); 3421 if (pfn == -1) 3422 continue; 3423 3424 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); 3425 if (!folio) 3426 continue; 3427 3428 if (!ptep_clear_young_notify(args->vma, addr, pte + i)) 3429 continue; 3430 3431 young++; 3432 walk->mm_stats[MM_LEAF_YOUNG]++; 3433 3434 if (pte_dirty(ptent) && !folio_test_dirty(folio) && 3435 !(folio_test_anon(folio) && folio_test_swapbacked(folio) && 3436 !folio_test_swapcache(folio))) 3437 folio_mark_dirty(folio); 3438 3439 old_gen = folio_update_gen(folio, new_gen); 3440 if (old_gen >= 0 && old_gen != new_gen) 3441 update_batch_size(walk, folio, old_gen, new_gen); 3442 } 3443 3444 if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end)) 3445 goto restart; 3446 3447 arch_leave_lazy_mmu_mode(); 3448 pte_unmap_unlock(pte, ptl); 3449 3450 return suitable_to_scan(total, young); 3451 } 3452 3453 static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma, 3454 struct mm_walk *args, unsigned long *bitmap, unsigned long *first) 3455 { 3456 int i; 3457 pmd_t *pmd; 3458 spinlock_t *ptl; 3459 struct lru_gen_mm_walk *walk = args->private; 3460 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); 3461 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); 3462 DEFINE_MAX_SEQ(walk->lruvec); 3463 int old_gen, new_gen = lru_gen_from_seq(max_seq); 3464 3465 VM_WARN_ON_ONCE(pud_leaf(*pud)); 3466 3467 /* try to batch at most 1+MIN_LRU_BATCH+1 entries */ 3468 if (*first == -1) { 3469 *first = addr; 3470 bitmap_zero(bitmap, MIN_LRU_BATCH); 3471 return; 3472 } 3473 3474 i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first); 3475 if (i && i <= MIN_LRU_BATCH) { 3476 __set_bit(i - 1, bitmap); 3477 return; 3478 } 3479 3480 pmd = pmd_offset(pud, *first); 3481 3482 ptl = pmd_lockptr(args->mm, pmd); 3483 if (!spin_trylock(ptl)) 3484 goto done; 3485 3486 arch_enter_lazy_mmu_mode(); 3487 3488 do { 3489 unsigned long pfn; 3490 struct folio *folio; 3491 3492 /* don't round down the first address */ 3493 addr = i ? 
(*first & PMD_MASK) + i * PMD_SIZE : *first; 3494 3495 if (!pmd_present(pmd[i])) 3496 goto next; 3497 3498 if (!pmd_trans_huge(pmd[i])) { 3499 if (!walk->force_scan && should_clear_pmd_young() && 3500 !mm_has_notifiers(args->mm)) 3501 pmdp_test_and_clear_young(vma, addr, pmd + i); 3502 goto next; 3503 } 3504 3505 pfn = get_pmd_pfn(pmd[i], vma, addr, pgdat); 3506 if (pfn == -1) 3507 goto next; 3508 3509 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); 3510 if (!folio) 3511 goto next; 3512 3513 if (!pmdp_clear_young_notify(vma, addr, pmd + i)) 3514 goto next; 3515 3516 walk->mm_stats[MM_LEAF_YOUNG]++; 3517 3518 if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) && 3519 !(folio_test_anon(folio) && folio_test_swapbacked(folio) && 3520 !folio_test_swapcache(folio))) 3521 folio_mark_dirty(folio); 3522 3523 old_gen = folio_update_gen(folio, new_gen); 3524 if (old_gen >= 0 && old_gen != new_gen) 3525 update_batch_size(walk, folio, old_gen, new_gen); 3526 next: 3527 i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1; 3528 } while (i <= MIN_LRU_BATCH); 3529 3530 arch_leave_lazy_mmu_mode(); 3531 spin_unlock(ptl); 3532 done: 3533 *first = -1; 3534 } 3535 3536 static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end, 3537 struct mm_walk *args) 3538 { 3539 int i; 3540 pmd_t *pmd; 3541 unsigned long next; 3542 unsigned long addr; 3543 struct vm_area_struct *vma; 3544 DECLARE_BITMAP(bitmap, MIN_LRU_BATCH); 3545 unsigned long first = -1; 3546 struct lru_gen_mm_walk *walk = args->private; 3547 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); 3548 3549 VM_WARN_ON_ONCE(pud_leaf(*pud)); 3550 3551 /* 3552 * Finish an entire PMD in two passes: the first only reaches to PTE 3553 * tables to avoid taking the PMD lock; the second, if necessary, takes 3554 * the PMD lock to clear the accessed bit in PMD entries. 
3555 */ 3556 pmd = pmd_offset(pud, start & PUD_MASK); 3557 restart: 3558 /* walk_pte_range() may call get_next_vma() */ 3559 vma = args->vma; 3560 for (i = pmd_index(start), addr = start; addr != end; i++, addr = next) { 3561 pmd_t val = pmdp_get_lockless(pmd + i); 3562 3563 next = pmd_addr_end(addr, end); 3564 3565 if (!pmd_present(val) || is_huge_zero_pmd(val)) { 3566 walk->mm_stats[MM_LEAF_TOTAL]++; 3567 continue; 3568 } 3569 3570 if (pmd_trans_huge(val)) { 3571 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); 3572 unsigned long pfn = get_pmd_pfn(val, vma, addr, pgdat); 3573 3574 walk->mm_stats[MM_LEAF_TOTAL]++; 3575 3576 if (pfn != -1) 3577 walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first); 3578 continue; 3579 } 3580 3581 if (!walk->force_scan && should_clear_pmd_young() && 3582 !mm_has_notifiers(args->mm)) { 3583 if (!pmd_young(val)) 3584 continue; 3585 3586 walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first); 3587 } 3588 3589 if (!walk->force_scan && !test_bloom_filter(mm_state, walk->seq, pmd + i)) 3590 continue; 3591 3592 walk->mm_stats[MM_NONLEAF_FOUND]++; 3593 3594 if (!walk_pte_range(&val, addr, next, args)) 3595 continue; 3596 3597 walk->mm_stats[MM_NONLEAF_ADDED]++; 3598 3599 /* carry over to the next generation */ 3600 update_bloom_filter(mm_state, walk->seq + 1, pmd + i); 3601 } 3602 3603 walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first); 3604 3605 if (i < PTRS_PER_PMD && get_next_vma(PUD_MASK, PMD_SIZE, args, &start, &end)) 3606 goto restart; 3607 } 3608 3609 static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end, 3610 struct mm_walk *args) 3611 { 3612 int i; 3613 pud_t *pud; 3614 unsigned long addr; 3615 unsigned long next; 3616 struct lru_gen_mm_walk *walk = args->private; 3617 3618 VM_WARN_ON_ONCE(p4d_leaf(*p4d)); 3619 3620 pud = pud_offset(p4d, start & P4D_MASK); 3621 restart: 3622 for (i = pud_index(start), addr = start; addr != end; i++, addr = next) { 3623 pud_t val = READ_ONCE(pud[i]); 3624 3625 next = pud_addr_end(addr, end); 3626 3627 if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val))) 3628 continue; 3629 3630 walk_pmd_range(&val, addr, next, args); 3631 3632 if (need_resched() || walk->batched >= MAX_LRU_BATCH) { 3633 end = (addr | ~PUD_MASK) + 1; 3634 goto done; 3635 } 3636 } 3637 3638 if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end)) 3639 goto restart; 3640 3641 end = round_up(end, P4D_SIZE); 3642 done: 3643 if (!end || !args->vma) 3644 return 1; 3645 3646 walk->next_addr = max(end, args->vma->vm_start); 3647 3648 return -EAGAIN; 3649 } 3650 3651 static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk) 3652 { 3653 static const struct mm_walk_ops mm_walk_ops = { 3654 .test_walk = should_skip_vma, 3655 .p4d_entry = walk_pud_range, 3656 .walk_lock = PGWALK_RDLOCK, 3657 }; 3658 int err; 3659 struct lruvec *lruvec = walk->lruvec; 3660 3661 walk->next_addr = FIRST_USER_ADDRESS; 3662 3663 do { 3664 DEFINE_MAX_SEQ(lruvec); 3665 3666 err = -EBUSY; 3667 3668 /* another thread might have called inc_max_seq() */ 3669 if (walk->seq != max_seq) 3670 break; 3671 3672 /* the caller might be holding the lock for write */ 3673 if (mmap_read_trylock(mm)) { 3674 err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk); 3675 3676 mmap_read_unlock(mm); 3677 } 3678 3679 if (walk->batched) { 3680 spin_lock_irq(&lruvec->lru_lock); 3681 reset_batch_size(walk); 3682 spin_unlock_irq(&lruvec->lru_lock); 3683 } 3684 3685 cond_resched(); 3686 } while (err == -EAGAIN); 3687 } 
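/*
 * A rough sketch of how the pieces above fit together during the aging's
 * page table walk (simplified; see try_to_inc_max_seq() below for the
 * actual driver loop; all names refer to functions defined in this file):
 *
 *   try_to_inc_max_seq()
 *     iterate_mm_list(walk, &mm)    // pick the next mm_struct to walk
 *       get_next_mm()               // honor the per-node bit in mm->lru_gen.bitmap
 *     walk_mm(mm, walk)
 *       walk_page_range()           // p4d_entry == walk_pud_range()
 *         walk_pud_range() -> walk_pmd_range() -> walk_pte_range()
 *           folio_update_gen()      // promote folios with young PTEs
 *           update_batch_size()     // batch per-generation LRU size deltas
 *       reset_batch_size()          // flush the deltas under lru_lock
 *     inc_max_seq()                 // finally create the new generation
 */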
3688 3689 static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat, bool force_alloc) 3690 { 3691 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; 3692 3693 if (pgdat && current_is_kswapd()) { 3694 VM_WARN_ON_ONCE(walk); 3695 3696 walk = &pgdat->mm_walk; 3697 } else if (!walk && force_alloc) { 3698 VM_WARN_ON_ONCE(current_is_kswapd()); 3699 3700 walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); 3701 } 3702 3703 current->reclaim_state->mm_walk = walk; 3704 3705 return walk; 3706 } 3707 3708 static void clear_mm_walk(void) 3709 { 3710 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; 3711 3712 VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages))); 3713 VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats))); 3714 3715 current->reclaim_state->mm_walk = NULL; 3716 3717 if (!current_is_kswapd()) 3718 kfree(walk); 3719 } 3720 3721 static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap) 3722 { 3723 int zone; 3724 int remaining = MAX_LRU_BATCH; 3725 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3726 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); 3727 3728 if (type == LRU_GEN_ANON && !can_swap) 3729 goto done; 3730 3731 /* prevent cold/hot inversion if force_scan is true */ 3732 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 3733 struct list_head *head = &lrugen->folios[old_gen][type][zone]; 3734 3735 while (!list_empty(head)) { 3736 struct folio *folio = lru_to_folio(head); 3737 3738 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); 3739 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); 3740 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); 3741 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); 3742 3743 new_gen = folio_inc_gen(lruvec, folio, false); 3744 list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]); 3745 3746 if (!--remaining) 3747 return false; 3748 } 3749 } 3750 done: 3751 reset_ctrl_pos(lruvec, type, true); 3752 WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1); 3753 3754 return true; 3755 } 3756 3757 static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap) 3758 { 3759 int gen, type, zone; 3760 bool success = false; 3761 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3762 DEFINE_MIN_SEQ(lruvec); 3763 3764 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); 3765 3766 /* find the oldest populated generation */ 3767 for (type = !can_swap; type < ANON_AND_FILE; type++) { 3768 while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) { 3769 gen = lru_gen_from_seq(min_seq[type]); 3770 3771 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 3772 if (!list_empty(&lrugen->folios[gen][type][zone])) 3773 goto next; 3774 } 3775 3776 min_seq[type]++; 3777 } 3778 next: 3779 ; 3780 } 3781 3782 /* see the comment on lru_gen_folio */ 3783 if (can_swap) { 3784 min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]); 3785 min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]); 3786 } 3787 3788 for (type = !can_swap; type < ANON_AND_FILE; type++) { 3789 if (min_seq[type] == lrugen->min_seq[type]) 3790 continue; 3791 3792 reset_ctrl_pos(lruvec, type, true); 3793 WRITE_ONCE(lrugen->min_seq[type], min_seq[type]); 3794 success = true; 3795 } 3796 3797 return success; 3798 } 3799 3800 static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq, 3801 bool can_swap, bool force_scan) 3802 { 3803 bool success; 3804 int prev, next; 3805 int type, zone; 3806 
struct lru_gen_folio *lrugen = &lruvec->lrugen; 3807 restart: 3808 if (seq < READ_ONCE(lrugen->max_seq)) 3809 return false; 3810 3811 spin_lock_irq(&lruvec->lru_lock); 3812 3813 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); 3814 3815 success = seq == lrugen->max_seq; 3816 if (!success) 3817 goto unlock; 3818 3819 for (type = ANON_AND_FILE - 1; type >= 0; type--) { 3820 if (get_nr_gens(lruvec, type) != MAX_NR_GENS) 3821 continue; 3822 3823 VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap)); 3824 3825 if (inc_min_seq(lruvec, type, can_swap)) 3826 continue; 3827 3828 spin_unlock_irq(&lruvec->lru_lock); 3829 cond_resched(); 3830 goto restart; 3831 } 3832 3833 /* 3834 * Update the active/inactive LRU sizes for compatibility. Both sides of 3835 * the current max_seq need to be covered, since max_seq+1 can overlap 3836 * with min_seq[LRU_GEN_ANON] if swapping is constrained. And if they do 3837 * overlap, cold/hot inversion happens. 3838 */ 3839 prev = lru_gen_from_seq(lrugen->max_seq - 1); 3840 next = lru_gen_from_seq(lrugen->max_seq + 1); 3841 3842 for (type = 0; type < ANON_AND_FILE; type++) { 3843 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 3844 enum lru_list lru = type * LRU_INACTIVE_FILE; 3845 long delta = lrugen->nr_pages[prev][type][zone] - 3846 lrugen->nr_pages[next][type][zone]; 3847 3848 if (!delta) 3849 continue; 3850 3851 __update_lru_size(lruvec, lru, zone, delta); 3852 __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta); 3853 } 3854 } 3855 3856 for (type = 0; type < ANON_AND_FILE; type++) 3857 reset_ctrl_pos(lruvec, type, false); 3858 3859 WRITE_ONCE(lrugen->timestamps[next], jiffies); 3860 /* make sure preceding modifications appear */ 3861 smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1); 3862 unlock: 3863 spin_unlock_irq(&lruvec->lru_lock); 3864 3865 return success; 3866 } 3867 3868 static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq, 3869 bool can_swap, bool force_scan) 3870 { 3871 bool success; 3872 struct lru_gen_mm_walk *walk; 3873 struct mm_struct *mm = NULL; 3874 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3875 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 3876 3877 VM_WARN_ON_ONCE(seq > READ_ONCE(lrugen->max_seq)); 3878 3879 if (!mm_state) 3880 return inc_max_seq(lruvec, seq, can_swap, force_scan); 3881 3882 /* see the comment in iterate_mm_list() */ 3883 if (seq <= READ_ONCE(mm_state->seq)) 3884 return false; 3885 3886 /* 3887 * If the hardware doesn't automatically set the accessed bit, fallback 3888 * to lru_gen_look_around(), which only clears the accessed bit in a 3889 * handful of PTEs. Spreading the work out over a period of time usually 3890 * is less efficient, but it avoids bursty page faults. 
3891 */ 3892 if (!should_walk_mmu()) { 3893 success = iterate_mm_list_nowalk(lruvec, seq); 3894 goto done; 3895 } 3896 3897 walk = set_mm_walk(NULL, true); 3898 if (!walk) { 3899 success = iterate_mm_list_nowalk(lruvec, seq); 3900 goto done; 3901 } 3902 3903 walk->lruvec = lruvec; 3904 walk->seq = seq; 3905 walk->can_swap = can_swap; 3906 walk->force_scan = force_scan; 3907 3908 do { 3909 success = iterate_mm_list(walk, &mm); 3910 if (mm) 3911 walk_mm(mm, walk); 3912 } while (mm); 3913 done: 3914 if (success) { 3915 success = inc_max_seq(lruvec, seq, can_swap, force_scan); 3916 WARN_ON_ONCE(!success); 3917 } 3918 3919 return success; 3920 } 3921 3922 /****************************************************************************** 3923 * working set protection 3924 ******************************************************************************/ 3925 3926 static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc) 3927 { 3928 int priority; 3929 unsigned long reclaimable; 3930 3931 if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH) 3932 return; 3933 /* 3934 * Determine the initial priority based on 3935 * (total >> priority) * reclaimed_to_scanned_ratio = nr_to_reclaim, 3936 * where reclaimed_to_scanned_ratio = inactive / total. 3937 */ 3938 reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE); 3939 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) 3940 reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON); 3941 3942 /* round down reclaimable and round up sc->nr_to_reclaim */ 3943 priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1); 3944 3945 /* 3946 * The estimation is based on LRU pages only, so cap it to prevent 3947 * overshoots of shrinker objects by large margins. 3948 */ 3949 sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY); 3950 } 3951 3952 static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc) 3953 { 3954 int gen, type, zone; 3955 unsigned long total = 0; 3956 bool can_swap = get_swappiness(lruvec, sc); 3957 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3958 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 3959 DEFINE_MAX_SEQ(lruvec); 3960 DEFINE_MIN_SEQ(lruvec); 3961 3962 for (type = !can_swap; type < ANON_AND_FILE; type++) { 3963 unsigned long seq; 3964 3965 for (seq = min_seq[type]; seq <= max_seq; seq++) { 3966 gen = lru_gen_from_seq(seq); 3967 3968 for (zone = 0; zone < MAX_NR_ZONES; zone++) 3969 total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); 3970 } 3971 } 3972 3973 /* whether the size is big enough to be helpful */ 3974 return mem_cgroup_online(memcg) ? 
(total >> sc->priority) : total; 3975 } 3976 3977 static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc, 3978 unsigned long min_ttl) 3979 { 3980 int gen; 3981 unsigned long birth; 3982 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 3983 DEFINE_MIN_SEQ(lruvec); 3984 3985 if (mem_cgroup_below_min(NULL, memcg)) 3986 return false; 3987 3988 if (!lruvec_is_sizable(lruvec, sc)) 3989 return false; 3990 3991 /* see the comment on lru_gen_folio */ 3992 gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]); 3993 birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); 3994 3995 return time_is_before_jiffies(birth + min_ttl); 3996 } 3997 3998 /* to protect the working set of the last N jiffies */ 3999 static unsigned long lru_gen_min_ttl __read_mostly; 4000 4001 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) 4002 { 4003 struct mem_cgroup *memcg; 4004 unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl); 4005 bool reclaimable = !min_ttl; 4006 4007 VM_WARN_ON_ONCE(!current_is_kswapd()); 4008 4009 set_initial_priority(pgdat, sc); 4010 4011 memcg = mem_cgroup_iter(NULL, NULL, NULL); 4012 do { 4013 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); 4014 4015 mem_cgroup_calculate_protection(NULL, memcg); 4016 4017 if (!reclaimable) 4018 reclaimable = lruvec_is_reclaimable(lruvec, sc, min_ttl); 4019 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); 4020 4021 /* 4022 * The main goal is to OOM kill if every generation from all memcgs is 4023 * younger than min_ttl. However, another possibility is all memcgs are 4024 * either too small or below min. 4025 */ 4026 if (!reclaimable && mutex_trylock(&oom_lock)) { 4027 struct oom_control oc = { 4028 .gfp_mask = sc->gfp_mask, 4029 }; 4030 4031 out_of_memory(&oc); 4032 4033 mutex_unlock(&oom_lock); 4034 } 4035 } 4036 4037 /****************************************************************************** 4038 * rmap/PT walk feedback 4039 ******************************************************************************/ 4040 4041 /* 4042 * This function exploits spatial locality when shrink_folio_list() walks the 4043 * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. If 4044 * the scan was done cacheline efficiently, it adds the PMD entry pointing to 4045 * the PTE table to the Bloom filter. This forms a feedback loop between the 4046 * eviction and the aging. 
4047 */ 4048 bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw) 4049 { 4050 int i; 4051 unsigned long start; 4052 unsigned long end; 4053 struct lru_gen_mm_walk *walk; 4054 int young = 1; 4055 pte_t *pte = pvmw->pte; 4056 unsigned long addr = pvmw->address; 4057 struct vm_area_struct *vma = pvmw->vma; 4058 struct folio *folio = pfn_folio(pvmw->pfn); 4059 bool can_swap = !folio_is_file_lru(folio); 4060 struct mem_cgroup *memcg = folio_memcg(folio); 4061 struct pglist_data *pgdat = folio_pgdat(folio); 4062 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); 4063 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 4064 DEFINE_MAX_SEQ(lruvec); 4065 int old_gen, new_gen = lru_gen_from_seq(max_seq); 4066 4067 lockdep_assert_held(pvmw->ptl); 4068 VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio); 4069 4070 if (!ptep_clear_young_notify(vma, addr, pte)) 4071 return false; 4072 4073 if (spin_is_contended(pvmw->ptl)) 4074 return true; 4075 4076 /* exclude special VMAs containing anon pages from COW */ 4077 if (vma->vm_flags & VM_SPECIAL) 4078 return true; 4079 4080 /* avoid taking the LRU lock under the PTL when possible */ 4081 walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL; 4082 4083 start = max(addr & PMD_MASK, vma->vm_start); 4084 end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1; 4085 4086 if (end - start == PAGE_SIZE) 4087 return true; 4088 4089 if (end - start > MIN_LRU_BATCH * PAGE_SIZE) { 4090 if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2) 4091 end = start + MIN_LRU_BATCH * PAGE_SIZE; 4092 else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2) 4093 start = end - MIN_LRU_BATCH * PAGE_SIZE; 4094 else { 4095 start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2; 4096 end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2; 4097 } 4098 } 4099 4100 arch_enter_lazy_mmu_mode(); 4101 4102 pte -= (addr - start) / PAGE_SIZE; 4103 4104 for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) { 4105 unsigned long pfn; 4106 pte_t ptent = ptep_get(pte + i); 4107 4108 pfn = get_pte_pfn(ptent, vma, addr, pgdat); 4109 if (pfn == -1) 4110 continue; 4111 4112 folio = get_pfn_folio(pfn, memcg, pgdat, can_swap); 4113 if (!folio) 4114 continue; 4115 4116 if (!ptep_clear_young_notify(vma, addr, pte + i)) 4117 continue; 4118 4119 young++; 4120 4121 if (pte_dirty(ptent) && !folio_test_dirty(folio) && 4122 !(folio_test_anon(folio) && folio_test_swapbacked(folio) && 4123 !folio_test_swapcache(folio))) 4124 folio_mark_dirty(folio); 4125 4126 if (walk) { 4127 old_gen = folio_update_gen(folio, new_gen); 4128 if (old_gen >= 0 && old_gen != new_gen) 4129 update_batch_size(walk, folio, old_gen, new_gen); 4130 4131 continue; 4132 } 4133 4134 old_gen = folio_lru_gen(folio); 4135 if (old_gen < 0) 4136 folio_set_referenced(folio); 4137 else if (old_gen != new_gen) { 4138 folio_clear_lru_refs(folio); 4139 folio_activate(folio); 4140 } 4141 } 4142 4143 arch_leave_lazy_mmu_mode(); 4144 4145 /* feedback from rmap walkers to page table walkers */ 4146 if (mm_state && suitable_to_scan(i, young)) 4147 update_bloom_filter(mm_state, max_seq, pvmw->pmd); 4148 4149 return true; 4150 } 4151 4152 /****************************************************************************** 4153 * memcg LRU 4154 ******************************************************************************/ 4155 4156 /* see the comment on MEMCG_NR_GENS */ 4157 enum { 4158 MEMCG_LRU_NOP, 4159 MEMCG_LRU_HEAD, 4160 MEMCG_LRU_TAIL, 4161 MEMCG_LRU_OLD, 4162 MEMCG_LRU_YOUNG, 4163 }; 4164 4165 static void lru_gen_rotate_memcg(struct lruvec *lruvec, 
int op) 4166 { 4167 int seg; 4168 int old, new; 4169 unsigned long flags; 4170 int bin = get_random_u32_below(MEMCG_NR_BINS); 4171 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 4172 4173 spin_lock_irqsave(&pgdat->memcg_lru.lock, flags); 4174 4175 VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); 4176 4177 seg = 0; 4178 new = old = lruvec->lrugen.gen; 4179 4180 /* see the comment on MEMCG_NR_GENS */ 4181 if (op == MEMCG_LRU_HEAD) 4182 seg = MEMCG_LRU_HEAD; 4183 else if (op == MEMCG_LRU_TAIL) 4184 seg = MEMCG_LRU_TAIL; 4185 else if (op == MEMCG_LRU_OLD) 4186 new = get_memcg_gen(pgdat->memcg_lru.seq); 4187 else if (op == MEMCG_LRU_YOUNG) 4188 new = get_memcg_gen(pgdat->memcg_lru.seq + 1); 4189 else 4190 VM_WARN_ON_ONCE(true); 4191 4192 WRITE_ONCE(lruvec->lrugen.seg, seg); 4193 WRITE_ONCE(lruvec->lrugen.gen, new); 4194 4195 hlist_nulls_del_rcu(&lruvec->lrugen.list); 4196 4197 if (op == MEMCG_LRU_HEAD || op == MEMCG_LRU_OLD) 4198 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); 4199 else 4200 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); 4201 4202 pgdat->memcg_lru.nr_memcgs[old]--; 4203 pgdat->memcg_lru.nr_memcgs[new]++; 4204 4205 if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq)) 4206 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); 4207 4208 spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags); 4209 } 4210 4211 #ifdef CONFIG_MEMCG 4212 4213 void lru_gen_online_memcg(struct mem_cgroup *memcg) 4214 { 4215 int gen; 4216 int nid; 4217 int bin = get_random_u32_below(MEMCG_NR_BINS); 4218 4219 for_each_node(nid) { 4220 struct pglist_data *pgdat = NODE_DATA(nid); 4221 struct lruvec *lruvec = get_lruvec(memcg, nid); 4222 4223 spin_lock_irq(&pgdat->memcg_lru.lock); 4224 4225 VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list)); 4226 4227 gen = get_memcg_gen(pgdat->memcg_lru.seq); 4228 4229 lruvec->lrugen.gen = gen; 4230 4231 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]); 4232 pgdat->memcg_lru.nr_memcgs[gen]++; 4233 4234 spin_unlock_irq(&pgdat->memcg_lru.lock); 4235 } 4236 } 4237 4238 void lru_gen_offline_memcg(struct mem_cgroup *memcg) 4239 { 4240 int nid; 4241 4242 for_each_node(nid) { 4243 struct lruvec *lruvec = get_lruvec(memcg, nid); 4244 4245 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_OLD); 4246 } 4247 } 4248 4249 void lru_gen_release_memcg(struct mem_cgroup *memcg) 4250 { 4251 int gen; 4252 int nid; 4253 4254 for_each_node(nid) { 4255 struct pglist_data *pgdat = NODE_DATA(nid); 4256 struct lruvec *lruvec = get_lruvec(memcg, nid); 4257 4258 spin_lock_irq(&pgdat->memcg_lru.lock); 4259 4260 if (hlist_nulls_unhashed(&lruvec->lrugen.list)) 4261 goto unlock; 4262 4263 gen = lruvec->lrugen.gen; 4264 4265 hlist_nulls_del_init_rcu(&lruvec->lrugen.list); 4266 pgdat->memcg_lru.nr_memcgs[gen]--; 4267 4268 if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq)) 4269 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); 4270 unlock: 4271 spin_unlock_irq(&pgdat->memcg_lru.lock); 4272 } 4273 } 4274 4275 void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid) 4276 { 4277 struct lruvec *lruvec = get_lruvec(memcg, nid); 4278 4279 /* see the comment on MEMCG_NR_GENS */ 4280 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_HEAD) 4281 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD); 4282 } 4283 4284 #endif /* CONFIG_MEMCG */ 4285 4286 /****************************************************************************** 4287 
* the eviction 4288 ******************************************************************************/ 4289 4290 static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc, 4291 int tier_idx) 4292 { 4293 bool success; 4294 bool dirty, writeback; 4295 int gen = folio_lru_gen(folio); 4296 int type = folio_is_file_lru(folio); 4297 int zone = folio_zonenum(folio); 4298 int delta = folio_nr_pages(folio); 4299 int refs = folio_lru_refs(folio); 4300 int tier = lru_tier_from_refs(refs); 4301 struct lru_gen_folio *lrugen = &lruvec->lrugen; 4302 4303 VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio); 4304 4305 /* unevictable */ 4306 if (!folio_evictable(folio)) { 4307 success = lru_gen_del_folio(lruvec, folio, true); 4308 VM_WARN_ON_ONCE_FOLIO(!success, folio); 4309 folio_set_unevictable(folio); 4310 lruvec_add_folio(lruvec, folio); 4311 __count_vm_events(UNEVICTABLE_PGCULLED, delta); 4312 return true; 4313 } 4314 4315 /* promoted */ 4316 if (gen != lru_gen_from_seq(lrugen->min_seq[type])) { 4317 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); 4318 return true; 4319 } 4320 4321 /* protected */ 4322 if (tier > tier_idx || refs == BIT(LRU_REFS_WIDTH)) { 4323 int hist = lru_hist_from_seq(lrugen->min_seq[type]); 4324 4325 gen = folio_inc_gen(lruvec, folio, false); 4326 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); 4327 4328 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 4329 lrugen->protected[hist][type][tier - 1] + delta); 4330 return true; 4331 } 4332 4333 /* ineligible */ 4334 if (!folio_test_lru(folio) || zone > sc->reclaim_idx) { 4335 gen = folio_inc_gen(lruvec, folio, false); 4336 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); 4337 return true; 4338 } 4339 4340 dirty = folio_test_dirty(folio); 4341 writeback = folio_test_writeback(folio); 4342 if (type == LRU_GEN_FILE && dirty) { 4343 sc->nr.file_taken += delta; 4344 if (!writeback) 4345 sc->nr.unqueued_dirty += delta; 4346 } 4347 4348 /* waiting for writeback */ 4349 if (folio_test_locked(folio) || writeback || 4350 (type == LRU_GEN_FILE && dirty)) { 4351 gen = folio_inc_gen(lruvec, folio, true); 4352 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); 4353 return true; 4354 } 4355 4356 return false; 4357 } 4358 4359 static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc) 4360 { 4361 bool success; 4362 4363 /* swap constrained */ 4364 if (!(sc->gfp_mask & __GFP_IO) && 4365 (folio_test_dirty(folio) || 4366 (folio_test_anon(folio) && !folio_test_swapcache(folio)))) 4367 return false; 4368 4369 /* raced with release_pages() */ 4370 if (!folio_try_get(folio)) 4371 return false; 4372 4373 /* raced with another isolation */ 4374 if (!folio_test_clear_lru(folio)) { 4375 folio_put(folio); 4376 return false; 4377 } 4378 4379 /* see the comment on MAX_NR_TIERS */ 4380 if (!folio_test_referenced(folio)) 4381 folio_clear_lru_refs(folio); 4382 4383 /* for shrink_folio_list() */ 4384 folio_clear_reclaim(folio); 4385 folio_clear_referenced(folio); 4386 4387 success = lru_gen_del_folio(lruvec, folio, true); 4388 VM_WARN_ON_ONCE_FOLIO(!success, folio); 4389 4390 return true; 4391 } 4392 4393 static int scan_folios(struct lruvec *lruvec, struct scan_control *sc, 4394 int type, int tier, struct list_head *list) 4395 { 4396 int i; 4397 int gen; 4398 enum vm_event_item item; 4399 int sorted = 0; 4400 int scanned = 0; 4401 int isolated = 0; 4402 int skipped = 0; 4403 int remaining = MAX_LRU_BATCH; 4404 struct lru_gen_folio *lrugen = &lruvec->lrugen; 
4405 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 4406 4407 VM_WARN_ON_ONCE(!list_empty(list)); 4408 4409 if (get_nr_gens(lruvec, type) == MIN_NR_GENS) 4410 return 0; 4411 4412 gen = lru_gen_from_seq(lrugen->min_seq[type]); 4413 4414 for (i = MAX_NR_ZONES; i > 0; i--) { 4415 LIST_HEAD(moved); 4416 int skipped_zone = 0; 4417 int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES; 4418 struct list_head *head = &lrugen->folios[gen][type][zone]; 4419 4420 while (!list_empty(head)) { 4421 struct folio *folio = lru_to_folio(head); 4422 int delta = folio_nr_pages(folio); 4423 4424 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); 4425 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); 4426 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); 4427 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); 4428 4429 scanned += delta; 4430 4431 if (sort_folio(lruvec, folio, sc, tier)) 4432 sorted += delta; 4433 else if (isolate_folio(lruvec, folio, sc)) { 4434 list_add(&folio->lru, list); 4435 isolated += delta; 4436 } else { 4437 list_move(&folio->lru, &moved); 4438 skipped_zone += delta; 4439 } 4440 4441 if (!--remaining || max(isolated, skipped_zone) >= MIN_LRU_BATCH) 4442 break; 4443 } 4444 4445 if (skipped_zone) { 4446 list_splice(&moved, head); 4447 __count_zid_vm_events(PGSCAN_SKIP, zone, skipped_zone); 4448 skipped += skipped_zone; 4449 } 4450 4451 if (!remaining || isolated >= MIN_LRU_BATCH) 4452 break; 4453 } 4454 4455 item = PGSCAN_KSWAPD + reclaimer_offset(); 4456 if (!cgroup_reclaim(sc)) { 4457 __count_vm_events(item, isolated); 4458 __count_vm_events(PGREFILL, sorted); 4459 } 4460 __count_memcg_events(memcg, item, isolated); 4461 __count_memcg_events(memcg, PGREFILL, sorted); 4462 __count_vm_events(PGSCAN_ANON + type, isolated); 4463 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, MAX_LRU_BATCH, 4464 scanned, skipped, isolated, 4465 type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON); 4466 if (type == LRU_GEN_FILE) 4467 sc->nr.file_taken += isolated; 4468 /* 4469 * There might not be eligible folios due to reclaim_idx. Check the 4470 * remaining to prevent livelock if it's not making progress. 4471 */ 4472 return isolated || !remaining ? scanned : 0; 4473 } 4474 4475 static int get_tier_idx(struct lruvec *lruvec, int type) 4476 { 4477 int tier; 4478 struct ctrl_pos sp, pv; 4479 4480 /* 4481 * To leave a margin for fluctuations, use a larger gain factor (1:2). 4482 * This value is chosen because any other tier would have at least twice 4483 * as many refaults as the first tier. 4484 */ 4485 read_ctrl_pos(lruvec, type, 0, 1, &sp); 4486 for (tier = 1; tier < MAX_NR_TIERS; tier++) { 4487 read_ctrl_pos(lruvec, type, tier, 2, &pv); 4488 if (!positive_ctrl_err(&sp, &pv)) 4489 break; 4490 } 4491 4492 return tier - 1; 4493 } 4494 4495 static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx) 4496 { 4497 int type, tier; 4498 struct ctrl_pos sp, pv; 4499 int gain[ANON_AND_FILE] = { swappiness, MAX_SWAPPINESS - swappiness }; 4500 4501 /* 4502 * Compare the first tier of anon with that of file to determine which 4503 * type to scan. Also need to compare other tiers of the selected type 4504 * with the first tier of the other type to determine the last tier (of 4505 * the selected type) to evict. 
4506 */ 4507 read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp); 4508 read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv); 4509 type = positive_ctrl_err(&sp, &pv); 4510 4511 read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp); 4512 for (tier = 1; tier < MAX_NR_TIERS; tier++) { 4513 read_ctrl_pos(lruvec, type, tier, gain[type], &pv); 4514 if (!positive_ctrl_err(&sp, &pv)) 4515 break; 4516 } 4517 4518 *tier_idx = tier - 1; 4519 4520 return type; 4521 } 4522 4523 static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness, 4524 int *type_scanned, struct list_head *list) 4525 { 4526 int i; 4527 int type; 4528 int scanned; 4529 int tier = -1; 4530 DEFINE_MIN_SEQ(lruvec); 4531 4532 /* 4533 * Try to make the obvious choice first, and if anon and file are both 4534 * available from the same generation, 4535 * 1. Interpret swappiness 1 as file first and MAX_SWAPPINESS as anon 4536 * first. 4537 * 2. If !__GFP_IO, file first since clean pagecache is more likely to 4538 * exist than clean swapcache. 4539 */ 4540 if (!swappiness) 4541 type = LRU_GEN_FILE; 4542 else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE]) 4543 type = LRU_GEN_ANON; 4544 else if (swappiness == 1) 4545 type = LRU_GEN_FILE; 4546 else if (swappiness == MAX_SWAPPINESS) 4547 type = LRU_GEN_ANON; 4548 else if (!(sc->gfp_mask & __GFP_IO)) 4549 type = LRU_GEN_FILE; 4550 else 4551 type = get_type_to_scan(lruvec, swappiness, &tier); 4552 4553 for (i = !swappiness; i < ANON_AND_FILE; i++) { 4554 if (tier < 0) 4555 tier = get_tier_idx(lruvec, type); 4556 4557 scanned = scan_folios(lruvec, sc, type, tier, list); 4558 if (scanned) 4559 break; 4560 4561 type = !type; 4562 tier = -1; 4563 } 4564 4565 *type_scanned = type; 4566 4567 return scanned; 4568 } 4569 4570 static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness) 4571 { 4572 int type; 4573 int scanned; 4574 int reclaimed; 4575 LIST_HEAD(list); 4576 LIST_HEAD(clean); 4577 struct folio *folio; 4578 struct folio *next; 4579 enum vm_event_item item; 4580 struct reclaim_stat stat; 4581 struct lru_gen_mm_walk *walk; 4582 bool skip_retry = false; 4583 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 4584 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 4585 4586 spin_lock_irq(&lruvec->lru_lock); 4587 4588 scanned = isolate_folios(lruvec, sc, swappiness, &type, &list); 4589 4590 scanned += try_to_inc_min_seq(lruvec, swappiness); 4591 4592 if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS) 4593 scanned = 0; 4594 4595 spin_unlock_irq(&lruvec->lru_lock); 4596 4597 if (list_empty(&list)) 4598 return scanned; 4599 retry: 4600 reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false); 4601 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; 4602 sc->nr_reclaimed += reclaimed; 4603 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, 4604 scanned, reclaimed, &stat, sc->priority, 4605 type ? 
LRU_INACTIVE_FILE : LRU_INACTIVE_ANON); 4606 4607 list_for_each_entry_safe_reverse(folio, next, &list, lru) { 4608 if (!folio_evictable(folio)) { 4609 list_del(&folio->lru); 4610 folio_putback_lru(folio); 4611 continue; 4612 } 4613 4614 if (folio_test_reclaim(folio) && 4615 (folio_test_dirty(folio) || folio_test_writeback(folio))) { 4616 /* restore LRU_REFS_FLAGS cleared by isolate_folio() */ 4617 if (folio_test_workingset(folio)) 4618 folio_set_referenced(folio); 4619 continue; 4620 } 4621 4622 if (skip_retry || folio_test_active(folio) || folio_test_referenced(folio) || 4623 folio_mapped(folio) || folio_test_locked(folio) || 4624 folio_test_dirty(folio) || folio_test_writeback(folio)) { 4625 /* don't add rejected folios to the oldest generation */ 4626 set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 4627 BIT(PG_active)); 4628 continue; 4629 } 4630 4631 /* retry folios that may have missed folio_rotate_reclaimable() */ 4632 list_move(&folio->lru, &clean); 4633 } 4634 4635 spin_lock_irq(&lruvec->lru_lock); 4636 4637 move_folios_to_lru(lruvec, &list); 4638 4639 walk = current->reclaim_state->mm_walk; 4640 if (walk && walk->batched) { 4641 walk->lruvec = lruvec; 4642 reset_batch_size(walk); 4643 } 4644 4645 item = PGSTEAL_KSWAPD + reclaimer_offset(); 4646 if (!cgroup_reclaim(sc)) 4647 __count_vm_events(item, reclaimed); 4648 __count_memcg_events(memcg, item, reclaimed); 4649 __count_vm_events(PGSTEAL_ANON + type, reclaimed); 4650 4651 spin_unlock_irq(&lruvec->lru_lock); 4652 4653 list_splice_init(&clean, &list); 4654 4655 if (!list_empty(&list)) { 4656 skip_retry = true; 4657 goto retry; 4658 } 4659 4660 return scanned; 4661 } 4662 4663 static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, 4664 bool can_swap, unsigned long *nr_to_scan) 4665 { 4666 int gen, type, zone; 4667 unsigned long old = 0; 4668 unsigned long young = 0; 4669 unsigned long total = 0; 4670 struct lru_gen_folio *lrugen = &lruvec->lrugen; 4671 DEFINE_MIN_SEQ(lruvec); 4672 4673 /* whether this lruvec is completely out of cold folios */ 4674 if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) { 4675 *nr_to_scan = 0; 4676 return true; 4677 } 4678 4679 for (type = !can_swap; type < ANON_AND_FILE; type++) { 4680 unsigned long seq; 4681 4682 for (seq = min_seq[type]; seq <= max_seq; seq++) { 4683 unsigned long size = 0; 4684 4685 gen = lru_gen_from_seq(seq); 4686 4687 for (zone = 0; zone < MAX_NR_ZONES; zone++) 4688 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); 4689 4690 total += size; 4691 if (seq == max_seq) 4692 young += size; 4693 else if (seq + MIN_NR_GENS == max_seq) 4694 old += size; 4695 } 4696 } 4697 4698 *nr_to_scan = total; 4699 4700 /* 4701 * The aging tries to be lazy to reduce the overhead, while the eviction 4702 * stalls when the number of generations reaches MIN_NR_GENS. Hence, the 4703 * ideal number of generations is MIN_NR_GENS+1. 4704 */ 4705 if (min_seq[!can_swap] + MIN_NR_GENS < max_seq) 4706 return false; 4707 4708 /* 4709 * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1) 4710 * of the total number of pages for each generation. A reasonable range 4711 * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The 4712 * aging cares about the upper bound of hot pages, while the eviction 4713 * cares about the lower bound of cold pages. 
 */
	if (young * MIN_NR_GENS > total)
		return true;
	if (old * (MIN_NR_GENS + 2) < total)
		return true;

	return false;
}

/*
 * For future optimizations:
 * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
 *    reclaim.
 */
static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool can_swap)
{
	bool success;
	unsigned long nr_to_scan;
	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
	DEFINE_MAX_SEQ(lruvec);

	if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
		return -1;

	success = should_run_aging(lruvec, max_seq, can_swap, &nr_to_scan);

	/* try to scrape all its memory if this memcg was deleted */
	if (nr_to_scan && !mem_cgroup_online(memcg))
		return nr_to_scan;

	/* try to get away with not aging at the default priority */
	if (!success || sc->priority == DEF_PRIORITY)
		return nr_to_scan >> sc->priority;

	/* stop scanning this lruvec as it's low on cold folios */
	return try_to_inc_max_seq(lruvec, max_seq, can_swap, false) ? -1 : 0;
}

static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc)
{
	int i;
	enum zone_watermarks mark;

	/* don't abort memcg reclaim to ensure fairness */
	if (!root_reclaim(sc))
		return false;

	if (sc->nr_reclaimed >= max(sc->nr_to_reclaim, compact_gap(sc->order)))
		return true;

	/* check the order to exclude compaction-induced reclaim */
	if (!current_is_kswapd() || sc->order)
		return false;

	mark = sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING ?
	       WMARK_PROMO : WMARK_HIGH;

	for (i = 0; i <= sc->reclaim_idx; i++) {
		struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
		unsigned long size = wmark_pages(zone, mark) + MIN_LRU_BATCH;

		if (managed_zone(zone) && !zone_watermark_ok(zone, 0, size, sc->reclaim_idx, 0))
			return false;
	}

	/* kswapd should abort if all eligible zones are safe */
	return true;
}

static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
	long nr_to_scan;
	unsigned long scanned = 0;
	int swappiness = get_swappiness(lruvec, sc);

	while (true) {
		int delta;

		nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
		if (nr_to_scan <= 0)
			break;

		delta = evict_folios(lruvec, sc, swappiness);
		if (!delta)
			break;

		scanned += delta;
		if (scanned >= nr_to_scan)
			break;

		if (should_abort_scan(lruvec, sc))
			break;

		cond_resched();
	}

	/*
	 * If too much of the file cache in the coldest generation can't be
	 * evicted due to being dirty, wake up the flusher.
4813 */ 4814 if (sc->nr.unqueued_dirty && sc->nr.unqueued_dirty == sc->nr.file_taken) 4815 wakeup_flusher_threads(WB_REASON_VMSCAN); 4816 4817 /* whether this lruvec should be rotated */ 4818 return nr_to_scan < 0; 4819 } 4820 4821 static int shrink_one(struct lruvec *lruvec, struct scan_control *sc) 4822 { 4823 bool success; 4824 unsigned long scanned = sc->nr_scanned; 4825 unsigned long reclaimed = sc->nr_reclaimed; 4826 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 4827 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 4828 4829 /* lru_gen_age_node() called mem_cgroup_calculate_protection() */ 4830 if (mem_cgroup_below_min(NULL, memcg)) 4831 return MEMCG_LRU_YOUNG; 4832 4833 if (mem_cgroup_below_low(NULL, memcg)) { 4834 /* see the comment on MEMCG_NR_GENS */ 4835 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL) 4836 return MEMCG_LRU_TAIL; 4837 4838 memcg_memory_event(memcg, MEMCG_LOW); 4839 } 4840 4841 success = try_to_shrink_lruvec(lruvec, sc); 4842 4843 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority); 4844 4845 if (!sc->proactive) 4846 vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned, 4847 sc->nr_reclaimed - reclaimed); 4848 4849 flush_reclaim_state(sc); 4850 4851 if (success && mem_cgroup_online(memcg)) 4852 return MEMCG_LRU_YOUNG; 4853 4854 if (!success && lruvec_is_sizable(lruvec, sc)) 4855 return 0; 4856 4857 /* one retry if offlined or too small */ 4858 return READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL ? 4859 MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG; 4860 } 4861 4862 static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc) 4863 { 4864 int op; 4865 int gen; 4866 int bin; 4867 int first_bin; 4868 struct lruvec *lruvec; 4869 struct lru_gen_folio *lrugen; 4870 struct mem_cgroup *memcg; 4871 struct hlist_nulls_node *pos; 4872 4873 gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq)); 4874 bin = first_bin = get_random_u32_below(MEMCG_NR_BINS); 4875 restart: 4876 op = 0; 4877 memcg = NULL; 4878 4879 rcu_read_lock(); 4880 4881 hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) { 4882 if (op) { 4883 lru_gen_rotate_memcg(lruvec, op); 4884 op = 0; 4885 } 4886 4887 mem_cgroup_put(memcg); 4888 memcg = NULL; 4889 4890 if (gen != READ_ONCE(lrugen->gen)) 4891 continue; 4892 4893 lruvec = container_of(lrugen, struct lruvec, lrugen); 4894 memcg = lruvec_memcg(lruvec); 4895 4896 if (!mem_cgroup_tryget(memcg)) { 4897 lru_gen_release_memcg(memcg); 4898 memcg = NULL; 4899 continue; 4900 } 4901 4902 rcu_read_unlock(); 4903 4904 op = shrink_one(lruvec, sc); 4905 4906 rcu_read_lock(); 4907 4908 if (should_abort_scan(lruvec, sc)) 4909 break; 4910 } 4911 4912 rcu_read_unlock(); 4913 4914 if (op) 4915 lru_gen_rotate_memcg(lruvec, op); 4916 4917 mem_cgroup_put(memcg); 4918 4919 if (!is_a_nulls(pos)) 4920 return; 4921 4922 /* restart if raced with lru_gen_rotate_memcg() */ 4923 if (gen != get_nulls_value(pos)) 4924 goto restart; 4925 4926 /* try the rest of the bins of the current generation */ 4927 bin = get_memcg_bin(bin + 1); 4928 if (bin != first_bin) 4929 goto restart; 4930 } 4931 4932 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) 4933 { 4934 struct blk_plug plug; 4935 4936 VM_WARN_ON_ONCE(root_reclaim(sc)); 4937 VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap); 4938 4939 lru_add_drain(); 4940 4941 blk_start_plug(&plug); 4942 4943 set_mm_walk(NULL, sc->proactive); 4944 4945 if (try_to_shrink_lruvec(lruvec, sc)) 4946 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG); 4947 4948 
clear_mm_walk(); 4949 4950 blk_finish_plug(&plug); 4951 } 4952 4953 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) 4954 { 4955 struct blk_plug plug; 4956 unsigned long reclaimed = sc->nr_reclaimed; 4957 4958 VM_WARN_ON_ONCE(!root_reclaim(sc)); 4959 4960 /* 4961 * Unmapped clean folios are already prioritized. Scanning for more of 4962 * them is likely futile and can cause high reclaim latency when there 4963 * is a large number of memcgs. 4964 */ 4965 if (!sc->may_writepage || !sc->may_unmap) 4966 goto done; 4967 4968 lru_add_drain(); 4969 4970 blk_start_plug(&plug); 4971 4972 set_mm_walk(pgdat, sc->proactive); 4973 4974 set_initial_priority(pgdat, sc); 4975 4976 if (current_is_kswapd()) 4977 sc->nr_reclaimed = 0; 4978 4979 if (mem_cgroup_disabled()) 4980 shrink_one(&pgdat->__lruvec, sc); 4981 else 4982 shrink_many(pgdat, sc); 4983 4984 if (current_is_kswapd()) 4985 sc->nr_reclaimed += reclaimed; 4986 4987 clear_mm_walk(); 4988 4989 blk_finish_plug(&plug); 4990 done: 4991 if (sc->nr_reclaimed > reclaimed) 4992 pgdat->kswapd_failures = 0; 4993 } 4994 4995 /****************************************************************************** 4996 * state change 4997 ******************************************************************************/ 4998 4999 static bool __maybe_unused state_is_valid(struct lruvec *lruvec) 5000 { 5001 struct lru_gen_folio *lrugen = &lruvec->lrugen; 5002 5003 if (lrugen->enabled) { 5004 enum lru_list lru; 5005 5006 for_each_evictable_lru(lru) { 5007 if (!list_empty(&lruvec->lists[lru])) 5008 return false; 5009 } 5010 } else { 5011 int gen, type, zone; 5012 5013 for_each_gen_type_zone(gen, type, zone) { 5014 if (!list_empty(&lrugen->folios[gen][type][zone])) 5015 return false; 5016 } 5017 } 5018 5019 return true; 5020 } 5021 5022 static bool fill_evictable(struct lruvec *lruvec) 5023 { 5024 enum lru_list lru; 5025 int remaining = MAX_LRU_BATCH; 5026 5027 for_each_evictable_lru(lru) { 5028 int type = is_file_lru(lru); 5029 bool active = is_active_lru(lru); 5030 struct list_head *head = &lruvec->lists[lru]; 5031 5032 while (!list_empty(head)) { 5033 bool success; 5034 struct folio *folio = lru_to_folio(head); 5035 5036 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); 5037 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio) != active, folio); 5038 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); 5039 VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio); 5040 5041 lruvec_del_folio(lruvec, folio); 5042 success = lru_gen_add_folio(lruvec, folio, false); 5043 VM_WARN_ON_ONCE(!success); 5044 5045 if (!--remaining) 5046 return false; 5047 } 5048 } 5049 5050 return true; 5051 } 5052 5053 static bool drain_evictable(struct lruvec *lruvec) 5054 { 5055 int gen, type, zone; 5056 int remaining = MAX_LRU_BATCH; 5057 5058 for_each_gen_type_zone(gen, type, zone) { 5059 struct list_head *head = &lruvec->lrugen.folios[gen][type][zone]; 5060 5061 while (!list_empty(head)) { 5062 bool success; 5063 struct folio *folio = lru_to_folio(head); 5064 5065 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); 5066 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); 5067 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); 5068 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); 5069 5070 success = lru_gen_del_folio(lruvec, folio, false); 5071 VM_WARN_ON_ONCE(!success); 5072 lruvec_add_folio(lruvec, folio); 5073 5074 if (!--remaining) 5075 return false; 5076 } 5077 } 5078 5079 return true; 5080 } 5081 5082 
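/*
 * lru_gen_change_state() switches a live system between the multi-gen LRU
 * and the classic active/inactive LRU: enabling moves folios from
 * lruvec->lists onto the generation lists via fill_evictable(), and
 * disabling drains them back via drain_evictable(). Both helpers work in
 * MAX_LRU_BATCH chunks so the lru_lock can be dropped between batches.
 */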
static void lru_gen_change_state(bool enabled) 5083 { 5084 static DEFINE_MUTEX(state_mutex); 5085 5086 struct mem_cgroup *memcg; 5087 5088 cgroup_lock(); 5089 cpus_read_lock(); 5090 get_online_mems(); 5091 mutex_lock(&state_mutex); 5092 5093 if (enabled == lru_gen_enabled()) 5094 goto unlock; 5095 5096 if (enabled) 5097 static_branch_enable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]); 5098 else 5099 static_branch_disable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]); 5100 5101 memcg = mem_cgroup_iter(NULL, NULL, NULL); 5102 do { 5103 int nid; 5104 5105 for_each_node(nid) { 5106 struct lruvec *lruvec = get_lruvec(memcg, nid); 5107 5108 spin_lock_irq(&lruvec->lru_lock); 5109 5110 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); 5111 VM_WARN_ON_ONCE(!state_is_valid(lruvec)); 5112 5113 lruvec->lrugen.enabled = enabled; 5114 5115 while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) { 5116 spin_unlock_irq(&lruvec->lru_lock); 5117 cond_resched(); 5118 spin_lock_irq(&lruvec->lru_lock); 5119 } 5120 5121 spin_unlock_irq(&lruvec->lru_lock); 5122 } 5123 5124 cond_resched(); 5125 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); 5126 unlock: 5127 mutex_unlock(&state_mutex); 5128 put_online_mems(); 5129 cpus_read_unlock(); 5130 cgroup_unlock(); 5131 } 5132 5133 /****************************************************************************** 5134 * sysfs interface 5135 ******************************************************************************/ 5136 5137 static ssize_t min_ttl_ms_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 5138 { 5139 return sysfs_emit(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl))); 5140 } 5141 5142 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ 5143 static ssize_t min_ttl_ms_store(struct kobject *kobj, struct kobj_attribute *attr, 5144 const char *buf, size_t len) 5145 { 5146 unsigned int msecs; 5147 5148 if (kstrtouint(buf, 0, &msecs)) 5149 return -EINVAL; 5150 5151 WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs)); 5152 5153 return len; 5154 } 5155 5156 static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR_RW(min_ttl_ms); 5157 5158 static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 5159 { 5160 unsigned int caps = 0; 5161 5162 if (get_cap(LRU_GEN_CORE)) 5163 caps |= BIT(LRU_GEN_CORE); 5164 5165 if (should_walk_mmu()) 5166 caps |= BIT(LRU_GEN_MM_WALK); 5167 5168 if (should_clear_pmd_young()) 5169 caps |= BIT(LRU_GEN_NONLEAF_YOUNG); 5170 5171 return sysfs_emit(buf, "0x%04x\n", caps); 5172 } 5173 5174 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ 5175 static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, 5176 const char *buf, size_t len) 5177 { 5178 int i; 5179 unsigned int caps; 5180 5181 if (tolower(*buf) == 'n') 5182 caps = 0; 5183 else if (tolower(*buf) == 'y') 5184 caps = -1; 5185 else if (kstrtouint(buf, 0, &caps)) 5186 return -EINVAL; 5187 5188 for (i = 0; i < NR_LRU_GEN_CAPS; i++) { 5189 bool enabled = caps & BIT(i); 5190 5191 if (i == LRU_GEN_CORE) 5192 lru_gen_change_state(enabled); 5193 else if (enabled) 5194 static_branch_enable(&lru_gen_caps[i]); 5195 else 5196 static_branch_disable(&lru_gen_caps[i]); 5197 } 5198 5199 return len; 5200 } 5201 5202 static struct kobj_attribute lru_gen_enabled_attr = __ATTR_RW(enabled); 5203 5204 static struct attribute *lru_gen_attrs[] = { 5205 &lru_gen_min_ttl_attr.attr, 5206 &lru_gen_enabled_attr.attr, 5207 NULL 5208 }; 5209 5210 static const struct attribute_group lru_gen_attr_group = { 
5211 .name = "lru_gen", 5212 .attrs = lru_gen_attrs, 5213 }; 5214 5215 /****************************************************************************** 5216 * debugfs interface 5217 ******************************************************************************/ 5218 5219 static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos) 5220 { 5221 struct mem_cgroup *memcg; 5222 loff_t nr_to_skip = *pos; 5223 5224 m->private = kvmalloc(PATH_MAX, GFP_KERNEL); 5225 if (!m->private) 5226 return ERR_PTR(-ENOMEM); 5227 5228 memcg = mem_cgroup_iter(NULL, NULL, NULL); 5229 do { 5230 int nid; 5231 5232 for_each_node_state(nid, N_MEMORY) { 5233 if (!nr_to_skip--) 5234 return get_lruvec(memcg, nid); 5235 } 5236 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); 5237 5238 return NULL; 5239 } 5240 5241 static void lru_gen_seq_stop(struct seq_file *m, void *v) 5242 { 5243 if (!IS_ERR_OR_NULL(v)) 5244 mem_cgroup_iter_break(NULL, lruvec_memcg(v)); 5245 5246 kvfree(m->private); 5247 m->private = NULL; 5248 } 5249 5250 static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos) 5251 { 5252 int nid = lruvec_pgdat(v)->node_id; 5253 struct mem_cgroup *memcg = lruvec_memcg(v); 5254 5255 ++*pos; 5256 5257 nid = next_memory_node(nid); 5258 if (nid == MAX_NUMNODES) { 5259 memcg = mem_cgroup_iter(NULL, memcg, NULL); 5260 if (!memcg) 5261 return NULL; 5262 5263 nid = first_memory_node; 5264 } 5265 5266 return get_lruvec(memcg, nid); 5267 } 5268 5269 static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec, 5270 unsigned long max_seq, unsigned long *min_seq, 5271 unsigned long seq) 5272 { 5273 int i; 5274 int type, tier; 5275 int hist = lru_hist_from_seq(seq); 5276 struct lru_gen_folio *lrugen = &lruvec->lrugen; 5277 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 5278 5279 for (tier = 0; tier < MAX_NR_TIERS; tier++) { 5280 seq_printf(m, " %10d", tier); 5281 for (type = 0; type < ANON_AND_FILE; type++) { 5282 const char *s = "xxx"; 5283 unsigned long n[3] = {}; 5284 5285 if (seq == max_seq) { 5286 s = "RTx"; 5287 n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]); 5288 n[1] = READ_ONCE(lrugen->avg_total[type][tier]); 5289 } else if (seq == min_seq[type] || NR_HIST_GENS > 1) { 5290 s = "rep"; 5291 n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]); 5292 n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]); 5293 if (tier) 5294 n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]); 5295 } 5296 5297 for (i = 0; i < 3; i++) 5298 seq_printf(m, " %10lu%c", n[i], s[i]); 5299 } 5300 seq_putc(m, '\n'); 5301 } 5302 5303 if (!mm_state) 5304 return; 5305 5306 seq_puts(m, " "); 5307 for (i = 0; i < NR_MM_STATS; i++) { 5308 const char *s = "xxxx"; 5309 unsigned long n = 0; 5310 5311 if (seq == max_seq && NR_HIST_GENS == 1) { 5312 s = "TYFA"; 5313 n = READ_ONCE(mm_state->stats[hist][i]); 5314 } else if (seq != max_seq && NR_HIST_GENS > 1) { 5315 s = "tyfa"; 5316 n = READ_ONCE(mm_state->stats[hist][i]); 5317 } 5318 5319 seq_printf(m, " %10lu%c", n, s[i]); 5320 } 5321 seq_putc(m, '\n'); 5322 } 5323 5324 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ 5325 static int lru_gen_seq_show(struct seq_file *m, void *v) 5326 { 5327 unsigned long seq; 5328 bool full = !debugfs_real_fops(m->file)->write; 5329 struct lruvec *lruvec = v; 5330 struct lru_gen_folio *lrugen = &lruvec->lrugen; 5331 int nid = lruvec_pgdat(lruvec)->node_id; 5332 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 5333 DEFINE_MAX_SEQ(lruvec); 5334 DEFINE_MIN_SEQ(lruvec); 5335 5336 if (nid 
== first_memory_node) { 5337 const char *path = memcg ? m->private : ""; 5338 5339 #ifdef CONFIG_MEMCG 5340 if (memcg) 5341 cgroup_path(memcg->css.cgroup, m->private, PATH_MAX); 5342 #endif 5343 seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path); 5344 } 5345 5346 seq_printf(m, " node %5d\n", nid); 5347 5348 if (!full) 5349 seq = min_seq[LRU_GEN_ANON]; 5350 else if (max_seq >= MAX_NR_GENS) 5351 seq = max_seq - MAX_NR_GENS + 1; 5352 else 5353 seq = 0; 5354 5355 for (; seq <= max_seq; seq++) { 5356 int type, zone; 5357 int gen = lru_gen_from_seq(seq); 5358 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); 5359 5360 seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth)); 5361 5362 for (type = 0; type < ANON_AND_FILE; type++) { 5363 unsigned long size = 0; 5364 char mark = full && seq < min_seq[type] ? 'x' : ' '; 5365 5366 for (zone = 0; zone < MAX_NR_ZONES; zone++) 5367 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); 5368 5369 seq_printf(m, " %10lu%c", size, mark); 5370 } 5371 5372 seq_putc(m, '\n'); 5373 5374 if (full) 5375 lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq); 5376 } 5377 5378 return 0; 5379 } 5380 5381 static const struct seq_operations lru_gen_seq_ops = { 5382 .start = lru_gen_seq_start, 5383 .stop = lru_gen_seq_stop, 5384 .next = lru_gen_seq_next, 5385 .show = lru_gen_seq_show, 5386 }; 5387 5388 static int run_aging(struct lruvec *lruvec, unsigned long seq, 5389 bool can_swap, bool force_scan) 5390 { 5391 DEFINE_MAX_SEQ(lruvec); 5392 DEFINE_MIN_SEQ(lruvec); 5393 5394 if (seq < max_seq) 5395 return 0; 5396 5397 if (seq > max_seq) 5398 return -EINVAL; 5399 5400 if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq) 5401 return -ERANGE; 5402 5403 try_to_inc_max_seq(lruvec, max_seq, can_swap, force_scan); 5404 5405 return 0; 5406 } 5407 5408 static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc, 5409 int swappiness, unsigned long nr_to_reclaim) 5410 { 5411 DEFINE_MAX_SEQ(lruvec); 5412 5413 if (seq + MIN_NR_GENS > max_seq) 5414 return -EINVAL; 5415 5416 sc->nr_reclaimed = 0; 5417 5418 while (!signal_pending(current)) { 5419 DEFINE_MIN_SEQ(lruvec); 5420 5421 if (seq < min_seq[!swappiness]) 5422 return 0; 5423 5424 if (sc->nr_reclaimed >= nr_to_reclaim) 5425 return 0; 5426 5427 if (!evict_folios(lruvec, sc, swappiness)) 5428 return 0; 5429 5430 cond_resched(); 5431 } 5432 5433 return -EINTR; 5434 } 5435 5436 static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq, 5437 struct scan_control *sc, int swappiness, unsigned long opt) 5438 { 5439 struct lruvec *lruvec; 5440 int err = -EINVAL; 5441 struct mem_cgroup *memcg = NULL; 5442 5443 if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY)) 5444 return -EINVAL; 5445 5446 if (!mem_cgroup_disabled()) { 5447 rcu_read_lock(); 5448 5449 memcg = mem_cgroup_from_id(memcg_id); 5450 if (!mem_cgroup_tryget(memcg)) 5451 memcg = NULL; 5452 5453 rcu_read_unlock(); 5454 5455 if (!memcg) 5456 return -EINVAL; 5457 } 5458 5459 if (memcg_id != mem_cgroup_id(memcg)) 5460 goto done; 5461 5462 lruvec = get_lruvec(memcg, nid); 5463 5464 if (swappiness < MIN_SWAPPINESS) 5465 swappiness = get_swappiness(lruvec, sc); 5466 else if (swappiness > MAX_SWAPPINESS) 5467 goto done; 5468 5469 switch (cmd) { 5470 case '+': 5471 err = run_aging(lruvec, seq, swappiness, opt); 5472 break; 5473 case '-': 5474 err = run_eviction(lruvec, seq, sc, swappiness, opt); 5475 break; 5476 } 5477 done: 5478 mem_cgroup_put(memcg); 5479 5480 return err; 
5481 } 5482 5483 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ 5484 static ssize_t lru_gen_seq_write(struct file *file, const char __user *src, 5485 size_t len, loff_t *pos) 5486 { 5487 void *buf; 5488 char *cur, *next; 5489 unsigned int flags; 5490 struct blk_plug plug; 5491 int err = -EINVAL; 5492 struct scan_control sc = { 5493 .may_writepage = true, 5494 .may_unmap = true, 5495 .may_swap = true, 5496 .reclaim_idx = MAX_NR_ZONES - 1, 5497 .gfp_mask = GFP_KERNEL, 5498 }; 5499 5500 buf = kvmalloc(len + 1, GFP_KERNEL); 5501 if (!buf) 5502 return -ENOMEM; 5503 5504 if (copy_from_user(buf, src, len)) { 5505 kvfree(buf); 5506 return -EFAULT; 5507 } 5508 5509 set_task_reclaim_state(current, &sc.reclaim_state); 5510 flags = memalloc_noreclaim_save(); 5511 blk_start_plug(&plug); 5512 if (!set_mm_walk(NULL, true)) { 5513 err = -ENOMEM; 5514 goto done; 5515 } 5516 5517 next = buf; 5518 next[len] = '\0'; 5519 5520 while ((cur = strsep(&next, ",;\n"))) { 5521 int n; 5522 int end; 5523 char cmd; 5524 unsigned int memcg_id; 5525 unsigned int nid; 5526 unsigned long seq; 5527 unsigned int swappiness = -1; 5528 unsigned long opt = -1; 5529 5530 cur = skip_spaces(cur); 5531 if (!*cur) 5532 continue; 5533 5534 n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid, 5535 &seq, &end, &swappiness, &end, &opt, &end); 5536 if (n < 4 || cur[end]) { 5537 err = -EINVAL; 5538 break; 5539 } 5540 5541 err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt); 5542 if (err) 5543 break; 5544 } 5545 done: 5546 clear_mm_walk(); 5547 blk_finish_plug(&plug); 5548 memalloc_noreclaim_restore(flags); 5549 set_task_reclaim_state(current, NULL); 5550 5551 kvfree(buf); 5552 5553 return err ? : len; 5554 } 5555 5556 static int lru_gen_seq_open(struct inode *inode, struct file *file) 5557 { 5558 return seq_open(file, &lru_gen_seq_ops); 5559 } 5560 5561 static const struct file_operations lru_gen_rw_fops = { 5562 .open = lru_gen_seq_open, 5563 .read = seq_read, 5564 .write = lru_gen_seq_write, 5565 .llseek = seq_lseek, 5566 .release = seq_release, 5567 }; 5568 5569 static const struct file_operations lru_gen_ro_fops = { 5570 .open = lru_gen_seq_open, 5571 .read = seq_read, 5572 .llseek = seq_lseek, 5573 .release = seq_release, 5574 }; 5575 5576 /****************************************************************************** 5577 * initialization 5578 ******************************************************************************/ 5579 5580 void lru_gen_init_pgdat(struct pglist_data *pgdat) 5581 { 5582 int i, j; 5583 5584 spin_lock_init(&pgdat->memcg_lru.lock); 5585 5586 for (i = 0; i < MEMCG_NR_GENS; i++) { 5587 for (j = 0; j < MEMCG_NR_BINS; j++) 5588 INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i); 5589 } 5590 } 5591 5592 void lru_gen_init_lruvec(struct lruvec *lruvec) 5593 { 5594 int i; 5595 int gen, type, zone; 5596 struct lru_gen_folio *lrugen = &lruvec->lrugen; 5597 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 5598 5599 lrugen->max_seq = MIN_NR_GENS + 1; 5600 lrugen->enabled = lru_gen_enabled(); 5601 5602 for (i = 0; i <= MIN_NR_GENS + 1; i++) 5603 lrugen->timestamps[i] = jiffies; 5604 5605 for_each_gen_type_zone(gen, type, zone) 5606 INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]); 5607 5608 if (mm_state) 5609 mm_state->seq = MIN_NR_GENS; 5610 } 5611 5612 #ifdef CONFIG_MEMCG 5613 5614 void lru_gen_init_memcg(struct mem_cgroup *memcg) 5615 { 5616 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 5617 5618 if (!mm_list) 5619 return; 5620 5621 
INIT_LIST_HEAD(&mm_list->fifo); 5622 spin_lock_init(&mm_list->lock); 5623 } 5624 5625 void lru_gen_exit_memcg(struct mem_cgroup *memcg) 5626 { 5627 int i; 5628 int nid; 5629 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 5630 5631 VM_WARN_ON_ONCE(mm_list && !list_empty(&mm_list->fifo)); 5632 5633 for_each_node(nid) { 5634 struct lruvec *lruvec = get_lruvec(memcg, nid); 5635 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 5636 5637 VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, 5638 sizeof(lruvec->lrugen.nr_pages))); 5639 5640 lruvec->lrugen.list.next = LIST_POISON1; 5641 5642 if (!mm_state) 5643 continue; 5644 5645 for (i = 0; i < NR_BLOOM_FILTERS; i++) { 5646 bitmap_free(mm_state->filters[i]); 5647 mm_state->filters[i] = NULL; 5648 } 5649 } 5650 } 5651 5652 #endif /* CONFIG_MEMCG */ 5653 5654 static int __init init_lru_gen(void) 5655 { 5656 BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS); 5657 BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS); 5658 5659 if (sysfs_create_group(mm_kobj, &lru_gen_attr_group)) 5660 pr_err("lru_gen: failed to create sysfs group\n"); 5661 5662 debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops); 5663 debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops); 5664 5665 return 0; 5666 }; 5667 late_initcall(init_lru_gen); 5668 5669 #else /* !CONFIG_LRU_GEN */ 5670 5671 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) 5672 { 5673 BUILD_BUG(); 5674 } 5675 5676 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) 5677 { 5678 BUILD_BUG(); 5679 } 5680 5681 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) 5682 { 5683 BUILD_BUG(); 5684 } 5685 5686 #endif /* CONFIG_LRU_GEN */ 5687 5688 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) 5689 { 5690 unsigned long nr[NR_LRU_LISTS]; 5691 unsigned long targets[NR_LRU_LISTS]; 5692 unsigned long nr_to_scan; 5693 enum lru_list lru; 5694 unsigned long nr_reclaimed = 0; 5695 unsigned long nr_to_reclaim = sc->nr_to_reclaim; 5696 bool proportional_reclaim; 5697 struct blk_plug plug; 5698 5699 if (lru_gen_enabled() && !root_reclaim(sc)) { 5700 lru_gen_shrink_lruvec(lruvec, sc); 5701 return; 5702 } 5703 5704 get_scan_count(lruvec, sc, nr); 5705 5706 /* Record the original scan target for proportional adjustments later */ 5707 memcpy(targets, nr, sizeof(nr)); 5708 5709 /* 5710 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal 5711 * event that can occur when there is little memory pressure e.g. 5712 * multiple streaming readers/writers. Hence, we do not abort scanning 5713 * when the requested number of pages are reclaimed when scanning at 5714 * DEF_PRIORITY on the assumption that the fact we are direct 5715 * reclaiming implies that kswapd is not keeping up and it is best to 5716 * do a batch of work at once. For memcg reclaim one check is made to 5717 * abort proportional reclaim if either the file or anon lru has already 5718 * dropped to zero at the first pass. 
5719 */ 5720 proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() && 5721 sc->priority == DEF_PRIORITY); 5722 5723 blk_start_plug(&plug); 5724 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 5725 nr[LRU_INACTIVE_FILE]) { 5726 unsigned long nr_anon, nr_file, percentage; 5727 unsigned long nr_scanned; 5728 5729 for_each_evictable_lru(lru) { 5730 if (nr[lru]) { 5731 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX); 5732 nr[lru] -= nr_to_scan; 5733 5734 nr_reclaimed += shrink_list(lru, nr_to_scan, 5735 lruvec, sc); 5736 } 5737 } 5738 5739 cond_resched(); 5740 5741 if (nr_reclaimed < nr_to_reclaim || proportional_reclaim) 5742 continue; 5743 5744 /* 5745 * For kswapd and memcg, reclaim at least the number of pages 5746 * requested. Ensure that the anon and file LRUs are scanned 5747 * proportionally what was requested by get_scan_count(). We 5748 * stop reclaiming one LRU and reduce the amount scanning 5749 * proportional to the original scan target. 5750 */ 5751 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE]; 5752 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON]; 5753 5754 /* 5755 * It's just vindictive to attack the larger once the smaller 5756 * has gone to zero. And given the way we stop scanning the 5757 * smaller below, this makes sure that we only make one nudge 5758 * towards proportionality once we've got nr_to_reclaim. 5759 */ 5760 if (!nr_file || !nr_anon) 5761 break; 5762 5763 if (nr_file > nr_anon) { 5764 unsigned long scan_target = targets[LRU_INACTIVE_ANON] + 5765 targets[LRU_ACTIVE_ANON] + 1; 5766 lru = LRU_BASE; 5767 percentage = nr_anon * 100 / scan_target; 5768 } else { 5769 unsigned long scan_target = targets[LRU_INACTIVE_FILE] + 5770 targets[LRU_ACTIVE_FILE] + 1; 5771 lru = LRU_FILE; 5772 percentage = nr_file * 100 / scan_target; 5773 } 5774 5775 /* Stop scanning the smaller of the LRU */ 5776 nr[lru] = 0; 5777 nr[lru + LRU_ACTIVE] = 0; 5778 5779 /* 5780 * Recalculate the other LRU scan count based on its original 5781 * scan target and the percentage scanning already complete 5782 */ 5783 lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE; 5784 nr_scanned = targets[lru] - nr[lru]; 5785 nr[lru] = targets[lru] * (100 - percentage) / 100; 5786 nr[lru] -= min(nr[lru], nr_scanned); 5787 5788 lru += LRU_ACTIVE; 5789 nr_scanned = targets[lru] - nr[lru]; 5790 nr[lru] = targets[lru] * (100 - percentage) / 100; 5791 nr[lru] -= min(nr[lru], nr_scanned); 5792 } 5793 blk_finish_plug(&plug); 5794 sc->nr_reclaimed += nr_reclaimed; 5795 5796 /* 5797 * Even if we did not try to evict anon pages at all, we want to 5798 * rebalance the anon lru active/inactive ratio. 5799 */ 5800 if (can_age_anon_pages(lruvec_pgdat(lruvec), sc) && 5801 inactive_is_low(lruvec, LRU_INACTIVE_ANON)) 5802 shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 5803 sc, LRU_ACTIVE_ANON); 5804 } 5805 5806 /* Use reclaim/compaction for costly allocs or under memory pressure */ 5807 static bool in_reclaim_compaction(struct scan_control *sc) 5808 { 5809 if (gfp_compaction_allowed(sc->gfp_mask) && sc->order && 5810 (sc->order > PAGE_ALLOC_COSTLY_ORDER || 5811 sc->priority < DEF_PRIORITY - 2)) 5812 return true; 5813 5814 return false; 5815 } 5816 5817 /* 5818 * Reclaim/compaction is used for high-order allocation requests. It reclaims 5819 * order-0 pages before compacting the zone. should_continue_reclaim() returns 5820 * true if more pages should be reclaimed such that when the page allocator 5821 * calls try_to_compact_pages() that it will have enough free pages to succeed. 
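 * As a rough guide, the free-page buffer aimed for below is
 * compact_gap(order), which (assuming its usual definition of about twice
 * the allocation size) comes to roughly 1024 order-0 pages for an order-9
 * THP request.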
5822 * It will give up earlier than that if there is difficulty reclaiming pages. 5823 */ 5824 static inline bool should_continue_reclaim(struct pglist_data *pgdat, 5825 unsigned long nr_reclaimed, 5826 struct scan_control *sc) 5827 { 5828 unsigned long pages_for_compaction; 5829 unsigned long inactive_lru_pages; 5830 int z; 5831 5832 /* If not in reclaim/compaction mode, stop */ 5833 if (!in_reclaim_compaction(sc)) 5834 return false; 5835 5836 /* 5837 * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX 5838 * number of pages that were scanned. This will return to the caller 5839 * with the risk reclaim/compaction and the resulting allocation attempt 5840 * fails. In the past we have tried harder for __GFP_RETRY_MAYFAIL 5841 * allocations through requiring that the full LRU list has been scanned 5842 * first, by assuming that zero delta of sc->nr_scanned means full LRU 5843 * scan, but that approximation was wrong, and there were corner cases 5844 * where always a non-zero amount of pages were scanned. 5845 */ 5846 if (!nr_reclaimed) 5847 return false; 5848 5849 /* If compaction would go ahead or the allocation would succeed, stop */ 5850 for (z = 0; z <= sc->reclaim_idx; z++) { 5851 struct zone *zone = &pgdat->node_zones[z]; 5852 if (!managed_zone(zone)) 5853 continue; 5854 5855 /* Allocation can already succeed, nothing to do */ 5856 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), 5857 sc->reclaim_idx, 0)) 5858 return false; 5859 5860 if (compaction_suitable(zone, sc->order, sc->reclaim_idx)) 5861 return false; 5862 } 5863 5864 /* 5865 * If we have not reclaimed enough pages for compaction and the 5866 * inactive lists are large enough, continue reclaiming 5867 */ 5868 pages_for_compaction = compact_gap(sc->order); 5869 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE); 5870 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) 5871 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON); 5872 5873 return inactive_lru_pages > pages_for_compaction; 5874 } 5875 5876 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) 5877 { 5878 struct mem_cgroup *target_memcg = sc->target_mem_cgroup; 5879 struct mem_cgroup_reclaim_cookie reclaim = { 5880 .pgdat = pgdat, 5881 }; 5882 struct mem_cgroup_reclaim_cookie *partial = &reclaim; 5883 struct mem_cgroup *memcg; 5884 5885 /* 5886 * In most cases, direct reclaimers can do partial walks 5887 * through the cgroup tree, using an iterator state that 5888 * persists across invocations. This strikes a balance between 5889 * fairness and allocation latency. 5890 * 5891 * For kswapd, reliable forward progress is more important 5892 * than a quick return to idle. Always do full walks. 5893 */ 5894 if (current_is_kswapd() || sc->memcg_full_walk) 5895 partial = NULL; 5896 5897 memcg = mem_cgroup_iter(target_memcg, NULL, partial); 5898 do { 5899 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); 5900 unsigned long reclaimed; 5901 unsigned long scanned; 5902 5903 /* 5904 * This loop can become CPU-bound when target memcgs 5905 * aren't eligible for reclaim - either because they 5906 * don't have any reclaimable pages, or because their 5907 * memory is explicitly protected. Avoid soft lockups. 5908 */ 5909 cond_resched(); 5910 5911 mem_cgroup_calculate_protection(target_memcg, memcg); 5912 5913 if (mem_cgroup_below_min(target_memcg, memcg)) { 5914 /* 5915 * Hard protection. 5916 * If there is no reclaimable memory, OOM. 
5917 */ 5918 continue; 5919 } else if (mem_cgroup_below_low(target_memcg, memcg)) { 5920 /* 5921 * Soft protection. 5922 * Respect the protection only as long as 5923 * there is an unprotected supply 5924 * of reclaimable memory from other cgroups. 5925 */ 5926 if (!sc->memcg_low_reclaim) { 5927 sc->memcg_low_skipped = 1; 5928 continue; 5929 } 5930 memcg_memory_event(memcg, MEMCG_LOW); 5931 } 5932 5933 reclaimed = sc->nr_reclaimed; 5934 scanned = sc->nr_scanned; 5935 5936 shrink_lruvec(lruvec, sc); 5937 5938 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, 5939 sc->priority); 5940 5941 /* Record the group's reclaim efficiency */ 5942 if (!sc->proactive) 5943 vmpressure(sc->gfp_mask, memcg, false, 5944 sc->nr_scanned - scanned, 5945 sc->nr_reclaimed - reclaimed); 5946 5947 /* If partial walks are allowed, bail once goal is reached */ 5948 if (partial && sc->nr_reclaimed >= sc->nr_to_reclaim) { 5949 mem_cgroup_iter_break(target_memcg, memcg); 5950 break; 5951 } 5952 } while ((memcg = mem_cgroup_iter(target_memcg, memcg, partial))); 5953 } 5954 5955 static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) 5956 { 5957 unsigned long nr_reclaimed, nr_scanned, nr_node_reclaimed; 5958 struct lruvec *target_lruvec; 5959 bool reclaimable = false; 5960 5961 if (lru_gen_enabled() && root_reclaim(sc)) { 5962 memset(&sc->nr, 0, sizeof(sc->nr)); 5963 lru_gen_shrink_node(pgdat, sc); 5964 return; 5965 } 5966 5967 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); 5968 5969 again: 5970 memset(&sc->nr, 0, sizeof(sc->nr)); 5971 5972 nr_reclaimed = sc->nr_reclaimed; 5973 nr_scanned = sc->nr_scanned; 5974 5975 prepare_scan_control(pgdat, sc); 5976 5977 shrink_node_memcgs(pgdat, sc); 5978 5979 flush_reclaim_state(sc); 5980 5981 nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed; 5982 5983 /* Record the subtree's reclaim efficiency */ 5984 if (!sc->proactive) 5985 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, 5986 sc->nr_scanned - nr_scanned, nr_node_reclaimed); 5987 5988 if (nr_node_reclaimed) 5989 reclaimable = true; 5990 5991 if (current_is_kswapd()) { 5992 /* 5993 * If reclaim is isolating dirty pages under writeback, 5994 * it implies that the long-lived page allocation rate 5995 * is exceeding the page laundering rate. Either the 5996 * global limits are not being effective at throttling 5997 * processes due to the page distribution throughout 5998 * zones or there is heavy usage of a slow backing 5999 * device. The only option is to throttle from reclaim 6000 * context which is not ideal as there is no guarantee 6001 * the dirtying process is throttled in the same way 6002 * balance_dirty_pages() manages. 6003 * 6004 * Once a node is flagged PGDAT_WRITEBACK, kswapd will 6005 * count the number of pages under pages flagged for 6006 * immediate reclaim and stall if any are encountered 6007 * in the nr_immediate check below. 6008 */ 6009 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) 6010 set_bit(PGDAT_WRITEBACK, &pgdat->flags); 6011 6012 /* Allow kswapd to start writing pages during reclaim.*/ 6013 if (sc->nr.unqueued_dirty && 6014 sc->nr.unqueued_dirty == sc->nr.file_taken) 6015 set_bit(PGDAT_DIRTY, &pgdat->flags); 6016 6017 /* 6018 * If kswapd scans pages marked for immediate 6019 * reclaim and under writeback (nr_immediate), it 6020 * implies that pages are cycling through the LRU 6021 * faster than they are written so forcibly stall 6022 * until some pages complete writeback. 
6023 */ 6024 if (sc->nr.immediate) 6025 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); 6026 } 6027 6028 /* 6029 * Tag a node/memcg as congested if all the dirty pages were marked 6030 * for writeback and immediate reclaim (counted in nr.congested). 6031 * 6032 * Legacy memcg will stall in page writeback so avoid forcibly 6033 * stalling in reclaim_throttle(). 6034 */ 6035 if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) { 6036 if (cgroup_reclaim(sc) && writeback_throttling_sane(sc)) 6037 set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags); 6038 6039 if (current_is_kswapd()) 6040 set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags); 6041 } 6042 6043 /* 6044 * Stall direct reclaim for IO completions if the lruvec or 6045 * node is congested. Allow kswapd to continue until it 6046 * starts encountering unqueued dirty pages or cycling through 6047 * the LRU too quickly. 6048 */ 6049 if (!current_is_kswapd() && current_may_throttle() && 6050 !sc->hibernation_mode && 6051 (test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) || 6052 test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags))) 6053 reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED); 6054 6055 if (should_continue_reclaim(pgdat, nr_node_reclaimed, sc)) 6056 goto again; 6057 6058 /* 6059 * Kswapd gives up on balancing particular nodes after too 6060 * many failures to reclaim anything from them and goes to 6061 * sleep. On reclaim progress, reset the failure counter. A 6062 * successful direct reclaim run will revive a dormant kswapd. 6063 */ 6064 if (reclaimable) 6065 pgdat->kswapd_failures = 0; 6066 else if (sc->cache_trim_mode) 6067 sc->cache_trim_mode_failed = 1; 6068 } 6069 6070 /* 6071 * Returns true if compaction should go ahead for a costly-order request, or 6072 * the allocation would already succeed without compaction. Return false if we 6073 * should reclaim first. 6074 */ 6075 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) 6076 { 6077 unsigned long watermark; 6078 6079 if (!gfp_compaction_allowed(sc->gfp_mask)) 6080 return false; 6081 6082 /* Allocation can already succeed, nothing to do */ 6083 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), 6084 sc->reclaim_idx, 0)) 6085 return true; 6086 6087 /* Compaction cannot yet proceed. Do reclaim. */ 6088 if (!compaction_suitable(zone, sc->order, sc->reclaim_idx)) 6089 return false; 6090 6091 /* 6092 * Compaction is already possible, but it takes time to run and there 6093 * are potentially other callers using the pages just freed. So proceed 6094 * with reclaim to make a buffer of free pages available to give 6095 * compaction a reasonable chance of completing and allocating the page. 6096 * Note that we won't actually reclaim the whole buffer in one attempt 6097 * as the target watermark in should_continue_reclaim() is lower. But if 6098 * we are already above the high+gap watermark, don't reclaim at all. 6099 */ 6100 watermark = high_wmark_pages(zone) + compact_gap(sc->order); 6101 6102 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx); 6103 } 6104 6105 static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc) 6106 { 6107 /* 6108 * If reclaim is making progress greater than 12.5% efficiency then 6109 * wake all the NOPROGRESS throttled tasks.
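 * The check below, nr_reclaimed > (nr_scanned >> 3), means more than one
 * page reclaimed per eight scanned: for example, 600 pages reclaimed out
 * of 4096 scanned (~14.6%) wakes the waiters, while 400 out of 4096
 * (~9.8%) does not.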
6110 */ 6111 if (sc->nr_reclaimed > (sc->nr_scanned >> 3)) { 6112 wait_queue_head_t *wqh; 6113 6114 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS]; 6115 if (waitqueue_active(wqh)) 6116 wake_up(wqh); 6117 6118 return; 6119 } 6120 6121 /* 6122 * Do not throttle kswapd or cgroup reclaim on NOPROGRESS as it will 6123 * throttle on VMSCAN_THROTTLE_WRITEBACK if there are too many pages 6124 * under writeback and marked for immediate reclaim at the tail of the 6125 * LRU. 6126 */ 6127 if (current_is_kswapd() || cgroup_reclaim(sc)) 6128 return; 6129 6130 /* Throttle if making no progress at high priorities. */ 6131 if (sc->priority == 1 && !sc->nr_reclaimed) 6132 reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS); 6133 } 6134 6135 /* 6136 * This is the direct reclaim path, for page-allocating processes. We only 6137 * try to reclaim pages from zones which will satisfy the caller's allocation 6138 * request. 6139 * 6140 * If a zone is deemed to be full of pinned pages then just give it a light 6141 * scan then give up on it. 6142 */ 6143 static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc) 6144 { 6145 struct zoneref *z; 6146 struct zone *zone; 6147 unsigned long nr_soft_reclaimed; 6148 unsigned long nr_soft_scanned; 6149 gfp_t orig_mask; 6150 pg_data_t *last_pgdat = NULL; 6151 pg_data_t *first_pgdat = NULL; 6152 6153 /* 6154 * If the number of buffer_heads in the machine exceeds the maximum 6155 * allowed level, force direct reclaim to scan the highmem zone as 6156 * highmem pages could be pinning lowmem pages storing buffer_heads 6157 */ 6158 orig_mask = sc->gfp_mask; 6159 if (buffer_heads_over_limit) { 6160 sc->gfp_mask |= __GFP_HIGHMEM; 6161 sc->reclaim_idx = gfp_zone(sc->gfp_mask); 6162 } 6163 6164 for_each_zone_zonelist_nodemask(zone, z, zonelist, 6165 sc->reclaim_idx, sc->nodemask) { 6166 /* 6167 * Take care that memory controller reclaiming has only a small 6168 * influence on the global LRU. 6169 */ 6170 if (!cgroup_reclaim(sc)) { 6171 if (!cpuset_zone_allowed(zone, 6172 GFP_KERNEL | __GFP_HARDWALL)) 6173 continue; 6174 6175 /* 6176 * If we already have plenty of memory free for 6177 * compaction in this zone, don't free any more. 6178 * Even though compaction is invoked for any 6179 * non-zero order, only frequent costly order 6180 * reclamation is disruptive enough to become a 6181 * noticeable problem, like transparent huge 6182 * page allocations. 6183 */ 6184 if (IS_ENABLED(CONFIG_COMPACTION) && 6185 sc->order > PAGE_ALLOC_COSTLY_ORDER && 6186 compaction_ready(zone, sc)) { 6187 sc->compaction_ready = true; 6188 continue; 6189 } 6190 6191 /* 6192 * Shrink each node in the zonelist once. If the 6193 * zonelist is ordered by zone (not the default) then a 6194 * node may be shrunk multiple times but in that case 6195 * the user prefers lower zones being preserved. 6196 */ 6197 if (zone->zone_pgdat == last_pgdat) 6198 continue; 6199 6200 /* 6201 * This steals pages from memory cgroups over softlimit 6202 * and returns the number of reclaimed pages and 6203 * scanned pages. This works for global memory pressure 6204 * and balancing, not for a memcg's limit.
6205 */ 6206 nr_soft_scanned = 0; 6207 nr_soft_reclaimed = memcg1_soft_limit_reclaim(zone->zone_pgdat, 6208 sc->order, sc->gfp_mask, 6209 &nr_soft_scanned); 6210 sc->nr_reclaimed += nr_soft_reclaimed; 6211 sc->nr_scanned += nr_soft_scanned; 6212 /* need some check for avoid more shrink_zone() */ 6213 } 6214 6215 if (!first_pgdat) 6216 first_pgdat = zone->zone_pgdat; 6217 6218 /* See comment about same check for global reclaim above */ 6219 if (zone->zone_pgdat == last_pgdat) 6220 continue; 6221 last_pgdat = zone->zone_pgdat; 6222 shrink_node(zone->zone_pgdat, sc); 6223 } 6224 6225 if (first_pgdat) 6226 consider_reclaim_throttle(first_pgdat, sc); 6227 6228 /* 6229 * Restore to original mask to avoid the impact on the caller if we 6230 * promoted it to __GFP_HIGHMEM. 6231 */ 6232 sc->gfp_mask = orig_mask; 6233 } 6234 6235 static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat) 6236 { 6237 struct lruvec *target_lruvec; 6238 unsigned long refaults; 6239 6240 if (lru_gen_enabled()) 6241 return; 6242 6243 target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat); 6244 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON); 6245 target_lruvec->refaults[WORKINGSET_ANON] = refaults; 6246 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE); 6247 target_lruvec->refaults[WORKINGSET_FILE] = refaults; 6248 } 6249 6250 /* 6251 * This is the main entry point to direct page reclaim. 6252 * 6253 * If a full scan of the inactive list fails to free enough memory then we 6254 * are "out of memory" and something needs to be killed. 6255 * 6256 * If the caller is !__GFP_FS then the probability of a failure is reasonably 6257 * high - the zone may be full of dirty or under-writeback pages, which this 6258 * caller can't do much about. We kick the writeback threads and take explicit 6259 * naps in the hope that some of these pages can be written. But if the 6260 * allocating task holds filesystem locks which prevent writeout this might not 6261 * work, and the allocation attempt will fail. 6262 * 6263 * returns: 0, if no pages reclaimed 6264 * else, the number of pages reclaimed 6265 */ 6266 static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 6267 struct scan_control *sc) 6268 { 6269 int initial_priority = sc->priority; 6270 pg_data_t *last_pgdat; 6271 struct zoneref *z; 6272 struct zone *zone; 6273 retry: 6274 delayacct_freepages_start(); 6275 6276 if (!cgroup_reclaim(sc)) 6277 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); 6278 6279 do { 6280 if (!sc->proactive) 6281 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, 6282 sc->priority); 6283 sc->nr_scanned = 0; 6284 shrink_zones(zonelist, sc); 6285 6286 if (sc->nr_reclaimed >= sc->nr_to_reclaim) 6287 break; 6288 6289 if (sc->compaction_ready) 6290 break; 6291 6292 /* 6293 * If we're getting trouble reclaiming, start doing 6294 * writepage even in laptop mode. 
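 * With DEF_PRIORITY at its usual value of 12, this only kicks in once
 * priority has dropped below 10, i.e. after the first few passes over
 * the zonelist have failed to reclaim enough.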
6295 */ 6296 if (sc->priority < DEF_PRIORITY - 2) 6297 sc->may_writepage = 1; 6298 } while (--sc->priority >= 0); 6299 6300 last_pgdat = NULL; 6301 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, 6302 sc->nodemask) { 6303 if (zone->zone_pgdat == last_pgdat) 6304 continue; 6305 last_pgdat = zone->zone_pgdat; 6306 6307 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); 6308 6309 if (cgroup_reclaim(sc)) { 6310 struct lruvec *lruvec; 6311 6312 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, 6313 zone->zone_pgdat); 6314 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); 6315 } 6316 } 6317 6318 delayacct_freepages_end(); 6319 6320 if (sc->nr_reclaimed) 6321 return sc->nr_reclaimed; 6322 6323 /* Aborted reclaim to try compaction? don't OOM, then */ 6324 if (sc->compaction_ready) 6325 return 1; 6326 6327 /* 6328 * In most cases, direct reclaimers can do partial walks 6329 * through the cgroup tree to meet the reclaim goal while 6330 * keeping latency low. Since the iterator state is shared 6331 * among all direct reclaim invocations (to retain fairness 6332 * among cgroups), though, high concurrency can result in 6333 * individual threads not seeing enough cgroups to make 6334 * meaningful forward progress. Avoid false OOMs in this case. 6335 */ 6336 if (!sc->memcg_full_walk) { 6337 sc->priority = initial_priority; 6338 sc->memcg_full_walk = 1; 6339 goto retry; 6340 } 6341 6342 /* 6343 * We make inactive:active ratio decisions based on the node's 6344 * composition of memory, but a restrictive reclaim_idx or a 6345 * memory.low cgroup setting can exempt large amounts of 6346 * memory from reclaim. Neither of which are very common, so 6347 * instead of doing costly eligibility calculations of the 6348 * entire cgroup subtree up front, we assume the estimates are 6349 * good, and retry with forcible deactivation if that fails. 6350 */ 6351 if (sc->skipped_deactivate) { 6352 sc->priority = initial_priority; 6353 sc->force_deactivate = 1; 6354 sc->skipped_deactivate = 0; 6355 goto retry; 6356 } 6357 6358 /* Untapped cgroup reserves? Don't OOM, retry. 
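 * This final pass sets memcg_low_reclaim, so cgroups sitting under their
 * memory.low protection are scanned as well and MEMCG_LOW events are
 * emitted for them (see shrink_node_memcgs()).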
*/ 6359 if (sc->memcg_low_skipped) { 6360 sc->priority = initial_priority; 6361 sc->force_deactivate = 0; 6362 sc->memcg_low_reclaim = 1; 6363 sc->memcg_low_skipped = 0; 6364 goto retry; 6365 } 6366 6367 return 0; 6368 } 6369 6370 static bool allow_direct_reclaim(pg_data_t *pgdat) 6371 { 6372 struct zone *zone; 6373 unsigned long pfmemalloc_reserve = 0; 6374 unsigned long free_pages = 0; 6375 int i; 6376 bool wmark_ok; 6377 6378 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) 6379 return true; 6380 6381 for (i = 0; i <= ZONE_NORMAL; i++) { 6382 zone = &pgdat->node_zones[i]; 6383 if (!managed_zone(zone)) 6384 continue; 6385 6386 if (!zone_reclaimable_pages(zone)) 6387 continue; 6388 6389 pfmemalloc_reserve += min_wmark_pages(zone); 6390 free_pages += zone_page_state_snapshot(zone, NR_FREE_PAGES); 6391 } 6392 6393 /* If there are no reserves (unexpected config) then do not throttle */ 6394 if (!pfmemalloc_reserve) 6395 return true; 6396 6397 wmark_ok = free_pages > pfmemalloc_reserve / 2; 6398 6399 /* kswapd must be awake if processes are being throttled */ 6400 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { 6401 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) 6402 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); 6403 6404 wake_up_interruptible(&pgdat->kswapd_wait); 6405 } 6406 6407 return wmark_ok; 6408 } 6409 6410 /* 6411 * Throttle direct reclaimers if backing storage is backed by the network 6412 * and the PFMEMALLOC reserve for the preferred node is getting dangerously 6413 * depleted. kswapd will continue to make progress and wake the processes 6414 * when the low watermark is reached. 6415 * 6416 * Returns true if a fatal signal was delivered during throttling. If this 6417 * happens, the page allocator should not consider triggering the OOM killer. 6418 */ 6419 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, 6420 nodemask_t *nodemask) 6421 { 6422 struct zoneref *z; 6423 struct zone *zone; 6424 pg_data_t *pgdat = NULL; 6425 6426 /* 6427 * Kernel threads should not be throttled as they may be indirectly 6428 * responsible for cleaning pages necessary for reclaim to make forward 6429 * progress. kjournald for example may enter direct reclaim while 6430 * committing a transaction where throttling it could forcing other 6431 * processes to block on log_wait_commit(). 6432 */ 6433 if (current->flags & PF_KTHREAD) 6434 goto out; 6435 6436 /* 6437 * If a fatal signal is pending, this process should not throttle. 6438 * It should return quickly so it can exit and free its memory 6439 */ 6440 if (fatal_signal_pending(current)) 6441 goto out; 6442 6443 /* 6444 * Check if the pfmemalloc reserves are ok by finding the first node 6445 * with a usable ZONE_NORMAL or lower zone. The expectation is that 6446 * GFP_KERNEL will be required for allocating network buffers when 6447 * swapping over the network so ZONE_HIGHMEM is unusable. 6448 * 6449 * Throttling is based on the first usable node and throttled processes 6450 * wait on a queue until kswapd makes progress and wakes them. There 6451 * is an affinity then between processes waking up and where reclaim 6452 * progress has been made assuming the process wakes on the same node. 6453 * More importantly, processes running on remote nodes will not compete 6454 * for remote pfmemalloc reserves and processes on different nodes 6455 * should make reasonable progress. 
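 * The throttling threshold itself comes from allow_direct_reclaim()
 * above: tasks start sleeping once free pages in ZONE_NORMAL and below
 * drop under half of the summed min watermarks (for example, below
 * roughly 32MB when those watermarks add up to 64MB), and they are woken
 * as kswapd restores the reserve.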
6456 */ 6457 for_each_zone_zonelist_nodemask(zone, z, zonelist, 6458 gfp_zone(gfp_mask), nodemask) { 6459 if (zone_idx(zone) > ZONE_NORMAL) 6460 continue; 6461 6462 /* Throttle based on the first usable node */ 6463 pgdat = zone->zone_pgdat; 6464 if (allow_direct_reclaim(pgdat)) 6465 goto out; 6466 break; 6467 } 6468 6469 /* If no zone was usable by the allocation flags then do not throttle */ 6470 if (!pgdat) 6471 goto out; 6472 6473 /* Account for the throttling */ 6474 count_vm_event(PGSCAN_DIRECT_THROTTLE); 6475 6476 /* 6477 * If the caller cannot enter the filesystem, it's possible that it 6478 * is due to the caller holding an FS lock or performing a journal 6479 * transaction in the case of a filesystem like ext[3|4]. In this case, 6480 * it is not safe to block on pfmemalloc_wait as kswapd could be 6481 * blocked waiting on the same lock. Instead, throttle for up to a 6482 * second before continuing. 6483 */ 6484 if (!(gfp_mask & __GFP_FS)) 6485 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, 6486 allow_direct_reclaim(pgdat), HZ); 6487 else 6488 /* Throttle until kswapd wakes the process */ 6489 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, 6490 allow_direct_reclaim(pgdat)); 6491 6492 if (fatal_signal_pending(current)) 6493 return true; 6494 6495 out: 6496 return false; 6497 } 6498 6499 unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 6500 gfp_t gfp_mask, nodemask_t *nodemask) 6501 { 6502 unsigned long nr_reclaimed; 6503 struct scan_control sc = { 6504 .nr_to_reclaim = SWAP_CLUSTER_MAX, 6505 .gfp_mask = current_gfp_context(gfp_mask), 6506 .reclaim_idx = gfp_zone(gfp_mask), 6507 .order = order, 6508 .nodemask = nodemask, 6509 .priority = DEF_PRIORITY, 6510 .may_writepage = !laptop_mode, 6511 .may_unmap = 1, 6512 .may_swap = 1, 6513 }; 6514 6515 /* 6516 * scan_control uses s8 fields for order, priority, and reclaim_idx. 6517 * Confirm they are large enough for max values. 6518 */ 6519 BUILD_BUG_ON(MAX_PAGE_ORDER >= S8_MAX); 6520 BUILD_BUG_ON(DEF_PRIORITY > S8_MAX); 6521 BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX); 6522 6523 /* 6524 * Do not enter reclaim if fatal signal was delivered while throttled. 6525 * 1 is returned so that the page allocator does not OOM kill at this 6526 * point. 6527 */ 6528 if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask)) 6529 return 1; 6530 6531 set_task_reclaim_state(current, &sc.reclaim_state); 6532 trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask); 6533 6534 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 6535 6536 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); 6537 set_task_reclaim_state(current, NULL); 6538 6539 return nr_reclaimed; 6540 } 6541 6542 #ifdef CONFIG_MEMCG 6543 6544 /* Only used by soft limit reclaim. Do not reuse for anything else. 
*/ 6545 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, 6546 gfp_t gfp_mask, bool noswap, 6547 pg_data_t *pgdat, 6548 unsigned long *nr_scanned) 6549 { 6550 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); 6551 struct scan_control sc = { 6552 .nr_to_reclaim = SWAP_CLUSTER_MAX, 6553 .target_mem_cgroup = memcg, 6554 .may_writepage = !laptop_mode, 6555 .may_unmap = 1, 6556 .reclaim_idx = MAX_NR_ZONES - 1, 6557 .may_swap = !noswap, 6558 }; 6559 6560 WARN_ON_ONCE(!current->reclaim_state); 6561 6562 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 6563 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 6564 6565 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order, 6566 sc.gfp_mask); 6567 6568 /* 6569 * NOTE: Although we can get the priority field, using it 6570 * here is not a good idea, since it limits the pages we can scan. 6571 * if we don't reclaim here, the shrink_node from balance_pgdat 6572 * will pick up pages from other mem cgroup's as well. We hack 6573 * the priority and make it zero. 6574 */ 6575 shrink_lruvec(lruvec, &sc); 6576 6577 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 6578 6579 *nr_scanned = sc.nr_scanned; 6580 6581 return sc.nr_reclaimed; 6582 } 6583 6584 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, 6585 unsigned long nr_pages, 6586 gfp_t gfp_mask, 6587 unsigned int reclaim_options, 6588 int *swappiness) 6589 { 6590 unsigned long nr_reclaimed; 6591 unsigned int noreclaim_flag; 6592 struct scan_control sc = { 6593 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), 6594 .proactive_swappiness = swappiness, 6595 .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) | 6596 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), 6597 .reclaim_idx = MAX_NR_ZONES - 1, 6598 .target_mem_cgroup = memcg, 6599 .priority = DEF_PRIORITY, 6600 .may_writepage = !laptop_mode, 6601 .may_unmap = 1, 6602 .may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP), 6603 .proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE), 6604 }; 6605 /* 6606 * Traverse the ZONELIST_FALLBACK zonelist of the current node to put 6607 * equal pressure on all the nodes. This is based on the assumption that 6608 * the reclaim does not bail out early. 
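 * Note that nr_to_reclaim above is rounded up to at least
 * SWAP_CLUSTER_MAX pages, so even a tiny proactive reclaim request does
 * a minimum batch of work.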
6609 */ 6610 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 6611 6612 set_task_reclaim_state(current, &sc.reclaim_state); 6613 trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask); 6614 noreclaim_flag = memalloc_noreclaim_save(); 6615 6616 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 6617 6618 memalloc_noreclaim_restore(noreclaim_flag); 6619 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 6620 set_task_reclaim_state(current, NULL); 6621 6622 return nr_reclaimed; 6623 } 6624 #endif 6625 6626 static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc) 6627 { 6628 struct mem_cgroup *memcg; 6629 struct lruvec *lruvec; 6630 6631 if (lru_gen_enabled()) { 6632 lru_gen_age_node(pgdat, sc); 6633 return; 6634 } 6635 6636 if (!can_age_anon_pages(pgdat, sc)) 6637 return; 6638 6639 lruvec = mem_cgroup_lruvec(NULL, pgdat); 6640 if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON)) 6641 return; 6642 6643 memcg = mem_cgroup_iter(NULL, NULL, NULL); 6644 do { 6645 lruvec = mem_cgroup_lruvec(memcg, pgdat); 6646 shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 6647 sc, LRU_ACTIVE_ANON); 6648 memcg = mem_cgroup_iter(NULL, memcg, NULL); 6649 } while (memcg); 6650 } 6651 6652 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx) 6653 { 6654 int i; 6655 struct zone *zone; 6656 6657 /* 6658 * Check for watermark boosts top-down as the higher zones 6659 * are more likely to be boosted. Both watermarks and boosts 6660 * should not be checked at the same time as reclaim would 6661 * start prematurely when there is no boosting and a lower 6662 * zone is balanced. 6663 */ 6664 for (i = highest_zoneidx; i >= 0; i--) { 6665 zone = pgdat->node_zones + i; 6666 if (!managed_zone(zone)) 6667 continue; 6668 6669 if (zone->watermark_boost) 6670 return true; 6671 } 6672 6673 return false; 6674 } 6675 6676 /* 6677 * Returns true if there is an eligible zone balanced for the request order 6678 * and highest_zoneidx 6679 */ 6680 static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) 6681 { 6682 int i; 6683 unsigned long mark = -1; 6684 struct zone *zone; 6685 6686 /* 6687 * Check watermarks bottom-up as lower zones are more likely to 6688 * meet watermarks. 6689 */ 6690 for (i = 0; i <= highest_zoneidx; i++) { 6691 zone = pgdat->node_zones + i; 6692 6693 if (!managed_zone(zone)) 6694 continue; 6695 6696 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) 6697 mark = promo_wmark_pages(zone); 6698 else 6699 mark = high_wmark_pages(zone); 6700 if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx)) 6701 return true; 6702 } 6703 6704 /* 6705 * If a node has no managed zone within highest_zoneidx, it does not 6706 * need balancing by definition. This can happen if a zone-restricted 6707 * allocation tries to wake a remote kswapd. 6708 */ 6709 if (mark == -1) 6710 return true; 6711 6712 return false; 6713 } 6714 6715 /* Clear pgdat state for congested, dirty or under writeback. */ 6716 static void clear_pgdat_congested(pg_data_t *pgdat) 6717 { 6718 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); 6719 6720 clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags); 6721 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); 6722 clear_bit(PGDAT_DIRTY, &pgdat->flags); 6723 clear_bit(PGDAT_WRITEBACK, &pgdat->flags); 6724 } 6725 6726 /* 6727 * Prepare kswapd for sleeping. This verifies that there are no processes 6728 * waiting in throttle_direct_reclaim() and that watermarks have been met. 
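 * A node that has failed to reclaim anything MAX_RECLAIM_RETRIES times
 * in a row (16 at the time of writing) is treated as hopeless below and
 * kswapd stops balancing it until a direct reclaim run succeeds there
 * again.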
6729 * 6730 * Returns true if kswapd is ready to sleep 6731 */ 6732 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, 6733 int highest_zoneidx) 6734 { 6735 /* 6736 * The throttled processes are normally woken up in balance_pgdat() as 6737 * soon as allow_direct_reclaim() is true. But there is a potential 6738 * race between when kswapd checks the watermarks and a process gets 6739 * throttled. There is also a potential race if processes get 6740 * throttled, kswapd wakes, a large process exits thereby balancing the 6741 * zones, which causes kswapd to exit balance_pgdat() before reaching 6742 * the wake up checks. If kswapd is going to sleep, no process should 6743 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If 6744 * the wake up is premature, processes will wake kswapd and get 6745 * throttled again. The difference from wake ups in balance_pgdat() is 6746 * that here we are under prepare_to_wait(). 6747 */ 6748 if (waitqueue_active(&pgdat->pfmemalloc_wait)) 6749 wake_up_all(&pgdat->pfmemalloc_wait); 6750 6751 /* Hopeless node, leave it to direct reclaim */ 6752 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) 6753 return true; 6754 6755 if (pgdat_balanced(pgdat, order, highest_zoneidx)) { 6756 clear_pgdat_congested(pgdat); 6757 return true; 6758 } 6759 6760 return false; 6761 } 6762 6763 /* 6764 * kswapd shrinks a node of pages that are at or below the highest usable 6765 * zone that is currently unbalanced. 6766 * 6767 * Returns true if kswapd scanned at least the requested number of pages to 6768 * reclaim or if the lack of progress was due to pages under writeback. 6769 * This is used to determine if the scanning priority needs to be raised. 6770 */ 6771 static bool kswapd_shrink_node(pg_data_t *pgdat, 6772 struct scan_control *sc) 6773 { 6774 struct zone *zone; 6775 int z; 6776 unsigned long nr_reclaimed = sc->nr_reclaimed; 6777 6778 /* Reclaim a number of pages proportional to the number of zones */ 6779 sc->nr_to_reclaim = 0; 6780 for (z = 0; z <= sc->reclaim_idx; z++) { 6781 zone = pgdat->node_zones + z; 6782 if (!managed_zone(zone)) 6783 continue; 6784 6785 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); 6786 } 6787 6788 /* 6789 * Historically care was taken to put equal pressure on all zones but 6790 * now pressure is applied based on node LRU order. 6791 */ 6792 shrink_node(pgdat, sc); 6793 6794 /* 6795 * Fragmentation may mean that the system cannot be rebalanced for 6796 * high-order allocations. If twice the allocation size has been 6797 * reclaimed then recheck watermarks only at order-0 to prevent 6798 * excessive reclaim. Assume that a process requested a high-order 6799 * can direct reclaim/compact. 6800 */ 6801 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order)) 6802 sc->order = 0; 6803 6804 /* account for progress from mm_account_reclaimed_pages() */ 6805 return max(sc->nr_scanned, sc->nr_reclaimed - nr_reclaimed) >= sc->nr_to_reclaim; 6806 } 6807 6808 /* Page allocator PCP high watermark is lowered if reclaim is active. 
*/ 6809 static inline void 6810 update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active) 6811 { 6812 int i; 6813 struct zone *zone; 6814 6815 for (i = 0; i <= highest_zoneidx; i++) { 6816 zone = pgdat->node_zones + i; 6817 6818 if (!managed_zone(zone)) 6819 continue; 6820 6821 if (active) 6822 set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); 6823 else 6824 clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); 6825 } 6826 } 6827 6828 static inline void 6829 set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) 6830 { 6831 update_reclaim_active(pgdat, highest_zoneidx, true); 6832 } 6833 6834 static inline void 6835 clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) 6836 { 6837 update_reclaim_active(pgdat, highest_zoneidx, false); 6838 } 6839 6840 /* 6841 * For kswapd, balance_pgdat() will reclaim pages across a node from zones 6842 * that are eligible for use by the caller until at least one zone is 6843 * balanced. 6844 * 6845 * Returns the order kswapd finished reclaiming at. 6846 * 6847 * kswapd scans the zones in the highmem->normal->dma direction. It skips 6848 * zones which have free_pages > high_wmark_pages(zone), but once a zone is 6849 * found to have free_pages <= high_wmark_pages(zone), any page in that zone 6850 * or lower is eligible for reclaim until at least one usable zone is 6851 * balanced. 6852 */ 6853 static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx) 6854 { 6855 int i; 6856 unsigned long nr_soft_reclaimed; 6857 unsigned long nr_soft_scanned; 6858 unsigned long pflags; 6859 unsigned long nr_boost_reclaim; 6860 unsigned long zone_boosts[MAX_NR_ZONES] = { 0, }; 6861 bool boosted; 6862 struct zone *zone; 6863 struct scan_control sc = { 6864 .gfp_mask = GFP_KERNEL, 6865 .order = order, 6866 .may_unmap = 1, 6867 }; 6868 6869 set_task_reclaim_state(current, &sc.reclaim_state); 6870 psi_memstall_enter(&pflags); 6871 __fs_reclaim_acquire(_THIS_IP_); 6872 6873 count_vm_event(PAGEOUTRUN); 6874 6875 /* 6876 * Account for the reclaim boost. Note that the zone boost is left in 6877 * place so that parallel allocations that are near the watermark will 6878 * stall or direct reclaim until kswapd is finished. 6879 */ 6880 nr_boost_reclaim = 0; 6881 for (i = 0; i <= highest_zoneidx; i++) { 6882 zone = pgdat->node_zones + i; 6883 if (!managed_zone(zone)) 6884 continue; 6885 6886 nr_boost_reclaim += zone->watermark_boost; 6887 zone_boosts[i] = zone->watermark_boost; 6888 } 6889 boosted = nr_boost_reclaim; 6890 6891 restart: 6892 set_reclaim_active(pgdat, highest_zoneidx); 6893 sc.priority = DEF_PRIORITY; 6894 do { 6895 unsigned long nr_reclaimed = sc.nr_reclaimed; 6896 bool raise_priority = true; 6897 bool balanced; 6898 bool ret; 6899 bool was_frozen; 6900 6901 sc.reclaim_idx = highest_zoneidx; 6902 6903 /* 6904 * If the number of buffer_heads exceeds the maximum allowed 6905 * then consider reclaiming from all zones. This has a dual 6906 * purpose -- on 64-bit systems it is expected that 6907 * buffer_heads are stripped during active rotation. On 32-bit 6908 * systems, highmem pages can pin lowmem memory and shrinking 6909 * buffers can relieve lowmem pressure. Reclaim may still not 6910 * go ahead if all eligible zones for the original allocation 6911 * request are balanced to avoid excessive reclaim from kswapd. 
6912 */ 6913 if (buffer_heads_over_limit) { 6914 for (i = MAX_NR_ZONES - 1; i >= 0; i--) { 6915 zone = pgdat->node_zones + i; 6916 if (!managed_zone(zone)) 6917 continue; 6918 6919 sc.reclaim_idx = i; 6920 break; 6921 } 6922 } 6923 6924 /* 6925 * If the pgdat is imbalanced then ignore boosting and preserve 6926 * the watermarks for a later time and restart. Note that the 6927 * zone watermarks will still be reset at the end of balancing 6928 * on the grounds that the normal reclaim should be enough to 6929 * re-evaluate if boosting is required when kswapd next wakes. 6930 */ 6931 balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx); 6932 if (!balanced && nr_boost_reclaim) { 6933 nr_boost_reclaim = 0; 6934 goto restart; 6935 } 6936 6937 /* 6938 * If boosting is not active then only reclaim if there are no 6939 * eligible zones. Note that sc.reclaim_idx is not used as 6940 * buffer_heads_over_limit may have adjusted it. 6941 */ 6942 if (!nr_boost_reclaim && balanced) 6943 goto out; 6944 6945 /* Limit the priority of boosting to avoid reclaim writeback */ 6946 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2) 6947 raise_priority = false; 6948 6949 /* 6950 * Do not writeback or swap pages for boosted reclaim. The 6951 * intent is to relieve pressure not issue sub-optimal IO 6952 * from reclaim context. If no pages are reclaimed, the 6953 * reclaim will be aborted. 6954 */ 6955 sc.may_writepage = !laptop_mode && !nr_boost_reclaim; 6956 sc.may_swap = !nr_boost_reclaim; 6957 6958 /* 6959 * Do some background aging, to give pages a chance to be 6960 * referenced before reclaiming. All pages are rotated 6961 * regardless of classzone as this is about consistent aging. 6962 */ 6963 kswapd_age_node(pgdat, &sc); 6964 6965 /* 6966 * If we're getting trouble reclaiming, start doing writepage 6967 * even in laptop mode. 6968 */ 6969 if (sc.priority < DEF_PRIORITY - 2) 6970 sc.may_writepage = 1; 6971 6972 /* Call soft limit reclaim before calling shrink_node. */ 6973 sc.nr_scanned = 0; 6974 nr_soft_scanned = 0; 6975 nr_soft_reclaimed = memcg1_soft_limit_reclaim(pgdat, sc.order, 6976 sc.gfp_mask, &nr_soft_scanned); 6977 sc.nr_reclaimed += nr_soft_reclaimed; 6978 6979 /* 6980 * There should be no need to raise the scanning priority if 6981 * enough pages are already being scanned that the high 6982 * watermark would be met at 100% efficiency. 6983 */ 6984 if (kswapd_shrink_node(pgdat, &sc)) 6985 raise_priority = false; 6986 6987 /* 6988 * If the low watermark is met there is no need for processes 6989 * to be throttled on pfmemalloc_wait as they should now be 6990 * able to safely make forward progress. Wake them up. 6991 */ 6992 if (waitqueue_active(&pgdat->pfmemalloc_wait) && 6993 allow_direct_reclaim(pgdat)) 6994 wake_up_all(&pgdat->pfmemalloc_wait); 6995 6996 /* Check if kswapd should be suspending */ 6997 __fs_reclaim_release(_THIS_IP_); 6998 ret = kthread_freezable_should_stop(&was_frozen); 6999 __fs_reclaim_acquire(_THIS_IP_); 7000 if (was_frozen || ret) 7001 break; 7002 7003 /* 7004 * Raise priority if scanning rate is too low or there was no 7005 * progress in reclaiming pages 7006 */ 7007 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; 7008 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed); 7009 7010 /* 7011 * If reclaim made no progress for a boost, stop reclaim as 7012 * IO cannot be queued and it could be an infinite loop in 7013 * extreme circumstances.
7014 */ 7015 if (nr_boost_reclaim && !nr_reclaimed) 7016 break; 7017 7018 if (raise_priority || !nr_reclaimed) 7019 sc.priority--; 7020 } while (sc.priority >= 1); 7021 7022 /* 7023 * Restart only if it went through the priority loop all the way, 7024 * but cache_trim_mode didn't work. 7025 */ 7026 if (!sc.nr_reclaimed && sc.priority < 1 && 7027 !sc.no_cache_trim_mode && sc.cache_trim_mode_failed) { 7028 sc.no_cache_trim_mode = 1; 7029 goto restart; 7030 } 7031 7032 if (!sc.nr_reclaimed) 7033 pgdat->kswapd_failures++; 7034 7035 out: 7036 clear_reclaim_active(pgdat, highest_zoneidx); 7037 7038 /* If reclaim was boosted, account for the reclaim done in this pass */ 7039 if (boosted) { 7040 unsigned long flags; 7041 7042 for (i = 0; i <= highest_zoneidx; i++) { 7043 if (!zone_boosts[i]) 7044 continue; 7045 7046 /* Increments are under the zone lock */ 7047 zone = pgdat->node_zones + i; 7048 spin_lock_irqsave(&zone->lock, flags); 7049 zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]); 7050 spin_unlock_irqrestore(&zone->lock, flags); 7051 } 7052 7053 /* 7054 * As there is now likely space, wakeup kcompact to defragment 7055 * pageblocks. 7056 */ 7057 wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx); 7058 } 7059 7060 snapshot_refaults(NULL, pgdat); 7061 __fs_reclaim_release(_THIS_IP_); 7062 psi_memstall_leave(&pflags); 7063 set_task_reclaim_state(current, NULL); 7064 7065 /* 7066 * Return the order kswapd stopped reclaiming at as 7067 * prepare_kswapd_sleep() takes it into account. If another caller 7068 * entered the allocator slow path while kswapd was awake, order will 7069 * remain at the higher level. 7070 */ 7071 return sc.order; 7072 } 7073 7074 /* 7075 * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to 7076 * be reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is 7077 * not a valid index then either kswapd runs for first time or kswapd couldn't 7078 * sleep after previous reclaim attempt (node is still unbalanced). In that 7079 * case return the zone index of the previous kswapd reclaim cycle. 7080 */ 7081 static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat, 7082 enum zone_type prev_highest_zoneidx) 7083 { 7084 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); 7085 7086 return curr_idx == MAX_NR_ZONES ? prev_highest_zoneidx : curr_idx; 7087 } 7088 7089 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, 7090 unsigned int highest_zoneidx) 7091 { 7092 long remaining = 0; 7093 DEFINE_WAIT(wait); 7094 7095 if (freezing(current) || kthread_should_stop()) 7096 return; 7097 7098 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 7099 7100 /* 7101 * Try to sleep for a short interval. Note that kcompactd will only be 7102 * woken if it is possible to sleep for a short interval. This is 7103 * deliberate on the assumption that if reclaim cannot keep an 7104 * eligible zone balanced that it's also unlikely that compaction will 7105 * succeed. 7106 */ 7107 if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { 7108 /* 7109 * Compaction records what page blocks it recently failed to 7110 * isolate pages from and skips them in the future scanning. 7111 * When kswapd is going to sleep, it is reasonable to assume 7112 * that pages and compaction may succeed so reset the cache. 
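 * The short nap taken below is HZ/10 jiffies, i.e. roughly 100ms, after
 * which the watermarks are rechecked before committing to a full sleep.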
7113 */ 7114 reset_isolation_suitable(pgdat); 7115 7116 /* 7117 * We have freed the memory, now we should compact it to make 7118 * allocation of the requested order possible. 7119 */ 7120 wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx); 7121 7122 remaining = schedule_timeout(HZ/10); 7123 7124 /* 7125 * If woken prematurely then reset kswapd_highest_zoneidx and 7126 * order. The values will either be from a wakeup request or 7127 * the previous request that slept prematurely. 7128 */ 7129 if (remaining) { 7130 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, 7131 kswapd_highest_zoneidx(pgdat, 7132 highest_zoneidx)); 7133 7134 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) 7135 WRITE_ONCE(pgdat->kswapd_order, reclaim_order); 7136 } 7137 7138 finish_wait(&pgdat->kswapd_wait, &wait); 7139 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 7140 } 7141 7142 /* 7143 * After a short sleep, check if it was a premature sleep. If not, then 7144 * go fully to sleep until explicitly woken up. 7145 */ 7146 if (!remaining && 7147 prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { 7148 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); 7149 7150 /* 7151 * vmstat counters are not perfectly accurate and the estimated 7152 * value for counters such as NR_FREE_PAGES can deviate from the 7153 * true value by nr_online_cpus * threshold. To avoid the zone 7154 * watermarks being breached while under pressure, we reduce the 7155 * per-cpu vmstat threshold while kswapd is awake and restore 7156 * them before going back to sleep. 7157 */ 7158 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 7159 7160 if (!kthread_should_stop()) 7161 schedule(); 7162 7163 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); 7164 } else { 7165 if (remaining) 7166 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); 7167 else 7168 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); 7169 } 7170 finish_wait(&pgdat->kswapd_wait, &wait); 7171 } 7172 7173 /* 7174 * The background pageout daemon, started as a kernel thread 7175 * from the init process. 7176 * 7177 * This basically trickles out pages so that we have _some_ 7178 * free memory available even if there is no other activity 7179 * that frees anything up. This is needed for things like routing 7180 * etc, where we otherwise might have all activity going on in 7181 * asynchronous contexts that cannot page things out. 7182 * 7183 * If there are applications that are active memory-allocators 7184 * (most normal use), this basically shouldn't matter. 7185 */ 7186 static int kswapd(void *p) 7187 { 7188 unsigned int alloc_order, reclaim_order; 7189 unsigned int highest_zoneidx = MAX_NR_ZONES - 1; 7190 pg_data_t *pgdat = (pg_data_t *)p; 7191 struct task_struct *tsk = current; 7192 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 7193 7194 if (!cpumask_empty(cpumask)) 7195 set_cpus_allowed_ptr(tsk, cpumask); 7196 7197 /* 7198 * Tell the memory management that we're a "memory allocator", 7199 * and that if we need more memory we should get access to it 7200 * regardless (see "__alloc_pages()"). "kswapd" should 7201 * never get caught in the normal page freeing logic. 7202 * 7203 * (Kswapd normally doesn't need memory anyway, but sometimes 7204 * you need a small amount of memory in order to be able to 7205 * page out something else, and this flag essentially protects 7206 * us from recursively trying to free more memory as we're 7207 * trying to free the first piece of memory in the first place). 
7208 */ 7209 tsk->flags |= PF_MEMALLOC | PF_KSWAPD; 7210 set_freezable(); 7211 7212 WRITE_ONCE(pgdat->kswapd_order, 0); 7213 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); 7214 atomic_set(&pgdat->nr_writeback_throttled, 0); 7215 for ( ; ; ) { 7216 bool was_frozen; 7217 7218 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); 7219 highest_zoneidx = kswapd_highest_zoneidx(pgdat, 7220 highest_zoneidx); 7221 7222 kswapd_try_sleep: 7223 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, 7224 highest_zoneidx); 7225 7226 /* Read the new order and highest_zoneidx */ 7227 alloc_order = READ_ONCE(pgdat->kswapd_order); 7228 highest_zoneidx = kswapd_highest_zoneidx(pgdat, 7229 highest_zoneidx); 7230 WRITE_ONCE(pgdat->kswapd_order, 0); 7231 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); 7232 7233 if (kthread_freezable_should_stop(&was_frozen)) 7234 break; 7235 7236 /* 7237 * We can speed up thawing tasks if we don't call balance_pgdat 7238 * after returning from the refrigerator 7239 */ 7240 if (was_frozen) 7241 continue; 7242 7243 /* 7244 * Reclaim begins at the requested order but if a high-order 7245 * reclaim fails then kswapd falls back to reclaiming for 7246 * order-0. If that happens, kswapd will consider sleeping 7247 * for the order it finished reclaiming at (reclaim_order) 7248 * but kcompactd is woken to compact for the original 7249 * request (alloc_order). 7250 */ 7251 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, 7252 alloc_order); 7253 reclaim_order = balance_pgdat(pgdat, alloc_order, 7254 highest_zoneidx); 7255 if (reclaim_order < alloc_order) 7256 goto kswapd_try_sleep; 7257 } 7258 7259 tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD); 7260 7261 return 0; 7262 } 7263 7264 /* 7265 * A zone is low on free memory or too fragmented for high-order memory. If 7266 * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's 7267 * pgdat. It will wake up kcompactd after reclaiming memory. If kswapd reclaim 7268 * has failed or is not needed, still wake up kcompactd if only compaction is 7269 * needed. 7270 */ 7271 void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, 7272 enum zone_type highest_zoneidx) 7273 { 7274 pg_data_t *pgdat; 7275 enum zone_type curr_idx; 7276 7277 if (!managed_zone(zone)) 7278 return; 7279 7280 if (!cpuset_zone_allowed(zone, gfp_flags)) 7281 return; 7282 7283 pgdat = zone->zone_pgdat; 7284 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); 7285 7286 if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx) 7287 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); 7288 7289 if (READ_ONCE(pgdat->kswapd_order) < order) 7290 WRITE_ONCE(pgdat->kswapd_order, order); 7291 7292 if (!waitqueue_active(&pgdat->kswapd_wait)) 7293 return; 7294 7295 /* Hopeless node, leave it to direct reclaim if possible */ 7296 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || 7297 (pgdat_balanced(pgdat, order, highest_zoneidx) && 7298 !pgdat_watermark_boosted(pgdat, highest_zoneidx))) { 7299 /* 7300 * There may be plenty of free memory available, but it's too 7301 * fragmented for high-order allocations. Wake up kcompactd 7302 * and rely on compaction_suitable() to determine if it's 7303 * needed. If it fails, it will defer subsequent attempts to 7304 * ratelimit its work. 
7305 */ 7306 if (!(gfp_flags & __GFP_DIRECT_RECLAIM)) 7307 wakeup_kcompactd(pgdat, order, highest_zoneidx); 7308 return; 7309 } 7310 7311 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order, 7312 gfp_flags); 7313 wake_up_interruptible(&pgdat->kswapd_wait); 7314 } 7315 7316 #ifdef CONFIG_HIBERNATION 7317 /* 7318 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of 7319 * freed pages. 7320 * 7321 * Rather than trying to age LRUs the aim is to preserve the overall 7322 * LRU order by reclaiming preferentially 7323 * inactive > active > active referenced > active mapped 7324 */ 7325 unsigned long shrink_all_memory(unsigned long nr_to_reclaim) 7326 { 7327 struct scan_control sc = { 7328 .nr_to_reclaim = nr_to_reclaim, 7329 .gfp_mask = GFP_HIGHUSER_MOVABLE, 7330 .reclaim_idx = MAX_NR_ZONES - 1, 7331 .priority = DEF_PRIORITY, 7332 .may_writepage = 1, 7333 .may_unmap = 1, 7334 .may_swap = 1, 7335 .hibernation_mode = 1, 7336 }; 7337 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 7338 unsigned long nr_reclaimed; 7339 unsigned int noreclaim_flag; 7340 7341 fs_reclaim_acquire(sc.gfp_mask); 7342 noreclaim_flag = memalloc_noreclaim_save(); 7343 set_task_reclaim_state(current, &sc.reclaim_state); 7344 7345 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 7346 7347 set_task_reclaim_state(current, NULL); 7348 memalloc_noreclaim_restore(noreclaim_flag); 7349 fs_reclaim_release(sc.gfp_mask); 7350 7351 return nr_reclaimed; 7352 } 7353 #endif /* CONFIG_HIBERNATION */ 7354 7355 /* 7356 * This kswapd start function will be called by init and node-hot-add. 7357 */ 7358 void __meminit kswapd_run(int nid) 7359 { 7360 pg_data_t *pgdat = NODE_DATA(nid); 7361 7362 pgdat_kswapd_lock(pgdat); 7363 if (!pgdat->kswapd) { 7364 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); 7365 if (IS_ERR(pgdat->kswapd)) { 7366 /* failure at boot is fatal */ 7367 pr_err("Failed to start kswapd on node %d,ret=%ld\n", 7368 nid, PTR_ERR(pgdat->kswapd)); 7369 BUG_ON(system_state < SYSTEM_RUNNING); 7370 pgdat->kswapd = NULL; 7371 } 7372 } 7373 pgdat_kswapd_unlock(pgdat); 7374 } 7375 7376 /* 7377 * Called by memory hotplug when all memory in a node is offlined. Caller must 7378 * be holding mem_hotplug_begin/done(). 7379 */ 7380 void __meminit kswapd_stop(int nid) 7381 { 7382 pg_data_t *pgdat = NODE_DATA(nid); 7383 struct task_struct *kswapd; 7384 7385 pgdat_kswapd_lock(pgdat); 7386 kswapd = pgdat->kswapd; 7387 if (kswapd) { 7388 kthread_stop(kswapd); 7389 pgdat->kswapd = NULL; 7390 } 7391 pgdat_kswapd_unlock(pgdat); 7392 } 7393 7394 static int __init kswapd_init(void) 7395 { 7396 int nid; 7397 7398 swap_setup(); 7399 for_each_node_state(nid, N_MEMORY) 7400 kswapd_run(nid); 7401 return 0; 7402 } 7403 7404 module_init(kswapd_init) 7405 7406 #ifdef CONFIG_NUMA 7407 /* 7408 * Node reclaim mode 7409 * 7410 * If non-zero call node_reclaim when the number of free pages falls below 7411 * the watermarks. 7412 */ 7413 int node_reclaim_mode __read_mostly; 7414 7415 /* 7416 * Priority for NODE_RECLAIM. This determines the fraction of pages 7417 * of a node considered for each zone_reclaim. 4 scans 1/16th of 7418 * a zone. 7419 */ 7420 #define NODE_RECLAIM_PRIORITY 4 7421 7422 /* 7423 * Percentage of pages in a zone that must be unmapped for node_reclaim to 7424 * occur. 7425 */ 7426 int sysctl_min_unmapped_ratio = 1; 7427 7428 /* 7429 * If the number of slab pages in a zone grows beyond this percentage then 7430 * slab reclaim needs to occur. 
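 * For example, with the default of 5%, a node managing 16GB of memory
 * needs more than roughly 800MB of reclaimable slab before this
 * condition alone makes node reclaim worthwhile.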
/*
 * Percentage of pages in a node that must be unmapped for node_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * If the number of slab pages in a node grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;

static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
{
	unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
	unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
		node_page_state(pgdat, NR_ACTIVE_FILE);

	/*
	 * It's possible for there to be more file mapped pages than
	 * accounted for by the pages on the file LRU lists because
	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED.
	 */
	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}

/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
{
	unsigned long nr_pagecache_reclaimable;
	unsigned long delta = 0;

	/*
	 * If RECLAIM_UNMAP is set, then all file pages are considered
	 * potentially reclaimable. Otherwise, we have to worry about
	 * pages like swapcache, and node_unmapped_file_pages() provides
	 * a better estimate.
	 */
	if (node_reclaim_mode & RECLAIM_UNMAP)
		nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
	else
		nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);

	/* If we can't clean pages, remove dirty pages from consideration */
	if (!(node_reclaim_mode & RECLAIM_WRITE))
		delta += node_page_state(pgdat, NR_FILE_DIRTY);

	/* Watch for any possible underflows due to delta */
	if (unlikely(delta > nr_pagecache_reclaimable))
		delta = nr_pagecache_reclaimable;

	return nr_pagecache_reclaimable - delta;
}
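/*
 * Illustrative arithmetic for node_pagecache_reclaimable() (made-up numbers):
 * with RECLAIM_UNMAP and RECLAIM_WRITE both clear, a node with 80000 file LRU
 * pages, 30000 NR_FILE_MAPPED pages and 10000 NR_FILE_DIRTY pages is
 * estimated to have (80000 - 30000) - 10000 = 40000 reclaimable page cache
 * pages. node_reclaim() below only proceeds when this estimate exceeds
 * pgdat->min_unmapped_pages (or when reclaimable slab exceeds
 * pgdat->min_slab_pages).
 */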
/*
 * Try to free up some pages from this node through reclaim.
 */
static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	unsigned int noreclaim_flag;
	struct scan_control sc = {
		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
		.gfp_mask = current_gfp_context(gfp_mask),
		.order = order,
		.priority = NODE_RECLAIM_PRIORITY,
		.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
		.may_swap = 1,
		.reclaim_idx = gfp_zone(gfp_mask),
	};
	unsigned long pflags;

	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
					   sc.gfp_mask);

	cond_resched();
	psi_memstall_enter(&pflags);
	delayacct_freepages_start();
	fs_reclaim_acquire(sc.gfp_mask);
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_UNMAP.
	 */
	noreclaim_flag = memalloc_noreclaim_save();
	set_task_reclaim_state(p, &sc.reclaim_state);

	if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages ||
	    node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) {
		/*
		 * Free memory by calling shrink_node() with increasing
		 * priorities until we have freed enough memory.
		 */
		do {
			shrink_node(pgdat, &sc);
		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
	}

	set_task_reclaim_state(p, NULL);
	memalloc_noreclaim_restore(noreclaim_flag);
	fs_reclaim_release(sc.gfp_mask);
	psi_memstall_leave(&pflags);
	delayacct_freepages_end();

	trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);

	return sc.nr_reclaimed >= nr_pages;
}

int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
{
	int ret;

	/*
	 * Node reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O, otherwise pages read by file I/O will be immediately
	 * thrown out if the node is overallocated. So we do not reclaim
	 * if less than a specified percentage of the node is used by
	 * unmapped file backed pages.
	 */
	if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
	    node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
	    pgdat->min_slab_pages)
		return NODE_RECLAIM_FULL;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
		return NODE_RECLAIM_NOSCAN;

	/*
	 * Only run node reclaim on the local node or on nodes that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off-node memory allocations
	 * as widely as possible.
	 */
	if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
		return NODE_RECLAIM_NOSCAN;

	if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
		return NODE_RECLAIM_NOSCAN;

	ret = __node_reclaim(pgdat, gfp_mask, order);
	clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);

	if (ret)
		count_vm_event(PGSCAN_ZONE_RECLAIM_SUCCESS);
	else
		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

	return ret;
}
#endif
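/*
 * Caller's-eye sketch of how node_reclaim() is typically driven (a hedged
 * approximation of the allocator side; the real call site lives in the page
 * allocator, not in this file, and the "try_next_zone" label is only for
 * illustration):
 *
 *	if (node_reclaim_mode && !zone_watermark_ok(zone, order, mark,
 *						    highest_zoneidx, alloc_flags)) {
 *		ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
 *		if (ret == NODE_RECLAIM_NOSCAN || ret == NODE_RECLAIM_FULL)
 *			goto try_next_zone;
 *		if (!zone_watermark_ok(zone, order, mark, highest_zoneidx,
 *				       alloc_flags))
 *			goto try_next_zone;
 *	}
 *
 * i.e. the return value only decides whether the watermark check is worth
 * repeating on this zone before falling back to another node.
 */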
/**
 * check_move_unevictable_folios - Move evictable folios to the appropriate
 *				   lru list.
 * @fbatch: Batch of lru folios to check.
 *
 * Checks folios for evictability and, if an evictable folio is on the
 * unevictable lru list, moves it to the appropriate evictable lru list.
 * This function should only be used for lru folios.
 */
void check_move_unevictable_folios(struct folio_batch *fbatch)
{
	struct lruvec *lruvec = NULL;
	int pgscanned = 0;
	int pgrescued = 0;
	int i;

	for (i = 0; i < fbatch->nr; i++) {
		struct folio *folio = fbatch->folios[i];
		int nr_pages = folio_nr_pages(folio);

		pgscanned += nr_pages;

		/* block memcg migration while the folio moves between lrus */
		if (!folio_test_clear_lru(folio))
			continue;

		lruvec = folio_lruvec_relock_irq(folio, lruvec);
		if (folio_evictable(folio) && folio_test_unevictable(folio)) {
			lruvec_del_folio(lruvec, folio);
			folio_clear_unevictable(folio);
			lruvec_add_folio(lruvec, folio);
			pgrescued += nr_pages;
		}
		folio_set_lru(folio);
	}

	if (lruvec) {
		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
		unlock_page_lruvec_irq(lruvec);
	} else if (pgscanned) {
		count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
	}
}
EXPORT_SYMBOL_GPL(check_move_unevictable_folios);
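/*
 * Usage sketch for check_move_unevictable_folios() (illustrative only; the
 * folios, the reason they became evictable again and the reference handling
 * are assumptions of this example, not requirements imposed here):
 *
 *	struct folio_batch fbatch;
 *
 *	folio_batch_init(&fbatch);
 *	for each folio that has just become evictable again, with a
 *	reference held:
 *		if (!folio_batch_add(&fbatch, folio)) {
 *			check_move_unevictable_folios(&fbatch);
 *			folio_batch_release(&fbatch);
 *		}
 *	check_move_unevictable_folios(&fbatch);
 *	folio_batch_release(&fbatch);
 *
 * Each call rescues whatever folios in the batch are no longer unevictable,
 * bumping UNEVICTABLE_PGRESCUED and UNEVICTABLE_PGSCANNED accordingly.
 */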