// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * Swap reorganised 29.12.95, Stephen Tweedie.
 * kswapd added: 7.1.96  sct
 * Removed kswapd_ctl limits, and swap out as many pages as needed
 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 * Multiqueue VM started 5.8.00, Rik van Riel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmpressure.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/migrate.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/memory-tiers.h>
#include <linux/oom.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/printk.h>
#include <linux/dax.h>
#include <linux/psi.h>
#include <linux/pagewalk.h>
#include <linux/shmem_fs.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/khugepaged.h>
#include <linux/rculist_nulls.h>
#include <linux/random.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>
#include <linux/balloon_compaction.h>
#include <linux/sched/sysctl.h>

#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

struct scan_control {
	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t	*nodemask;

	/*
	 * The memory cgroup that hit its limit and as a result is the
	 * primary target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/*
	 * Scan pressure balancing between anon and file LRUs
	 */
	unsigned long	anon_cost;
	unsigned long	file_cost;

	/* Can active folios be deactivated as part of reclaim? */
#define DEACTIVATE_ANON 1
#define DEACTIVATE_FILE 2
	unsigned int may_deactivate:2;
	unsigned int force_deactivate:1;
	unsigned int skipped_deactivate:1;

	/* Writepage batching in laptop mode; RECLAIM_WRITE */
	unsigned int may_writepage:1;

	/* Can mapped folios be reclaimed? */
	unsigned int may_unmap:1;

	/* Can folios be swapped as part of reclaim? */
	unsigned int may_swap:1;

	/* Not allow cache_trim_mode to be turned on as part of reclaim? */
	unsigned int no_cache_trim_mode:1;

	/* Has cache_trim_mode failed at least once? */
	unsigned int cache_trim_mode_failed:1;

	/* Proactive reclaim invoked by userspace through memory.reclaim */
	unsigned int proactive:1;

	/*
	 * Cgroup memory below memory.low is protected as long as we
	 * don't threaten to OOM. If any cgroup is reclaimed at
	 * reduced force or passed over entirely due to its memory.low
	 * setting (memcg_low_skipped), and nothing is reclaimed as a
	 * result, then go back for one more cycle that reclaims the protected
	 * memory (memcg_low_reclaim) to avert OOM.
	 */
	unsigned int memcg_low_reclaim:1;
	unsigned int memcg_low_skipped:1;

	unsigned int hibernation_mode:1;

	/* One of the zones is ready for compaction */
	unsigned int compaction_ready:1;

	/* There is easily reclaimable cold cache in the current node */
	unsigned int cache_trim_mode:1;

	/* The file folios on the current node are dangerously low */
	unsigned int file_is_tiny:1;

	/* Always discard instead of demoting to lower tier memory */
	unsigned int no_demotion:1;

	/* Allocation order */
	s8 order;

	/* Scan (total_size >> priority) pages at once */
	s8 priority;

	/* The highest zone to isolate folios for reclaim from */
	s8 reclaim_idx;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	struct {
		unsigned int dirty;
		unsigned int unqueued_dirty;
		unsigned int congested;
		unsigned int writeback;
		unsigned int immediate;
		unsigned int file_taken;
		unsigned int taken;
	} nr;

	/* for recording the reclaimed slab by now */
	struct reclaim_state reclaim_state;
};

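/*
 * Illustrative sketch only (no caller below uses exactly these values):
 * a scan_control as a direct-reclaim-style entry point might fill it in.
 * The fields are the ones defined above; the values are examples. Compare
 * the minimal setup in reclaim_clean_pages_from_list() further down.
 *
 *	struct scan_control sc = {
 *		.nr_to_reclaim	= SWAP_CLUSTER_MAX,
 *		.gfp_mask	= GFP_KERNEL,
 *		.reclaim_idx	= MAX_NR_ZONES - 1,
 *		.priority	= DEF_PRIORITY,
 *		.may_writepage	= 1,
 *		.may_unmap	= 1,
 *		.may_swap	= 1,
 *	};
 */
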
#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_folio(_folio, _base, _field)			\
	do {								\
		if ((_folio)->lru.prev != _base) {			\
			struct folio *prev;				\
									\
			prev = lru_to_folio(&(_folio->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_folio(_folio, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 200.  Higher means more swappy.
 */
int vm_swappiness = 60;

#ifdef CONFIG_MEMCG

/* Returns true for reclaim through cgroup limits or cgroup interfaces. */
static bool cgroup_reclaim(struct scan_control *sc)
{
	return sc->target_mem_cgroup;
}

/*
 * Returns true for reclaim on the root cgroup. This is true for direct
 * allocator reclaim and reclaim through cgroup interfaces on the root cgroup.
 */
static bool root_reclaim(struct scan_control *sc)
{
	return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
}

/**
 * writeback_throttling_sane - is the usual dirty throttling mechanism available?
 * @sc: scan_control in question
 *
 * The normal page dirty throttling mechanism in balance_dirty_pages() is
 * completely broken with the legacy memcg and direct stalling in
 * shrink_folio_list() is used for throttling instead, which lacks all the
 * niceties such as fairness, adaptive pausing, bandwidth proportional
 * allocation and configurability.
 *
 * This function tests whether the vmscan currently in progress can assume
 * that the normal dirty throttling mechanism is operational.
 */
static bool writeback_throttling_sane(struct scan_control *sc)
{
	if (!cgroup_reclaim(sc))
		return true;
#ifdef CONFIG_CGROUP_WRITEBACK
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return true;
#endif
	return false;
}
#else
static bool cgroup_reclaim(struct scan_control *sc)
{
	return false;
}

static bool root_reclaim(struct scan_control *sc)
{
	return true;
}

static bool writeback_throttling_sane(struct scan_control *sc)
{
	return true;
}
#endif

static void set_task_reclaim_state(struct task_struct *task,
				   struct reclaim_state *rs)
{
	/* Check for an overwrite */
	WARN_ON_ONCE(rs && task->reclaim_state);

	/* Check for the nulling of an already-nulled member */
	WARN_ON_ONCE(!rs && !task->reclaim_state);

	task->reclaim_state = rs;
}

/*
 * flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to
 * scan_control->nr_reclaimed.
 */
static void flush_reclaim_state(struct scan_control *sc)
{
	/*
	 * Currently, reclaim_state->reclaimed includes three types of pages
	 * freed outside of vmscan:
	 * (1) Slab pages.
	 * (2) Clean file pages from pruned inodes (on highmem systems).
	 * (3) XFS freed buffer pages.
	 *
	 * For all of these cases, we cannot universally link the pages to a
	 * single memcg. For example, a memcg-aware shrinker can free one object
	 * charged to the target memcg, causing an entire page to be freed.
	 * If we count the entire page as reclaimed from the memcg, we end up
	 * overestimating the reclaimed amount (potentially under-reclaiming).
	 *
	 * Only count such pages for global reclaim to prevent under-reclaiming
	 * from the target memcg; preventing unnecessary retries during memcg
	 * charging and false positives from proactive reclaim.
	 *
	 * For uncommon cases where the freed pages were actually mostly
	 * charged to the target memcg, we end up underestimating the reclaimed
	 * amount. This should be fine. The freed pages will be uncharged
	 * anyway, even if they are not counted here properly, and we will be
	 * able to make forward progress in charging (which is usually in a
	 * retry loop).
	 *
	 * We can go one step further, and report the uncharged objcg pages in
	 * memcg reclaim, to make reporting more accurate and reduce
	 * underestimation, but it's probably not worth the complexity for now.
	 */
	if (current->reclaim_state && root_reclaim(sc)) {
		sc->nr_reclaimed += current->reclaim_state->reclaimed;
		current->reclaim_state->reclaimed = 0;
	}
}

static bool can_demote(int nid, struct scan_control *sc)
{
	if (!numa_demotion_enabled)
		return false;
	if (sc && sc->no_demotion)
		return false;
	if (next_demotion_node(nid) == NUMA_NO_NODE)
		return false;

	return true;
}

static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
					  int nid,
					  struct scan_control *sc)
{
	if (memcg == NULL) {
		/*
		 * For non-memcg reclaim, is there
		 * space in any swap device?
		 */
		if (get_nr_swap_pages() > 0)
			return true;
	} else {
		/* Is the memcg below its swap limit? */
		if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
			return true;
	}

	/*
	 * The page can not be swapped.
	 *
	 * Can it be reclaimed from this node via demotion?
	 */
	return can_demote(nid, sc);
}

/*
 * This misses isolated folios which are not accounted for to save counters.
 * As the data only determines if reclaim or compaction continues, it is
 * not expected that isolated folios will be a dominating factor.
 */
unsigned long zone_reclaimable_pages(struct zone *zone)
{
	unsigned long nr;

	nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
		zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
	if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);

	return nr;
}

/**
 * lruvec_lru_size -  Returns the number of pages on the given LRU list.
 * @lruvec: lru vector
 * @lru: lru to use
 * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list)
 */
static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
				     int zone_idx)
{
	unsigned long size = 0;
	int zid;

	for (zid = 0; zid <= zone_idx; zid++) {
		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];

		if (!managed_zone(zone))
			continue;

		if (!mem_cgroup_disabled())
			size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
		else
			size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
	}
	return size;
}

static unsigned long drop_slab_node(int nid)
{
	unsigned long freed = 0;
	struct mem_cgroup *memcg = NULL;

	memcg = mem_cgroup_iter(NULL, NULL, NULL);
	do {
		freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);

	return freed;
}

void drop_slab(void)
{
	int nid;
	int shift = 0;
	unsigned long freed;

	do {
		freed = 0;
		for_each_online_node(nid) {
			if (fatal_signal_pending(current))
				return;

			freed += drop_slab_node(nid);
		}
	} while ((freed >> shift++) > 1);
}

static int reclaimer_offset(void)
{
	BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
			PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD);
	BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
			PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD);
	BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
			PGSCAN_DIRECT - PGSCAN_KSWAPD);
	BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
			PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD);

	if (current_is_kswapd())
		return 0;
	if (current_is_khugepaged())
		return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD;
	return PGSTEAL_DIRECT - PGSTEAL_KSWAPD;
}

static inline int is_page_cache_freeable(struct folio *folio)
{
	/*
	 * A freeable page cache folio is referenced only by the caller
	 * that isolated the folio, the page cache and optional filesystem
	 * private data at folio->private.
	 */
	return folio_ref_count(folio) - folio_test_private(folio) ==
		1 + folio_nr_pages(folio);
}

/*
 * We detected a synchronous write error writing a folio out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the folio and once
 * that folio is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping folio_lock() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct folio *folio, int error)
{
	folio_lock(folio);
	if (folio_mapping(folio) == mapping)
		mapping_set_error(mapping, error);
	folio_unlock(folio);
}

static bool skip_throttle_noprogress(pg_data_t *pgdat)
{
	int reclaimable = 0, write_pending = 0;
	int i;

	/*
	 * If kswapd is disabled, reschedule if necessary but do not
	 * throttle as the system is likely near OOM.
	 */
	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
		return true;

	/*
	 * If there are a lot of dirty/writeback folios then do not
	 * throttle as throttling will occur when the folios cycle
	 * towards the end of the LRU if still under writeback.
	 */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;

		if (!managed_zone(zone))
			continue;

		reclaimable += zone_reclaimable_pages(zone);
		write_pending += zone_page_state_snapshot(zone,
						  NR_ZONE_WRITE_PENDING);
	}
	if (2 * write_pending <= reclaimable)
		return true;

	return false;
}

void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
{
	wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];
	long timeout, ret;
	DEFINE_WAIT(wait);

	/*
	 * Do not throttle user workers, kthreads other than kswapd or
	 * workqueues. They may be required for reclaim to make
	 * forward progress (e.g. journalling workqueues or kthreads).
	 */
	if (!current_is_kswapd() &&
	    current->flags & (PF_USER_WORKER|PF_KTHREAD)) {
		cond_resched();
		return;
	}

	/*
	 * These figures are pulled out of thin air.
	 * VMSCAN_THROTTLE_ISOLATED is a transient condition based on too many
	 * parallel reclaimers which is a short-lived event so the timeout is
	 * short. Failing to make progress or waiting on writeback are
	 * potentially long-lived events so use a longer timeout. This is shaky
	 * logic as a failure to make progress could be due to anything from
	 * writeback to a slow device to excessive referenced folios at the tail
	 * of the inactive LRU.
	 */
	switch(reason) {
	case VMSCAN_THROTTLE_WRITEBACK:
		timeout = HZ/10;

		if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) {
			WRITE_ONCE(pgdat->nr_reclaim_start,
				node_page_state(pgdat, NR_THROTTLED_WRITTEN));
		}

		break;
	case VMSCAN_THROTTLE_CONGESTED:
		fallthrough;
	case VMSCAN_THROTTLE_NOPROGRESS:
		if (skip_throttle_noprogress(pgdat)) {
			cond_resched();
			return;
		}

		timeout = 1;

		break;
	case VMSCAN_THROTTLE_ISOLATED:
		timeout = HZ/50;
		break;
	default:
		WARN_ON_ONCE(1);
		timeout = HZ;
		break;
	}

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	if (reason == VMSCAN_THROTTLE_WRITEBACK)
		atomic_dec(&pgdat->nr_writeback_throttled);

	trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout),
				jiffies_to_usecs(timeout - ret),
				reason);
}

/*
 * Account for folios written if tasks are throttled waiting on dirty
 * folios to clean. If enough folios have been cleaned since throttling
 * started then wakeup the throttled tasks.
 */
void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
							int nr_throttled)
{
	unsigned long nr_written;

	node_stat_add_folio(folio, NR_THROTTLED_WRITTEN);

	/*
	 * This is an inaccurate read as the per-cpu deltas may not
	 * be synchronised. However, given that the system is
	 * writeback throttled, it is not worth taking the penalty
	 * of getting an accurate count. At worst, the throttle
	 * timeout guarantees forward progress.
	 */
	nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) -
		READ_ONCE(pgdat->nr_reclaim_start);

	if (nr_written > SWAP_CLUSTER_MAX * nr_throttled)
		wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write folio out, folio is locked */
	PAGE_KEEP,
	/* move folio to the active list, folio is locked */
	PAGE_ACTIVATE,
	/* folio has been sent to the disk successfully, folio is unlocked */
	PAGE_SUCCESS,
	/* folio is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_folio_list() for each dirty folio.
 * Calls ->writepage().
 */
static pageout_t pageout(struct folio *folio, struct address_space *mapping,
			 struct swap_iocb **plug)
{
	/*
	 * If the folio is dirty, only perform writeback if that write
	 * will be non-blocking.  To prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_write_iter() against
	 * this folio's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the folio is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(folio))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned folios can have
		 * folio->mapping == NULL while being dirty with clean buffers.
		 */
		if (folio_test_private(folio)) {
			if (try_to_free_buffers(folio)) {
				folio_clear_dirty(folio);
				pr_info("%s: orphaned folio\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;

	if (folio_clear_dirty_for_io(folio)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
			.swap_plug = plug,
		};

		folio_set_reclaim(folio);
		res = mapping->a_ops->writepage(&folio->page, &wbc);
		if (res < 0)
			handle_write_error(mapping, folio, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			folio_clear_reclaim(folio);
			return PAGE_ACTIVATE;
		}

		if (!folio_test_writeback(folio)) {
			/* synchronous write or broken a_ops? */
			folio_clear_reclaim(folio);
		}
		trace_mm_vmscan_write_folio(folio);
		node_stat_add_folio(folio, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Same as remove_mapping, but if the folio is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct folio *folio,
			    bool reclaimed, struct mem_cgroup *target_memcg)
{
	int refcount;
	void *shadow = NULL;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(mapping != folio_mapping(folio));

	if (!folio_test_swapcache(folio))
		spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	/*
	 * The non racy check for a busy folio.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the folio, it may be possible that they dirty it then
	 * drop the reference. So if the dirty flag is tested before the
	 * refcount here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!folio_test_dirty(folio)    [good]
	 * folio_set_dirty(folio);
	 * folio_put(folio);
	 *				!refcount(folio)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the folio->flags
	 * load is not satisfied before that of folio->_refcount.
	 *
	 * Note that if the dirty flag is always set via folio_mark_dirty,
	 * and thus under the i_pages lock, then this ordering is not required.
	 */
	refcount = 1 + folio_nr_pages(folio);
	if (!folio_ref_freeze(folio, refcount))
		goto cannot_free;
	/* note: atomic_cmpxchg in folio_ref_freeze provides the smp_rmb */
	if (unlikely(folio_test_dirty(folio))) {
		folio_ref_unfreeze(folio, refcount);
		goto cannot_free;
	}

	if (folio_test_swapcache(folio)) {
		swp_entry_t swap = folio->swap;

		if (reclaimed && !mapping_exiting(mapping))
			shadow = workingset_eviction(folio, target_memcg);
		__delete_from_swap_cache(folio, swap, shadow);
		mem_cgroup_swapout(folio, swap);
		xa_unlock_irq(&mapping->i_pages);
		put_swap_folio(folio, swap);
	} else {
		void (*free_folio)(struct folio *);

		free_folio = mapping->a_ops->free_folio;
		/*
		 * Remember a shadow entry for reclaimed file cache in
		 * order to detect refaults, thus thrashing, later on.
		 *
		 * But don't store shadows in an address space that is
		 * already exiting.  This is not just an optimization,
		 * inode reclaim needs to empty out the radix tree or
		 * the nodes are lost.  Don't plant shadows behind its
		 * back.
		 *
		 * We also don't store shadows for DAX mappings because the
		 * only page cache folios found in these are zero pages
		 * covering holes, and because we don't want to mix DAX
		 * exceptional entries and shadow exceptional entries in the
		 * same address_space.
		 */
		if (reclaimed && folio_is_file_lru(folio) &&
		    !mapping_exiting(mapping) && !dax_mapping(mapping))
			shadow = workingset_eviction(folio, target_memcg);
		__filemap_remove_folio(folio, shadow);
		xa_unlock_irq(&mapping->i_pages);
		if (mapping_shrinkable(mapping))
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);

		if (free_folio)
			free_folio(folio);
	}

	return 1;

cannot_free:
	xa_unlock_irq(&mapping->i_pages);
	if (!folio_test_swapcache(folio))
		spin_unlock(&mapping->host->i_lock);
	return 0;
}

/**
 * remove_mapping() - Attempt to remove a folio from its mapping.
 * @mapping: The address space.
 * @folio: The folio to remove.
 *
 * If the folio is dirty, under writeback or if someone else has a ref
 * on it, removal will fail.
 * Return: The number of pages removed from the mapping.  0 if the folio
 * could not be removed.
 * Context: The caller should have a single refcount on the folio and
 * hold its lock.
 */
long remove_mapping(struct address_space *mapping, struct folio *folio)
{
	if (__remove_mapping(mapping, folio, false, NULL)) {
		/*
		 * Unfreezing the refcount with 1 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		folio_ref_unfreeze(folio, 1);
		return folio_nr_pages(folio);
	}
	return 0;
}

/**
 * folio_putback_lru - Put previously isolated folio onto appropriate LRU list.
 * @folio: Folio to be returned to an LRU list.
 *
 * Add previously isolated @folio to appropriate LRU list.
 * The folio may still be unevictable for other reasons.
 *
 * Context: lru_lock must not be held, interrupts must be enabled.
 */
void folio_putback_lru(struct folio *folio)
{
	folio_add_lru(folio);
	folio_put(folio);		/* drop ref from isolate */
}

enum folio_references {
	FOLIOREF_RECLAIM,
	FOLIOREF_RECLAIM_CLEAN,
	FOLIOREF_KEEP,
	FOLIOREF_ACTIVATE,
};

static enum folio_references folio_check_references(struct folio *folio,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_folio;
	unsigned long vm_flags;

	referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
					   &vm_flags);
	referenced_folio = folio_test_clear_referenced(folio);

	/*
	 * The supposedly reclaimable folio was found to be in a VM_LOCKED vma.
	 * Let the folio, now marked Mlocked, be moved to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return FOLIOREF_ACTIVATE;

	/* rmap lock contention: rotate */
	if (referenced_ptes == -1)
		return FOLIOREF_KEEP;

	if (referenced_ptes) {
		/*
		 * All mapped folios start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file/anon folio is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list.  Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated folios as well
		 * so that recently deactivated but used folios are
		 * quickly recovered.
		 */
		folio_set_referenced(folio);

		if (referenced_folio || referenced_ptes > 1)
			return FOLIOREF_ACTIVATE;

		/*
		 * Activate file-backed executable folios after first usage.
		 */
		if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio))
			return FOLIOREF_ACTIVATE;

		return FOLIOREF_KEEP;
	}

	/* Reclaim if clean, defer dirty folios to writeback */
	if (referenced_folio && folio_is_file_lru(folio))
		return FOLIOREF_RECLAIM_CLEAN;

	return FOLIOREF_RECLAIM;
}

/* Check if a folio is dirty or under writeback */
static void folio_check_dirty_writeback(struct folio *folio,
				       bool *dirty, bool *writeback)
{
	struct address_space *mapping;

	/*
	 * Anonymous folios are not handled by flushers and must be written
	 * from reclaim context. Do not stall reclaim based on them.
	 * MADV_FREE anonymous folios are put into inactive file list too.
	 * They could be mistakenly treated as file lru. So further anon
	 * test is needed.
	 */
	if (!folio_is_file_lru(folio) ||
	    (folio_test_anon(folio) && !folio_test_swapbacked(folio))) {
		*dirty = false;
		*writeback = false;
		return;
	}

	/* By default assume that the folio flags are accurate */
	*dirty = folio_test_dirty(folio);
	*writeback = folio_test_writeback(folio);

	/* Verify dirty/writeback state if the filesystem supports it */
	if (!folio_test_private(folio))
		return;

	mapping = folio_mapping(folio);
	if (mapping && mapping->a_ops->is_dirty_writeback)
		mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
}

static struct folio *alloc_demote_folio(struct folio *src,
		unsigned long private)
{
	struct folio *dst;
	nodemask_t *allowed_mask;
	struct migration_target_control *mtc;

	mtc = (struct migration_target_control *)private;

	allowed_mask = mtc->nmask;
	/*
	 * make sure we allocate from the target node first also trying to
	 * demote or reclaim pages from the target node via kswapd if we are
	 * low on free memory on target node. If we don't do this and if
	 * we have free memory on the slower(lower) memtier, we would start
	 * allocating pages from slower(lower) memory tiers without even forcing
	 * a demotion of cold pages from the target memtier. This can result
	 * in the kernel placing hot pages in slower(lower) memory tiers.
	 */
	mtc->nmask = NULL;
	mtc->gfp_mask |= __GFP_THISNODE;
	dst = alloc_migration_target(src, (unsigned long)mtc);
	if (dst)
		return dst;

	mtc->gfp_mask &= ~__GFP_THISNODE;
	mtc->nmask = allowed_mask;

	return alloc_migration_target(src, (unsigned long)mtc);
}

/*
 * Take folios on @demote_folios and attempt to demote them to another node.
 * Folios which are not demoted are left on @demote_folios.
 */
static unsigned int demote_folio_list(struct list_head *demote_folios,
				     struct pglist_data *pgdat)
{
	int target_nid = next_demotion_node(pgdat->node_id);
	unsigned int nr_succeeded;
	nodemask_t allowed_mask;

	struct migration_target_control mtc = {
		/*
		 * Allocate from 'node', or fail quickly and quietly.
		 * When this happens, 'page' will likely just be discarded
		 * instead of migrated.
		 */
		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN |
			__GFP_NOMEMALLOC | GFP_NOWAIT,
		.nid = target_nid,
		.nmask = &allowed_mask
	};

	if (list_empty(demote_folios))
		return 0;

	if (target_nid == NUMA_NO_NODE)
		return 0;

	node_get_allowed_targets(pgdat, &allowed_mask);

	/* Demotion ignores all cpuset and mempolicy settings */
	migrate_pages(demote_folios, alloc_demote_folio, NULL,
		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
		      &nr_succeeded);

	mod_node_page_state(pgdat, PGDEMOTE_KSWAPD + reclaimer_offset(),
			    nr_succeeded);

	return nr_succeeded;
}

static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask)
{
	if (gfp_mask & __GFP_FS)
		return true;
	if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO))
		return false;
	/*
	 * We can "enter_fs" for swap-cache with only __GFP_IO
	 * providing this isn't SWP_FS_OPS.
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	return !data_race(folio_swap_flags(folio) & SWP_FS_OPS);
}

/*
 * shrink_folio_list() returns the number of reclaimed pages
 */
static unsigned int shrink_folio_list(struct list_head *folio_list,
		struct pglist_data *pgdat, struct scan_control *sc,
		struct reclaim_stat *stat, bool ignore_references)
{
	struct folio_batch free_folios;
	LIST_HEAD(ret_folios);
	LIST_HEAD(demote_folios);
	unsigned int nr_reclaimed = 0;
	unsigned int pgactivate = 0;
	bool do_demote_pass;
	struct swap_iocb *plug = NULL;

	folio_batch_init(&free_folios);
	memset(stat, 0, sizeof(*stat));
	cond_resched();
	do_demote_pass = can_demote(pgdat->node_id, sc);

retry:
	while (!list_empty(folio_list)) {
		struct address_space *mapping;
		struct folio *folio;
		enum folio_references references = FOLIOREF_RECLAIM;
		bool dirty, writeback;
		unsigned int nr_pages;

		cond_resched();

		folio = lru_to_folio(folio_list);
		list_del(&folio->lru);

		if (!folio_trylock(folio))
			goto keep;

		VM_BUG_ON_FOLIO(folio_test_active(folio), folio);

		nr_pages = folio_nr_pages(folio);

		/* Account the number of base pages */
		sc->nr_scanned += nr_pages;

		if (unlikely(!folio_evictable(folio)))
			goto activate_locked;

		if (!sc->may_unmap && folio_mapped(folio))
			goto keep_locked;

		/* folio_update_gen() tried to promote this page? */
		if (lru_gen_enabled() && !ignore_references &&
		    folio_mapped(folio) && folio_test_referenced(folio))
			goto keep_locked;

		/*
		 * The number of dirty pages determines if a node is marked
		 * reclaim_congested. kswapd will stall and start writing
		 * folios if the tail of the LRU is all dirty unqueued folios.
		 */
		folio_check_dirty_writeback(folio, &dirty, &writeback);
		if (dirty || writeback)
			stat->nr_dirty += nr_pages;

		if (dirty && !writeback)
			stat->nr_unqueued_dirty += nr_pages;

		/*
		 * Treat this folio as congested if folios are cycling
		 * through the LRU so quickly that the folios marked
		 * for immediate reclaim are making it to the end of
		 * the LRU a second time.
		 */
		if (writeback && folio_test_reclaim(folio))
			stat->nr_congested += nr_pages;

		/*
		 * If a folio at the tail of the LRU is under writeback, there
		 * are three cases to consider.
		 *
		 * 1) If reclaim is encountering an excessive number
		 *    of folios under writeback and this folio has both
		 *    the writeback and reclaim flags set, then it
		 *    indicates that folios are being queued for I/O but
		 *    are being recycled through the LRU before the I/O
		 *    can complete. Waiting on the folio itself risks an
		 *    indefinite stall if it is impossible to writeback
		 *    the folio due to I/O error or disconnected storage
		 *    so instead note that the LRU is being scanned too
		 *    quickly and the caller can stall after the folio
		 *    list has been processed.
		 *
		 * 2) Global or new memcg reclaim encounters a folio that is
		 *    not marked for immediate reclaim, or the caller does not
		 *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
		 *    not to fs). In this case mark the folio for immediate
		 *    reclaim and continue scanning.
		 *
		 *    Require may_enter_fs() because we would wait on fs, which
		 *    may not have submitted I/O yet. And the loop driver might
		 *    enter reclaim, and deadlock if it waits on a folio for
		 *    which it is needed to do the write (loop masks off
		 *    __GFP_IO|__GFP_FS for this reason); but more thought
		 *    would probably show more reasons.
		 *
		 * 3) Legacy memcg encounters a folio that already has the
		 *    reclaim flag set. memcg does not have any dirty folio
		 *    throttling so we could easily OOM just because too many
		 *    folios are in writeback and there is nothing else to
		 *    reclaim. Wait for the writeback to complete.
		 *
		 * In cases 1) and 2) we activate the folios to get them out of
		 * the way while we continue scanning for clean folios on the
		 * inactive list and refilling from the active list. The
		 * observation here is that waiting for disk writes is more
		 * expensive than potentially causing reloads down the line.
		 * Since they're marked for immediate reclaim, they won't put
		 * memory pressure on the cache working set any longer than it
		 * takes to write them to disk.
		 */
		if (folio_test_writeback(folio)) {
			/* Case 1 above */
			if (current_is_kswapd() &&
			    folio_test_reclaim(folio) &&
			    test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
				stat->nr_immediate += nr_pages;
				goto activate_locked;

			/* Case 2 above */
			} else if (writeback_throttling_sane(sc) ||
			    !folio_test_reclaim(folio) ||
			    !may_enter_fs(folio, sc->gfp_mask)) {
				/*
				 * This is slightly racy -
				 * folio_end_writeback() might have
				 * just cleared the reclaim flag, then
				 * setting the reclaim flag here ends up
				 * interpreted as the readahead flag - but
				 * that does not matter enough to care.
				 * What we do want is for this folio to
				 * have the reclaim flag set next time
				 * memcg reclaim reaches the tests above,
				 * so it will then wait for writeback to
				 * avoid OOM; and it's also appropriate
				 * in global reclaim.
				 */
				folio_set_reclaim(folio);
				stat->nr_writeback += nr_pages;
				goto activate_locked;

			/* Case 3 above */
			} else {
				folio_unlock(folio);
				folio_wait_writeback(folio);
				/* then go back and try same folio again */
				list_add_tail(&folio->lru, folio_list);
				continue;
			}
		}

		if (!ignore_references)
			references = folio_check_references(folio, sc);

		switch (references) {
		case FOLIOREF_ACTIVATE:
			goto activate_locked;
		case FOLIOREF_KEEP:
			stat->nr_ref_keep += nr_pages;
			goto keep_locked;
		case FOLIOREF_RECLAIM:
		case FOLIOREF_RECLAIM_CLEAN:
			; /* try to reclaim the folio below */
		}

		/*
		 * Before reclaiming the folio, try to relocate
		 * its contents to another node.
		 */
		if (do_demote_pass &&
		    (thp_migration_supported() || !folio_test_large(folio))) {
			list_add(&folio->lru, &demote_folios);
			folio_unlock(folio);
			continue;
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 * Lazyfree folio could be freed directly
		 */
		if (folio_test_anon(folio) && folio_test_swapbacked(folio)) {
			if (!folio_test_swapcache(folio)) {
				if (!(sc->gfp_mask & __GFP_IO))
					goto keep_locked;
				if (folio_maybe_dma_pinned(folio))
					goto keep_locked;
				if (folio_test_large(folio)) {
					/* cannot split folio, skip it */
					if (!can_split_folio(folio, NULL))
						goto activate_locked;
					/*
					 * Split folios without a PMD map right
					 * away. Chances are some or all of the
					 * tail pages can be freed without IO.
					 */
					if (!folio_entire_mapcount(folio) &&
					    split_folio_to_list(folio,
								folio_list))
						goto activate_locked;
				}
				if (!add_to_swap(folio)) {
					if (!folio_test_large(folio))
						goto activate_locked_split;
					/* Fallback to swap normal pages */
					if (split_folio_to_list(folio,
								folio_list))
						goto activate_locked;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
					count_memcg_folio_events(folio, THP_SWPOUT_FALLBACK, 1);
					count_vm_event(THP_SWPOUT_FALLBACK);
#endif
					if (!add_to_swap(folio))
						goto activate_locked_split;
				}
			}
		} else if (folio_test_swapbacked(folio) &&
			   folio_test_large(folio)) {
			/* Split shmem folio */
			if (split_folio_to_list(folio, folio_list))
				goto keep_locked;
		}

		/*
		 * If the folio was split above, the tail pages will make
		 * their own pass through this function and be accounted
		 * then.
		 */
		if ((nr_pages > 1) && !folio_test_large(folio)) {
			sc->nr_scanned -= (nr_pages - 1);
			nr_pages = 1;
		}

		/*
		 * The folio is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (folio_mapped(folio)) {
			enum ttu_flags flags = TTU_BATCH_FLUSH;
			bool was_swapbacked = folio_test_swapbacked(folio);

			if (folio_test_pmd_mappable(folio))
				flags |= TTU_SPLIT_HUGE_PMD;

			try_to_unmap(folio, flags);
			if (folio_mapped(folio)) {
				stat->nr_unmap_fail += nr_pages;
				if (!was_swapbacked &&
				    folio_test_swapbacked(folio))
					stat->nr_lazyfree_fail += nr_pages;
				goto activate_locked;
			}
		}

		/*
		 * Folio is unmapped now so it cannot be newly pinned anymore.
		 * No point in trying to reclaim folio if it is pinned.
		 * Furthermore we don't want to reclaim underlying fs metadata
		 * if the folio is pinned and thus potentially modified by the
		 * pinning process as that may upset the filesystem.
		 */
		if (folio_maybe_dma_pinned(folio))
			goto activate_locked;

		mapping = folio_mapping(folio);
		if (folio_test_dirty(folio)) {
			/*
			 * Only kswapd can writeback filesystem folios
			 * to avoid risk of stack overflow. But avoid
			 * injecting inefficient single-folio I/O into
			 * flusher writeback as much as possible: only
			 * write folios when we've encountered many
			 * dirty folios, and when we've already scanned
			 * the rest of the LRU for clean folios and see
			 * the same dirty folios again (with the reclaim
			 * flag set).
			 */
			if (folio_is_file_lru(folio) &&
			    (!current_is_kswapd() ||
			     !folio_test_reclaim(folio) ||
			     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
				/*
				 * Immediately reclaim when written back.
				 * Similar in principle to folio_deactivate()
				 * except we already have the folio isolated
				 * and know it's dirty
				 */
				node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE,
						nr_pages);
				folio_set_reclaim(folio);

				goto activate_locked;
			}

			if (references == FOLIOREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs(folio, sc->gfp_mask))
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/*
			 * Folio is dirty. Flush the TLB if a writable entry
			 * potentially exists to avoid CPU writes after I/O
			 * starts and then write it out here.
			 */
			try_to_unmap_flush_dirty();
			switch (pageout(folio, mapping, &plug)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				stat->nr_pageout += nr_pages;

				if (folio_test_writeback(folio))
					goto keep;
				if (folio_test_dirty(folio))
					goto keep;

				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the folio.
				 */
				if (!folio_trylock(folio))
					goto keep;
				if (folio_test_dirty(folio) ||
				    folio_test_writeback(folio))
					goto keep_locked;
				mapping = folio_mapping(folio);
				fallthrough;
			case PAGE_CLEAN:
				; /* try to free the folio below */
			}
		}

		/*
		 * If the folio has buffers, try to free the buffer
		 * mappings associated with this folio. If we succeed
		 * we try to free the folio as well.
		 *
		 * We do this even if the folio is dirty.
		 * filemap_release_folio() does not perform I/O, but it
		 * is possible for a folio to have the dirty flag set,
		 * but it is actually clean (all its buffers are clean).
		 * This happens if the buffers were written out directly,
		 * with submit_bh(). ext3 will do this, as well as
		 * the blockdev mapping.  filemap_release_folio() will
		 * discover that cleanness and will drop the buffers
		 * and mark the folio clean - it can be freed.
		 *
		 * Rarely, folios can have buffers and no ->mapping.
		 * These are the folios which were not successfully
		 * invalidated in truncate_cleanup_folio().  We try to
		 * drop those buffers here and if that worked, and the
		 * folio is no longer mapped into process address space
		 * (refcount == 1) it can be freed.  Otherwise, leave
		 * the folio on the LRU so it is swappable.
		 */
		if (folio_needs_release(folio)) {
			if (!filemap_release_folio(folio, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && folio_ref_count(folio) == 1) {
				folio_unlock(folio);
				if (folio_put_testzero(folio))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this folio shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed += nr_pages;
					continue;
				}
			}
		}

		if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
			/* follow __remove_mapping for reference */
			if (!folio_ref_freeze(folio, 1))
				goto keep_locked;
			/*
			 * The folio has only one reference left, which is
			 * from the isolation. After the caller puts the
			 * folio back on the lru and drops the reference, the
			 * folio will be freed anyway. It doesn't matter
			 * which lru it goes on. So we don't bother checking
			 * the dirty flag here.
			 */
			count_vm_events(PGLAZYFREED, nr_pages);
			count_memcg_folio_events(folio, PGLAZYFREED, nr_pages);
		} else if (!mapping || !__remove_mapping(mapping, folio, true,
							 sc->target_mem_cgroup))
			goto keep_locked;

		folio_unlock(folio);
free_it:
		/*
		 * Folio may get swapped out as a whole, need to account
		 * all pages in it.
		 */
		nr_reclaimed += nr_pages;

		if (folio_test_large(folio) &&
		    folio_test_large_rmappable(folio))
			folio_undo_large_rmappable(folio);
		if (folio_batch_add(&free_folios, folio) == 0) {
			mem_cgroup_uncharge_folios(&free_folios);
			try_to_unmap_flush();
			free_unref_folios(&free_folios);
		}
		continue;

activate_locked_split:
		/*
		 * The tail pages that are failed to add into swap cache
		 * reach here.  Fixup nr_scanned and nr_pages.
		 */
		if (nr_pages > 1) {
			sc->nr_scanned -= (nr_pages - 1);
			nr_pages = 1;
		}
activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (folio_test_swapcache(folio) &&
		    (mem_cgroup_swap_full(folio) || folio_test_mlocked(folio)))
			folio_free_swap(folio);
		VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
		if (!folio_test_mlocked(folio)) {
			int type = folio_is_file_lru(folio);
			folio_set_active(folio);
			stat->nr_activate[type] += nr_pages;
			count_memcg_folio_events(folio, PGACTIVATE, nr_pages);
		}
keep_locked:
		folio_unlock(folio);
keep:
		list_add(&folio->lru, &ret_folios);
		VM_BUG_ON_FOLIO(folio_test_lru(folio) ||
				folio_test_unevictable(folio), folio);
	}
	/* 'folio_list' is always empty here */

	/* Migrate folios selected for demotion */
	nr_reclaimed += demote_folio_list(&demote_folios, pgdat);
	/* Folios that could not be demoted are still in @demote_folios */
	if (!list_empty(&demote_folios)) {
		/* Folios which weren't demoted go back on @folio_list */
		list_splice_init(&demote_folios, folio_list);

		/*
		 * goto retry to reclaim the undemoted folios in folio_list if
		 * desired.
		 *
		 * Reclaiming directly from top tier nodes is not often desired
		 * due to it breaking the LRU ordering: in general memory
		 * should be reclaimed from lower tier nodes and demoted from
		 * top tier nodes.
		 *
		 * However, disabling reclaim from top tier nodes entirely
		 * would cause ooms in edge scenarios where lower tier memory
		 * is unreclaimable for whatever reason, eg memory being
		 * mlocked or too hot to reclaim. We can disable reclaim
		 * from top tier nodes in proactive reclaim though as that is
		 * not real memory pressure.
		 */
		if (!sc->proactive) {
			do_demote_pass = false;
			goto retry;
		}
	}

	pgactivate = stat->nr_activate[0] + stat->nr_activate[1];

	mem_cgroup_uncharge_folios(&free_folios);
	try_to_unmap_flush();
	free_unref_folios(&free_folios);

	list_splice(&ret_folios, folio_list);
	count_vm_events(PGACTIVATE, pgactivate);

	if (plug)
		swap_write_unplug(plug);
	return nr_reclaimed;
}

unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					   struct list_head *folio_list)
{
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.may_unmap = 1,
	};
	struct reclaim_stat stat;
	unsigned int nr_reclaimed;
	struct folio *folio, *next;
	LIST_HEAD(clean_folios);
	unsigned int noreclaim_flag;

	list_for_each_entry_safe(folio, next, folio_list, lru) {
		if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) &&
		    !folio_test_dirty(folio) && !__folio_test_movable(folio) &&
		    !folio_test_unevictable(folio)) {
			folio_clear_active(folio);
			list_move(&folio->lru, &clean_folios);
		}
	}

	/*
	 * We should be safe here since we are only dealing with file pages and
	 * we are not kswapd and therefore cannot write dirty file pages. But
	 * call memalloc_noreclaim_save() anyway, just in case these conditions
	 * change in the future.
	 */
	noreclaim_flag = memalloc_noreclaim_save();
	nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc,
					&stat, true);
	memalloc_noreclaim_restore(noreclaim_flag);

	list_splice(&clean_folios, folio_list);
	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
			    -(long)nr_reclaimed);
	/*
	 * Since lazyfree pages are isolated from file LRU from the beginning,
	 * they will rotate back to anonymous LRU in the end if it failed to
	 * discard so isolated count will be mismatched.
	 * Compensate the isolated count for both LRU lists.
	 */
	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
			    stat.nr_lazyfree_fail);
	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
			    -(long)stat.nr_lazyfree_fail);
	return nr_reclaimed;
}

/*
 * Update LRU sizes after isolating pages. The LRU size updates must
 * be complete before mem_cgroup_update_lru_size due to a sanity check.
 */
static __always_inline void update_lru_sizes(struct lruvec *lruvec,
			enum lru_list lru, unsigned long *nr_zone_taken)
{
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		if (!nr_zone_taken[zid])
			continue;

		update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
	}

}

#ifdef CONFIG_CMA
/*
 * It is a waste of effort to scan and reclaim CMA pages if they are not
 * available for the current allocation context. Kswapd can not be enrolled
 * as it can not distinguish this scenario by using sc->gfp_mask = GFP_KERNEL.
 */
static bool skip_cma(struct folio *folio, struct scan_control *sc)
{
	return !current_is_kswapd() &&
			gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
			folio_migratetype(folio) == MIGRATE_CMA;
}
#else
static bool skip_cma(struct folio *folio, struct scan_control *sc)
{
	return false;
}
#endif

/*
 * Isolate folios from the lruvec, filling @dst with up to nr_to_scan folios.
 *
 * lruvec->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Lru_lock must be held before calling this function.
 *
 * @nr_to_scan:	The number of eligible pages to look through on the list.
 * @lruvec:	The LRU vector to pull pages from.
 * @dst:	The temp list to put pages on to.
 * @nr_scanned:	The number of pages that were scanned.
 * @sc:		The scan_control struct for this reclaim session
 * @lru:	LRU list id for isolating
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
		struct lruvec *lruvec, struct list_head *dst,
		unsigned long *nr_scanned, struct scan_control *sc,
		enum lru_list lru)
{
	struct list_head *src = &lruvec->lists[lru];
	unsigned long nr_taken = 0;
	unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
	unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
	unsigned long skipped = 0;
	unsigned long scan, total_scan, nr_pages;
	LIST_HEAD(folios_skipped);

	total_scan = 0;
	scan = 0;
	while (scan < nr_to_scan && !list_empty(src)) {
		struct list_head *move_to = src;
		struct folio *folio;

		folio = lru_to_folio(src);
		prefetchw_prev_lru_folio(folio, src, flags);

		nr_pages = folio_nr_pages(folio);
		total_scan += nr_pages;

		if (folio_zonenum(folio) > sc->reclaim_idx ||
				skip_cma(folio, sc)) {
			nr_skipped[folio_zonenum(folio)] += nr_pages;
			move_to = &folios_skipped;
			goto move;
		}

		/*
		 * Do not count skipped folios because that makes the function
		 * return with no isolated folios if the LRU mostly contains
		 * ineligible folios.  This causes the VM to not reclaim any
		 * folios, triggering a premature OOM.
		 * Account all pages in a folio.
		 */
		scan += nr_pages;

		if (!folio_test_lru(folio))
			goto move;
		if (!sc->may_unmap && folio_mapped(folio))
			goto move;

		/*
		 * Be careful not to clear the lru flag until after we're
		 * sure the folio is not being freed elsewhere -- the
		 * folio release code relies on it.
		 */
		if (unlikely(!folio_try_get(folio)))
			goto move;

		if (!folio_test_clear_lru(folio)) {
			/* Another thread is already isolating this folio */
			folio_put(folio);
			goto move;
		}

		nr_taken += nr_pages;
		nr_zone_taken[folio_zonenum(folio)] += nr_pages;
		move_to = dst;
move:
		list_move(&folio->lru, move_to);
	}

	/*
	 * Splice any skipped folios to the start of the LRU list. Note that
	 * this disrupts the LRU order when reclaiming for lower zones but
	 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
	 * scanning would soon rescan the same folios to skip and waste lots
	 * of cpu cycles.
	 */
	if (!list_empty(&folios_skipped)) {
		int zid;

		list_splice(&folios_skipped, src);
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			if (!nr_skipped[zid])
				continue;

			__count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
			skipped += nr_skipped[zid];
		}
	}
	*nr_scanned = total_scan;
	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
				    total_scan, skipped, nr_taken, lru);
	update_lru_sizes(lruvec, lru, nr_zone_taken);
	return nr_taken;
}

/**
 * folio_isolate_lru() - Try to isolate a folio from its LRU list.
 * @folio: Folio to isolate from its LRU list.
 *
 * Isolate a @folio from an LRU list and adjust the vmstat statistic
 * corresponding to whatever LRU list the folio was on.
 *
 * The folio will have its LRU flag cleared.  If it was found on the
 * active list, it will have the Active flag set.  If it was found on the
 * unevictable list, it will have the Unevictable flag set.  These flags
 * may need to be cleared by the caller before letting the page go.
 *
 * Context:
 *
 * (1) Must be called with an elevated refcount on the folio. This is a
 *     fundamental difference from isolate_lru_folios() (which is called
 *     without a stable reference).
 * (2) The lru_lock must not be held.
 * (3) Interrupts must be enabled.
 *
 * Return: true if the folio was removed from an LRU list.
 * false if the folio was not on an LRU list.
 */
bool folio_isolate_lru(struct folio *folio)
{
	bool ret = false;

	VM_BUG_ON_FOLIO(!folio_ref_count(folio), folio);

	if (folio_test_clear_lru(folio)) {
		struct lruvec *lruvec;

		folio_get(folio);
		lruvec = folio_lruvec_lock_irq(folio);
		lruvec_del_folio(lruvec, folio);
		unlock_page_lruvec_irq(lruvec);
		ret = true;
	}

	return ret;
}

/*
 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
 * then get rescheduled. When there are a massive number of tasks doing page
 * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
 * the LRU list will go small and be scanned faster than necessary, leading to
 * unnecessary swapping, thrashing and OOM.
 */
static bool too_many_isolated(struct pglist_data *pgdat, int file,
		struct scan_control *sc)
{
	unsigned long inactive, isolated;
	bool too_many;

	if (current_is_kswapd())
		return false;

	if (!writeback_throttling_sane(sc))
		return false;

	if (file) {
		inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
		isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
	} else {
		inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
		isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
	}

	/*
	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
	 * won't get blocked by normal direct-reclaimers, forming a circular
	 * deadlock.
	 */
	if (gfp_has_io_fs(sc->gfp_mask))
		inactive >>= 3;

	too_many = isolated > inactive;

	/* Wake up tasks throttled due to too_many_isolated. */
	if (!too_many)
		wake_throttle_isolated(pgdat);

	return too_many;
}

*/ 1903 if (fatal_signal_pending(current)) 1904 return SWAP_CLUSTER_MAX; 1905 } 1906 1907 lru_add_drain(); 1908 1909 spin_lock_irq(&lruvec->lru_lock); 1910 1911 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list, 1912 &nr_scanned, sc, lru); 1913 1914 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); 1915 item = PGSCAN_KSWAPD + reclaimer_offset(); 1916 if (!cgroup_reclaim(sc)) 1917 __count_vm_events(item, nr_scanned); 1918 __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned); 1919 __count_vm_events(PGSCAN_ANON + file, nr_scanned); 1920 1921 spin_unlock_irq(&lruvec->lru_lock); 1922 1923 if (nr_taken == 0) 1924 return 0; 1925 1926 nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false); 1927 1928 spin_lock_irq(&lruvec->lru_lock); 1929 move_folios_to_lru(lruvec, &folio_list); 1930 1931 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); 1932 item = PGSTEAL_KSWAPD + reclaimer_offset(); 1933 if (!cgroup_reclaim(sc)) 1934 __count_vm_events(item, nr_reclaimed); 1935 __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed); 1936 __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed); 1937 spin_unlock_irq(&lruvec->lru_lock); 1938 1939 lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed); 1940 1941 /* 1942 * If dirty folios are scanned that are not queued for IO, it 1943 * implies that flushers are not doing their job. This can 1944 * happen when memory pressure pushes dirty folios to the end of 1945 * the LRU before the dirty limits are breached and the dirty 1946 * data has expired. It can also happen when the proportion of 1947 * dirty folios grows not through writes but through memory 1948 * pressure reclaiming all the clean cache. And in some cases, 1949 * the flushers simply cannot keep up with the allocation 1950 * rate. Nudge the flusher threads in case they are asleep. 1951 */ 1952 if (stat.nr_unqueued_dirty == nr_taken) { 1953 wakeup_flusher_threads(WB_REASON_VMSCAN); 1954 /* 1955 * For cgroupv1 dirty throttling is achieved by waking up 1956 * the kernel flusher here and later waiting on folios 1957 * which are in writeback to finish (see shrink_folio_list()). 1958 * 1959 * Flusher may not be able to issue writeback quickly 1960 * enough for cgroupv1 writeback throttling to work 1961 * on a large system. 1962 */ 1963 if (!writeback_throttling_sane(sc)) 1964 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); 1965 } 1966 1967 sc->nr.dirty += stat.nr_dirty; 1968 sc->nr.congested += stat.nr_congested; 1969 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; 1970 sc->nr.writeback += stat.nr_writeback; 1971 sc->nr.immediate += stat.nr_immediate; 1972 sc->nr.taken += nr_taken; 1973 if (file) 1974 sc->nr.file_taken += nr_taken; 1975 1976 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, 1977 nr_scanned, nr_reclaimed, &stat, sc->priority, file); 1978 return nr_reclaimed; 1979 } 1980 1981 /* 1982 * shrink_active_list() moves folios from the active LRU to the inactive LRU. 1983 * 1984 * We move them the other way if the folio is referenced by one or more 1985 * processes. 1986 * 1987 * If the folios are mostly unmapped, the processing is fast and it is 1988 * appropriate to hold lru_lock across the whole operation. But if 1989 * the folios are mapped, the processing is slow (folio_referenced()), so 1990 * we should drop lru_lock around each folio. It's impossible to balance 1991 * this, so instead we remove the folios from the LRU while processing them. 
1992 * It is safe to rely on the active flag against the non-LRU folios in here 1993 * because nobody will play with that bit on a non-LRU folio. 1994 * 1995 * The downside is that we have to touch folio->_refcount against each folio. 1996 * But we had to alter folio->flags anyway. 1997 */ 1998 static void shrink_active_list(unsigned long nr_to_scan, 1999 struct lruvec *lruvec, 2000 struct scan_control *sc, 2001 enum lru_list lru) 2002 { 2003 unsigned long nr_taken; 2004 unsigned long nr_scanned; 2005 unsigned long vm_flags; 2006 LIST_HEAD(l_hold); /* The folios which were snipped off */ 2007 LIST_HEAD(l_active); 2008 LIST_HEAD(l_inactive); 2009 unsigned nr_deactivate, nr_activate; 2010 unsigned nr_rotated = 0; 2011 bool file = is_file_lru(lru); 2012 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 2013 2014 lru_add_drain(); 2015 2016 spin_lock_irq(&lruvec->lru_lock); 2017 2018 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold, 2019 &nr_scanned, sc, lru); 2020 2021 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); 2022 2023 if (!cgroup_reclaim(sc)) 2024 __count_vm_events(PGREFILL, nr_scanned); 2025 __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned); 2026 2027 spin_unlock_irq(&lruvec->lru_lock); 2028 2029 while (!list_empty(&l_hold)) { 2030 struct folio *folio; 2031 2032 cond_resched(); 2033 folio = lru_to_folio(&l_hold); 2034 list_del(&folio->lru); 2035 2036 if (unlikely(!folio_evictable(folio))) { 2037 folio_putback_lru(folio); 2038 continue; 2039 } 2040 2041 if (unlikely(buffer_heads_over_limit)) { 2042 if (folio_needs_release(folio) && 2043 folio_trylock(folio)) { 2044 filemap_release_folio(folio, 0); 2045 folio_unlock(folio); 2046 } 2047 } 2048 2049 /* Referenced or rmap lock contention: rotate */ 2050 if (folio_referenced(folio, 0, sc->target_mem_cgroup, 2051 &vm_flags) != 0) { 2052 /* 2053 * Identify referenced, file-backed active folios and 2054 * give them one more trip around the active list. So 2055 * that executable code get better chances to stay in 2056 * memory under moderate memory pressure. Anon folios 2057 * are not likely to be evicted by use-once streaming 2058 * IO, plus JVM can create lots of anon VM_EXEC folios, 2059 * so we ignore them here. 2060 */ 2061 if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) { 2062 nr_rotated += folio_nr_pages(folio); 2063 list_add(&folio->lru, &l_active); 2064 continue; 2065 } 2066 } 2067 2068 folio_clear_active(folio); /* we are de-activating */ 2069 folio_set_workingset(folio); 2070 list_add(&folio->lru, &l_inactive); 2071 } 2072 2073 /* 2074 * Move folios back to the lru list. 
2075 */ 2076 spin_lock_irq(&lruvec->lru_lock); 2077 2078 nr_activate = move_folios_to_lru(lruvec, &l_active); 2079 nr_deactivate = move_folios_to_lru(lruvec, &l_inactive); 2080 2081 __count_vm_events(PGDEACTIVATE, nr_deactivate); 2082 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate); 2083 2084 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); 2085 spin_unlock_irq(&lruvec->lru_lock); 2086 2087 if (nr_rotated) 2088 lru_note_cost(lruvec, file, 0, nr_rotated); 2089 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, 2090 nr_deactivate, nr_rotated, sc->priority, file); 2091 } 2092 2093 static unsigned int reclaim_folio_list(struct list_head *folio_list, 2094 struct pglist_data *pgdat, 2095 bool ignore_references) 2096 { 2097 struct reclaim_stat dummy_stat; 2098 unsigned int nr_reclaimed; 2099 struct folio *folio; 2100 struct scan_control sc = { 2101 .gfp_mask = GFP_KERNEL, 2102 .may_writepage = 1, 2103 .may_unmap = 1, 2104 .may_swap = 1, 2105 .no_demotion = 1, 2106 }; 2107 2108 nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, ignore_references); 2109 while (!list_empty(folio_list)) { 2110 folio = lru_to_folio(folio_list); 2111 list_del(&folio->lru); 2112 folio_putback_lru(folio); 2113 } 2114 2115 return nr_reclaimed; 2116 } 2117 2118 unsigned long reclaim_pages(struct list_head *folio_list, bool ignore_references) 2119 { 2120 int nid; 2121 unsigned int nr_reclaimed = 0; 2122 LIST_HEAD(node_folio_list); 2123 unsigned int noreclaim_flag; 2124 2125 if (list_empty(folio_list)) 2126 return nr_reclaimed; 2127 2128 noreclaim_flag = memalloc_noreclaim_save(); 2129 2130 nid = folio_nid(lru_to_folio(folio_list)); 2131 do { 2132 struct folio *folio = lru_to_folio(folio_list); 2133 2134 if (nid == folio_nid(folio)) { 2135 folio_clear_active(folio); 2136 list_move(&folio->lru, &node_folio_list); 2137 continue; 2138 } 2139 2140 nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid), 2141 ignore_references); 2142 nid = folio_nid(lru_to_folio(folio_list)); 2143 } while (!list_empty(folio_list)); 2144 2145 nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid), ignore_references); 2146 2147 memalloc_noreclaim_restore(noreclaim_flag); 2148 2149 return nr_reclaimed; 2150 } 2151 2152 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 2153 struct lruvec *lruvec, struct scan_control *sc) 2154 { 2155 if (is_active_lru(lru)) { 2156 if (sc->may_deactivate & (1 << is_file_lru(lru))) 2157 shrink_active_list(nr_to_scan, lruvec, sc, lru); 2158 else 2159 sc->skipped_deactivate = 1; 2160 return 0; 2161 } 2162 2163 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); 2164 } 2165 2166 /* 2167 * The inactive anon list should be small enough that the VM never has 2168 * to do too much work. 2169 * 2170 * The inactive file list should be small enough to leave most memory 2171 * to the established workingset on the scan-resistant active list, 2172 * but large enough to avoid thrashing the aggregate readahead window. 2173 * 2174 * Both inactive lists should also be large enough that each inactive 2175 * folio has a chance to be referenced again before it is reclaimed. 2176 * 2177 * If that fails and refaulting is observed, the inactive list grows. 2178 * 2179 * The inactive_ratio is the target ratio of ACTIVE to INACTIVE folios 2180 * on this LRU, maintained by the pageout code. An inactive_ratio 2181 * of 3 means 3:1 or 25% of the folios are kept on the inactive list. 
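 *
 * (Illustrative arithmetic: the targets in the table below follow from the
 * inactive_ratio = int_sqrt(10 * gb) computed below; e.g. for 100GB on one
 * LRU, int_sqrt(10 * 100) = 31, so roughly 1/32 of it, about 3GB, is the
 * target maximum for the inactive list.)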
2182 * 2183 * total target max 2184 * memory ratio inactive 2185 * ------------------------------------- 2186 * 10MB 1 5MB 2187 * 100MB 1 50MB 2188 * 1GB 3 250MB 2189 * 10GB 10 0.9GB 2190 * 100GB 31 3GB 2191 * 1TB 101 10GB 2192 * 10TB 320 32GB 2193 */ 2194 static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru) 2195 { 2196 enum lru_list active_lru = inactive_lru + LRU_ACTIVE; 2197 unsigned long inactive, active; 2198 unsigned long inactive_ratio; 2199 unsigned long gb; 2200 2201 inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru); 2202 active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru); 2203 2204 gb = (inactive + active) >> (30 - PAGE_SHIFT); 2205 if (gb) 2206 inactive_ratio = int_sqrt(10 * gb); 2207 else 2208 inactive_ratio = 1; 2209 2210 return inactive * inactive_ratio < active; 2211 } 2212 2213 enum scan_balance { 2214 SCAN_EQUAL, 2215 SCAN_FRACT, 2216 SCAN_ANON, 2217 SCAN_FILE, 2218 }; 2219 2220 static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc) 2221 { 2222 unsigned long file; 2223 struct lruvec *target_lruvec; 2224 2225 if (lru_gen_enabled()) 2226 return; 2227 2228 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); 2229 2230 /* 2231 * Flush the memory cgroup stats, so that we read accurate per-memcg 2232 * lruvec stats for heuristics. 2233 */ 2234 mem_cgroup_flush_stats(sc->target_mem_cgroup); 2235 2236 /* 2237 * Determine the scan balance between anon and file LRUs. 2238 */ 2239 spin_lock_irq(&target_lruvec->lru_lock); 2240 sc->anon_cost = target_lruvec->anon_cost; 2241 sc->file_cost = target_lruvec->file_cost; 2242 spin_unlock_irq(&target_lruvec->lru_lock); 2243 2244 /* 2245 * Target desirable inactive:active list ratios for the anon 2246 * and file LRU lists. 2247 */ 2248 if (!sc->force_deactivate) { 2249 unsigned long refaults; 2250 2251 /* 2252 * When refaults are being observed, it means a new 2253 * workingset is being established. Deactivate to get 2254 * rid of any stale active pages quickly. 2255 */ 2256 refaults = lruvec_page_state(target_lruvec, 2257 WORKINGSET_ACTIVATE_ANON); 2258 if (refaults != target_lruvec->refaults[WORKINGSET_ANON] || 2259 inactive_is_low(target_lruvec, LRU_INACTIVE_ANON)) 2260 sc->may_deactivate |= DEACTIVATE_ANON; 2261 else 2262 sc->may_deactivate &= ~DEACTIVATE_ANON; 2263 2264 refaults = lruvec_page_state(target_lruvec, 2265 WORKINGSET_ACTIVATE_FILE); 2266 if (refaults != target_lruvec->refaults[WORKINGSET_FILE] || 2267 inactive_is_low(target_lruvec, LRU_INACTIVE_FILE)) 2268 sc->may_deactivate |= DEACTIVATE_FILE; 2269 else 2270 sc->may_deactivate &= ~DEACTIVATE_FILE; 2271 } else 2272 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE; 2273 2274 /* 2275 * If we have plenty of inactive file pages that aren't 2276 * thrashing, try to reclaim those first before touching 2277 * anonymous pages. 2278 */ 2279 file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE); 2280 if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE) && 2281 !sc->no_cache_trim_mode) 2282 sc->cache_trim_mode = 1; 2283 else 2284 sc->cache_trim_mode = 0; 2285 2286 /* 2287 * Prevent the reclaimer from falling into the cache trap: as 2288 * cache pages start out inactive, every cache fault will tip 2289 * the scan balance towards the file LRU. And as the file LRU 2290 * shrinks, so does the window for rotation from references. 2291 * This means we have a runaway feedback loop where a tiny 2292 * thrashing file LRU becomes infinitely more attractive than 2293 * anon pages. 
Try to detect this based on file LRU size. 2294 */ 2295 if (!cgroup_reclaim(sc)) { 2296 unsigned long total_high_wmark = 0; 2297 unsigned long free, anon; 2298 int z; 2299 2300 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); 2301 file = node_page_state(pgdat, NR_ACTIVE_FILE) + 2302 node_page_state(pgdat, NR_INACTIVE_FILE); 2303 2304 for (z = 0; z < MAX_NR_ZONES; z++) { 2305 struct zone *zone = &pgdat->node_zones[z]; 2306 2307 if (!managed_zone(zone)) 2308 continue; 2309 2310 total_high_wmark += high_wmark_pages(zone); 2311 } 2312 2313 /* 2314 * Consider anon: if that's low too, this isn't a 2315 * runaway file reclaim problem, but rather just 2316 * extreme pressure. Reclaim as per usual then. 2317 */ 2318 anon = node_page_state(pgdat, NR_INACTIVE_ANON); 2319 2320 sc->file_is_tiny = 2321 file + free <= total_high_wmark && 2322 !(sc->may_deactivate & DEACTIVATE_ANON) && 2323 anon >> sc->priority; 2324 } 2325 } 2326 2327 /* 2328 * Determine how aggressively the anon and file LRU lists should be 2329 * scanned. 2330 * 2331 * nr[0] = anon inactive folios to scan; nr[1] = anon active folios to scan 2332 * nr[2] = file inactive folios to scan; nr[3] = file active folios to scan 2333 */ 2334 static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, 2335 unsigned long *nr) 2336 { 2337 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 2338 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 2339 unsigned long anon_cost, file_cost, total_cost; 2340 int swappiness = mem_cgroup_swappiness(memcg); 2341 u64 fraction[ANON_AND_FILE]; 2342 u64 denominator = 0; /* gcc */ 2343 enum scan_balance scan_balance; 2344 unsigned long ap, fp; 2345 enum lru_list lru; 2346 2347 /* If we have no swap space, do not bother scanning anon folios. */ 2348 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) { 2349 scan_balance = SCAN_FILE; 2350 goto out; 2351 } 2352 2353 /* 2354 * Global reclaim will swap to prevent OOM even with no 2355 * swappiness, but memcg users want to use this knob to 2356 * disable swapping for individual groups completely when 2357 * using the memory controller's swap limit feature would be 2358 * too expensive. 2359 */ 2360 if (cgroup_reclaim(sc) && !swappiness) { 2361 scan_balance = SCAN_FILE; 2362 goto out; 2363 } 2364 2365 /* 2366 * Do not apply any pressure balancing cleverness when the 2367 * system is close to OOM, scan both anon and file equally 2368 * (unless the swappiness setting disagrees with swapping). 2369 */ 2370 if (!sc->priority && swappiness) { 2371 scan_balance = SCAN_EQUAL; 2372 goto out; 2373 } 2374 2375 /* 2376 * If the system is almost out of file pages, force-scan anon. 2377 */ 2378 if (sc->file_is_tiny) { 2379 scan_balance = SCAN_ANON; 2380 goto out; 2381 } 2382 2383 /* 2384 * If there is enough inactive page cache, we do not reclaim 2385 * anything from the anonymous working right now. 2386 */ 2387 if (sc->cache_trim_mode) { 2388 scan_balance = SCAN_FILE; 2389 goto out; 2390 } 2391 2392 scan_balance = SCAN_FRACT; 2393 /* 2394 * Calculate the pressure balance between anon and file pages. 2395 * 2396 * The amount of pressure we put on each LRU is inversely 2397 * proportional to the cost of reclaiming each list, as 2398 * determined by the share of pages that are refaulting, times 2399 * the relative IO cost of bringing back a swapped out 2400 * anonymous page vs reloading a filesystem page (swappiness). 
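 *
 * (Worked example with made-up costs, tracing the arithmetic below:
 * swappiness=60, sc->anon_cost=100, sc->file_cost=300 give anon_cost=500,
 * file_cost=700, total_cost=1200, hence ap = 60*1201/501 ~= 143 and
 * fp = 140*1201/701 ~= 239, i.e. about 37% of the pressure goes to anon.)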
2401 * 2402 * Although we limit that influence to ensure no list gets 2403 * left behind completely: at least a third of the pressure is 2404 * applied, before swappiness. 2405 * 2406 * With swappiness at 100, anon and file have equal IO cost. 2407 */ 2408 total_cost = sc->anon_cost + sc->file_cost; 2409 anon_cost = total_cost + sc->anon_cost; 2410 file_cost = total_cost + sc->file_cost; 2411 total_cost = anon_cost + file_cost; 2412 2413 ap = swappiness * (total_cost + 1); 2414 ap /= anon_cost + 1; 2415 2416 fp = (200 - swappiness) * (total_cost + 1); 2417 fp /= file_cost + 1; 2418 2419 fraction[0] = ap; 2420 fraction[1] = fp; 2421 denominator = ap + fp; 2422 out: 2423 for_each_evictable_lru(lru) { 2424 bool file = is_file_lru(lru); 2425 unsigned long lruvec_size; 2426 unsigned long low, min; 2427 unsigned long scan; 2428 2429 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); 2430 mem_cgroup_protection(sc->target_mem_cgroup, memcg, 2431 &min, &low); 2432 2433 if (min || low) { 2434 /* 2435 * Scale a cgroup's reclaim pressure by proportioning 2436 * its current usage to its memory.low or memory.min 2437 * setting. 2438 * 2439 * This is important, as otherwise scanning aggression 2440 * becomes extremely binary -- from nothing as we 2441 * approach the memory protection threshold, to totally 2442 * nominal as we exceed it. This results in requiring 2443 * setting extremely liberal protection thresholds. It 2444 * also means we simply get no protection at all if we 2445 * set it too low, which is not ideal. 2446 * 2447 * If there is any protection in place, we reduce scan 2448 * pressure by how much of the total memory used is 2449 * within protection thresholds. 2450 * 2451 * There is one special case: in the first reclaim pass, 2452 * we skip over all groups that are within their low 2453 * protection. If that fails to reclaim enough pages to 2454 * satisfy the reclaim goal, we come back and override 2455 * the best-effort low protection. However, we still 2456 * ideally want to honor how well-behaved groups are in 2457 * that case instead of simply punishing them all 2458 * equally. As such, we reclaim them based on how much 2459 * memory they are using, reducing the scan pressure 2460 * again by how much of the total memory used is under 2461 * hard protection. 2462 */ 2463 unsigned long cgroup_size = mem_cgroup_size(memcg); 2464 unsigned long protection; 2465 2466 /* memory.low scaling, make sure we retry before OOM */ 2467 if (!sc->memcg_low_reclaim && low > min) { 2468 protection = low; 2469 sc->memcg_low_skipped = 1; 2470 } else { 2471 protection = min; 2472 } 2473 2474 /* Avoid TOCTOU with earlier protection check */ 2475 cgroup_size = max(cgroup_size, protection); 2476 2477 scan = lruvec_size - lruvec_size * protection / 2478 (cgroup_size + 1); 2479 2480 /* 2481 * Minimally target SWAP_CLUSTER_MAX pages to keep 2482 * reclaim moving forwards, avoiding decrementing 2483 * sc->priority further than desirable. 2484 */ 2485 scan = max(scan, SWAP_CLUSTER_MAX); 2486 } else { 2487 scan = lruvec_size; 2488 } 2489 2490 scan >>= sc->priority; 2491 2492 /* 2493 * If the cgroup's already been deleted, make sure to 2494 * scrape out the remaining cache. 2495 */ 2496 if (!scan && !mem_cgroup_online(memcg)) 2497 scan = min(lruvec_size, SWAP_CLUSTER_MAX); 2498 2499 switch (scan_balance) { 2500 case SCAN_EQUAL: 2501 /* Scan lists relative to size */ 2502 break; 2503 case SCAN_FRACT: 2504 /* 2505 * Scan types proportional to swappiness and 2506 * their relative recent reclaim efficiency. 
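 *
 * (Hypothetical round-off case: scan=3, fraction[file]=1, denominator=4
 * floors to 0 with div64_u64, which for an offlined memcg would leave its
 * last folios unreclaimable; hence the DIV64_U64_ROUND_UP variant below.)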
2507 * Make sure we don't miss the last page on 2508 * the offlined memory cgroups because of a 2509 * round-off error. 2510 */ 2511 scan = mem_cgroup_online(memcg) ? 2512 div64_u64(scan * fraction[file], denominator) : 2513 DIV64_U64_ROUND_UP(scan * fraction[file], 2514 denominator); 2515 break; 2516 case SCAN_FILE: 2517 case SCAN_ANON: 2518 /* Scan one type exclusively */ 2519 if ((scan_balance == SCAN_FILE) != file) 2520 scan = 0; 2521 break; 2522 default: 2523 /* Look ma, no brain */ 2524 BUG(); 2525 } 2526 2527 nr[lru] = scan; 2528 } 2529 } 2530 2531 /* 2532 * Anonymous LRU management is a waste if there is 2533 * ultimately no way to reclaim the memory. 2534 */ 2535 static bool can_age_anon_pages(struct pglist_data *pgdat, 2536 struct scan_control *sc) 2537 { 2538 /* Aging the anon LRU is valuable if swap is present: */ 2539 if (total_swap_pages > 0) 2540 return true; 2541 2542 /* Also valuable if anon pages can be demoted: */ 2543 return can_demote(pgdat->node_id, sc); 2544 } 2545 2546 #ifdef CONFIG_LRU_GEN 2547 2548 #ifdef CONFIG_LRU_GEN_ENABLED 2549 DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS); 2550 #define get_cap(cap) static_branch_likely(&lru_gen_caps[cap]) 2551 #else 2552 DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS); 2553 #define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap]) 2554 #endif 2555 2556 static bool should_walk_mmu(void) 2557 { 2558 return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK); 2559 } 2560 2561 static bool should_clear_pmd_young(void) 2562 { 2563 return arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG); 2564 } 2565 2566 /****************************************************************************** 2567 * shorthand helpers 2568 ******************************************************************************/ 2569 2570 #define LRU_REFS_FLAGS (BIT(PG_referenced) | BIT(PG_workingset)) 2571 2572 #define DEFINE_MAX_SEQ(lruvec) \ 2573 unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq) 2574 2575 #define DEFINE_MIN_SEQ(lruvec) \ 2576 unsigned long min_seq[ANON_AND_FILE] = { \ 2577 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \ 2578 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \ 2579 } 2580 2581 #define for_each_gen_type_zone(gen, type, zone) \ 2582 for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \ 2583 for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \ 2584 for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++) 2585 2586 #define get_memcg_gen(seq) ((seq) % MEMCG_NR_GENS) 2587 #define get_memcg_bin(bin) ((bin) % MEMCG_NR_BINS) 2588 2589 static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid) 2590 { 2591 struct pglist_data *pgdat = NODE_DATA(nid); 2592 2593 #ifdef CONFIG_MEMCG 2594 if (memcg) { 2595 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec; 2596 2597 /* see the comment in mem_cgroup_lruvec() */ 2598 if (!lruvec->pgdat) 2599 lruvec->pgdat = pgdat; 2600 2601 return lruvec; 2602 } 2603 #endif 2604 VM_WARN_ON_ONCE(!mem_cgroup_disabled()); 2605 2606 return &pgdat->__lruvec; 2607 } 2608 2609 static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc) 2610 { 2611 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 2612 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 2613 2614 if (!sc->may_swap) 2615 return 0; 2616 2617 if (!can_demote(pgdat->node_id, sc) && 2618 mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH) 2619 return 0; 2620 2621 return mem_cgroup_swappiness(memcg); 2622 } 2623 2624 static int get_nr_gens(struct lruvec *lruvec, int type) 2625 { 2626 
return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1; 2627 } 2628 2629 static bool __maybe_unused seq_is_valid(struct lruvec *lruvec) 2630 { 2631 /* see the comment on lru_gen_folio */ 2632 return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS && 2633 get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) && 2634 get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS; 2635 } 2636 2637 /****************************************************************************** 2638 * Bloom filters 2639 ******************************************************************************/ 2640 2641 /* 2642 * Bloom filters with m=1<<15, k=2 and the false positive rates of ~1/5 when 2643 * n=10,000 and ~1/2 when n=20,000, where, conventionally, m is the number of 2644 * bits in a bitmap, k is the number of hash functions and n is the number of 2645 * inserted items. 2646 * 2647 * Page table walkers use one of the two filters to reduce their search space. 2648 * To get rid of non-leaf entries that no longer have enough leaf entries, the 2649 * aging uses the double-buffering technique to flip to the other filter each 2650 * time it produces a new generation. For non-leaf entries that have enough 2651 * leaf entries, the aging carries them over to the next generation in 2652 * walk_pmd_range(); the eviction also reports them when walking the rmap 2653 * in lru_gen_look_around(). 2654 * 2655 * For future optimizations: 2656 * 1. It's not necessary to keep both filters all the time. The spare one can be 2657 * freed after the RCU grace period and reallocated if needed again. 2658 * 2. And when reallocating, it's worth scaling its size according to the number 2659 * of inserted entries in the other filter, to reduce the memory overhead on 2660 * small systems and false positives on large systems. 2661 * 3. Jenkins' hash function is an alternative to Knuth's.
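 *
 * (Rough check on the false positive rates quoted at the top of this comment,
 * assuming the standard (1 - e^(-k*n/m))^k approximation: with
 * m = 1<<15 = 32768 and k = 2 this gives ~0.21 for n = 10,000 and ~0.50 for
 * n = 20,000, matching the ~1/5 and ~1/2 figures.)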
2662 */ 2663 #define BLOOM_FILTER_SHIFT 15 2664 2665 static inline int filter_gen_from_seq(unsigned long seq) 2666 { 2667 return seq % NR_BLOOM_FILTERS; 2668 } 2669 2670 static void get_item_key(void *item, int *key) 2671 { 2672 u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2); 2673 2674 BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32)); 2675 2676 key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1); 2677 key[1] = hash >> BLOOM_FILTER_SHIFT; 2678 } 2679 2680 static bool test_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq, 2681 void *item) 2682 { 2683 int key[2]; 2684 unsigned long *filter; 2685 int gen = filter_gen_from_seq(seq); 2686 2687 filter = READ_ONCE(mm_state->filters[gen]); 2688 if (!filter) 2689 return true; 2690 2691 get_item_key(item, key); 2692 2693 return test_bit(key[0], filter) && test_bit(key[1], filter); 2694 } 2695 2696 static void update_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq, 2697 void *item) 2698 { 2699 int key[2]; 2700 unsigned long *filter; 2701 int gen = filter_gen_from_seq(seq); 2702 2703 filter = READ_ONCE(mm_state->filters[gen]); 2704 if (!filter) 2705 return; 2706 2707 get_item_key(item, key); 2708 2709 if (!test_bit(key[0], filter)) 2710 set_bit(key[0], filter); 2711 if (!test_bit(key[1], filter)) 2712 set_bit(key[1], filter); 2713 } 2714 2715 static void reset_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq) 2716 { 2717 unsigned long *filter; 2718 int gen = filter_gen_from_seq(seq); 2719 2720 filter = mm_state->filters[gen]; 2721 if (filter) { 2722 bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT)); 2723 return; 2724 } 2725 2726 filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT), 2727 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); 2728 WRITE_ONCE(mm_state->filters[gen], filter); 2729 } 2730 2731 /****************************************************************************** 2732 * mm_struct list 2733 ******************************************************************************/ 2734 2735 #ifdef CONFIG_LRU_GEN_WALKS_MMU 2736 2737 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg) 2738 { 2739 static struct lru_gen_mm_list mm_list = { 2740 .fifo = LIST_HEAD_INIT(mm_list.fifo), 2741 .lock = __SPIN_LOCK_UNLOCKED(mm_list.lock), 2742 }; 2743 2744 #ifdef CONFIG_MEMCG 2745 if (memcg) 2746 return &memcg->mm_list; 2747 #endif 2748 VM_WARN_ON_ONCE(!mem_cgroup_disabled()); 2749 2750 return &mm_list; 2751 } 2752 2753 static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec) 2754 { 2755 return &lruvec->mm_state; 2756 } 2757 2758 static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk) 2759 { 2760 int key; 2761 struct mm_struct *mm; 2762 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); 2763 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); 2764 2765 mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); 2766 key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap); 2767 2768 if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap)) 2769 return NULL; 2770 2771 clear_bit(key, &mm->lru_gen.bitmap); 2772 2773 return mmget_not_zero(mm) ? 
mm : NULL; 2774 } 2775 2776 void lru_gen_add_mm(struct mm_struct *mm) 2777 { 2778 int nid; 2779 struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm); 2780 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 2781 2782 VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list)); 2783 #ifdef CONFIG_MEMCG 2784 VM_WARN_ON_ONCE(mm->lru_gen.memcg); 2785 mm->lru_gen.memcg = memcg; 2786 #endif 2787 spin_lock(&mm_list->lock); 2788 2789 for_each_node_state(nid, N_MEMORY) { 2790 struct lruvec *lruvec = get_lruvec(memcg, nid); 2791 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 2792 2793 /* the first addition since the last iteration */ 2794 if (mm_state->tail == &mm_list->fifo) 2795 mm_state->tail = &mm->lru_gen.list; 2796 } 2797 2798 list_add_tail(&mm->lru_gen.list, &mm_list->fifo); 2799 2800 spin_unlock(&mm_list->lock); 2801 } 2802 2803 void lru_gen_del_mm(struct mm_struct *mm) 2804 { 2805 int nid; 2806 struct lru_gen_mm_list *mm_list; 2807 struct mem_cgroup *memcg = NULL; 2808 2809 if (list_empty(&mm->lru_gen.list)) 2810 return; 2811 2812 #ifdef CONFIG_MEMCG 2813 memcg = mm->lru_gen.memcg; 2814 #endif 2815 mm_list = get_mm_list(memcg); 2816 2817 spin_lock(&mm_list->lock); 2818 2819 for_each_node(nid) { 2820 struct lruvec *lruvec = get_lruvec(memcg, nid); 2821 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 2822 2823 /* where the current iteration continues after */ 2824 if (mm_state->head == &mm->lru_gen.list) 2825 mm_state->head = mm_state->head->prev; 2826 2827 /* where the last iteration ended before */ 2828 if (mm_state->tail == &mm->lru_gen.list) 2829 mm_state->tail = mm_state->tail->next; 2830 } 2831 2832 list_del_init(&mm->lru_gen.list); 2833 2834 spin_unlock(&mm_list->lock); 2835 2836 #ifdef CONFIG_MEMCG 2837 mem_cgroup_put(mm->lru_gen.memcg); 2838 mm->lru_gen.memcg = NULL; 2839 #endif 2840 } 2841 2842 #ifdef CONFIG_MEMCG 2843 void lru_gen_migrate_mm(struct mm_struct *mm) 2844 { 2845 struct mem_cgroup *memcg; 2846 struct task_struct *task = rcu_dereference_protected(mm->owner, true); 2847 2848 VM_WARN_ON_ONCE(task->mm != mm); 2849 lockdep_assert_held(&task->alloc_lock); 2850 2851 /* for mm_update_next_owner() */ 2852 if (mem_cgroup_disabled()) 2853 return; 2854 2855 /* migration can happen before addition */ 2856 if (!mm->lru_gen.memcg) 2857 return; 2858 2859 rcu_read_lock(); 2860 memcg = mem_cgroup_from_task(task); 2861 rcu_read_unlock(); 2862 if (memcg == mm->lru_gen.memcg) 2863 return; 2864 2865 VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list)); 2866 2867 lru_gen_del_mm(mm); 2868 lru_gen_add_mm(mm); 2869 } 2870 #endif 2871 2872 #else /* !CONFIG_LRU_GEN_WALKS_MMU */ 2873 2874 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg) 2875 { 2876 return NULL; 2877 } 2878 2879 static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec) 2880 { 2881 return NULL; 2882 } 2883 2884 static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk) 2885 { 2886 return NULL; 2887 } 2888 2889 #endif 2890 2891 static void reset_mm_stats(struct lru_gen_mm_walk *walk, bool last) 2892 { 2893 int i; 2894 int hist; 2895 struct lruvec *lruvec = walk->lruvec; 2896 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 2897 2898 lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock); 2899 2900 hist = lru_hist_from_seq(walk->seq); 2901 2902 for (i = 0; i < NR_MM_STATS; i++) { 2903 WRITE_ONCE(mm_state->stats[hist][i], 2904 mm_state->stats[hist][i] + walk->mm_stats[i]); 2905 walk->mm_stats[i] = 0; 2906 } 2907 2908 if (NR_HIST_GENS > 1 && last) { 2909 hist = 
lru_hist_from_seq(walk->seq + 1); 2910 2911 for (i = 0; i < NR_MM_STATS; i++) 2912 WRITE_ONCE(mm_state->stats[hist][i], 0); 2913 } 2914 } 2915 2916 static bool iterate_mm_list(struct lru_gen_mm_walk *walk, struct mm_struct **iter) 2917 { 2918 bool first = false; 2919 bool last = false; 2920 struct mm_struct *mm = NULL; 2921 struct lruvec *lruvec = walk->lruvec; 2922 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 2923 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 2924 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 2925 2926 /* 2927 * mm_state->seq is incremented after each iteration of mm_list. There 2928 * are three interesting cases for this page table walker: 2929 * 1. It tries to start a new iteration with a stale max_seq: there is 2930 * nothing left to do. 2931 * 2. It started the next iteration: it needs to reset the Bloom filter 2932 * so that a fresh set of PTE tables can be recorded. 2933 * 3. It ended the current iteration: it needs to reset the mm stats 2934 * counters and tell its caller to increment max_seq. 2935 */ 2936 spin_lock(&mm_list->lock); 2937 2938 VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->seq); 2939 2940 if (walk->seq <= mm_state->seq) 2941 goto done; 2942 2943 if (!mm_state->head) 2944 mm_state->head = &mm_list->fifo; 2945 2946 if (mm_state->head == &mm_list->fifo) 2947 first = true; 2948 2949 do { 2950 mm_state->head = mm_state->head->next; 2951 if (mm_state->head == &mm_list->fifo) { 2952 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); 2953 last = true; 2954 break; 2955 } 2956 2957 /* force scan for those added after the last iteration */ 2958 if (!mm_state->tail || mm_state->tail == mm_state->head) { 2959 mm_state->tail = mm_state->head->next; 2960 walk->force_scan = true; 2961 } 2962 } while (!(mm = get_next_mm(walk))); 2963 done: 2964 if (*iter || last) 2965 reset_mm_stats(walk, last); 2966 2967 spin_unlock(&mm_list->lock); 2968 2969 if (mm && first) 2970 reset_bloom_filter(mm_state, walk->seq + 1); 2971 2972 if (*iter) 2973 mmput_async(*iter); 2974 2975 *iter = mm; 2976 2977 return last; 2978 } 2979 2980 static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long seq) 2981 { 2982 bool success = false; 2983 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 2984 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 2985 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 2986 2987 spin_lock(&mm_list->lock); 2988 2989 VM_WARN_ON_ONCE(mm_state->seq + 1 < seq); 2990 2991 if (seq > mm_state->seq) { 2992 mm_state->head = NULL; 2993 mm_state->tail = NULL; 2994 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); 2995 success = true; 2996 } 2997 2998 spin_unlock(&mm_list->lock); 2999 3000 return success; 3001 } 3002 3003 /****************************************************************************** 3004 * PID controller 3005 ******************************************************************************/ 3006 3007 /* 3008 * A feedback loop based on Proportional-Integral-Derivative (PID) controller. 3009 * 3010 * The P term is refaulted/(evicted+protected) from a tier in the generation 3011 * currently being evicted; the I term is the exponential moving average of the 3012 * P term over the generations previously evicted, using the smoothing factor 3013 * 1/2; the D term isn't supported. 3014 * 3015 * The setpoint (SP) is always the first tier of one type; the process variable 3016 * (PV) is either any tier of the other type or any other tier of the same 3017 * type. 
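 *
 * (Made-up numbers to illustrate the P term: if a tier had 400 folios
 * evicted, none protected, and 50 of them refaulted, its P term is
 * 50/400 = 12.5%.)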
3018 * 3019 * The error is the difference between the SP and the PV; the correction is to 3020 * turn off protection when SP>PV or turn on protection when SP<PV. 3021 * 3022 * For future optimizations: 3023 * 1. The D term may discount the other two terms over time so that long-lived 3024 * generations can resist stale information. 3025 */ 3026 struct ctrl_pos { 3027 unsigned long refaulted; 3028 unsigned long total; 3029 int gain; 3030 }; 3031 3032 static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain, 3033 struct ctrl_pos *pos) 3034 { 3035 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3036 int hist = lru_hist_from_seq(lrugen->min_seq[type]); 3037 3038 pos->refaulted = lrugen->avg_refaulted[type][tier] + 3039 atomic_long_read(&lrugen->refaulted[hist][type][tier]); 3040 pos->total = lrugen->avg_total[type][tier] + 3041 atomic_long_read(&lrugen->evicted[hist][type][tier]); 3042 if (tier) 3043 pos->total += lrugen->protected[hist][type][tier - 1]; 3044 pos->gain = gain; 3045 } 3046 3047 static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover) 3048 { 3049 int hist, tier; 3050 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3051 bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1; 3052 unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1; 3053 3054 lockdep_assert_held(&lruvec->lru_lock); 3055 3056 if (!carryover && !clear) 3057 return; 3058 3059 hist = lru_hist_from_seq(seq); 3060 3061 for (tier = 0; tier < MAX_NR_TIERS; tier++) { 3062 if (carryover) { 3063 unsigned long sum; 3064 3065 sum = lrugen->avg_refaulted[type][tier] + 3066 atomic_long_read(&lrugen->refaulted[hist][type][tier]); 3067 WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2); 3068 3069 sum = lrugen->avg_total[type][tier] + 3070 atomic_long_read(&lrugen->evicted[hist][type][tier]); 3071 if (tier) 3072 sum += lrugen->protected[hist][type][tier - 1]; 3073 WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2); 3074 } 3075 3076 if (clear) { 3077 atomic_long_set(&lrugen->refaulted[hist][type][tier], 0); 3078 atomic_long_set(&lrugen->evicted[hist][type][tier], 0); 3079 if (tier) 3080 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0); 3081 } 3082 } 3083 } 3084 3085 static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv) 3086 { 3087 /* 3088 * Return true if the PV has a limited number of refaults or a lower 3089 * refaulted/total than the SP. 3090 */ 3091 return pv->refaulted < MIN_LRU_BATCH || 3092 pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <= 3093 (sp->refaulted + 1) * pv->total * pv->gain; 3094 } 3095 3096 /****************************************************************************** 3097 * the aging 3098 ******************************************************************************/ 3099 3100 /* promote pages accessed through page tables */ 3101 static int folio_update_gen(struct folio *folio, int gen) 3102 { 3103 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); 3104 3105 VM_WARN_ON_ONCE(gen >= MAX_NR_GENS); 3106 VM_WARN_ON_ONCE(!rcu_read_lock_held()); 3107 3108 do { 3109 /* lru_gen_del_folio() has isolated this page? 
*/ 3110 if (!(old_flags & LRU_GEN_MASK)) { 3111 /* for shrink_folio_list() */ 3112 new_flags = old_flags | BIT(PG_referenced); 3113 continue; 3114 } 3115 3116 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); 3117 new_flags |= (gen + 1UL) << LRU_GEN_PGOFF; 3118 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); 3119 3120 return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; 3121 } 3122 3123 /* protect pages accessed multiple times through file descriptors */ 3124 static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming) 3125 { 3126 int type = folio_is_file_lru(folio); 3127 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3128 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); 3129 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); 3130 3131 VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio); 3132 3133 do { 3134 new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; 3135 /* folio_update_gen() has promoted this page? */ 3136 if (new_gen >= 0 && new_gen != old_gen) 3137 return new_gen; 3138 3139 new_gen = (old_gen + 1) % MAX_NR_GENS; 3140 3141 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); 3142 new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF; 3143 /* for folio_end_writeback() */ 3144 if (reclaiming) 3145 new_flags |= BIT(PG_reclaim); 3146 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); 3147 3148 lru_gen_update_size(lruvec, folio, old_gen, new_gen); 3149 3150 return new_gen; 3151 } 3152 3153 static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio, 3154 int old_gen, int new_gen) 3155 { 3156 int type = folio_is_file_lru(folio); 3157 int zone = folio_zonenum(folio); 3158 int delta = folio_nr_pages(folio); 3159 3160 VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS); 3161 VM_WARN_ON_ONCE(new_gen >= MAX_NR_GENS); 3162 3163 walk->batched++; 3164 3165 walk->nr_pages[old_gen][type][zone] -= delta; 3166 walk->nr_pages[new_gen][type][zone] += delta; 3167 } 3168 3169 static void reset_batch_size(struct lru_gen_mm_walk *walk) 3170 { 3171 int gen, type, zone; 3172 struct lruvec *lruvec = walk->lruvec; 3173 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3174 3175 walk->batched = 0; 3176 3177 for_each_gen_type_zone(gen, type, zone) { 3178 enum lru_list lru = type * LRU_INACTIVE_FILE; 3179 int delta = walk->nr_pages[gen][type][zone]; 3180 3181 if (!delta) 3182 continue; 3183 3184 walk->nr_pages[gen][type][zone] = 0; 3185 WRITE_ONCE(lrugen->nr_pages[gen][type][zone], 3186 lrugen->nr_pages[gen][type][zone] + delta); 3187 3188 if (lru_gen_is_active(lruvec, gen)) 3189 lru += LRU_ACTIVE; 3190 __update_lru_size(lruvec, lru, zone, delta); 3191 } 3192 } 3193 3194 static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *args) 3195 { 3196 struct address_space *mapping; 3197 struct vm_area_struct *vma = args->vma; 3198 struct lru_gen_mm_walk *walk = args->private; 3199 3200 if (!vma_is_accessible(vma)) 3201 return true; 3202 3203 if (is_vm_hugetlb_page(vma)) 3204 return true; 3205 3206 if (!vma_has_recency(vma)) 3207 return true; 3208 3209 if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) 3210 return true; 3211 3212 if (vma == get_gate_vma(vma->vm_mm)) 3213 return true; 3214 3215 if (vma_is_anonymous(vma)) 3216 return !walk->can_swap; 3217 3218 if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping)) 3219 return true; 3220 3221 mapping = vma->vm_file->f_mapping; 3222 if (mapping_unevictable(mapping)) 3223 return true; 3224 3225 if 
(shmem_mapping(mapping)) 3226 return !walk->can_swap; 3227 3228 /* to exclude special mappings like dax, etc. */ 3229 return !mapping->a_ops->read_folio; 3230 } 3231 3232 /* 3233 * Some userspace memory allocators map many single-page VMAs. Instead of 3234 * returning back to the PGD table for each of such VMAs, finish an entire PMD 3235 * table to reduce zigzags and improve cache performance. 3236 */ 3237 static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args, 3238 unsigned long *vm_start, unsigned long *vm_end) 3239 { 3240 unsigned long start = round_up(*vm_end, size); 3241 unsigned long end = (start | ~mask) + 1; 3242 VMA_ITERATOR(vmi, args->mm, start); 3243 3244 VM_WARN_ON_ONCE(mask & size); 3245 VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask)); 3246 3247 for_each_vma(vmi, args->vma) { 3248 if (end && end <= args->vma->vm_start) 3249 return false; 3250 3251 if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args)) 3252 continue; 3253 3254 *vm_start = max(start, args->vma->vm_start); 3255 *vm_end = min(end - 1, args->vma->vm_end - 1) + 1; 3256 3257 return true; 3258 } 3259 3260 return false; 3261 } 3262 3263 static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr) 3264 { 3265 unsigned long pfn = pte_pfn(pte); 3266 3267 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); 3268 3269 if (!pte_present(pte) || is_zero_pfn(pfn)) 3270 return -1; 3271 3272 if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte))) 3273 return -1; 3274 3275 if (WARN_ON_ONCE(!pfn_valid(pfn))) 3276 return -1; 3277 3278 return pfn; 3279 } 3280 3281 static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr) 3282 { 3283 unsigned long pfn = pmd_pfn(pmd); 3284 3285 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); 3286 3287 if (!pmd_present(pmd) || is_huge_zero_pmd(pmd)) 3288 return -1; 3289 3290 if (WARN_ON_ONCE(pmd_devmap(pmd))) 3291 return -1; 3292 3293 if (WARN_ON_ONCE(!pfn_valid(pfn))) 3294 return -1; 3295 3296 return pfn; 3297 } 3298 3299 static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg, 3300 struct pglist_data *pgdat, bool can_swap) 3301 { 3302 struct folio *folio; 3303 3304 /* try to avoid unnecessary memory loads */ 3305 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) 3306 return NULL; 3307 3308 folio = pfn_folio(pfn); 3309 if (folio_nid(folio) != pgdat->node_id) 3310 return NULL; 3311 3312 if (folio_memcg_rcu(folio) != memcg) 3313 return NULL; 3314 3315 /* file VMAs can contain anon pages from COW */ 3316 if (!folio_is_file_lru(folio) && !can_swap) 3317 return NULL; 3318 3319 return folio; 3320 } 3321 3322 static bool suitable_to_scan(int total, int young) 3323 { 3324 int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8); 3325 3326 /* suitable if the average number of young PTEs per cacheline is >=1 */ 3327 return young * n >= total; 3328 } 3329 3330 static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end, 3331 struct mm_walk *args) 3332 { 3333 int i; 3334 pte_t *pte; 3335 spinlock_t *ptl; 3336 unsigned long addr; 3337 int total = 0; 3338 int young = 0; 3339 struct lru_gen_mm_walk *walk = args->private; 3340 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); 3341 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); 3342 DEFINE_MAX_SEQ(walk->lruvec); 3343 int old_gen, new_gen = lru_gen_from_seq(max_seq); 3344 3345 pte = pte_offset_map_nolock(args->mm, pmd, start & PMD_MASK, &ptl); 3346 if (!pte) 3347 
return false; 3348 if (!spin_trylock(ptl)) { 3349 pte_unmap(pte); 3350 return false; 3351 } 3352 3353 arch_enter_lazy_mmu_mode(); 3354 restart: 3355 for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) { 3356 unsigned long pfn; 3357 struct folio *folio; 3358 pte_t ptent = ptep_get(pte + i); 3359 3360 total++; 3361 walk->mm_stats[MM_LEAF_TOTAL]++; 3362 3363 pfn = get_pte_pfn(ptent, args->vma, addr); 3364 if (pfn == -1) 3365 continue; 3366 3367 if (!pte_young(ptent)) { 3368 walk->mm_stats[MM_LEAF_OLD]++; 3369 continue; 3370 } 3371 3372 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); 3373 if (!folio) 3374 continue; 3375 3376 if (!ptep_test_and_clear_young(args->vma, addr, pte + i)) 3377 VM_WARN_ON_ONCE(true); 3378 3379 young++; 3380 walk->mm_stats[MM_LEAF_YOUNG]++; 3381 3382 if (pte_dirty(ptent) && !folio_test_dirty(folio) && 3383 !(folio_test_anon(folio) && folio_test_swapbacked(folio) && 3384 !folio_test_swapcache(folio))) 3385 folio_mark_dirty(folio); 3386 3387 old_gen = folio_update_gen(folio, new_gen); 3388 if (old_gen >= 0 && old_gen != new_gen) 3389 update_batch_size(walk, folio, old_gen, new_gen); 3390 } 3391 3392 if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end)) 3393 goto restart; 3394 3395 arch_leave_lazy_mmu_mode(); 3396 pte_unmap_unlock(pte, ptl); 3397 3398 return suitable_to_scan(total, young); 3399 } 3400 3401 static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma, 3402 struct mm_walk *args, unsigned long *bitmap, unsigned long *first) 3403 { 3404 int i; 3405 pmd_t *pmd; 3406 spinlock_t *ptl; 3407 struct lru_gen_mm_walk *walk = args->private; 3408 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); 3409 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); 3410 DEFINE_MAX_SEQ(walk->lruvec); 3411 int old_gen, new_gen = lru_gen_from_seq(max_seq); 3412 3413 VM_WARN_ON_ONCE(pud_leaf(*pud)); 3414 3415 /* try to batch at most 1+MIN_LRU_BATCH+1 entries */ 3416 if (*first == -1) { 3417 *first = addr; 3418 bitmap_zero(bitmap, MIN_LRU_BATCH); 3419 return; 3420 } 3421 3422 i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first); 3423 if (i && i <= MIN_LRU_BATCH) { 3424 __set_bit(i - 1, bitmap); 3425 return; 3426 } 3427 3428 pmd = pmd_offset(pud, *first); 3429 3430 ptl = pmd_lockptr(args->mm, pmd); 3431 if (!spin_trylock(ptl)) 3432 goto done; 3433 3434 arch_enter_lazy_mmu_mode(); 3435 3436 do { 3437 unsigned long pfn; 3438 struct folio *folio; 3439 3440 /* don't round down the first address */ 3441 addr = i ? (*first & PMD_MASK) + i * PMD_SIZE : *first; 3442 3443 pfn = get_pmd_pfn(pmd[i], vma, addr); 3444 if (pfn == -1) 3445 goto next; 3446 3447 if (!pmd_trans_huge(pmd[i])) { 3448 if (should_clear_pmd_young()) 3449 pmdp_test_and_clear_young(vma, addr, pmd + i); 3450 goto next; 3451 } 3452 3453 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); 3454 if (!folio) 3455 goto next; 3456 3457 if (!pmdp_test_and_clear_young(vma, addr, pmd + i)) 3458 goto next; 3459 3460 walk->mm_stats[MM_LEAF_YOUNG]++; 3461 3462 if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) && 3463 !(folio_test_anon(folio) && folio_test_swapbacked(folio) && 3464 !folio_test_swapcache(folio))) 3465 folio_mark_dirty(folio); 3466 3467 old_gen = folio_update_gen(folio, new_gen); 3468 if (old_gen >= 0 && old_gen != new_gen) 3469 update_batch_size(walk, folio, old_gen, new_gen); 3470 next: 3471 i = i > MIN_LRU_BATCH ? 
0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1; 3472 } while (i <= MIN_LRU_BATCH); 3473 3474 arch_leave_lazy_mmu_mode(); 3475 spin_unlock(ptl); 3476 done: 3477 *first = -1; 3478 } 3479 3480 static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end, 3481 struct mm_walk *args) 3482 { 3483 int i; 3484 pmd_t *pmd; 3485 unsigned long next; 3486 unsigned long addr; 3487 struct vm_area_struct *vma; 3488 DECLARE_BITMAP(bitmap, MIN_LRU_BATCH); 3489 unsigned long first = -1; 3490 struct lru_gen_mm_walk *walk = args->private; 3491 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); 3492 3493 VM_WARN_ON_ONCE(pud_leaf(*pud)); 3494 3495 /* 3496 * Finish an entire PMD in two passes: the first only reaches to PTE 3497 * tables to avoid taking the PMD lock; the second, if necessary, takes 3498 * the PMD lock to clear the accessed bit in PMD entries. 3499 */ 3500 pmd = pmd_offset(pud, start & PUD_MASK); 3501 restart: 3502 /* walk_pte_range() may call get_next_vma() */ 3503 vma = args->vma; 3504 for (i = pmd_index(start), addr = start; addr != end; i++, addr = next) { 3505 pmd_t val = pmdp_get_lockless(pmd + i); 3506 3507 next = pmd_addr_end(addr, end); 3508 3509 if (!pmd_present(val) || is_huge_zero_pmd(val)) { 3510 walk->mm_stats[MM_LEAF_TOTAL]++; 3511 continue; 3512 } 3513 3514 if (pmd_trans_huge(val)) { 3515 unsigned long pfn = pmd_pfn(val); 3516 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); 3517 3518 walk->mm_stats[MM_LEAF_TOTAL]++; 3519 3520 if (!pmd_young(val)) { 3521 walk->mm_stats[MM_LEAF_OLD]++; 3522 continue; 3523 } 3524 3525 /* try to avoid unnecessary memory loads */ 3526 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) 3527 continue; 3528 3529 walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first); 3530 continue; 3531 } 3532 3533 walk->mm_stats[MM_NONLEAF_TOTAL]++; 3534 3535 if (should_clear_pmd_young()) { 3536 if (!pmd_young(val)) 3537 continue; 3538 3539 walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first); 3540 } 3541 3542 if (!walk->force_scan && !test_bloom_filter(mm_state, walk->seq, pmd + i)) 3543 continue; 3544 3545 walk->mm_stats[MM_NONLEAF_FOUND]++; 3546 3547 if (!walk_pte_range(&val, addr, next, args)) 3548 continue; 3549 3550 walk->mm_stats[MM_NONLEAF_ADDED]++; 3551 3552 /* carry over to the next generation */ 3553 update_bloom_filter(mm_state, walk->seq + 1, pmd + i); 3554 } 3555 3556 walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first); 3557 3558 if (i < PTRS_PER_PMD && get_next_vma(PUD_MASK, PMD_SIZE, args, &start, &end)) 3559 goto restart; 3560 } 3561 3562 static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end, 3563 struct mm_walk *args) 3564 { 3565 int i; 3566 pud_t *pud; 3567 unsigned long addr; 3568 unsigned long next; 3569 struct lru_gen_mm_walk *walk = args->private; 3570 3571 VM_WARN_ON_ONCE(p4d_leaf(*p4d)); 3572 3573 pud = pud_offset(p4d, start & P4D_MASK); 3574 restart: 3575 for (i = pud_index(start), addr = start; addr != end; i++, addr = next) { 3576 pud_t val = READ_ONCE(pud[i]); 3577 3578 next = pud_addr_end(addr, end); 3579 3580 if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val))) 3581 continue; 3582 3583 walk_pmd_range(&val, addr, next, args); 3584 3585 if (need_resched() || walk->batched >= MAX_LRU_BATCH) { 3586 end = (addr | ~PUD_MASK) + 1; 3587 goto done; 3588 } 3589 } 3590 3591 if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end)) 3592 goto restart; 3593 3594 end = round_up(end, P4D_SIZE); 3595 done: 3596 if (!end || !args->vma) 3597 return 
1; 3598 3599 walk->next_addr = max(end, args->vma->vm_start); 3600 3601 return -EAGAIN; 3602 } 3603 3604 static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk) 3605 { 3606 static const struct mm_walk_ops mm_walk_ops = { 3607 .test_walk = should_skip_vma, 3608 .p4d_entry = walk_pud_range, 3609 .walk_lock = PGWALK_RDLOCK, 3610 }; 3611 3612 int err; 3613 struct lruvec *lruvec = walk->lruvec; 3614 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 3615 3616 walk->next_addr = FIRST_USER_ADDRESS; 3617 3618 do { 3619 DEFINE_MAX_SEQ(lruvec); 3620 3621 err = -EBUSY; 3622 3623 /* another thread might have called inc_max_seq() */ 3624 if (walk->seq != max_seq) 3625 break; 3626 3627 /* folio_update_gen() requires stable folio_memcg() */ 3628 if (!mem_cgroup_trylock_pages(memcg)) 3629 break; 3630 3631 /* the caller might be holding the lock for write */ 3632 if (mmap_read_trylock(mm)) { 3633 err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk); 3634 3635 mmap_read_unlock(mm); 3636 } 3637 3638 mem_cgroup_unlock_pages(); 3639 3640 if (walk->batched) { 3641 spin_lock_irq(&lruvec->lru_lock); 3642 reset_batch_size(walk); 3643 spin_unlock_irq(&lruvec->lru_lock); 3644 } 3645 3646 cond_resched(); 3647 } while (err == -EAGAIN); 3648 } 3649 3650 static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat, bool force_alloc) 3651 { 3652 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; 3653 3654 if (pgdat && current_is_kswapd()) { 3655 VM_WARN_ON_ONCE(walk); 3656 3657 walk = &pgdat->mm_walk; 3658 } else if (!walk && force_alloc) { 3659 VM_WARN_ON_ONCE(current_is_kswapd()); 3660 3661 walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); 3662 } 3663 3664 current->reclaim_state->mm_walk = walk; 3665 3666 return walk; 3667 } 3668 3669 static void clear_mm_walk(void) 3670 { 3671 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; 3672 3673 VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages))); 3674 VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats))); 3675 3676 current->reclaim_state->mm_walk = NULL; 3677 3678 if (!current_is_kswapd()) 3679 kfree(walk); 3680 } 3681 3682 static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap) 3683 { 3684 int zone; 3685 int remaining = MAX_LRU_BATCH; 3686 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3687 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); 3688 3689 if (type == LRU_GEN_ANON && !can_swap) 3690 goto done; 3691 3692 /* prevent cold/hot inversion if force_scan is true */ 3693 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 3694 struct list_head *head = &lrugen->folios[old_gen][type][zone]; 3695 3696 while (!list_empty(head)) { 3697 struct folio *folio = lru_to_folio(head); 3698 3699 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); 3700 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); 3701 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); 3702 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); 3703 3704 new_gen = folio_inc_gen(lruvec, folio, false); 3705 list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]); 3706 3707 if (!--remaining) 3708 return false; 3709 } 3710 } 3711 done: 3712 reset_ctrl_pos(lruvec, type, true); 3713 WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1); 3714 3715 return true; 3716 } 3717 3718 static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap) 3719 { 3720 int gen, type, zone; 3721 bool success = false; 
3722 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3723 DEFINE_MIN_SEQ(lruvec); 3724 3725 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); 3726 3727 /* find the oldest populated generation */ 3728 for (type = !can_swap; type < ANON_AND_FILE; type++) { 3729 while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) { 3730 gen = lru_gen_from_seq(min_seq[type]); 3731 3732 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 3733 if (!list_empty(&lrugen->folios[gen][type][zone])) 3734 goto next; 3735 } 3736 3737 min_seq[type]++; 3738 } 3739 next: 3740 ; 3741 } 3742 3743 /* see the comment on lru_gen_folio */ 3744 if (can_swap) { 3745 min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]); 3746 min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]); 3747 } 3748 3749 for (type = !can_swap; type < ANON_AND_FILE; type++) { 3750 if (min_seq[type] == lrugen->min_seq[type]) 3751 continue; 3752 3753 reset_ctrl_pos(lruvec, type, true); 3754 WRITE_ONCE(lrugen->min_seq[type], min_seq[type]); 3755 success = true; 3756 } 3757 3758 return success; 3759 } 3760 3761 static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq, 3762 bool can_swap, bool force_scan) 3763 { 3764 bool success; 3765 int prev, next; 3766 int type, zone; 3767 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3768 restart: 3769 if (seq < READ_ONCE(lrugen->max_seq)) 3770 return false; 3771 3772 spin_lock_irq(&lruvec->lru_lock); 3773 3774 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); 3775 3776 success = seq == lrugen->max_seq; 3777 if (!success) 3778 goto unlock; 3779 3780 for (type = ANON_AND_FILE - 1; type >= 0; type--) { 3781 if (get_nr_gens(lruvec, type) != MAX_NR_GENS) 3782 continue; 3783 3784 VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap)); 3785 3786 if (inc_min_seq(lruvec, type, can_swap)) 3787 continue; 3788 3789 spin_unlock_irq(&lruvec->lru_lock); 3790 cond_resched(); 3791 goto restart; 3792 } 3793 3794 /* 3795 * Update the active/inactive LRU sizes for compatibility. Both sides of 3796 * the current max_seq need to be covered, since max_seq+1 can overlap 3797 * with min_seq[LRU_GEN_ANON] if swapping is constrained. And if they do 3798 * overlap, cold/hot inversion happens. 
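 *
 * In other words, assuming lru_gen_is_active() treats the two youngest
 * generations as "active": after the increment, folios in "prev"
 * (max_seq-1) stop counting as active, while folios already sitting in the
 * "next" slot (only possible on the overlap above) start counting as
 * active. The delta below therefore moves nr_pages[prev] - nr_pages[next]
 * from the active to the inactive sizes.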
3799 */ 3800 prev = lru_gen_from_seq(lrugen->max_seq - 1); 3801 next = lru_gen_from_seq(lrugen->max_seq + 1); 3802 3803 for (type = 0; type < ANON_AND_FILE; type++) { 3804 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 3805 enum lru_list lru = type * LRU_INACTIVE_FILE; 3806 long delta = lrugen->nr_pages[prev][type][zone] - 3807 lrugen->nr_pages[next][type][zone]; 3808 3809 if (!delta) 3810 continue; 3811 3812 __update_lru_size(lruvec, lru, zone, delta); 3813 __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta); 3814 } 3815 } 3816 3817 for (type = 0; type < ANON_AND_FILE; type++) 3818 reset_ctrl_pos(lruvec, type, false); 3819 3820 WRITE_ONCE(lrugen->timestamps[next], jiffies); 3821 /* make sure preceding modifications appear */ 3822 smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1); 3823 unlock: 3824 spin_unlock_irq(&lruvec->lru_lock); 3825 3826 return success; 3827 } 3828 3829 static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq, 3830 bool can_swap, bool force_scan) 3831 { 3832 bool success; 3833 struct lru_gen_mm_walk *walk; 3834 struct mm_struct *mm = NULL; 3835 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3836 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 3837 3838 VM_WARN_ON_ONCE(seq > READ_ONCE(lrugen->max_seq)); 3839 3840 if (!mm_state) 3841 return inc_max_seq(lruvec, seq, can_swap, force_scan); 3842 3843 /* see the comment in iterate_mm_list() */ 3844 if (seq <= READ_ONCE(mm_state->seq)) 3845 return false; 3846 3847 /* 3848 * If the hardware doesn't automatically set the accessed bit, fallback 3849 * to lru_gen_look_around(), which only clears the accessed bit in a 3850 * handful of PTEs. Spreading the work out over a period of time usually 3851 * is less efficient, but it avoids bursty page faults. 3852 */ 3853 if (!should_walk_mmu()) { 3854 success = iterate_mm_list_nowalk(lruvec, seq); 3855 goto done; 3856 } 3857 3858 walk = set_mm_walk(NULL, true); 3859 if (!walk) { 3860 success = iterate_mm_list_nowalk(lruvec, seq); 3861 goto done; 3862 } 3863 3864 walk->lruvec = lruvec; 3865 walk->seq = seq; 3866 walk->can_swap = can_swap; 3867 walk->force_scan = force_scan; 3868 3869 do { 3870 success = iterate_mm_list(walk, &mm); 3871 if (mm) 3872 walk_mm(mm, walk); 3873 } while (mm); 3874 done: 3875 if (success) { 3876 success = inc_max_seq(lruvec, seq, can_swap, force_scan); 3877 WARN_ON_ONCE(!success); 3878 } 3879 3880 return success; 3881 } 3882 3883 /****************************************************************************** 3884 * working set protection 3885 ******************************************************************************/ 3886 3887 static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc) 3888 { 3889 int gen, type, zone; 3890 unsigned long total = 0; 3891 bool can_swap = get_swappiness(lruvec, sc); 3892 struct lru_gen_folio *lrugen = &lruvec->lrugen; 3893 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 3894 DEFINE_MAX_SEQ(lruvec); 3895 DEFINE_MIN_SEQ(lruvec); 3896 3897 for (type = !can_swap; type < ANON_AND_FILE; type++) { 3898 unsigned long seq; 3899 3900 for (seq = min_seq[type]; seq <= max_seq; seq++) { 3901 gen = lru_gen_from_seq(seq); 3902 3903 for (zone = 0; zone < MAX_NR_ZONES; zone++) 3904 total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); 3905 } 3906 } 3907 3908 /* whether the size is big enough to be helpful */ 3909 return mem_cgroup_online(memcg) ? 
(total >> sc->priority) : total; 3910 } 3911 3912 static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc, 3913 unsigned long min_ttl) 3914 { 3915 int gen; 3916 unsigned long birth; 3917 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 3918 DEFINE_MIN_SEQ(lruvec); 3919 3920 /* see the comment on lru_gen_folio */ 3921 gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]); 3922 birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); 3923 3924 if (time_is_after_jiffies(birth + min_ttl)) 3925 return false; 3926 3927 if (!lruvec_is_sizable(lruvec, sc)) 3928 return false; 3929 3930 mem_cgroup_calculate_protection(NULL, memcg); 3931 3932 return !mem_cgroup_below_min(NULL, memcg); 3933 } 3934 3935 /* to protect the working set of the last N jiffies */ 3936 static unsigned long lru_gen_min_ttl __read_mostly; 3937 3938 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) 3939 { 3940 struct mem_cgroup *memcg; 3941 unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl); 3942 3943 VM_WARN_ON_ONCE(!current_is_kswapd()); 3944 3945 /* check the order to exclude compaction-induced reclaim */ 3946 if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY) 3947 return; 3948 3949 memcg = mem_cgroup_iter(NULL, NULL, NULL); 3950 do { 3951 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); 3952 3953 if (lruvec_is_reclaimable(lruvec, sc, min_ttl)) { 3954 mem_cgroup_iter_break(NULL, memcg); 3955 return; 3956 } 3957 3958 cond_resched(); 3959 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); 3960 3961 /* 3962 * The main goal is to OOM kill if every generation from all memcgs is 3963 * younger than min_ttl. However, another possibility is all memcgs are 3964 * either too small or below min. 3965 */ 3966 if (mutex_trylock(&oom_lock)) { 3967 struct oom_control oc = { 3968 .gfp_mask = sc->gfp_mask, 3969 }; 3970 3971 out_of_memory(&oc); 3972 3973 mutex_unlock(&oom_lock); 3974 } 3975 } 3976 3977 /****************************************************************************** 3978 * rmap/PT walk feedback 3979 ******************************************************************************/ 3980 3981 /* 3982 * This function exploits spatial locality when shrink_folio_list() walks the 3983 * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. If 3984 * the scan was done cacheline efficiently, it adds the PMD entry pointing to 3985 * the PTE table to the Bloom filter. This forms a feedback loop between the 3986 * eviction and the aging. 
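 *
 * In short, as a rough sketch of the body below: up to MIN_LRU_BATCH PTEs
 * around the young PTE, within the same PTE table, are checked; their
 * accessed bits are cleared and the mapped folios are promoted to the
 * max_seq generation (or activated via folio_activate() when no mm_walk
 * state is available). If enough of them were young, per
 * suitable_to_scan(), the PMD entry is remembered in the Bloom filter so
 * that the next aging walk rescans this PTE table.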
3987 */ 3988 void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) 3989 { 3990 int i; 3991 unsigned long start; 3992 unsigned long end; 3993 struct lru_gen_mm_walk *walk; 3994 int young = 0; 3995 pte_t *pte = pvmw->pte; 3996 unsigned long addr = pvmw->address; 3997 struct vm_area_struct *vma = pvmw->vma; 3998 struct folio *folio = pfn_folio(pvmw->pfn); 3999 bool can_swap = !folio_is_file_lru(folio); 4000 struct mem_cgroup *memcg = folio_memcg(folio); 4001 struct pglist_data *pgdat = folio_pgdat(folio); 4002 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); 4003 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 4004 DEFINE_MAX_SEQ(lruvec); 4005 int old_gen, new_gen = lru_gen_from_seq(max_seq); 4006 4007 lockdep_assert_held(pvmw->ptl); 4008 VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio); 4009 4010 if (spin_is_contended(pvmw->ptl)) 4011 return; 4012 4013 /* exclude special VMAs containing anon pages from COW */ 4014 if (vma->vm_flags & VM_SPECIAL) 4015 return; 4016 4017 /* avoid taking the LRU lock under the PTL when possible */ 4018 walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL; 4019 4020 start = max(addr & PMD_MASK, vma->vm_start); 4021 end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1; 4022 4023 if (end - start > MIN_LRU_BATCH * PAGE_SIZE) { 4024 if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2) 4025 end = start + MIN_LRU_BATCH * PAGE_SIZE; 4026 else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2) 4027 start = end - MIN_LRU_BATCH * PAGE_SIZE; 4028 else { 4029 start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2; 4030 end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2; 4031 } 4032 } 4033 4034 /* folio_update_gen() requires stable folio_memcg() */ 4035 if (!mem_cgroup_trylock_pages(memcg)) 4036 return; 4037 4038 arch_enter_lazy_mmu_mode(); 4039 4040 pte -= (addr - start) / PAGE_SIZE; 4041 4042 for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) { 4043 unsigned long pfn; 4044 pte_t ptent = ptep_get(pte + i); 4045 4046 pfn = get_pte_pfn(ptent, vma, addr); 4047 if (pfn == -1) 4048 continue; 4049 4050 if (!pte_young(ptent)) 4051 continue; 4052 4053 folio = get_pfn_folio(pfn, memcg, pgdat, can_swap); 4054 if (!folio) 4055 continue; 4056 4057 if (!ptep_test_and_clear_young(vma, addr, pte + i)) 4058 VM_WARN_ON_ONCE(true); 4059 4060 young++; 4061 4062 if (pte_dirty(ptent) && !folio_test_dirty(folio) && 4063 !(folio_test_anon(folio) && folio_test_swapbacked(folio) && 4064 !folio_test_swapcache(folio))) 4065 folio_mark_dirty(folio); 4066 4067 if (walk) { 4068 old_gen = folio_update_gen(folio, new_gen); 4069 if (old_gen >= 0 && old_gen != new_gen) 4070 update_batch_size(walk, folio, old_gen, new_gen); 4071 4072 continue; 4073 } 4074 4075 old_gen = folio_lru_gen(folio); 4076 if (old_gen < 0) 4077 folio_set_referenced(folio); 4078 else if (old_gen != new_gen) 4079 folio_activate(folio); 4080 } 4081 4082 arch_leave_lazy_mmu_mode(); 4083 mem_cgroup_unlock_pages(); 4084 4085 /* feedback from rmap walkers to page table walkers */ 4086 if (mm_state && suitable_to_scan(i, young)) 4087 update_bloom_filter(mm_state, max_seq, pvmw->pmd); 4088 } 4089 4090 /****************************************************************************** 4091 * memcg LRU 4092 ******************************************************************************/ 4093 4094 /* see the comment on MEMCG_NR_GENS */ 4095 enum { 4096 MEMCG_LRU_NOP, 4097 MEMCG_LRU_HEAD, 4098 MEMCG_LRU_TAIL, 4099 MEMCG_LRU_OLD, 4100 MEMCG_LRU_YOUNG, 4101 }; 4102 4103 static void lru_gen_rotate_memcg(struct lruvec *lruvec, 
int op) 4104 { 4105 int seg; 4106 int old, new; 4107 unsigned long flags; 4108 int bin = get_random_u32_below(MEMCG_NR_BINS); 4109 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 4110 4111 spin_lock_irqsave(&pgdat->memcg_lru.lock, flags); 4112 4113 VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); 4114 4115 seg = 0; 4116 new = old = lruvec->lrugen.gen; 4117 4118 /* see the comment on MEMCG_NR_GENS */ 4119 if (op == MEMCG_LRU_HEAD) 4120 seg = MEMCG_LRU_HEAD; 4121 else if (op == MEMCG_LRU_TAIL) 4122 seg = MEMCG_LRU_TAIL; 4123 else if (op == MEMCG_LRU_OLD) 4124 new = get_memcg_gen(pgdat->memcg_lru.seq); 4125 else if (op == MEMCG_LRU_YOUNG) 4126 new = get_memcg_gen(pgdat->memcg_lru.seq + 1); 4127 else 4128 VM_WARN_ON_ONCE(true); 4129 4130 WRITE_ONCE(lruvec->lrugen.seg, seg); 4131 WRITE_ONCE(lruvec->lrugen.gen, new); 4132 4133 hlist_nulls_del_rcu(&lruvec->lrugen.list); 4134 4135 if (op == MEMCG_LRU_HEAD || op == MEMCG_LRU_OLD) 4136 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); 4137 else 4138 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); 4139 4140 pgdat->memcg_lru.nr_memcgs[old]--; 4141 pgdat->memcg_lru.nr_memcgs[new]++; 4142 4143 if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq)) 4144 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); 4145 4146 spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags); 4147 } 4148 4149 #ifdef CONFIG_MEMCG 4150 4151 void lru_gen_online_memcg(struct mem_cgroup *memcg) 4152 { 4153 int gen; 4154 int nid; 4155 int bin = get_random_u32_below(MEMCG_NR_BINS); 4156 4157 for_each_node(nid) { 4158 struct pglist_data *pgdat = NODE_DATA(nid); 4159 struct lruvec *lruvec = get_lruvec(memcg, nid); 4160 4161 spin_lock_irq(&pgdat->memcg_lru.lock); 4162 4163 VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list)); 4164 4165 gen = get_memcg_gen(pgdat->memcg_lru.seq); 4166 4167 lruvec->lrugen.gen = gen; 4168 4169 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]); 4170 pgdat->memcg_lru.nr_memcgs[gen]++; 4171 4172 spin_unlock_irq(&pgdat->memcg_lru.lock); 4173 } 4174 } 4175 4176 void lru_gen_offline_memcg(struct mem_cgroup *memcg) 4177 { 4178 int nid; 4179 4180 for_each_node(nid) { 4181 struct lruvec *lruvec = get_lruvec(memcg, nid); 4182 4183 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_OLD); 4184 } 4185 } 4186 4187 void lru_gen_release_memcg(struct mem_cgroup *memcg) 4188 { 4189 int gen; 4190 int nid; 4191 4192 for_each_node(nid) { 4193 struct pglist_data *pgdat = NODE_DATA(nid); 4194 struct lruvec *lruvec = get_lruvec(memcg, nid); 4195 4196 spin_lock_irq(&pgdat->memcg_lru.lock); 4197 4198 if (hlist_nulls_unhashed(&lruvec->lrugen.list)) 4199 goto unlock; 4200 4201 gen = lruvec->lrugen.gen; 4202 4203 hlist_nulls_del_init_rcu(&lruvec->lrugen.list); 4204 pgdat->memcg_lru.nr_memcgs[gen]--; 4205 4206 if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq)) 4207 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); 4208 unlock: 4209 spin_unlock_irq(&pgdat->memcg_lru.lock); 4210 } 4211 } 4212 4213 void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid) 4214 { 4215 struct lruvec *lruvec = get_lruvec(memcg, nid); 4216 4217 /* see the comment on MEMCG_NR_GENS */ 4218 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_HEAD) 4219 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD); 4220 } 4221 4222 #endif /* CONFIG_MEMCG */ 4223 4224 /****************************************************************************** 4225 
* the eviction 4226 ******************************************************************************/ 4227 4228 static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc, 4229 int tier_idx) 4230 { 4231 bool success; 4232 int gen = folio_lru_gen(folio); 4233 int type = folio_is_file_lru(folio); 4234 int zone = folio_zonenum(folio); 4235 int delta = folio_nr_pages(folio); 4236 int refs = folio_lru_refs(folio); 4237 int tier = lru_tier_from_refs(refs); 4238 struct lru_gen_folio *lrugen = &lruvec->lrugen; 4239 4240 VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio); 4241 4242 /* unevictable */ 4243 if (!folio_evictable(folio)) { 4244 success = lru_gen_del_folio(lruvec, folio, true); 4245 VM_WARN_ON_ONCE_FOLIO(!success, folio); 4246 folio_set_unevictable(folio); 4247 lruvec_add_folio(lruvec, folio); 4248 __count_vm_events(UNEVICTABLE_PGCULLED, delta); 4249 return true; 4250 } 4251 4252 /* dirty lazyfree */ 4253 if (type == LRU_GEN_FILE && folio_test_anon(folio) && folio_test_dirty(folio)) { 4254 success = lru_gen_del_folio(lruvec, folio, true); 4255 VM_WARN_ON_ONCE_FOLIO(!success, folio); 4256 folio_set_swapbacked(folio); 4257 lruvec_add_folio_tail(lruvec, folio); 4258 return true; 4259 } 4260 4261 /* promoted */ 4262 if (gen != lru_gen_from_seq(lrugen->min_seq[type])) { 4263 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); 4264 return true; 4265 } 4266 4267 /* protected */ 4268 if (tier > tier_idx || refs == BIT(LRU_REFS_WIDTH)) { 4269 int hist = lru_hist_from_seq(lrugen->min_seq[type]); 4270 4271 gen = folio_inc_gen(lruvec, folio, false); 4272 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); 4273 4274 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 4275 lrugen->protected[hist][type][tier - 1] + delta); 4276 return true; 4277 } 4278 4279 /* ineligible */ 4280 if (zone > sc->reclaim_idx || skip_cma(folio, sc)) { 4281 gen = folio_inc_gen(lruvec, folio, false); 4282 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); 4283 return true; 4284 } 4285 4286 /* waiting for writeback */ 4287 if (folio_test_locked(folio) || folio_test_writeback(folio) || 4288 (type == LRU_GEN_FILE && folio_test_dirty(folio))) { 4289 gen = folio_inc_gen(lruvec, folio, true); 4290 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); 4291 return true; 4292 } 4293 4294 return false; 4295 } 4296 4297 static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc) 4298 { 4299 bool success; 4300 4301 /* swap constrained */ 4302 if (!(sc->gfp_mask & __GFP_IO) && 4303 (folio_test_dirty(folio) || 4304 (folio_test_anon(folio) && !folio_test_swapcache(folio)))) 4305 return false; 4306 4307 /* raced with release_pages() */ 4308 if (!folio_try_get(folio)) 4309 return false; 4310 4311 /* raced with another isolation */ 4312 if (!folio_test_clear_lru(folio)) { 4313 folio_put(folio); 4314 return false; 4315 } 4316 4317 /* see the comment on MAX_NR_TIERS */ 4318 if (!folio_test_referenced(folio)) 4319 set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0); 4320 4321 /* for shrink_folio_list() */ 4322 folio_clear_reclaim(folio); 4323 folio_clear_referenced(folio); 4324 4325 success = lru_gen_del_folio(lruvec, folio, true); 4326 VM_WARN_ON_ONCE_FOLIO(!success, folio); 4327 4328 return true; 4329 } 4330 4331 static int scan_folios(struct lruvec *lruvec, struct scan_control *sc, 4332 int type, int tier, struct list_head *list) 4333 { 4334 int i; 4335 int gen; 4336 enum vm_event_item item; 4337 int sorted = 0; 4338 int scanned = 0; 
4339 int isolated = 0; 4340 int skipped = 0; 4341 int remaining = MAX_LRU_BATCH; 4342 struct lru_gen_folio *lrugen = &lruvec->lrugen; 4343 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 4344 4345 VM_WARN_ON_ONCE(!list_empty(list)); 4346 4347 if (get_nr_gens(lruvec, type) == MIN_NR_GENS) 4348 return 0; 4349 4350 gen = lru_gen_from_seq(lrugen->min_seq[type]); 4351 4352 for (i = MAX_NR_ZONES; i > 0; i--) { 4353 LIST_HEAD(moved); 4354 int skipped_zone = 0; 4355 int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES; 4356 struct list_head *head = &lrugen->folios[gen][type][zone]; 4357 4358 while (!list_empty(head)) { 4359 struct folio *folio = lru_to_folio(head); 4360 int delta = folio_nr_pages(folio); 4361 4362 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); 4363 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); 4364 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); 4365 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); 4366 4367 scanned += delta; 4368 4369 if (sort_folio(lruvec, folio, sc, tier)) 4370 sorted += delta; 4371 else if (isolate_folio(lruvec, folio, sc)) { 4372 list_add(&folio->lru, list); 4373 isolated += delta; 4374 } else { 4375 list_move(&folio->lru, &moved); 4376 skipped_zone += delta; 4377 } 4378 4379 if (!--remaining || max(isolated, skipped_zone) >= MIN_LRU_BATCH) 4380 break; 4381 } 4382 4383 if (skipped_zone) { 4384 list_splice(&moved, head); 4385 __count_zid_vm_events(PGSCAN_SKIP, zone, skipped_zone); 4386 skipped += skipped_zone; 4387 } 4388 4389 if (!remaining || isolated >= MIN_LRU_BATCH) 4390 break; 4391 } 4392 4393 item = PGSCAN_KSWAPD + reclaimer_offset(); 4394 if (!cgroup_reclaim(sc)) { 4395 __count_vm_events(item, isolated); 4396 __count_vm_events(PGREFILL, sorted); 4397 } 4398 __count_memcg_events(memcg, item, isolated); 4399 __count_memcg_events(memcg, PGREFILL, sorted); 4400 __count_vm_events(PGSCAN_ANON + type, isolated); 4401 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, MAX_LRU_BATCH, 4402 scanned, skipped, isolated, 4403 type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON); 4404 4405 /* 4406 * There might not be eligible folios due to reclaim_idx. Check the 4407 * remaining to prevent livelock if it's not making progress. 4408 */ 4409 return isolated || !remaining ? scanned : 0; 4410 } 4411 4412 static int get_tier_idx(struct lruvec *lruvec, int type) 4413 { 4414 int tier; 4415 struct ctrl_pos sp, pv; 4416 4417 /* 4418 * To leave a margin for fluctuations, use a larger gain factor (1:2). 4419 * This value is chosen because any other tier would have at least twice 4420 * as many refaults as the first tier. 4421 */ 4422 read_ctrl_pos(lruvec, type, 0, 1, &sp); 4423 for (tier = 1; tier < MAX_NR_TIERS; tier++) { 4424 read_ctrl_pos(lruvec, type, tier, 2, &pv); 4425 if (!positive_ctrl_err(&sp, &pv)) 4426 break; 4427 } 4428 4429 return tier - 1; 4430 } 4431 4432 static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx) 4433 { 4434 int type, tier; 4435 struct ctrl_pos sp, pv; 4436 int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness }; 4437 4438 /* 4439 * Compare the first tier of anon with that of file to determine which 4440 * type to scan. Also need to compare other tiers of the selected type 4441 * with the first tier of the other type to determine the last tier (of 4442 * the selected type) to evict. 
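 *
 * Illustrative example (numbers are hypothetical): with the default
 * swappiness of 60, the gains are 60 for anon and 140 for file, so file is
 * picked for eviction unless its first-tier refault rate is roughly more
 * than 140/60 (~2.3x) that of anon, per positive_ctrl_err() and ignoring
 * its smoothing terms.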
4443 */ 4444 read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp); 4445 read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv); 4446 type = positive_ctrl_err(&sp, &pv); 4447 4448 read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp); 4449 for (tier = 1; tier < MAX_NR_TIERS; tier++) { 4450 read_ctrl_pos(lruvec, type, tier, gain[type], &pv); 4451 if (!positive_ctrl_err(&sp, &pv)) 4452 break; 4453 } 4454 4455 *tier_idx = tier - 1; 4456 4457 return type; 4458 } 4459 4460 static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness, 4461 int *type_scanned, struct list_head *list) 4462 { 4463 int i; 4464 int type; 4465 int scanned; 4466 int tier = -1; 4467 DEFINE_MIN_SEQ(lruvec); 4468 4469 /* 4470 * Try to make the obvious choice first, and if anon and file are both 4471 * available from the same generation, 4472 * 1. Interpret swappiness 1 as file first and MAX_SWAPPINESS as anon 4473 * first. 4474 * 2. If !__GFP_IO, file first since clean pagecache is more likely to 4475 * exist than clean swapcache. 4476 */ 4477 if (!swappiness) 4478 type = LRU_GEN_FILE; 4479 else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE]) 4480 type = LRU_GEN_ANON; 4481 else if (swappiness == 1) 4482 type = LRU_GEN_FILE; 4483 else if (swappiness == 200) 4484 type = LRU_GEN_ANON; 4485 else if (!(sc->gfp_mask & __GFP_IO)) 4486 type = LRU_GEN_FILE; 4487 else 4488 type = get_type_to_scan(lruvec, swappiness, &tier); 4489 4490 for (i = !swappiness; i < ANON_AND_FILE; i++) { 4491 if (tier < 0) 4492 tier = get_tier_idx(lruvec, type); 4493 4494 scanned = scan_folios(lruvec, sc, type, tier, list); 4495 if (scanned) 4496 break; 4497 4498 type = !type; 4499 tier = -1; 4500 } 4501 4502 *type_scanned = type; 4503 4504 return scanned; 4505 } 4506 4507 static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness) 4508 { 4509 int type; 4510 int scanned; 4511 int reclaimed; 4512 LIST_HEAD(list); 4513 LIST_HEAD(clean); 4514 struct folio *folio; 4515 struct folio *next; 4516 enum vm_event_item item; 4517 struct reclaim_stat stat; 4518 struct lru_gen_mm_walk *walk; 4519 bool skip_retry = false; 4520 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 4521 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 4522 4523 spin_lock_irq(&lruvec->lru_lock); 4524 4525 scanned = isolate_folios(lruvec, sc, swappiness, &type, &list); 4526 4527 scanned += try_to_inc_min_seq(lruvec, swappiness); 4528 4529 if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS) 4530 scanned = 0; 4531 4532 spin_unlock_irq(&lruvec->lru_lock); 4533 4534 if (list_empty(&list)) 4535 return scanned; 4536 retry: 4537 reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false); 4538 sc->nr_reclaimed += reclaimed; 4539 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, 4540 scanned, reclaimed, &stat, sc->priority, 4541 type ? 
LRU_INACTIVE_FILE : LRU_INACTIVE_ANON); 4542 4543 list_for_each_entry_safe_reverse(folio, next, &list, lru) { 4544 if (!folio_evictable(folio)) { 4545 list_del(&folio->lru); 4546 folio_putback_lru(folio); 4547 continue; 4548 } 4549 4550 if (folio_test_reclaim(folio) && 4551 (folio_test_dirty(folio) || folio_test_writeback(folio))) { 4552 /* restore LRU_REFS_FLAGS cleared by isolate_folio() */ 4553 if (folio_test_workingset(folio)) 4554 folio_set_referenced(folio); 4555 continue; 4556 } 4557 4558 if (skip_retry || folio_test_active(folio) || folio_test_referenced(folio) || 4559 folio_mapped(folio) || folio_test_locked(folio) || 4560 folio_test_dirty(folio) || folio_test_writeback(folio)) { 4561 /* don't add rejected folios to the oldest generation */ 4562 set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 4563 BIT(PG_active)); 4564 continue; 4565 } 4566 4567 /* retry folios that may have missed folio_rotate_reclaimable() */ 4568 list_move(&folio->lru, &clean); 4569 sc->nr_scanned -= folio_nr_pages(folio); 4570 } 4571 4572 spin_lock_irq(&lruvec->lru_lock); 4573 4574 move_folios_to_lru(lruvec, &list); 4575 4576 walk = current->reclaim_state->mm_walk; 4577 if (walk && walk->batched) { 4578 walk->lruvec = lruvec; 4579 reset_batch_size(walk); 4580 } 4581 4582 item = PGSTEAL_KSWAPD + reclaimer_offset(); 4583 if (!cgroup_reclaim(sc)) 4584 __count_vm_events(item, reclaimed); 4585 __count_memcg_events(memcg, item, reclaimed); 4586 __count_vm_events(PGSTEAL_ANON + type, reclaimed); 4587 4588 spin_unlock_irq(&lruvec->lru_lock); 4589 4590 list_splice_init(&clean, &list); 4591 4592 if (!list_empty(&list)) { 4593 skip_retry = true; 4594 goto retry; 4595 } 4596 4597 return scanned; 4598 } 4599 4600 static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, 4601 bool can_swap, unsigned long *nr_to_scan) 4602 { 4603 int gen, type, zone; 4604 unsigned long old = 0; 4605 unsigned long young = 0; 4606 unsigned long total = 0; 4607 struct lru_gen_folio *lrugen = &lruvec->lrugen; 4608 DEFINE_MIN_SEQ(lruvec); 4609 4610 /* whether this lruvec is completely out of cold folios */ 4611 if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) { 4612 *nr_to_scan = 0; 4613 return true; 4614 } 4615 4616 for (type = !can_swap; type < ANON_AND_FILE; type++) { 4617 unsigned long seq; 4618 4619 for (seq = min_seq[type]; seq <= max_seq; seq++) { 4620 unsigned long size = 0; 4621 4622 gen = lru_gen_from_seq(seq); 4623 4624 for (zone = 0; zone < MAX_NR_ZONES; zone++) 4625 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); 4626 4627 total += size; 4628 if (seq == max_seq) 4629 young += size; 4630 else if (seq + MIN_NR_GENS == max_seq) 4631 old += size; 4632 } 4633 } 4634 4635 *nr_to_scan = total; 4636 4637 /* 4638 * The aging tries to be lazy to reduce the overhead, while the eviction 4639 * stalls when the number of generations reaches MIN_NR_GENS. Hence, the 4640 * ideal number of generations is MIN_NR_GENS+1. 4641 */ 4642 if (min_seq[!can_swap] + MIN_NR_GENS < max_seq) 4643 return false; 4644 4645 /* 4646 * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1) 4647 * of the total number of pages for each generation. A reasonable range 4648 * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The 4649 * aging cares about the upper bound of hot pages, while the eviction 4650 * cares about the lower bound of cold pages. 
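 *
 * For instance, assuming MIN_NR_GENS == 2, the checks below request aging
 * when the max_seq generation holds more than 1/2 of the pages ("young"),
 * or when the generation MIN_NR_GENS behind max_seq holds less than 1/4
 * ("old").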
4651 */ 4652 if (young * MIN_NR_GENS > total) 4653 return true; 4654 if (old * (MIN_NR_GENS + 2) < total) 4655 return true; 4656 4657 return false; 4658 } 4659 4660 /* 4661 * For future optimizations: 4662 * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg 4663 * reclaim. 4664 */ 4665 static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool can_swap) 4666 { 4667 bool success; 4668 unsigned long nr_to_scan; 4669 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 4670 DEFINE_MAX_SEQ(lruvec); 4671 4672 if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg)) 4673 return -1; 4674 4675 success = should_run_aging(lruvec, max_seq, can_swap, &nr_to_scan); 4676 4677 /* try to scrape all its memory if this memcg was deleted */ 4678 if (nr_to_scan && !mem_cgroup_online(memcg)) 4679 return nr_to_scan; 4680 4681 /* try to get away with not aging at the default priority */ 4682 if (!success || sc->priority == DEF_PRIORITY) 4683 return nr_to_scan >> sc->priority; 4684 4685 /* stop scanning this lruvec as it's low on cold folios */ 4686 return try_to_inc_max_seq(lruvec, max_seq, can_swap, false) ? -1 : 0; 4687 } 4688 4689 static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc) 4690 { 4691 int i; 4692 enum zone_watermarks mark; 4693 4694 /* don't abort memcg reclaim to ensure fairness */ 4695 if (!root_reclaim(sc)) 4696 return false; 4697 4698 if (sc->nr_reclaimed >= max(sc->nr_to_reclaim, compact_gap(sc->order))) 4699 return true; 4700 4701 /* check the order to exclude compaction-induced reclaim */ 4702 if (!current_is_kswapd() || sc->order) 4703 return false; 4704 4705 mark = sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING ? 4706 WMARK_PROMO : WMARK_HIGH; 4707 4708 for (i = 0; i <= sc->reclaim_idx; i++) { 4709 struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i; 4710 unsigned long size = wmark_pages(zone, mark) + MIN_LRU_BATCH; 4711 4712 if (managed_zone(zone) && !zone_watermark_ok(zone, 0, size, sc->reclaim_idx, 0)) 4713 return false; 4714 } 4715 4716 /* kswapd should abort if all eligible zones are safe */ 4717 return true; 4718 } 4719 4720 static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) 4721 { 4722 long nr_to_scan; 4723 unsigned long scanned = 0; 4724 int swappiness = get_swappiness(lruvec, sc); 4725 4726 while (true) { 4727 int delta; 4728 4729 nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness); 4730 if (nr_to_scan <= 0) 4731 break; 4732 4733 delta = evict_folios(lruvec, sc, swappiness); 4734 if (!delta) 4735 break; 4736 4737 scanned += delta; 4738 if (scanned >= nr_to_scan) 4739 break; 4740 4741 if (should_abort_scan(lruvec, sc)) 4742 break; 4743 4744 cond_resched(); 4745 } 4746 4747 /* whether this lruvec should be rotated */ 4748 return nr_to_scan < 0; 4749 } 4750 4751 static int shrink_one(struct lruvec *lruvec, struct scan_control *sc) 4752 { 4753 bool success; 4754 unsigned long scanned = sc->nr_scanned; 4755 unsigned long reclaimed = sc->nr_reclaimed; 4756 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 4757 struct pglist_data *pgdat = lruvec_pgdat(lruvec); 4758 4759 mem_cgroup_calculate_protection(NULL, memcg); 4760 4761 if (mem_cgroup_below_min(NULL, memcg)) 4762 return MEMCG_LRU_YOUNG; 4763 4764 if (mem_cgroup_below_low(NULL, memcg)) { 4765 /* see the comment on MEMCG_NR_GENS */ 4766 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL) 4767 return MEMCG_LRU_TAIL; 4768 4769 memcg_memory_event(memcg, MEMCG_LOW); 4770 } 4771 4772 success = try_to_shrink_lruvec(lruvec, sc); 4773 
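/* also walk the shrinkers, e.g. the slab caches, for this memcg and node */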
4774 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority); 4775 4776 if (!sc->proactive) 4777 vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned, 4778 sc->nr_reclaimed - reclaimed); 4779 4780 flush_reclaim_state(sc); 4781 4782 if (success && mem_cgroup_online(memcg)) 4783 return MEMCG_LRU_YOUNG; 4784 4785 if (!success && lruvec_is_sizable(lruvec, sc)) 4786 return 0; 4787 4788 /* one retry if offlined or too small */ 4789 return READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL ? 4790 MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG; 4791 } 4792 4793 static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc) 4794 { 4795 int op; 4796 int gen; 4797 int bin; 4798 int first_bin; 4799 struct lruvec *lruvec; 4800 struct lru_gen_folio *lrugen; 4801 struct mem_cgroup *memcg; 4802 struct hlist_nulls_node *pos; 4803 4804 gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq)); 4805 bin = first_bin = get_random_u32_below(MEMCG_NR_BINS); 4806 restart: 4807 op = 0; 4808 memcg = NULL; 4809 4810 rcu_read_lock(); 4811 4812 hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) { 4813 if (op) { 4814 lru_gen_rotate_memcg(lruvec, op); 4815 op = 0; 4816 } 4817 4818 mem_cgroup_put(memcg); 4819 memcg = NULL; 4820 4821 if (gen != READ_ONCE(lrugen->gen)) 4822 continue; 4823 4824 lruvec = container_of(lrugen, struct lruvec, lrugen); 4825 memcg = lruvec_memcg(lruvec); 4826 4827 if (!mem_cgroup_tryget(memcg)) { 4828 lru_gen_release_memcg(memcg); 4829 memcg = NULL; 4830 continue; 4831 } 4832 4833 rcu_read_unlock(); 4834 4835 op = shrink_one(lruvec, sc); 4836 4837 rcu_read_lock(); 4838 4839 if (should_abort_scan(lruvec, sc)) 4840 break; 4841 } 4842 4843 rcu_read_unlock(); 4844 4845 if (op) 4846 lru_gen_rotate_memcg(lruvec, op); 4847 4848 mem_cgroup_put(memcg); 4849 4850 if (!is_a_nulls(pos)) 4851 return; 4852 4853 /* restart if raced with lru_gen_rotate_memcg() */ 4854 if (gen != get_nulls_value(pos)) 4855 goto restart; 4856 4857 /* try the rest of the bins of the current generation */ 4858 bin = get_memcg_bin(bin + 1); 4859 if (bin != first_bin) 4860 goto restart; 4861 } 4862 4863 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) 4864 { 4865 struct blk_plug plug; 4866 4867 VM_WARN_ON_ONCE(root_reclaim(sc)); 4868 VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap); 4869 4870 lru_add_drain(); 4871 4872 blk_start_plug(&plug); 4873 4874 set_mm_walk(NULL, sc->proactive); 4875 4876 if (try_to_shrink_lruvec(lruvec, sc)) 4877 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG); 4878 4879 clear_mm_walk(); 4880 4881 blk_finish_plug(&plug); 4882 } 4883 4884 static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc) 4885 { 4886 int priority; 4887 unsigned long reclaimable; 4888 4889 if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH) 4890 return; 4891 /* 4892 * Determine the initial priority based on 4893 * (total >> priority) * reclaimed_to_scanned_ratio = nr_to_reclaim, 4894 * where reclaimed_to_scanned_ratio = inactive / total. 
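 *
 * Illustrative example (hypothetical numbers): with reclaimable == 65536
 * and sc->nr_to_reclaim == 32, priority = (17 - 1) - fls_long(31) =
 * 16 - 5 = 11, and indeed 65536 >> 11 == 32.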
4895 */ 4896 reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE); 4897 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) 4898 reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON); 4899 4900 /* round down reclaimable and round up sc->nr_to_reclaim */ 4901 priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1); 4902 4903 sc->priority = clamp(priority, 0, DEF_PRIORITY); 4904 } 4905 4906 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) 4907 { 4908 struct blk_plug plug; 4909 unsigned long reclaimed = sc->nr_reclaimed; 4910 4911 VM_WARN_ON_ONCE(!root_reclaim(sc)); 4912 4913 /* 4914 * Unmapped clean folios are already prioritized. Scanning for more of 4915 * them is likely futile and can cause high reclaim latency when there 4916 * is a large number of memcgs. 4917 */ 4918 if (!sc->may_writepage || !sc->may_unmap) 4919 goto done; 4920 4921 lru_add_drain(); 4922 4923 blk_start_plug(&plug); 4924 4925 set_mm_walk(pgdat, sc->proactive); 4926 4927 set_initial_priority(pgdat, sc); 4928 4929 if (current_is_kswapd()) 4930 sc->nr_reclaimed = 0; 4931 4932 if (mem_cgroup_disabled()) 4933 shrink_one(&pgdat->__lruvec, sc); 4934 else 4935 shrink_many(pgdat, sc); 4936 4937 if (current_is_kswapd()) 4938 sc->nr_reclaimed += reclaimed; 4939 4940 clear_mm_walk(); 4941 4942 blk_finish_plug(&plug); 4943 done: 4944 /* kswapd should never fail */ 4945 pgdat->kswapd_failures = 0; 4946 } 4947 4948 /****************************************************************************** 4949 * state change 4950 ******************************************************************************/ 4951 4952 static bool __maybe_unused state_is_valid(struct lruvec *lruvec) 4953 { 4954 struct lru_gen_folio *lrugen = &lruvec->lrugen; 4955 4956 if (lrugen->enabled) { 4957 enum lru_list lru; 4958 4959 for_each_evictable_lru(lru) { 4960 if (!list_empty(&lruvec->lists[lru])) 4961 return false; 4962 } 4963 } else { 4964 int gen, type, zone; 4965 4966 for_each_gen_type_zone(gen, type, zone) { 4967 if (!list_empty(&lrugen->folios[gen][type][zone])) 4968 return false; 4969 } 4970 } 4971 4972 return true; 4973 } 4974 4975 static bool fill_evictable(struct lruvec *lruvec) 4976 { 4977 enum lru_list lru; 4978 int remaining = MAX_LRU_BATCH; 4979 4980 for_each_evictable_lru(lru) { 4981 int type = is_file_lru(lru); 4982 bool active = is_active_lru(lru); 4983 struct list_head *head = &lruvec->lists[lru]; 4984 4985 while (!list_empty(head)) { 4986 bool success; 4987 struct folio *folio = lru_to_folio(head); 4988 4989 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); 4990 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio) != active, folio); 4991 VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); 4992 VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio); 4993 4994 lruvec_del_folio(lruvec, folio); 4995 success = lru_gen_add_folio(lruvec, folio, false); 4996 VM_WARN_ON_ONCE(!success); 4997 4998 if (!--remaining) 4999 return false; 5000 } 5001 } 5002 5003 return true; 5004 } 5005 5006 static bool drain_evictable(struct lruvec *lruvec) 5007 { 5008 int gen, type, zone; 5009 int remaining = MAX_LRU_BATCH; 5010 5011 for_each_gen_type_zone(gen, type, zone) { 5012 struct list_head *head = &lruvec->lrugen.folios[gen][type][zone]; 5013 5014 while (!list_empty(head)) { 5015 bool success; 5016 struct folio *folio = lru_to_folio(head); 5017 5018 VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); 5019 VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); 5020 
VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); 5021 VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); 5022 5023 success = lru_gen_del_folio(lruvec, folio, false); 5024 VM_WARN_ON_ONCE(!success); 5025 lruvec_add_folio(lruvec, folio); 5026 5027 if (!--remaining) 5028 return false; 5029 } 5030 } 5031 5032 return true; 5033 } 5034 5035 static void lru_gen_change_state(bool enabled) 5036 { 5037 static DEFINE_MUTEX(state_mutex); 5038 5039 struct mem_cgroup *memcg; 5040 5041 cgroup_lock(); 5042 cpus_read_lock(); 5043 get_online_mems(); 5044 mutex_lock(&state_mutex); 5045 5046 if (enabled == lru_gen_enabled()) 5047 goto unlock; 5048 5049 if (enabled) 5050 static_branch_enable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]); 5051 else 5052 static_branch_disable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]); 5053 5054 memcg = mem_cgroup_iter(NULL, NULL, NULL); 5055 do { 5056 int nid; 5057 5058 for_each_node(nid) { 5059 struct lruvec *lruvec = get_lruvec(memcg, nid); 5060 5061 spin_lock_irq(&lruvec->lru_lock); 5062 5063 VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); 5064 VM_WARN_ON_ONCE(!state_is_valid(lruvec)); 5065 5066 lruvec->lrugen.enabled = enabled; 5067 5068 while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) { 5069 spin_unlock_irq(&lruvec->lru_lock); 5070 cond_resched(); 5071 spin_lock_irq(&lruvec->lru_lock); 5072 } 5073 5074 spin_unlock_irq(&lruvec->lru_lock); 5075 } 5076 5077 cond_resched(); 5078 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); 5079 unlock: 5080 mutex_unlock(&state_mutex); 5081 put_online_mems(); 5082 cpus_read_unlock(); 5083 cgroup_unlock(); 5084 } 5085 5086 /****************************************************************************** 5087 * sysfs interface 5088 ******************************************************************************/ 5089 5090 static ssize_t min_ttl_ms_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 5091 { 5092 return sysfs_emit(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl))); 5093 } 5094 5095 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ 5096 static ssize_t min_ttl_ms_store(struct kobject *kobj, struct kobj_attribute *attr, 5097 const char *buf, size_t len) 5098 { 5099 unsigned int msecs; 5100 5101 if (kstrtouint(buf, 0, &msecs)) 5102 return -EINVAL; 5103 5104 WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs)); 5105 5106 return len; 5107 } 5108 5109 static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR_RW(min_ttl_ms); 5110 5111 static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 5112 { 5113 unsigned int caps = 0; 5114 5115 if (get_cap(LRU_GEN_CORE)) 5116 caps |= BIT(LRU_GEN_CORE); 5117 5118 if (should_walk_mmu()) 5119 caps |= BIT(LRU_GEN_MM_WALK); 5120 5121 if (should_clear_pmd_young()) 5122 caps |= BIT(LRU_GEN_NONLEAF_YOUNG); 5123 5124 return sysfs_emit(buf, "0x%04x\n", caps); 5125 } 5126 5127 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ 5128 static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, 5129 const char *buf, size_t len) 5130 { 5131 int i; 5132 unsigned int caps; 5133 5134 if (tolower(*buf) == 'n') 5135 caps = 0; 5136 else if (tolower(*buf) == 'y') 5137 caps = -1; 5138 else if (kstrtouint(buf, 0, &caps)) 5139 return -EINVAL; 5140 5141 for (i = 0; i < NR_LRU_GEN_CAPS; i++) { 5142 bool enabled = caps & BIT(i); 5143 5144 if (i == LRU_GEN_CORE) 5145 lru_gen_change_state(enabled); 5146 else if (enabled) 5147 static_branch_enable(&lru_gen_caps[i]); 5148 else 5149 
static_branch_disable(&lru_gen_caps[i]); 5150 } 5151 5152 return len; 5153 } 5154 5155 static struct kobj_attribute lru_gen_enabled_attr = __ATTR_RW(enabled); 5156 5157 static struct attribute *lru_gen_attrs[] = { 5158 &lru_gen_min_ttl_attr.attr, 5159 &lru_gen_enabled_attr.attr, 5160 NULL 5161 }; 5162 5163 static const struct attribute_group lru_gen_attr_group = { 5164 .name = "lru_gen", 5165 .attrs = lru_gen_attrs, 5166 }; 5167 5168 /****************************************************************************** 5169 * debugfs interface 5170 ******************************************************************************/ 5171 5172 static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos) 5173 { 5174 struct mem_cgroup *memcg; 5175 loff_t nr_to_skip = *pos; 5176 5177 m->private = kvmalloc(PATH_MAX, GFP_KERNEL); 5178 if (!m->private) 5179 return ERR_PTR(-ENOMEM); 5180 5181 memcg = mem_cgroup_iter(NULL, NULL, NULL); 5182 do { 5183 int nid; 5184 5185 for_each_node_state(nid, N_MEMORY) { 5186 if (!nr_to_skip--) 5187 return get_lruvec(memcg, nid); 5188 } 5189 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); 5190 5191 return NULL; 5192 } 5193 5194 static void lru_gen_seq_stop(struct seq_file *m, void *v) 5195 { 5196 if (!IS_ERR_OR_NULL(v)) 5197 mem_cgroup_iter_break(NULL, lruvec_memcg(v)); 5198 5199 kvfree(m->private); 5200 m->private = NULL; 5201 } 5202 5203 static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos) 5204 { 5205 int nid = lruvec_pgdat(v)->node_id; 5206 struct mem_cgroup *memcg = lruvec_memcg(v); 5207 5208 ++*pos; 5209 5210 nid = next_memory_node(nid); 5211 if (nid == MAX_NUMNODES) { 5212 memcg = mem_cgroup_iter(NULL, memcg, NULL); 5213 if (!memcg) 5214 return NULL; 5215 5216 nid = first_memory_node; 5217 } 5218 5219 return get_lruvec(memcg, nid); 5220 } 5221 5222 static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec, 5223 unsigned long max_seq, unsigned long *min_seq, 5224 unsigned long seq) 5225 { 5226 int i; 5227 int type, tier; 5228 int hist = lru_hist_from_seq(seq); 5229 struct lru_gen_folio *lrugen = &lruvec->lrugen; 5230 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 5231 5232 for (tier = 0; tier < MAX_NR_TIERS; tier++) { 5233 seq_printf(m, " %10d", tier); 5234 for (type = 0; type < ANON_AND_FILE; type++) { 5235 const char *s = " "; 5236 unsigned long n[3] = {}; 5237 5238 if (seq == max_seq) { 5239 s = "RT "; 5240 n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]); 5241 n[1] = READ_ONCE(lrugen->avg_total[type][tier]); 5242 } else if (seq == min_seq[type] || NR_HIST_GENS > 1) { 5243 s = "rep"; 5244 n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]); 5245 n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]); 5246 if (tier) 5247 n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]); 5248 } 5249 5250 for (i = 0; i < 3; i++) 5251 seq_printf(m, " %10lu%c", n[i], s[i]); 5252 } 5253 seq_putc(m, '\n'); 5254 } 5255 5256 if (!mm_state) 5257 return; 5258 5259 seq_puts(m, " "); 5260 for (i = 0; i < NR_MM_STATS; i++) { 5261 const char *s = " "; 5262 unsigned long n = 0; 5263 5264 if (seq == max_seq && NR_HIST_GENS == 1) { 5265 s = "LOYNFA"; 5266 n = READ_ONCE(mm_state->stats[hist][i]); 5267 } else if (seq != max_seq && NR_HIST_GENS > 1) { 5268 s = "loynfa"; 5269 n = READ_ONCE(mm_state->stats[hist][i]); 5270 } 5271 5272 seq_printf(m, " %10lu%c", n, s[i]); 5273 } 5274 seq_putc(m, '\n'); 5275 } 5276 5277 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ 5278 static int lru_gen_seq_show(struct 
seq_file *m, void *v) 5279 { 5280 unsigned long seq; 5281 bool full = !debugfs_real_fops(m->file)->write; 5282 struct lruvec *lruvec = v; 5283 struct lru_gen_folio *lrugen = &lruvec->lrugen; 5284 int nid = lruvec_pgdat(lruvec)->node_id; 5285 struct mem_cgroup *memcg = lruvec_memcg(lruvec); 5286 DEFINE_MAX_SEQ(lruvec); 5287 DEFINE_MIN_SEQ(lruvec); 5288 5289 if (nid == first_memory_node) { 5290 const char *path = memcg ? m->private : ""; 5291 5292 #ifdef CONFIG_MEMCG 5293 if (memcg) 5294 cgroup_path(memcg->css.cgroup, m->private, PATH_MAX); 5295 #endif 5296 seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path); 5297 } 5298 5299 seq_printf(m, " node %5d\n", nid); 5300 5301 if (!full) 5302 seq = min_seq[LRU_GEN_ANON]; 5303 else if (max_seq >= MAX_NR_GENS) 5304 seq = max_seq - MAX_NR_GENS + 1; 5305 else 5306 seq = 0; 5307 5308 for (; seq <= max_seq; seq++) { 5309 int type, zone; 5310 int gen = lru_gen_from_seq(seq); 5311 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); 5312 5313 seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth)); 5314 5315 for (type = 0; type < ANON_AND_FILE; type++) { 5316 unsigned long size = 0; 5317 char mark = full && seq < min_seq[type] ? 'x' : ' '; 5318 5319 for (zone = 0; zone < MAX_NR_ZONES; zone++) 5320 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); 5321 5322 seq_printf(m, " %10lu%c", size, mark); 5323 } 5324 5325 seq_putc(m, '\n'); 5326 5327 if (full) 5328 lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq); 5329 } 5330 5331 return 0; 5332 } 5333 5334 static const struct seq_operations lru_gen_seq_ops = { 5335 .start = lru_gen_seq_start, 5336 .stop = lru_gen_seq_stop, 5337 .next = lru_gen_seq_next, 5338 .show = lru_gen_seq_show, 5339 }; 5340 5341 static int run_aging(struct lruvec *lruvec, unsigned long seq, 5342 bool can_swap, bool force_scan) 5343 { 5344 DEFINE_MAX_SEQ(lruvec); 5345 DEFINE_MIN_SEQ(lruvec); 5346 5347 if (seq < max_seq) 5348 return 0; 5349 5350 if (seq > max_seq) 5351 return -EINVAL; 5352 5353 if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq) 5354 return -ERANGE; 5355 5356 try_to_inc_max_seq(lruvec, max_seq, can_swap, force_scan); 5357 5358 return 0; 5359 } 5360 5361 static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc, 5362 int swappiness, unsigned long nr_to_reclaim) 5363 { 5364 DEFINE_MAX_SEQ(lruvec); 5365 5366 if (seq + MIN_NR_GENS > max_seq) 5367 return -EINVAL; 5368 5369 sc->nr_reclaimed = 0; 5370 5371 while (!signal_pending(current)) { 5372 DEFINE_MIN_SEQ(lruvec); 5373 5374 if (seq < min_seq[!swappiness]) 5375 return 0; 5376 5377 if (sc->nr_reclaimed >= nr_to_reclaim) 5378 return 0; 5379 5380 if (!evict_folios(lruvec, sc, swappiness)) 5381 return 0; 5382 5383 cond_resched(); 5384 } 5385 5386 return -EINTR; 5387 } 5388 5389 static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq, 5390 struct scan_control *sc, int swappiness, unsigned long opt) 5391 { 5392 struct lruvec *lruvec; 5393 int err = -EINVAL; 5394 struct mem_cgroup *memcg = NULL; 5395 5396 if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY)) 5397 return -EINVAL; 5398 5399 if (!mem_cgroup_disabled()) { 5400 rcu_read_lock(); 5401 5402 memcg = mem_cgroup_from_id(memcg_id); 5403 if (!mem_cgroup_tryget(memcg)) 5404 memcg = NULL; 5405 5406 rcu_read_unlock(); 5407 5408 if (!memcg) 5409 return -EINVAL; 5410 } 5411 5412 if (memcg_id != mem_cgroup_id(memcg)) 5413 goto done; 5414 5415 lruvec = get_lruvec(memcg, nid); 5416 5417 if (swappiness < 0) 5418 
swappiness = get_swappiness(lruvec, sc); 5419 else if (swappiness > 200) 5420 goto done; 5421 5422 switch (cmd) { 5423 case '+': 5424 err = run_aging(lruvec, seq, swappiness, opt); 5425 break; 5426 case '-': 5427 err = run_eviction(lruvec, seq, sc, swappiness, opt); 5428 break; 5429 } 5430 done: 5431 mem_cgroup_put(memcg); 5432 5433 return err; 5434 } 5435 5436 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ 5437 static ssize_t lru_gen_seq_write(struct file *file, const char __user *src, 5438 size_t len, loff_t *pos) 5439 { 5440 void *buf; 5441 char *cur, *next; 5442 unsigned int flags; 5443 struct blk_plug plug; 5444 int err = -EINVAL; 5445 struct scan_control sc = { 5446 .may_writepage = true, 5447 .may_unmap = true, 5448 .may_swap = true, 5449 .reclaim_idx = MAX_NR_ZONES - 1, 5450 .gfp_mask = GFP_KERNEL, 5451 }; 5452 5453 buf = kvmalloc(len + 1, GFP_KERNEL); 5454 if (!buf) 5455 return -ENOMEM; 5456 5457 if (copy_from_user(buf, src, len)) { 5458 kvfree(buf); 5459 return -EFAULT; 5460 } 5461 5462 set_task_reclaim_state(current, &sc.reclaim_state); 5463 flags = memalloc_noreclaim_save(); 5464 blk_start_plug(&plug); 5465 if (!set_mm_walk(NULL, true)) { 5466 err = -ENOMEM; 5467 goto done; 5468 } 5469 5470 next = buf; 5471 next[len] = '\0'; 5472 5473 while ((cur = strsep(&next, ",;\n"))) { 5474 int n; 5475 int end; 5476 char cmd; 5477 unsigned int memcg_id; 5478 unsigned int nid; 5479 unsigned long seq; 5480 unsigned int swappiness = -1; 5481 unsigned long opt = -1; 5482 5483 cur = skip_spaces(cur); 5484 if (!*cur) 5485 continue; 5486 5487 n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid, 5488 &seq, &end, &swappiness, &end, &opt, &end); 5489 if (n < 4 || cur[end]) { 5490 err = -EINVAL; 5491 break; 5492 } 5493 5494 err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt); 5495 if (err) 5496 break; 5497 } 5498 done: 5499 clear_mm_walk(); 5500 blk_finish_plug(&plug); 5501 memalloc_noreclaim_restore(flags); 5502 set_task_reclaim_state(current, NULL); 5503 5504 kvfree(buf); 5505 5506 return err ? 
: len; 5507 } 5508 5509 static int lru_gen_seq_open(struct inode *inode, struct file *file) 5510 { 5511 return seq_open(file, &lru_gen_seq_ops); 5512 } 5513 5514 static const struct file_operations lru_gen_rw_fops = { 5515 .open = lru_gen_seq_open, 5516 .read = seq_read, 5517 .write = lru_gen_seq_write, 5518 .llseek = seq_lseek, 5519 .release = seq_release, 5520 }; 5521 5522 static const struct file_operations lru_gen_ro_fops = { 5523 .open = lru_gen_seq_open, 5524 .read = seq_read, 5525 .llseek = seq_lseek, 5526 .release = seq_release, 5527 }; 5528 5529 /****************************************************************************** 5530 * initialization 5531 ******************************************************************************/ 5532 5533 void lru_gen_init_pgdat(struct pglist_data *pgdat) 5534 { 5535 int i, j; 5536 5537 spin_lock_init(&pgdat->memcg_lru.lock); 5538 5539 for (i = 0; i < MEMCG_NR_GENS; i++) { 5540 for (j = 0; j < MEMCG_NR_BINS; j++) 5541 INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i); 5542 } 5543 } 5544 5545 void lru_gen_init_lruvec(struct lruvec *lruvec) 5546 { 5547 int i; 5548 int gen, type, zone; 5549 struct lru_gen_folio *lrugen = &lruvec->lrugen; 5550 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 5551 5552 lrugen->max_seq = MIN_NR_GENS + 1; 5553 lrugen->enabled = lru_gen_enabled(); 5554 5555 for (i = 0; i <= MIN_NR_GENS + 1; i++) 5556 lrugen->timestamps[i] = jiffies; 5557 5558 for_each_gen_type_zone(gen, type, zone) 5559 INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]); 5560 5561 if (mm_state) 5562 mm_state->seq = MIN_NR_GENS; 5563 } 5564 5565 #ifdef CONFIG_MEMCG 5566 5567 void lru_gen_init_memcg(struct mem_cgroup *memcg) 5568 { 5569 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 5570 5571 if (!mm_list) 5572 return; 5573 5574 INIT_LIST_HEAD(&mm_list->fifo); 5575 spin_lock_init(&mm_list->lock); 5576 } 5577 5578 void lru_gen_exit_memcg(struct mem_cgroup *memcg) 5579 { 5580 int i; 5581 int nid; 5582 struct lru_gen_mm_list *mm_list = get_mm_list(memcg); 5583 5584 VM_WARN_ON_ONCE(mm_list && !list_empty(&mm_list->fifo)); 5585 5586 for_each_node(nid) { 5587 struct lruvec *lruvec = get_lruvec(memcg, nid); 5588 struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); 5589 5590 VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, 5591 sizeof(lruvec->lrugen.nr_pages))); 5592 5593 lruvec->lrugen.list.next = LIST_POISON1; 5594 5595 if (!mm_state) 5596 continue; 5597 5598 for (i = 0; i < NR_BLOOM_FILTERS; i++) { 5599 bitmap_free(mm_state->filters[i]); 5600 mm_state->filters[i] = NULL; 5601 } 5602 } 5603 } 5604 5605 #endif /* CONFIG_MEMCG */ 5606 5607 static int __init init_lru_gen(void) 5608 { 5609 BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS); 5610 BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS); 5611 5612 if (sysfs_create_group(mm_kobj, &lru_gen_attr_group)) 5613 pr_err("lru_gen: failed to create sysfs group\n"); 5614 5615 debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops); 5616 debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops); 5617 5618 return 0; 5619 }; 5620 late_initcall(init_lru_gen); 5621 5622 #else /* !CONFIG_LRU_GEN */ 5623 5624 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) 5625 { 5626 BUILD_BUG(); 5627 } 5628 5629 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) 5630 { 5631 BUILD_BUG(); 5632 } 5633 5634 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) 5635 { 5636 BUILD_BUG(); 5637 } 5638 5639 
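/*
 * Note: with CONFIG_LRU_GEN disabled, lru_gen_enabled() is a compile-time
 * constant false, so the callers of the stubs above (e.g. shrink_lruvec()
 * below) are eliminated by the compiler and the BUILD_BUG()s merely assert
 * that no such call survives.
 */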
#endif /* CONFIG_LRU_GEN */ 5640 5641 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) 5642 { 5643 unsigned long nr[NR_LRU_LISTS]; 5644 unsigned long targets[NR_LRU_LISTS]; 5645 unsigned long nr_to_scan; 5646 enum lru_list lru; 5647 unsigned long nr_reclaimed = 0; 5648 unsigned long nr_to_reclaim = sc->nr_to_reclaim; 5649 bool proportional_reclaim; 5650 struct blk_plug plug; 5651 5652 if (lru_gen_enabled() && !root_reclaim(sc)) { 5653 lru_gen_shrink_lruvec(lruvec, sc); 5654 return; 5655 } 5656 5657 get_scan_count(lruvec, sc, nr); 5658 5659 /* Record the original scan target for proportional adjustments later */ 5660 memcpy(targets, nr, sizeof(nr)); 5661 5662 /* 5663 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal 5664 * event that can occur when there is little memory pressure e.g. 5665 * multiple streaming readers/writers. Hence, we do not abort scanning 5666 * when the requested number of pages are reclaimed when scanning at 5667 * DEF_PRIORITY on the assumption that the fact we are direct 5668 * reclaiming implies that kswapd is not keeping up and it is best to 5669 * do a batch of work at once. For memcg reclaim one check is made to 5670 * abort proportional reclaim if either the file or anon lru has already 5671 * dropped to zero at the first pass. 5672 */ 5673 proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() && 5674 sc->priority == DEF_PRIORITY); 5675 5676 blk_start_plug(&plug); 5677 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 5678 nr[LRU_INACTIVE_FILE]) { 5679 unsigned long nr_anon, nr_file, percentage; 5680 unsigned long nr_scanned; 5681 5682 for_each_evictable_lru(lru) { 5683 if (nr[lru]) { 5684 nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX); 5685 nr[lru] -= nr_to_scan; 5686 5687 nr_reclaimed += shrink_list(lru, nr_to_scan, 5688 lruvec, sc); 5689 } 5690 } 5691 5692 cond_resched(); 5693 5694 if (nr_reclaimed < nr_to_reclaim || proportional_reclaim) 5695 continue; 5696 5697 /* 5698 * For kswapd and memcg, reclaim at least the number of pages 5699 * requested. Ensure that the anon and file LRUs are scanned 5700 * proportionally what was requested by get_scan_count(). We 5701 * stop reclaiming one LRU and reduce the amount scanning 5702 * proportional to the original scan target. 5703 */ 5704 nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE]; 5705 nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON]; 5706 5707 /* 5708 * It's just vindictive to attack the larger once the smaller 5709 * has gone to zero. And given the way we stop scanning the 5710 * smaller below, this makes sure that we only make one nudge 5711 * towards proportionality once we've got nr_to_reclaim. 5712 */ 5713 if (!nr_file || !nr_anon) 5714 break; 5715 5716 if (nr_file > nr_anon) { 5717 unsigned long scan_target = targets[LRU_INACTIVE_ANON] + 5718 targets[LRU_ACTIVE_ANON] + 1; 5719 lru = LRU_BASE; 5720 percentage = nr_anon * 100 / scan_target; 5721 } else { 5722 unsigned long scan_target = targets[LRU_INACTIVE_FILE] + 5723 targets[LRU_ACTIVE_FILE] + 1; 5724 lru = LRU_FILE; 5725 percentage = nr_file * 100 / scan_target; 5726 } 5727 5728 /* Stop scanning the smaller of the LRU */ 5729 nr[lru] = 0; 5730 nr[lru + LRU_ACTIVE] = 0; 5731 5732 /* 5733 * Recalculate the other LRU scan count based on its original 5734 * scan target and the percentage scanning already complete 5735 */ 5736 lru = (lru == LRU_FILE) ? 
LRU_BASE : LRU_FILE; 5737 nr_scanned = targets[lru] - nr[lru]; 5738 nr[lru] = targets[lru] * (100 - percentage) / 100; 5739 nr[lru] -= min(nr[lru], nr_scanned); 5740 5741 lru += LRU_ACTIVE; 5742 nr_scanned = targets[lru] - nr[lru]; 5743 nr[lru] = targets[lru] * (100 - percentage) / 100; 5744 nr[lru] -= min(nr[lru], nr_scanned); 5745 } 5746 blk_finish_plug(&plug); 5747 sc->nr_reclaimed += nr_reclaimed; 5748 5749 /* 5750 * Even if we did not try to evict anon pages at all, we want to 5751 * rebalance the anon lru active/inactive ratio. 5752 */ 5753 if (can_age_anon_pages(lruvec_pgdat(lruvec), sc) && 5754 inactive_is_low(lruvec, LRU_INACTIVE_ANON)) 5755 shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 5756 sc, LRU_ACTIVE_ANON); 5757 } 5758 5759 /* Use reclaim/compaction for costly allocs or under memory pressure */ 5760 static bool in_reclaim_compaction(struct scan_control *sc) 5761 { 5762 if (gfp_compaction_allowed(sc->gfp_mask) && sc->order && 5763 (sc->order > PAGE_ALLOC_COSTLY_ORDER || 5764 sc->priority < DEF_PRIORITY - 2)) 5765 return true; 5766 5767 return false; 5768 } 5769 5770 /* 5771 * Reclaim/compaction is used for high-order allocation requests. It reclaims 5772 * order-0 pages before compacting the zone. should_continue_reclaim() returns 5773 * true if more pages should be reclaimed such that when the page allocator 5774 * calls try_to_compact_pages() that it will have enough free pages to succeed. 5775 * It will give up earlier than that if there is difficulty reclaiming pages. 5776 */ 5777 static inline bool should_continue_reclaim(struct pglist_data *pgdat, 5778 unsigned long nr_reclaimed, 5779 struct scan_control *sc) 5780 { 5781 unsigned long pages_for_compaction; 5782 unsigned long inactive_lru_pages; 5783 int z; 5784 5785 /* If not in reclaim/compaction mode, stop */ 5786 if (!in_reclaim_compaction(sc)) 5787 return false; 5788 5789 /* 5790 * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX 5791 * number of pages that were scanned. This will return to the caller 5792 * with the risk reclaim/compaction and the resulting allocation attempt 5793 * fails. In the past we have tried harder for __GFP_RETRY_MAYFAIL 5794 * allocations through requiring that the full LRU list has been scanned 5795 * first, by assuming that zero delta of sc->nr_scanned means full LRU 5796 * scan, but that approximation was wrong, and there were corner cases 5797 * where always a non-zero amount of pages were scanned. 
5798 */ 5799 if (!nr_reclaimed) 5800 return false; 5801 5802 /* If compaction would go ahead or the allocation would succeed, stop */ 5803 for (z = 0; z <= sc->reclaim_idx; z++) { 5804 struct zone *zone = &pgdat->node_zones[z]; 5805 if (!managed_zone(zone)) 5806 continue; 5807 5808 /* Allocation can already succeed, nothing to do */ 5809 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), 5810 sc->reclaim_idx, 0)) 5811 return false; 5812 5813 if (compaction_suitable(zone, sc->order, sc->reclaim_idx)) 5814 return false; 5815 } 5816 5817 /* 5818 * If we have not reclaimed enough pages for compaction and the 5819 * inactive lists are large enough, continue reclaiming 5820 */ 5821 pages_for_compaction = compact_gap(sc->order); 5822 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE); 5823 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) 5824 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON); 5825 5826 return inactive_lru_pages > pages_for_compaction; 5827 } 5828 5829 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) 5830 { 5831 struct mem_cgroup *target_memcg = sc->target_mem_cgroup; 5832 struct mem_cgroup *memcg; 5833 5834 memcg = mem_cgroup_iter(target_memcg, NULL, NULL); 5835 do { 5836 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); 5837 unsigned long reclaimed; 5838 unsigned long scanned; 5839 5840 /* 5841 * This loop can become CPU-bound when target memcgs 5842 * aren't eligible for reclaim - either because they 5843 * don't have any reclaimable pages, or because their 5844 * memory is explicitly protected. Avoid soft lockups. 5845 */ 5846 cond_resched(); 5847 5848 mem_cgroup_calculate_protection(target_memcg, memcg); 5849 5850 if (mem_cgroup_below_min(target_memcg, memcg)) { 5851 /* 5852 * Hard protection. 5853 * If there is no reclaimable memory, OOM. 5854 */ 5855 continue; 5856 } else if (mem_cgroup_below_low(target_memcg, memcg)) { 5857 /* 5858 * Soft protection. 5859 * Respect the protection only as long as 5860 * there is an unprotected supply 5861 * of reclaimable memory from other cgroups. 
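 * When the protection has to be breached anyway (memcg_low_reclaim),
 * a MEMCG_LOW event is recorded for the cgroup below.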
5862 */ 5863 if (!sc->memcg_low_reclaim) { 5864 sc->memcg_low_skipped = 1; 5865 continue; 5866 } 5867 memcg_memory_event(memcg, MEMCG_LOW); 5868 } 5869 5870 reclaimed = sc->nr_reclaimed; 5871 scanned = sc->nr_scanned; 5872 5873 shrink_lruvec(lruvec, sc); 5874 5875 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, 5876 sc->priority); 5877 5878 /* Record the group's reclaim efficiency */ 5879 if (!sc->proactive) 5880 vmpressure(sc->gfp_mask, memcg, false, 5881 sc->nr_scanned - scanned, 5882 sc->nr_reclaimed - reclaimed); 5883 5884 } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL))); 5885 } 5886 5887 static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) 5888 { 5889 unsigned long nr_reclaimed, nr_scanned, nr_node_reclaimed; 5890 struct lruvec *target_lruvec; 5891 bool reclaimable = false; 5892 5893 if (lru_gen_enabled() && root_reclaim(sc)) { 5894 lru_gen_shrink_node(pgdat, sc); 5895 return; 5896 } 5897 5898 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); 5899 5900 again: 5901 memset(&sc->nr, 0, sizeof(sc->nr)); 5902 5903 nr_reclaimed = sc->nr_reclaimed; 5904 nr_scanned = sc->nr_scanned; 5905 5906 prepare_scan_control(pgdat, sc); 5907 5908 shrink_node_memcgs(pgdat, sc); 5909 5910 flush_reclaim_state(sc); 5911 5912 nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed; 5913 5914 /* Record the subtree's reclaim efficiency */ 5915 if (!sc->proactive) 5916 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, 5917 sc->nr_scanned - nr_scanned, nr_node_reclaimed); 5918 5919 if (nr_node_reclaimed) 5920 reclaimable = true; 5921 5922 if (current_is_kswapd()) { 5923 /* 5924 * If reclaim is isolating dirty pages under writeback, 5925 * it implies that the long-lived page allocation rate 5926 * is exceeding the page laundering rate. Either the 5927 * global limits are not being effective at throttling 5928 * processes due to the page distribution throughout 5929 * zones or there is heavy usage of a slow backing 5930 * device. The only option is to throttle from reclaim 5931 * context which is not ideal as there is no guarantee 5932 * the dirtying process is throttled in the same way 5933 * balance_dirty_pages() manages. 5934 * 5935 * Once a node is flagged PGDAT_WRITEBACK, kswapd will 5936 * count the number of pages under pages flagged for 5937 * immediate reclaim and stall if any are encountered 5938 * in the nr_immediate check below. 5939 */ 5940 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) 5941 set_bit(PGDAT_WRITEBACK, &pgdat->flags); 5942 5943 /* Allow kswapd to start writing pages during reclaim.*/ 5944 if (sc->nr.unqueued_dirty == sc->nr.file_taken) 5945 set_bit(PGDAT_DIRTY, &pgdat->flags); 5946 5947 /* 5948 * If kswapd scans pages marked for immediate 5949 * reclaim and under writeback (nr_immediate), it 5950 * implies that pages are cycling through the LRU 5951 * faster than they are written so forcibly stall 5952 * until some pages complete writeback. 5953 */ 5954 if (sc->nr.immediate) 5955 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); 5956 } 5957 5958 /* 5959 * Tag a node/memcg as congested if all the dirty pages were marked 5960 * for writeback and immediate reclaim (counted in nr.congested). 5961 * 5962 * Legacy memcg will stall in page writeback so avoid forcibly 5963 * stalling in reclaim_throttle(). 
5964 */
5965 if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) {
5966 if (cgroup_reclaim(sc) && writeback_throttling_sane(sc))
5967 set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags);
5968
5969 if (current_is_kswapd())
5970 set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags);
5971 }
5972
5973 /*
5974 * Stall direct reclaim for IO completions if the lruvec or
5975 * node is congested. Allow kswapd to continue until it
5976 * starts encountering unqueued dirty pages or cycling through
5977 * the LRU too quickly.
5978 */
5979 if (!current_is_kswapd() && current_may_throttle() &&
5980 !sc->hibernation_mode &&
5981 (test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) ||
5982 test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags)))
5983 reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED);
5984
5985 if (should_continue_reclaim(pgdat, nr_node_reclaimed, sc))
5986 goto again;
5987
5988 /*
5989 * Kswapd gives up on balancing particular nodes after too
5990 * many failures to reclaim anything from them and goes to
5991 * sleep. On reclaim progress, reset the failure counter. A
5992 * successful direct reclaim run will revive a dormant kswapd.
5993 */
5994 if (reclaimable)
5995 pgdat->kswapd_failures = 0;
5996 else if (sc->cache_trim_mode)
5997 sc->cache_trim_mode_failed = 1;
5998 }
5999
6000 /*
6001 * Returns true if compaction should go ahead for a costly-order request, or
6002 * the allocation would already succeed without compaction. Return false if we
6003 * should reclaim first.
6004 */
6005 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
6006 {
6007 unsigned long watermark;
6008
6009 if (!gfp_compaction_allowed(sc->gfp_mask))
6010 return false;
6011
6012 /* Allocation can already succeed, nothing to do */
6013 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
6014 sc->reclaim_idx, 0))
6015 return true;
6016
6017 /* Compaction cannot yet proceed. Do reclaim. */
6018 if (!compaction_suitable(zone, sc->order, sc->reclaim_idx))
6019 return false;
6020
6021 /*
6022 * Compaction is already possible, but it takes time to run and there
6023 * are potentially other callers using the pages just freed. So proceed
6024 * with reclaim to make a buffer of free pages available to give
6025 * compaction a reasonable chance of completing and allocating the page.
6026 * Note that we won't actually reclaim the whole buffer in one attempt
6027 * as the target watermark in should_continue_reclaim() is lower. But if
6028 * we are already above the high+gap watermark, don't reclaim at all.
6029 */
6030 watermark = high_wmark_pages(zone) + compact_gap(sc->order);
6031
6032 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
6033 }
6034
6035 static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc)
6036 {
6037 /*
6038 * If reclaim is making progress greater than 12.5% efficiency then
6039 * wake all the NOPROGRESS throttled tasks.
6040 */
6041 if (sc->nr_reclaimed > (sc->nr_scanned >> 3)) {
6042 wait_queue_head_t *wqh;
6043
6044 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS];
6045 if (waitqueue_active(wqh))
6046 wake_up(wqh);
6047
6048 return;
6049 }
6050
6051 /*
6052 * Do not throttle kswapd or cgroup reclaim on NOPROGRESS as it will
6053 * throttle on VMSCAN_THROTTLE_WRITEBACK if there are too many pages
6054 * under writeback and marked for immediate reclaim at the tail of the
6055 * LRU.
*/
6057 if (current_is_kswapd() || cgroup_reclaim(sc))
6058 return;
6059
6060 /* Throttle if making no progress at high priorities. */
6061 if (sc->priority == 1 && !sc->nr_reclaimed)
6062 reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS);
6063 }
6064
6065 /*
6066 * This is the direct reclaim path, for page-allocating processes. We only
6067 * try to reclaim pages from zones which will satisfy the caller's allocation
6068 * request.
6069 *
6070 * If a zone is deemed to be full of pinned pages then just give it a light
6071 * scan then give up on it.
6072 */
6073 static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
6074 {
6075 struct zoneref *z;
6076 struct zone *zone;
6077 unsigned long nr_soft_reclaimed;
6078 unsigned long nr_soft_scanned;
6079 gfp_t orig_mask;
6080 pg_data_t *last_pgdat = NULL;
6081 pg_data_t *first_pgdat = NULL;
6082
6083 /*
6084 * If the number of buffer_heads in the machine exceeds the maximum
6085 * allowed level, force direct reclaim to scan the highmem zone as
6086 * highmem pages could be pinning lowmem pages storing buffer_heads
6087 */
6088 orig_mask = sc->gfp_mask;
6089 if (buffer_heads_over_limit) {
6090 sc->gfp_mask |= __GFP_HIGHMEM;
6091 sc->reclaim_idx = gfp_zone(sc->gfp_mask);
6092 }
6093
6094 for_each_zone_zonelist_nodemask(zone, z, zonelist,
6095 sc->reclaim_idx, sc->nodemask) {
6096 /*
6097 * Take care that memory controller reclaiming has only a small
6098 * influence on the global LRU.
6099 */
6100 if (!cgroup_reclaim(sc)) {
6101 if (!cpuset_zone_allowed(zone,
6102 GFP_KERNEL | __GFP_HARDWALL))
6103 continue;
6104
6105 /*
6106 * If we already have plenty of memory free for
6107 * compaction in this zone, don't free any more.
6108 * Even though compaction is invoked for any
6109 * non-zero order, only frequent costly order
6110 * reclamation is disruptive enough to become a
6111 * noticeable problem, like transparent huge
6112 * page allocations.
6113 */
6114 if (IS_ENABLED(CONFIG_COMPACTION) &&
6115 sc->order > PAGE_ALLOC_COSTLY_ORDER &&
6116 compaction_ready(zone, sc)) {
6117 sc->compaction_ready = true;
6118 continue;
6119 }
6120
6121 /*
6122 * Shrink each node in the zonelist once. If the
6123 * zonelist is ordered by zone (not the default) then a
6124 * node may be shrunk multiple times but in that case
6125 * the user prefers lower zones being preserved.
6126 */
6127 if (zone->zone_pgdat == last_pgdat)
6128 continue;
6129
6130 /*
6131 * This steals pages from memory cgroups over softlimit
6132 * and returns the number of reclaimed pages and
6133 * scanned pages. This works for global memory pressure
6134 * and balancing, not for a memcg's limit.
6135 */
6136 nr_soft_scanned = 0;
6137 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
6138 sc->order, sc->gfp_mask,
6139 &nr_soft_scanned);
6140 sc->nr_reclaimed += nr_soft_reclaimed;
6141 sc->nr_scanned += nr_soft_scanned;
6142 /* need some check to avoid more shrink_node() calls */
6143 }
6144
6145 if (!first_pgdat)
6146 first_pgdat = zone->zone_pgdat;
6147
6148 /* See comment about same check for global reclaim above */
6149 if (zone->zone_pgdat == last_pgdat)
6150 continue;
6151 last_pgdat = zone->zone_pgdat;
6152 shrink_node(zone->zone_pgdat, sc);
6153 }
6154
6155 if (first_pgdat)
6156 consider_reclaim_throttle(first_pgdat, sc);
6157
6158 /*
6159 * Restore to original mask to avoid the impact on the caller if we
6160 * promoted it to __GFP_HIGHMEM.
6161 */ 6162 sc->gfp_mask = orig_mask; 6163 } 6164 6165 static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat) 6166 { 6167 struct lruvec *target_lruvec; 6168 unsigned long refaults; 6169 6170 if (lru_gen_enabled()) 6171 return; 6172 6173 target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat); 6174 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON); 6175 target_lruvec->refaults[WORKINGSET_ANON] = refaults; 6176 refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE); 6177 target_lruvec->refaults[WORKINGSET_FILE] = refaults; 6178 } 6179 6180 /* 6181 * This is the main entry point to direct page reclaim. 6182 * 6183 * If a full scan of the inactive list fails to free enough memory then we 6184 * are "out of memory" and something needs to be killed. 6185 * 6186 * If the caller is !__GFP_FS then the probability of a failure is reasonably 6187 * high - the zone may be full of dirty or under-writeback pages, which this 6188 * caller can't do much about. We kick the writeback threads and take explicit 6189 * naps in the hope that some of these pages can be written. But if the 6190 * allocating task holds filesystem locks which prevent writeout this might not 6191 * work, and the allocation attempt will fail. 6192 * 6193 * returns: 0, if no pages reclaimed 6194 * else, the number of pages reclaimed 6195 */ 6196 static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 6197 struct scan_control *sc) 6198 { 6199 int initial_priority = sc->priority; 6200 pg_data_t *last_pgdat; 6201 struct zoneref *z; 6202 struct zone *zone; 6203 retry: 6204 delayacct_freepages_start(); 6205 6206 if (!cgroup_reclaim(sc)) 6207 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); 6208 6209 do { 6210 if (!sc->proactive) 6211 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, 6212 sc->priority); 6213 sc->nr_scanned = 0; 6214 shrink_zones(zonelist, sc); 6215 6216 if (sc->nr_reclaimed >= sc->nr_to_reclaim) 6217 break; 6218 6219 if (sc->compaction_ready) 6220 break; 6221 6222 /* 6223 * If we're getting trouble reclaiming, start doing 6224 * writepage even in laptop mode. 6225 */ 6226 if (sc->priority < DEF_PRIORITY - 2) 6227 sc->may_writepage = 1; 6228 } while (--sc->priority >= 0); 6229 6230 last_pgdat = NULL; 6231 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, 6232 sc->nodemask) { 6233 if (zone->zone_pgdat == last_pgdat) 6234 continue; 6235 last_pgdat = zone->zone_pgdat; 6236 6237 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); 6238 6239 if (cgroup_reclaim(sc)) { 6240 struct lruvec *lruvec; 6241 6242 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, 6243 zone->zone_pgdat); 6244 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); 6245 } 6246 } 6247 6248 delayacct_freepages_end(); 6249 6250 if (sc->nr_reclaimed) 6251 return sc->nr_reclaimed; 6252 6253 /* Aborted reclaim to try compaction? don't OOM, then */ 6254 if (sc->compaction_ready) 6255 return 1; 6256 6257 /* 6258 * We make inactive:active ratio decisions based on the node's 6259 * composition of memory, but a restrictive reclaim_idx or a 6260 * memory.low cgroup setting can exempt large amounts of 6261 * memory from reclaim. Neither of which are very common, so 6262 * instead of doing costly eligibility calculations of the 6263 * entire cgroup subtree up front, we assume the estimates are 6264 * good, and retry with forcible deactivation if that fails. 
6265 */ 6266 if (sc->skipped_deactivate) { 6267 sc->priority = initial_priority; 6268 sc->force_deactivate = 1; 6269 sc->skipped_deactivate = 0; 6270 goto retry; 6271 } 6272 6273 /* Untapped cgroup reserves? Don't OOM, retry. */ 6274 if (sc->memcg_low_skipped) { 6275 sc->priority = initial_priority; 6276 sc->force_deactivate = 0; 6277 sc->memcg_low_reclaim = 1; 6278 sc->memcg_low_skipped = 0; 6279 goto retry; 6280 } 6281 6282 return 0; 6283 } 6284 6285 static bool allow_direct_reclaim(pg_data_t *pgdat) 6286 { 6287 struct zone *zone; 6288 unsigned long pfmemalloc_reserve = 0; 6289 unsigned long free_pages = 0; 6290 int i; 6291 bool wmark_ok; 6292 6293 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) 6294 return true; 6295 6296 for (i = 0; i <= ZONE_NORMAL; i++) { 6297 zone = &pgdat->node_zones[i]; 6298 if (!managed_zone(zone)) 6299 continue; 6300 6301 if (!zone_reclaimable_pages(zone)) 6302 continue; 6303 6304 pfmemalloc_reserve += min_wmark_pages(zone); 6305 free_pages += zone_page_state_snapshot(zone, NR_FREE_PAGES); 6306 } 6307 6308 /* If there are no reserves (unexpected config) then do not throttle */ 6309 if (!pfmemalloc_reserve) 6310 return true; 6311 6312 wmark_ok = free_pages > pfmemalloc_reserve / 2; 6313 6314 /* kswapd must be awake if processes are being throttled */ 6315 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { 6316 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) 6317 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); 6318 6319 wake_up_interruptible(&pgdat->kswapd_wait); 6320 } 6321 6322 return wmark_ok; 6323 } 6324 6325 /* 6326 * Throttle direct reclaimers if backing storage is backed by the network 6327 * and the PFMEMALLOC reserve for the preferred node is getting dangerously 6328 * depleted. kswapd will continue to make progress and wake the processes 6329 * when the low watermark is reached. 6330 * 6331 * Returns true if a fatal signal was delivered during throttling. If this 6332 * happens, the page allocator should not consider triggering the OOM killer. 6333 */ 6334 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, 6335 nodemask_t *nodemask) 6336 { 6337 struct zoneref *z; 6338 struct zone *zone; 6339 pg_data_t *pgdat = NULL; 6340 6341 /* 6342 * Kernel threads should not be throttled as they may be indirectly 6343 * responsible for cleaning pages necessary for reclaim to make forward 6344 * progress. kjournald for example may enter direct reclaim while 6345 * committing a transaction where throttling it could forcing other 6346 * processes to block on log_wait_commit(). 6347 */ 6348 if (current->flags & PF_KTHREAD) 6349 goto out; 6350 6351 /* 6352 * If a fatal signal is pending, this process should not throttle. 6353 * It should return quickly so it can exit and free its memory 6354 */ 6355 if (fatal_signal_pending(current)) 6356 goto out; 6357 6358 /* 6359 * Check if the pfmemalloc reserves are ok by finding the first node 6360 * with a usable ZONE_NORMAL or lower zone. The expectation is that 6361 * GFP_KERNEL will be required for allocating network buffers when 6362 * swapping over the network so ZONE_HIGHMEM is unusable. 6363 * 6364 * Throttling is based on the first usable node and throttled processes 6365 * wait on a queue until kswapd makes progress and wakes them. There 6366 * is an affinity then between processes waking up and where reclaim 6367 * progress has been made assuming the process wakes on the same node. 
6368 * More importantly, processes running on remote nodes will not compete 6369 * for remote pfmemalloc reserves and processes on different nodes 6370 * should make reasonable progress. 6371 */ 6372 for_each_zone_zonelist_nodemask(zone, z, zonelist, 6373 gfp_zone(gfp_mask), nodemask) { 6374 if (zone_idx(zone) > ZONE_NORMAL) 6375 continue; 6376 6377 /* Throttle based on the first usable node */ 6378 pgdat = zone->zone_pgdat; 6379 if (allow_direct_reclaim(pgdat)) 6380 goto out; 6381 break; 6382 } 6383 6384 /* If no zone was usable by the allocation flags then do not throttle */ 6385 if (!pgdat) 6386 goto out; 6387 6388 /* Account for the throttling */ 6389 count_vm_event(PGSCAN_DIRECT_THROTTLE); 6390 6391 /* 6392 * If the caller cannot enter the filesystem, it's possible that it 6393 * is due to the caller holding an FS lock or performing a journal 6394 * transaction in the case of a filesystem like ext[3|4]. In this case, 6395 * it is not safe to block on pfmemalloc_wait as kswapd could be 6396 * blocked waiting on the same lock. Instead, throttle for up to a 6397 * second before continuing. 6398 */ 6399 if (!(gfp_mask & __GFP_FS)) 6400 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, 6401 allow_direct_reclaim(pgdat), HZ); 6402 else 6403 /* Throttle until kswapd wakes the process */ 6404 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, 6405 allow_direct_reclaim(pgdat)); 6406 6407 if (fatal_signal_pending(current)) 6408 return true; 6409 6410 out: 6411 return false; 6412 } 6413 6414 unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 6415 gfp_t gfp_mask, nodemask_t *nodemask) 6416 { 6417 unsigned long nr_reclaimed; 6418 struct scan_control sc = { 6419 .nr_to_reclaim = SWAP_CLUSTER_MAX, 6420 .gfp_mask = current_gfp_context(gfp_mask), 6421 .reclaim_idx = gfp_zone(gfp_mask), 6422 .order = order, 6423 .nodemask = nodemask, 6424 .priority = DEF_PRIORITY, 6425 .may_writepage = !laptop_mode, 6426 .may_unmap = 1, 6427 .may_swap = 1, 6428 }; 6429 6430 /* 6431 * scan_control uses s8 fields for order, priority, and reclaim_idx. 6432 * Confirm they are large enough for max values. 6433 */ 6434 BUILD_BUG_ON(MAX_PAGE_ORDER >= S8_MAX); 6435 BUILD_BUG_ON(DEF_PRIORITY > S8_MAX); 6436 BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX); 6437 6438 /* 6439 * Do not enter reclaim if fatal signal was delivered while throttled. 6440 * 1 is returned so that the page allocator does not OOM kill at this 6441 * point. 6442 */ 6443 if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask)) 6444 return 1; 6445 6446 set_task_reclaim_state(current, &sc.reclaim_state); 6447 trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask); 6448 6449 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 6450 6451 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); 6452 set_task_reclaim_state(current, NULL); 6453 6454 return nr_reclaimed; 6455 } 6456 6457 #ifdef CONFIG_MEMCG 6458 6459 /* Only used by soft limit reclaim. Do not reuse for anything else. 
*/
6460 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
6461 gfp_t gfp_mask, bool noswap,
6462 pg_data_t *pgdat,
6463 unsigned long *nr_scanned)
6464 {
6465 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
6466 struct scan_control sc = {
6467 .nr_to_reclaim = SWAP_CLUSTER_MAX,
6468 .target_mem_cgroup = memcg,
6469 .may_writepage = !laptop_mode,
6470 .may_unmap = 1,
6471 .reclaim_idx = MAX_NR_ZONES - 1,
6472 .may_swap = !noswap,
6473 };
6474
6475 WARN_ON_ONCE(!current->reclaim_state);
6476
6477 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
6478 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
6479
6480 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
6481 sc.gfp_mask);
6482
6483 /*
6484 * NOTE: Although we can get the priority field, using it
6485 * here is not a good idea, since it limits the pages we can scan.
6486 * If we don't reclaim here, shrink_node() from balance_pgdat()
6487 * will pick up pages from other mem cgroups as well. We hack
6488 * the priority and make it zero.
6489 */
6490 shrink_lruvec(lruvec, &sc);
6491
6492 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
6493
6494 *nr_scanned = sc.nr_scanned;
6495
6496 return sc.nr_reclaimed;
6497 }
6498
6499 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
6500 unsigned long nr_pages,
6501 gfp_t gfp_mask,
6502 unsigned int reclaim_options)
6503 {
6504 unsigned long nr_reclaimed;
6505 unsigned int noreclaim_flag;
6506 struct scan_control sc = {
6507 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
6508 .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
6509 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
6510 .reclaim_idx = MAX_NR_ZONES - 1,
6511 .target_mem_cgroup = memcg,
6512 .priority = DEF_PRIORITY,
6513 .may_writepage = !laptop_mode,
6514 .may_unmap = 1,
6515 .may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP),
6516 .proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE),
6517 };
6518 /*
6519 * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
6520 * equal pressure on all the nodes. This is based on the assumption that
6521 * the reclaim does not bail out early.
6522 */ 6523 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 6524 6525 set_task_reclaim_state(current, &sc.reclaim_state); 6526 trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask); 6527 noreclaim_flag = memalloc_noreclaim_save(); 6528 6529 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 6530 6531 memalloc_noreclaim_restore(noreclaim_flag); 6532 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 6533 set_task_reclaim_state(current, NULL); 6534 6535 return nr_reclaimed; 6536 } 6537 #endif 6538 6539 static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc) 6540 { 6541 struct mem_cgroup *memcg; 6542 struct lruvec *lruvec; 6543 6544 if (lru_gen_enabled()) { 6545 lru_gen_age_node(pgdat, sc); 6546 return; 6547 } 6548 6549 if (!can_age_anon_pages(pgdat, sc)) 6550 return; 6551 6552 lruvec = mem_cgroup_lruvec(NULL, pgdat); 6553 if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON)) 6554 return; 6555 6556 memcg = mem_cgroup_iter(NULL, NULL, NULL); 6557 do { 6558 lruvec = mem_cgroup_lruvec(memcg, pgdat); 6559 shrink_active_list(SWAP_CLUSTER_MAX, lruvec, 6560 sc, LRU_ACTIVE_ANON); 6561 memcg = mem_cgroup_iter(NULL, memcg, NULL); 6562 } while (memcg); 6563 } 6564 6565 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx) 6566 { 6567 int i; 6568 struct zone *zone; 6569 6570 /* 6571 * Check for watermark boosts top-down as the higher zones 6572 * are more likely to be boosted. Both watermarks and boosts 6573 * should not be checked at the same time as reclaim would 6574 * start prematurely when there is no boosting and a lower 6575 * zone is balanced. 6576 */ 6577 for (i = highest_zoneidx; i >= 0; i--) { 6578 zone = pgdat->node_zones + i; 6579 if (!managed_zone(zone)) 6580 continue; 6581 6582 if (zone->watermark_boost) 6583 return true; 6584 } 6585 6586 return false; 6587 } 6588 6589 /* 6590 * Returns true if there is an eligible zone balanced for the request order 6591 * and highest_zoneidx 6592 */ 6593 static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) 6594 { 6595 int i; 6596 unsigned long mark = -1; 6597 struct zone *zone; 6598 6599 /* 6600 * Check watermarks bottom-up as lower zones are more likely to 6601 * meet watermarks. 6602 */ 6603 for (i = 0; i <= highest_zoneidx; i++) { 6604 zone = pgdat->node_zones + i; 6605 6606 if (!managed_zone(zone)) 6607 continue; 6608 6609 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) 6610 mark = wmark_pages(zone, WMARK_PROMO); 6611 else 6612 mark = high_wmark_pages(zone); 6613 if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx)) 6614 return true; 6615 } 6616 6617 /* 6618 * If a node has no managed zone within highest_zoneidx, it does not 6619 * need balancing by definition. This can happen if a zone-restricted 6620 * allocation tries to wake a remote kswapd. 6621 */ 6622 if (mark == -1) 6623 return true; 6624 6625 return false; 6626 } 6627 6628 /* Clear pgdat state for congested, dirty or under writeback. */ 6629 static void clear_pgdat_congested(pg_data_t *pgdat) 6630 { 6631 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); 6632 6633 clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags); 6634 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); 6635 clear_bit(PGDAT_DIRTY, &pgdat->flags); 6636 clear_bit(PGDAT_WRITEBACK, &pgdat->flags); 6637 } 6638 6639 /* 6640 * Prepare kswapd for sleeping. This verifies that there are no processes 6641 * waiting in throttle_direct_reclaim() and that watermarks have been met. 
6642 * 6643 * Returns true if kswapd is ready to sleep 6644 */ 6645 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, 6646 int highest_zoneidx) 6647 { 6648 /* 6649 * The throttled processes are normally woken up in balance_pgdat() as 6650 * soon as allow_direct_reclaim() is true. But there is a potential 6651 * race between when kswapd checks the watermarks and a process gets 6652 * throttled. There is also a potential race if processes get 6653 * throttled, kswapd wakes, a large process exits thereby balancing the 6654 * zones, which causes kswapd to exit balance_pgdat() before reaching 6655 * the wake up checks. If kswapd is going to sleep, no process should 6656 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If 6657 * the wake up is premature, processes will wake kswapd and get 6658 * throttled again. The difference from wake ups in balance_pgdat() is 6659 * that here we are under prepare_to_wait(). 6660 */ 6661 if (waitqueue_active(&pgdat->pfmemalloc_wait)) 6662 wake_up_all(&pgdat->pfmemalloc_wait); 6663 6664 /* Hopeless node, leave it to direct reclaim */ 6665 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) 6666 return true; 6667 6668 if (pgdat_balanced(pgdat, order, highest_zoneidx)) { 6669 clear_pgdat_congested(pgdat); 6670 return true; 6671 } 6672 6673 return false; 6674 } 6675 6676 /* 6677 * kswapd shrinks a node of pages that are at or below the highest usable 6678 * zone that is currently unbalanced. 6679 * 6680 * Returns true if kswapd scanned at least the requested number of pages to 6681 * reclaim or if the lack of progress was due to pages under writeback. 6682 * This is used to determine if the scanning priority needs to be raised. 6683 */ 6684 static bool kswapd_shrink_node(pg_data_t *pgdat, 6685 struct scan_control *sc) 6686 { 6687 struct zone *zone; 6688 int z; 6689 6690 /* Reclaim a number of pages proportional to the number of zones */ 6691 sc->nr_to_reclaim = 0; 6692 for (z = 0; z <= sc->reclaim_idx; z++) { 6693 zone = pgdat->node_zones + z; 6694 if (!managed_zone(zone)) 6695 continue; 6696 6697 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); 6698 } 6699 6700 /* 6701 * Historically care was taken to put equal pressure on all zones but 6702 * now pressure is applied based on node LRU order. 6703 */ 6704 shrink_node(pgdat, sc); 6705 6706 /* 6707 * Fragmentation may mean that the system cannot be rebalanced for 6708 * high-order allocations. If twice the allocation size has been 6709 * reclaimed then recheck watermarks only at order-0 to prevent 6710 * excessive reclaim. Assume that a process requested a high-order 6711 * can direct reclaim/compact. 6712 */ 6713 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order)) 6714 sc->order = 0; 6715 6716 return sc->nr_scanned >= sc->nr_to_reclaim; 6717 } 6718 6719 /* Page allocator PCP high watermark is lowered if reclaim is active. 
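 * This lets pages freed during reclaim reach the buddy allocator sooner
 * instead of accumulating on per-CPU free lists.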
*/ 6720 static inline void 6721 update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active) 6722 { 6723 int i; 6724 struct zone *zone; 6725 6726 for (i = 0; i <= highest_zoneidx; i++) { 6727 zone = pgdat->node_zones + i; 6728 6729 if (!managed_zone(zone)) 6730 continue; 6731 6732 if (active) 6733 set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); 6734 else 6735 clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); 6736 } 6737 } 6738 6739 static inline void 6740 set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) 6741 { 6742 update_reclaim_active(pgdat, highest_zoneidx, true); 6743 } 6744 6745 static inline void 6746 clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) 6747 { 6748 update_reclaim_active(pgdat, highest_zoneidx, false); 6749 } 6750 6751 /* 6752 * For kswapd, balance_pgdat() will reclaim pages across a node from zones 6753 * that are eligible for use by the caller until at least one zone is 6754 * balanced. 6755 * 6756 * Returns the order kswapd finished reclaiming at. 6757 * 6758 * kswapd scans the zones in the highmem->normal->dma direction. It skips 6759 * zones which have free_pages > high_wmark_pages(zone), but once a zone is 6760 * found to have free_pages <= high_wmark_pages(zone), any page in that zone 6761 * or lower is eligible for reclaim until at least one usable zone is 6762 * balanced. 6763 */ 6764 static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx) 6765 { 6766 int i; 6767 unsigned long nr_soft_reclaimed; 6768 unsigned long nr_soft_scanned; 6769 unsigned long pflags; 6770 unsigned long nr_boost_reclaim; 6771 unsigned long zone_boosts[MAX_NR_ZONES] = { 0, }; 6772 bool boosted; 6773 struct zone *zone; 6774 struct scan_control sc = { 6775 .gfp_mask = GFP_KERNEL, 6776 .order = order, 6777 .may_unmap = 1, 6778 }; 6779 6780 set_task_reclaim_state(current, &sc.reclaim_state); 6781 psi_memstall_enter(&pflags); 6782 __fs_reclaim_acquire(_THIS_IP_); 6783 6784 count_vm_event(PAGEOUTRUN); 6785 6786 /* 6787 * Account for the reclaim boost. Note that the zone boost is left in 6788 * place so that parallel allocations that are near the watermark will 6789 * stall or direct reclaim until kswapd is finished. 6790 */ 6791 nr_boost_reclaim = 0; 6792 for (i = 0; i <= highest_zoneidx; i++) { 6793 zone = pgdat->node_zones + i; 6794 if (!managed_zone(zone)) 6795 continue; 6796 6797 nr_boost_reclaim += zone->watermark_boost; 6798 zone_boosts[i] = zone->watermark_boost; 6799 } 6800 boosted = nr_boost_reclaim; 6801 6802 restart: 6803 set_reclaim_active(pgdat, highest_zoneidx); 6804 sc.priority = DEF_PRIORITY; 6805 do { 6806 unsigned long nr_reclaimed = sc.nr_reclaimed; 6807 bool raise_priority = true; 6808 bool balanced; 6809 bool ret; 6810 bool was_frozen; 6811 6812 sc.reclaim_idx = highest_zoneidx; 6813 6814 /* 6815 * If the number of buffer_heads exceeds the maximum allowed 6816 * then consider reclaiming from all zones. This has a dual 6817 * purpose -- on 64-bit systems it is expected that 6818 * buffer_heads are stripped during active rotation. On 32-bit 6819 * systems, highmem pages can pin lowmem memory and shrinking 6820 * buffers can relieve lowmem pressure. Reclaim may still not 6821 * go ahead if all eligible zones for the original allocation 6822 * request are balanced to avoid excessive reclaim from kswapd. 
6823 */
6824 if (buffer_heads_over_limit) {
6825 for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
6826 zone = pgdat->node_zones + i;
6827 if (!managed_zone(zone))
6828 continue;
6829
6830 sc.reclaim_idx = i;
6831 break;
6832 }
6833 }
6834
6835 /*
6836 * If the pgdat is imbalanced then ignore boosting and preserve
6837 * the watermarks for a later time and restart. Note that the
6838 * zone watermarks will be still reset at the end of balancing
6839 * on the grounds that the normal reclaim should be enough to
6840 * re-evaluate if boosting is required when kswapd next wakes.
6841 */
6842 balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx);
6843 if (!balanced && nr_boost_reclaim) {
6844 nr_boost_reclaim = 0;
6845 goto restart;
6846 }
6847
6848 /*
6849 * If boosting is not active then only reclaim if there are no
6850 * eligible zones. Note that sc.reclaim_idx is not used as
6851 * buffer_heads_over_limit may have adjusted it.
6852 */
6853 if (!nr_boost_reclaim && balanced)
6854 goto out;
6855
6856 /* Limit the priority of boosting to avoid reclaim writeback */
6857 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
6858 raise_priority = false;
6859
6860 /*
6861 * Do not writeback or swap pages for boosted reclaim. The
6862 * intent is to relieve pressure not issue sub-optimal IO
6863 * from reclaim context. If no pages are reclaimed, the
6864 * reclaim will be aborted.
6865 */
6866 sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
6867 sc.may_swap = !nr_boost_reclaim;
6868
6869 /*
6870 * Do some background aging, to give pages a chance to be
6871 * referenced before reclaiming. All pages are rotated
6872 * regardless of classzone as this is about consistent aging.
6873 */
6874 kswapd_age_node(pgdat, &sc);
6875
6876 /*
6877 * If we're having trouble reclaiming, start doing writepage
6878 * even in laptop mode.
6879 */
6880 if (sc.priority < DEF_PRIORITY - 2)
6881 sc.may_writepage = 1;
6882
6883 /* Call soft limit reclaim before calling shrink_node. */
6884 sc.nr_scanned = 0;
6885 nr_soft_scanned = 0;
6886 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
6887 sc.gfp_mask, &nr_soft_scanned);
6888 sc.nr_reclaimed += nr_soft_reclaimed;
6889
6890 /*
6891 * There should be no need to raise the scanning priority if
6892 * enough pages are already being scanned that the high
6893 * watermark would be met at 100% efficiency.
6894 */
6895 if (kswapd_shrink_node(pgdat, &sc))
6896 raise_priority = false;
6897
6898 /*
6899 * If the low watermark is met there is no need for processes
6900 * to be throttled on pfmemalloc_wait as they should now be
6901 * able to safely make forward progress. Wake them up.
6902 */
6903 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
6904 allow_direct_reclaim(pgdat))
6905 wake_up_all(&pgdat->pfmemalloc_wait);
6906
6907 /* Check if kswapd should be suspending */
6908 __fs_reclaim_release(_THIS_IP_);
6909 ret = kthread_freezable_should_stop(&was_frozen);
6910 __fs_reclaim_acquire(_THIS_IP_);
6911 if (was_frozen || ret)
6912 break;
6913
6914 /*
6915 * Raise priority if scanning rate is too low or there was no
6916 * progress in reclaiming pages
6917 */
6918 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
6919 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);
6920
6921 /*
6922 * If reclaim made no progress for a boost, stop reclaim as
6923 * IO cannot be queued and it could be an infinite loop in
6924 * extreme circumstances.
6925 */ 6926 if (nr_boost_reclaim && !nr_reclaimed) 6927 break; 6928 6929 if (raise_priority || !nr_reclaimed) 6930 sc.priority--; 6931 } while (sc.priority >= 1); 6932 6933 /* 6934 * Restart only if it went through the priority loop all the way, 6935 * but cache_trim_mode didn't work. 6936 */ 6937 if (!sc.nr_reclaimed && sc.priority < 1 && 6938 !sc.no_cache_trim_mode && sc.cache_trim_mode_failed) { 6939 sc.no_cache_trim_mode = 1; 6940 goto restart; 6941 } 6942 6943 if (!sc.nr_reclaimed) 6944 pgdat->kswapd_failures++; 6945 6946 out: 6947 clear_reclaim_active(pgdat, highest_zoneidx); 6948 6949 /* If reclaim was boosted, account for the reclaim done in this pass */ 6950 if (boosted) { 6951 unsigned long flags; 6952 6953 for (i = 0; i <= highest_zoneidx; i++) { 6954 if (!zone_boosts[i]) 6955 continue; 6956 6957 /* Increments are under the zone lock */ 6958 zone = pgdat->node_zones + i; 6959 spin_lock_irqsave(&zone->lock, flags); 6960 zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]); 6961 spin_unlock_irqrestore(&zone->lock, flags); 6962 } 6963 6964 /* 6965 * As there is now likely space, wakeup kcompact to defragment 6966 * pageblocks. 6967 */ 6968 wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx); 6969 } 6970 6971 snapshot_refaults(NULL, pgdat); 6972 __fs_reclaim_release(_THIS_IP_); 6973 psi_memstall_leave(&pflags); 6974 set_task_reclaim_state(current, NULL); 6975 6976 /* 6977 * Return the order kswapd stopped reclaiming at as 6978 * prepare_kswapd_sleep() takes it into account. If another caller 6979 * entered the allocator slow path while kswapd was awake, order will 6980 * remain at the higher level. 6981 */ 6982 return sc.order; 6983 } 6984 6985 /* 6986 * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to 6987 * be reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is 6988 * not a valid index then either kswapd runs for first time or kswapd couldn't 6989 * sleep after previous reclaim attempt (node is still unbalanced). In that 6990 * case return the zone index of the previous kswapd reclaim cycle. 6991 */ 6992 static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat, 6993 enum zone_type prev_highest_zoneidx) 6994 { 6995 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); 6996 6997 return curr_idx == MAX_NR_ZONES ? prev_highest_zoneidx : curr_idx; 6998 } 6999 7000 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, 7001 unsigned int highest_zoneidx) 7002 { 7003 long remaining = 0; 7004 DEFINE_WAIT(wait); 7005 7006 if (freezing(current) || kthread_should_stop()) 7007 return; 7008 7009 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 7010 7011 /* 7012 * Try to sleep for a short interval. Note that kcompactd will only be 7013 * woken if it is possible to sleep for a short interval. This is 7014 * deliberate on the assumption that if reclaim cannot keep an 7015 * eligible zone balanced that it's also unlikely that compaction will 7016 * succeed. 7017 */ 7018 if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { 7019 /* 7020 * Compaction records what page blocks it recently failed to 7021 * isolate pages from and skips them in the future scanning. 7022 * When kswapd is going to sleep, it is reasonable to assume 7023 * that pages and compaction may succeed so reset the cache. 
7024 */ 7025 reset_isolation_suitable(pgdat); 7026 7027 /* 7028 * We have freed the memory, now we should compact it to make 7029 * allocation of the requested order possible. 7030 */ 7031 wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx); 7032 7033 remaining = schedule_timeout(HZ/10); 7034 7035 /* 7036 * If woken prematurely then reset kswapd_highest_zoneidx and 7037 * order. The values will either be from a wakeup request or 7038 * the previous request that slept prematurely. 7039 */ 7040 if (remaining) { 7041 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, 7042 kswapd_highest_zoneidx(pgdat, 7043 highest_zoneidx)); 7044 7045 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) 7046 WRITE_ONCE(pgdat->kswapd_order, reclaim_order); 7047 } 7048 7049 finish_wait(&pgdat->kswapd_wait, &wait); 7050 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 7051 } 7052 7053 /* 7054 * After a short sleep, check if it was a premature sleep. If not, then 7055 * go fully to sleep until explicitly woken up. 7056 */ 7057 if (!remaining && 7058 prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { 7059 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); 7060 7061 /* 7062 * vmstat counters are not perfectly accurate and the estimated 7063 * value for counters such as NR_FREE_PAGES can deviate from the 7064 * true value by nr_online_cpus * threshold. To avoid the zone 7065 * watermarks being breached while under pressure, we reduce the 7066 * per-cpu vmstat threshold while kswapd is awake and restore 7067 * them before going back to sleep. 7068 */ 7069 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 7070 7071 if (!kthread_should_stop()) 7072 schedule(); 7073 7074 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); 7075 } else { 7076 if (remaining) 7077 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); 7078 else 7079 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); 7080 } 7081 finish_wait(&pgdat->kswapd_wait, &wait); 7082 } 7083 7084 /* 7085 * The background pageout daemon, started as a kernel thread 7086 * from the init process. 7087 * 7088 * This basically trickles out pages so that we have _some_ 7089 * free memory available even if there is no other activity 7090 * that frees anything up. This is needed for things like routing 7091 * etc, where we otherwise might have all activity going on in 7092 * asynchronous contexts that cannot page things out. 7093 * 7094 * If there are applications that are active memory-allocators 7095 * (most normal use), this basically shouldn't matter. 7096 */ 7097 static int kswapd(void *p) 7098 { 7099 unsigned int alloc_order, reclaim_order; 7100 unsigned int highest_zoneidx = MAX_NR_ZONES - 1; 7101 pg_data_t *pgdat = (pg_data_t *)p; 7102 struct task_struct *tsk = current; 7103 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 7104 7105 if (!cpumask_empty(cpumask)) 7106 set_cpus_allowed_ptr(tsk, cpumask); 7107 7108 /* 7109 * Tell the memory management that we're a "memory allocator", 7110 * and that if we need more memory we should get access to it 7111 * regardless (see "__alloc_pages()"). "kswapd" should 7112 * never get caught in the normal page freeing logic. 7113 * 7114 * (Kswapd normally doesn't need memory anyway, but sometimes 7115 * you need a small amount of memory in order to be able to 7116 * page out something else, and this flag essentially protects 7117 * us from recursively trying to free more memory as we're 7118 * trying to free the first piece of memory in the first place). 
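 *
 * PF_KSWAPD marks the task as kswapd so that current_is_kswapd() checks
 * throughout reclaim and writeback treat it accordingly.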
7119 */ 7120 tsk->flags |= PF_MEMALLOC | PF_KSWAPD; 7121 set_freezable(); 7122 7123 WRITE_ONCE(pgdat->kswapd_order, 0); 7124 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); 7125 atomic_set(&pgdat->nr_writeback_throttled, 0); 7126 for ( ; ; ) { 7127 bool was_frozen; 7128 7129 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); 7130 highest_zoneidx = kswapd_highest_zoneidx(pgdat, 7131 highest_zoneidx); 7132 7133 kswapd_try_sleep: 7134 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, 7135 highest_zoneidx); 7136 7137 /* Read the new order and highest_zoneidx */ 7138 alloc_order = READ_ONCE(pgdat->kswapd_order); 7139 highest_zoneidx = kswapd_highest_zoneidx(pgdat, 7140 highest_zoneidx); 7141 WRITE_ONCE(pgdat->kswapd_order, 0); 7142 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); 7143 7144 if (kthread_freezable_should_stop(&was_frozen)) 7145 break; 7146 7147 /* 7148 * We can speed up thawing tasks if we don't call balance_pgdat 7149 * after returning from the refrigerator 7150 */ 7151 if (was_frozen) 7152 continue; 7153 7154 /* 7155 * Reclaim begins at the requested order but if a high-order 7156 * reclaim fails then kswapd falls back to reclaiming for 7157 * order-0. If that happens, kswapd will consider sleeping 7158 * for the order it finished reclaiming at (reclaim_order) 7159 * but kcompactd is woken to compact for the original 7160 * request (alloc_order). 7161 */ 7162 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, 7163 alloc_order); 7164 reclaim_order = balance_pgdat(pgdat, alloc_order, 7165 highest_zoneidx); 7166 if (reclaim_order < alloc_order) 7167 goto kswapd_try_sleep; 7168 } 7169 7170 tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD); 7171 7172 return 0; 7173 } 7174 7175 /* 7176 * A zone is low on free memory or too fragmented for high-order memory. If 7177 * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's 7178 * pgdat. It will wake up kcompactd after reclaiming memory. If kswapd reclaim 7179 * has failed or is not needed, still wake up kcompactd if only compaction is 7180 * needed. 7181 */ 7182 void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, 7183 enum zone_type highest_zoneidx) 7184 { 7185 pg_data_t *pgdat; 7186 enum zone_type curr_idx; 7187 7188 if (!managed_zone(zone)) 7189 return; 7190 7191 if (!cpuset_zone_allowed(zone, gfp_flags)) 7192 return; 7193 7194 pgdat = zone->zone_pgdat; 7195 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); 7196 7197 if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx) 7198 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); 7199 7200 if (READ_ONCE(pgdat->kswapd_order) < order) 7201 WRITE_ONCE(pgdat->kswapd_order, order); 7202 7203 if (!waitqueue_active(&pgdat->kswapd_wait)) 7204 return; 7205 7206 /* Hopeless node, leave it to direct reclaim if possible */ 7207 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || 7208 (pgdat_balanced(pgdat, order, highest_zoneidx) && 7209 !pgdat_watermark_boosted(pgdat, highest_zoneidx))) { 7210 /* 7211 * There may be plenty of free memory available, but it's too 7212 * fragmented for high-order allocations. Wake up kcompactd 7213 * and rely on compaction_suitable() to determine if it's 7214 * needed. If it fails, it will defer subsequent attempts to 7215 * ratelimit its work. 
7216 */ 7217 if (!(gfp_flags & __GFP_DIRECT_RECLAIM)) 7218 wakeup_kcompactd(pgdat, order, highest_zoneidx); 7219 return; 7220 } 7221 7222 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order, 7223 gfp_flags); 7224 wake_up_interruptible(&pgdat->kswapd_wait); 7225 } 7226 7227 #ifdef CONFIG_HIBERNATION 7228 /* 7229 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of 7230 * freed pages. 7231 * 7232 * Rather than trying to age LRUs the aim is to preserve the overall 7233 * LRU order by reclaiming preferentially 7234 * inactive > active > active referenced > active mapped 7235 */ 7236 unsigned long shrink_all_memory(unsigned long nr_to_reclaim) 7237 { 7238 struct scan_control sc = { 7239 .nr_to_reclaim = nr_to_reclaim, 7240 .gfp_mask = GFP_HIGHUSER_MOVABLE, 7241 .reclaim_idx = MAX_NR_ZONES - 1, 7242 .priority = DEF_PRIORITY, 7243 .may_writepage = 1, 7244 .may_unmap = 1, 7245 .may_swap = 1, 7246 .hibernation_mode = 1, 7247 }; 7248 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 7249 unsigned long nr_reclaimed; 7250 unsigned int noreclaim_flag; 7251 7252 fs_reclaim_acquire(sc.gfp_mask); 7253 noreclaim_flag = memalloc_noreclaim_save(); 7254 set_task_reclaim_state(current, &sc.reclaim_state); 7255 7256 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 7257 7258 set_task_reclaim_state(current, NULL); 7259 memalloc_noreclaim_restore(noreclaim_flag); 7260 fs_reclaim_release(sc.gfp_mask); 7261 7262 return nr_reclaimed; 7263 } 7264 #endif /* CONFIG_HIBERNATION */ 7265 7266 /* 7267 * This kswapd start function will be called by init and node-hot-add. 7268 */ 7269 void __meminit kswapd_run(int nid) 7270 { 7271 pg_data_t *pgdat = NODE_DATA(nid); 7272 7273 pgdat_kswapd_lock(pgdat); 7274 if (!pgdat->kswapd) { 7275 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); 7276 if (IS_ERR(pgdat->kswapd)) { 7277 /* failure at boot is fatal */ 7278 pr_err("Failed to start kswapd on node %d,ret=%ld\n", 7279 nid, PTR_ERR(pgdat->kswapd)); 7280 BUG_ON(system_state < SYSTEM_RUNNING); 7281 pgdat->kswapd = NULL; 7282 } 7283 } 7284 pgdat_kswapd_unlock(pgdat); 7285 } 7286 7287 /* 7288 * Called by memory hotplug when all memory in a node is offlined. Caller must 7289 * be holding mem_hotplug_begin/done(). 7290 */ 7291 void __meminit kswapd_stop(int nid) 7292 { 7293 pg_data_t *pgdat = NODE_DATA(nid); 7294 struct task_struct *kswapd; 7295 7296 pgdat_kswapd_lock(pgdat); 7297 kswapd = pgdat->kswapd; 7298 if (kswapd) { 7299 kthread_stop(kswapd); 7300 pgdat->kswapd = NULL; 7301 } 7302 pgdat_kswapd_unlock(pgdat); 7303 } 7304 7305 static int __init kswapd_init(void) 7306 { 7307 int nid; 7308 7309 swap_setup(); 7310 for_each_node_state(nid, N_MEMORY) 7311 kswapd_run(nid); 7312 return 0; 7313 } 7314 7315 module_init(kswapd_init) 7316 7317 #ifdef CONFIG_NUMA 7318 /* 7319 * Node reclaim mode 7320 * 7321 * If non-zero call node_reclaim when the number of free pages falls below 7322 * the watermarks. 7323 */ 7324 int node_reclaim_mode __read_mostly; 7325 7326 /* 7327 * Priority for NODE_RECLAIM. This determines the fraction of pages 7328 * of a node considered for each zone_reclaim. 4 scans 1/16th of 7329 * a zone. 7330 */ 7331 #define NODE_RECLAIM_PRIORITY 4 7332 7333 /* 7334 * Percentage of pages in a zone that must be unmapped for node_reclaim to 7335 * occur. 7336 */ 7337 int sysctl_min_unmapped_ratio = 1; 7338 7339 /* 7340 * If the number of slab pages in a zone grows beyond this percentage then 7341 * slab reclaim needs to occur. 
7342 */ 7343 int sysctl_min_slab_ratio = 5; 7344 7345 static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat) 7346 { 7347 unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED); 7348 unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) + 7349 node_page_state(pgdat, NR_ACTIVE_FILE); 7350 7351 /* 7352 * It's possible for there to be more file mapped pages than 7353 * accounted for by the pages on the file LRU lists because 7354 * tmpfs pages accounted for as ANON can also be FILE_MAPPED 7355 */ 7356 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; 7357 } 7358 7359 /* Work out how many page cache pages we can reclaim in this reclaim_mode */ 7360 static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat) 7361 { 7362 unsigned long nr_pagecache_reclaimable; 7363 unsigned long delta = 0; 7364 7365 /* 7366 * If RECLAIM_UNMAP is set, then all file pages are considered 7367 * potentially reclaimable. Otherwise, we have to worry about 7368 * pages like swapcache and node_unmapped_file_pages() provides 7369 * a better estimate 7370 */ 7371 if (node_reclaim_mode & RECLAIM_UNMAP) 7372 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES); 7373 else 7374 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat); 7375 7376 /* If we can't clean pages, remove dirty pages from consideration */ 7377 if (!(node_reclaim_mode & RECLAIM_WRITE)) 7378 delta += node_page_state(pgdat, NR_FILE_DIRTY); 7379 7380 /* Watch for any possible underflows due to delta */ 7381 if (unlikely(delta > nr_pagecache_reclaimable)) 7382 delta = nr_pagecache_reclaimable; 7383 7384 return nr_pagecache_reclaimable - delta; 7385 } 7386 7387 /* 7388 * Try to free up some pages from this node through reclaim. 7389 */ 7390 static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) 7391 { 7392 /* Minimum pages needed in order to stay on node */ 7393 const unsigned long nr_pages = 1 << order; 7394 struct task_struct *p = current; 7395 unsigned int noreclaim_flag; 7396 struct scan_control sc = { 7397 .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), 7398 .gfp_mask = current_gfp_context(gfp_mask), 7399 .order = order, 7400 .priority = NODE_RECLAIM_PRIORITY, 7401 .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE), 7402 .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP), 7403 .may_swap = 1, 7404 .reclaim_idx = gfp_zone(gfp_mask), 7405 }; 7406 unsigned long pflags; 7407 7408 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order, 7409 sc.gfp_mask); 7410 7411 cond_resched(); 7412 psi_memstall_enter(&pflags); 7413 delayacct_freepages_start(); 7414 fs_reclaim_acquire(sc.gfp_mask); 7415 /* 7416 * We need to be able to allocate from the reserves for RECLAIM_UNMAP 7417 */ 7418 noreclaim_flag = memalloc_noreclaim_save(); 7419 set_task_reclaim_state(p, &sc.reclaim_state); 7420 7421 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages || 7422 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) { 7423 /* 7424 * Free memory by calling shrink node with increasing 7425 * priorities until we have enough memory freed. 
7426 */ 7427 do { 7428 shrink_node(pgdat, &sc); 7429 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); 7430 } 7431 7432 set_task_reclaim_state(p, NULL); 7433 memalloc_noreclaim_restore(noreclaim_flag); 7434 fs_reclaim_release(sc.gfp_mask); 7435 psi_memstall_leave(&pflags); 7436 delayacct_freepages_end(); 7437 7438 trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed); 7439 7440 return sc.nr_reclaimed >= nr_pages; 7441 } 7442 7443 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) 7444 { 7445 int ret; 7446 7447 /* 7448 * Node reclaim reclaims unmapped file backed pages and 7449 * slab pages if we are over the defined limits. 7450 * 7451 * A small portion of unmapped file backed pages is needed for 7452 * file I/O otherwise pages read by file I/O will be immediately 7453 * thrown out if the node is overallocated. So we do not reclaim 7454 * if less than a specified percentage of the node is used by 7455 * unmapped file backed pages. 7456 */ 7457 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && 7458 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <= 7459 pgdat->min_slab_pages) 7460 return NODE_RECLAIM_FULL; 7461 7462 /* 7463 * Do not scan if the allocation should not be delayed. 7464 */ 7465 if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC)) 7466 return NODE_RECLAIM_NOSCAN; 7467 7468 /* 7469 * Only run node reclaim on the local node or on nodes that do not 7470 * have associated processors. This will favor the local processor 7471 * over remote processors and spread off node memory allocations 7472 * as wide as possible. 7473 */ 7474 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) 7475 return NODE_RECLAIM_NOSCAN; 7476 7477 if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) 7478 return NODE_RECLAIM_NOSCAN; 7479 7480 ret = __node_reclaim(pgdat, gfp_mask, order); 7481 clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); 7482 7483 if (!ret) 7484 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); 7485 7486 return ret; 7487 } 7488 #endif 7489 7490 /** 7491 * check_move_unevictable_folios - Move evictable folios to appropriate zone 7492 * lru list 7493 * @fbatch: Batch of lru folios to check. 7494 * 7495 * Checks folios for evictability, if an evictable folio is in the unevictable 7496 * lru list, moves it to the appropriate evictable lru list. This function 7497 * should be only used for lru folios. 7498 */ 7499 void check_move_unevictable_folios(struct folio_batch *fbatch) 7500 { 7501 struct lruvec *lruvec = NULL; 7502 int pgscanned = 0; 7503 int pgrescued = 0; 7504 int i; 7505 7506 for (i = 0; i < fbatch->nr; i++) { 7507 struct folio *folio = fbatch->folios[i]; 7508 int nr_pages = folio_nr_pages(folio); 7509 7510 pgscanned += nr_pages; 7511 7512 /* block memcg migration while the folio moves between lrus */ 7513 if (!folio_test_clear_lru(folio)) 7514 continue; 7515 7516 lruvec = folio_lruvec_relock_irq(folio, lruvec); 7517 if (folio_evictable(folio) && folio_test_unevictable(folio)) { 7518 lruvec_del_folio(lruvec, folio); 7519 folio_clear_unevictable(folio); 7520 lruvec_add_folio(lruvec, folio); 7521 pgrescued += nr_pages; 7522 } 7523 folio_set_lru(folio); 7524 } 7525 7526 if (lruvec) { 7527 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued); 7528 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned); 7529 unlock_page_lruvec_irq(lruvec); 7530 } else if (pgscanned) { 7531 count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned); 7532 } 7533 } 7534 EXPORT_SYMBOL_GPL(check_move_unevictable_folios); 7535